// SPDX-License-Identifier: GPL-2.0-only
/* binder_alloc.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2017 Google, Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/list.h>
#include <linux/sched/mm.h>
#include <linux/module.h>
#include <linux/rtmutex.h>
#include <linux/rbtree.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/list_lru.h>
#include <linux/ratelimit.h>
#include <asm/cacheflush.h>
#include <linux/uaccess.h>
#include <linux/highmem.h>
#include <linux/sizes.h>
#include "binder_alloc.h"
#include "binder_trace.h"

struct list_lru binder_alloc_lru;

static DEFINE_MUTEX(binder_alloc_mmap_lock);

enum {
        BINDER_DEBUG_USER_ERROR             = 1U << 0,
        BINDER_DEBUG_OPEN_CLOSE             = 1U << 1,
        BINDER_DEBUG_BUFFER_ALLOC           = 1U << 2,
        BINDER_DEBUG_BUFFER_ALLOC_ASYNC     = 1U << 3,
};
static uint32_t binder_alloc_debug_mask = BINDER_DEBUG_USER_ERROR;

module_param_named(debug_mask, binder_alloc_debug_mask,
                   uint, 0644);

#define binder_alloc_debug(mask, x...) \
        do { \
                if (binder_alloc_debug_mask & mask) \
                        pr_info_ratelimited(x); \
        } while (0)

static struct binder_buffer *binder_buffer_next(struct binder_buffer *buffer)
{
        return list_entry(buffer->entry.next, struct binder_buffer, entry);
}

static struct binder_buffer *binder_buffer_prev(struct binder_buffer *buffer)
{
        return list_entry(buffer->entry.prev, struct binder_buffer, entry);
}

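/*
 * Buffer sizes are not stored explicitly. Buffers are laid out
 * back-to-back in the mmap'ed region, so a buffer runs from its
 * user_data address to the start of the next buffer in the list,
 * or to the end of the region for the last buffer.
 */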
static size_t binder_alloc_buffer_size(struct binder_alloc *alloc,
                                       struct binder_buffer *buffer)
{
        if (list_is_last(&buffer->entry, &alloc->buffers))
                return alloc->buffer + alloc->buffer_size - buffer->user_data;
        return binder_buffer_next(buffer)->user_data - buffer->user_data;
}

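/*
 * The free-buffers rb tree is keyed by buffer size so that
 * binder_alloc_new_buf_locked() can do a best-fit search. Equal sizes
 * simply go to the right; lookups never require uniqueness.
 */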
static void binder_insert_free_buffer(struct binder_alloc *alloc,
                                      struct binder_buffer *new_buffer)
{
        struct rb_node **p = &alloc->free_buffers.rb_node;
        struct rb_node *parent = NULL;
        struct binder_buffer *buffer;
        size_t buffer_size;
        size_t new_buffer_size;

        BUG_ON(!new_buffer->free);

        new_buffer_size = binder_alloc_buffer_size(alloc, new_buffer);

        binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                     "%d: add free buffer, size %zd, at %pK\n",
                      alloc->pid, new_buffer_size, new_buffer);

        while (*p) {
                parent = *p;
                buffer = rb_entry(parent, struct binder_buffer, rb_node);
                BUG_ON(!buffer->free);

                buffer_size = binder_alloc_buffer_size(alloc, buffer);

                if (new_buffer_size < buffer_size)
                        p = &parent->rb_left;
                else
                        p = &parent->rb_right;
        }
        rb_link_node(&new_buffer->rb_node, parent, p);
        rb_insert_color(&new_buffer->rb_node, &alloc->free_buffers);
}

static void binder_insert_allocated_buffer_locked(
                struct binder_alloc *alloc, struct binder_buffer *new_buffer)
{
        struct rb_node **p = &alloc->allocated_buffers.rb_node;
        struct rb_node *parent = NULL;
        struct binder_buffer *buffer;

        BUG_ON(new_buffer->free);

        while (*p) {
                parent = *p;
                buffer = rb_entry(parent, struct binder_buffer, rb_node);
                BUG_ON(buffer->free);

                if (new_buffer->user_data < buffer->user_data)
                        p = &parent->rb_left;
                else if (new_buffer->user_data > buffer->user_data)
                        p = &parent->rb_right;
                else
                        BUG();
        }
        rb_link_node(&new_buffer->rb_node, parent, p);
        rb_insert_color(&new_buffer->rb_node, &alloc->allocated_buffers);
}

static struct binder_buffer *binder_alloc_prepare_to_free_locked(
                struct binder_alloc *alloc,
                uintptr_t user_ptr)
{
        struct rb_node *n = alloc->allocated_buffers.rb_node;
        struct binder_buffer *buffer;
        void __user *uptr;

        uptr = (void __user *)user_ptr;

        while (n) {
                buffer = rb_entry(n, struct binder_buffer, rb_node);
                BUG_ON(buffer->free);

                if (uptr < buffer->user_data)
                        n = n->rb_left;
                else if (uptr > buffer->user_data)
                        n = n->rb_right;
                else {
                        /*
                         * Guard against user threads attempting to
                         * free the buffer when in use by kernel or
                         * after it's already been freed.
                         */
                        if (!buffer->allow_user_free)
                                return ERR_PTR(-EPERM);
                        buffer->allow_user_free = 0;
                        return buffer;
                }
        }
        return NULL;
}

/**
 * binder_alloc_prepare_to_free() - get buffer given user ptr
 * @alloc:      binder_alloc for this proc
 * @user_ptr:   User pointer to buffer data
 *
 * Validate the userspace pointer to buffer data and return the buffer
 * corresponding to that user pointer. Searches the allocated-buffers
 * rb tree for a buffer matching the user data pointer.
 *
 * Return:      Pointer to the buffer on success, NULL if no buffer matches
 *              @user_ptr, or ERR_PTR(-EPERM) if the buffer is not currently
 *              freeable by userspace
 */
struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc,
                                                   uintptr_t user_ptr)
{
        struct binder_buffer *buffer;

        mutex_lock(&alloc->mutex);
        buffer = binder_alloc_prepare_to_free_locked(alloc, user_ptr);
        mutex_unlock(&alloc->mutex);
        return buffer;
}

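/**
 * binder_update_page_range() - allocate or reclaim pages in a buffer range
 * @alloc:    binder_alloc for this proc
 * @allocate: 1 to allocate pages, 0 to release them
 * @start:    start of the range (page-aligned userspace address)
 * @end:      end of the range
 *
 * When allocating, each page in [@start, @end) that is not yet backed is
 * either pulled back off binder_alloc_lru or freshly allocated and mapped
 * into the owning task's vma. When freeing, pages are not returned to the
 * system immediately; they are parked on binder_alloc_lru so the shrinker
 * can reclaim them lazily under memory pressure.
 *
 * Return: 0 on success, -ENOMEM or -ESRCH on failure
 */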
static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
                                    void __user *start, void __user *end)
{
        void __user *page_addr;
        unsigned long user_page_addr;
        struct binder_lru_page *page;
        struct vm_area_struct *vma = NULL;
        struct mm_struct *mm = NULL;
        bool need_mm = false;

        binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                     "%d: %s pages %pK-%pK\n", alloc->pid,
                     allocate ? "allocate" : "free", start, end);

        if (end <= start)
                return 0;

        trace_binder_update_page_range(alloc, allocate, start, end);

        if (allocate == 0)
                goto free_range;

        for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
                page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE];
                if (!page->page_ptr) {
                        need_mm = true;
                        break;
                }
        }

        if (need_mm && mmget_not_zero(alloc->vma_vm_mm))
                mm = alloc->vma_vm_mm;

        if (mm) {
                down_read(&mm->mmap_sem);
                vma = alloc->vma;
        }

        if (!vma && need_mm) {
                binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
                                   "%d: binder_alloc_buf failed to map pages in userspace, no vma\n",
                                   alloc->pid);
                goto err_no_vma;
        }

        for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
                int ret;
                bool on_lru;
                size_t index;

                index = (page_addr - alloc->buffer) / PAGE_SIZE;
                page = &alloc->pages[index];

                if (page->page_ptr) {
                        trace_binder_alloc_lru_start(alloc, index);

                        on_lru = list_lru_del(&binder_alloc_lru, &page->lru);
                        WARN_ON(!on_lru);

                        trace_binder_alloc_lru_end(alloc, index);
                        continue;
                }

                if (WARN_ON(!vma))
                        goto err_page_ptr_cleared;

                trace_binder_alloc_page_start(alloc, index);
                page->page_ptr = alloc_page(GFP_KERNEL |
                                            __GFP_HIGHMEM |
                                            __GFP_ZERO);
                if (!page->page_ptr) {
                        pr_err("%d: binder_alloc_buf failed for page at %pK\n",
                                alloc->pid, page_addr);
                        goto err_alloc_page_failed;
                }
                page->alloc = alloc;
                INIT_LIST_HEAD(&page->lru);

                user_page_addr = (uintptr_t)page_addr;
                ret = vm_insert_page(vma, user_page_addr, page[0].page_ptr);
                if (ret) {
                        pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
                               alloc->pid, user_page_addr);
                        goto err_vm_insert_page_failed;
                }

                if (index + 1 > alloc->pages_high)
                        alloc->pages_high = index + 1;

                trace_binder_alloc_page_end(alloc, index);
                /* vm_insert_page does not seem to increment the refcount */
        }
        if (mm) {
                up_read(&mm->mmap_sem);
                mmput(mm);
        }
        return 0;

free_range:
        for (page_addr = end - PAGE_SIZE; page_addr >= start;
             page_addr -= PAGE_SIZE) {
                bool ret;
                size_t index;

                index = (page_addr - alloc->buffer) / PAGE_SIZE;
                page = &alloc->pages[index];

                trace_binder_free_lru_start(alloc, index);

                ret = list_lru_add(&binder_alloc_lru, &page->lru);
                WARN_ON(!ret);

                trace_binder_free_lru_end(alloc, index);
                continue;

err_vm_insert_page_failed:
                __free_page(page->page_ptr);
                page->page_ptr = NULL;
err_alloc_page_failed:
err_page_ptr_cleared:
                ;
        }
err_no_vma:
        if (mm) {
                up_read(&mm->mmap_sem);
                mmput(mm);
        }
        return vma ? -ENOMEM : -ESRCH;
}

static inline void binder_alloc_set_vma(struct binder_alloc *alloc,
                struct vm_area_struct *vma)
{
        if (vma)
                alloc->vma_vm_mm = vma->vm_mm;
        /*
         * If we see alloc->vma is not NULL, the buffer data structures
         * are set up completely. See the smp_rmb() pairing with this in
         * binder_alloc_get_vma(). We also want to guarantee the new
         * alloc->vma_vm_mm is always visible if alloc->vma is set.
         */
        smp_wmb();
        alloc->vma = vma;
}

static inline struct vm_area_struct *binder_alloc_get_vma(
                struct binder_alloc *alloc)
{
        struct vm_area_struct *vma = NULL;

        if (alloc->vma) {
                /* Look at description in binder_alloc_set_vma */
                smp_rmb();
                vma = alloc->vma;
        }
        return vma;
}

static struct binder_buffer *binder_alloc_new_buf_locked(
                                struct binder_alloc *alloc,
                                size_t data_size,
                                size_t offsets_size,
                                size_t extra_buffers_size,
                                int is_async)
{
        struct rb_node *n = alloc->free_buffers.rb_node;
        struct binder_buffer *buffer;
        size_t buffer_size;
        struct rb_node *best_fit = NULL;
        void __user *has_page_addr;
        void __user *end_page_addr;
        size_t size, data_offsets_size;
        int ret;

        if (!binder_alloc_get_vma(alloc)) {
                binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
                                   "%d: binder_alloc_buf, no vma\n",
                                   alloc->pid);
                return ERR_PTR(-ESRCH);
        }

        data_offsets_size = ALIGN(data_size, sizeof(void *)) +
                ALIGN(offsets_size, sizeof(void *));

        if (data_offsets_size < data_size || data_offsets_size < offsets_size) {
                binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                                "%d: got transaction with invalid size %zd-%zd\n",
                                alloc->pid, data_size, offsets_size);
                return ERR_PTR(-EINVAL);
        }
        size = data_offsets_size + ALIGN(extra_buffers_size, sizeof(void *));
        if (size < data_offsets_size || size < extra_buffers_size) {
                binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                                "%d: got transaction with invalid extra_buffers_size %zd\n",
                                alloc->pid, extra_buffers_size);
                return ERR_PTR(-EINVAL);
        }
        if (is_async &&
            alloc->free_async_space < size + sizeof(struct binder_buffer)) {
                binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                             "%d: binder_alloc_buf size %zd failed, no async space left\n",
                              alloc->pid, size);
                return ERR_PTR(-ENOSPC);
        }

        /* Pad 0-size buffers so they get assigned unique addresses */
        size = max(size, sizeof(void *));

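        /*
         * Best-fit search of the free tree (keyed by buffer size): go left
         * whenever the candidate is large enough, remembering it as the
         * best fit so far, and right when it is too small. An exact size
         * match breaks out with n != NULL.
         */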
        while (n) {
                buffer = rb_entry(n, struct binder_buffer, rb_node);
                BUG_ON(!buffer->free);
                buffer_size = binder_alloc_buffer_size(alloc, buffer);

                if (size < buffer_size) {
                        best_fit = n;
                        n = n->rb_left;
                } else if (size > buffer_size)
                        n = n->rb_right;
                else {
                        best_fit = n;
                        break;
                }
        }
        if (best_fit == NULL) {
                size_t allocated_buffers = 0;
                size_t largest_alloc_size = 0;
                size_t total_alloc_size = 0;
                size_t free_buffers = 0;
                size_t largest_free_size = 0;
                size_t total_free_size = 0;

                for (n = rb_first(&alloc->allocated_buffers); n != NULL;
                     n = rb_next(n)) {
                        buffer = rb_entry(n, struct binder_buffer, rb_node);
                        buffer_size = binder_alloc_buffer_size(alloc, buffer);
                        allocated_buffers++;
                        total_alloc_size += buffer_size;
                        if (buffer_size > largest_alloc_size)
                                largest_alloc_size = buffer_size;
                }
                for (n = rb_first(&alloc->free_buffers); n != NULL;
                     n = rb_next(n)) {
                        buffer = rb_entry(n, struct binder_buffer, rb_node);
                        buffer_size = binder_alloc_buffer_size(alloc, buffer);
                        free_buffers++;
                        total_free_size += buffer_size;
                        if (buffer_size > largest_free_size)
                                largest_free_size = buffer_size;
                }
                binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
                                   "%d: binder_alloc_buf size %zd failed, no address space\n",
                                   alloc->pid, size);
                binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
                                   "allocated: %zd (num: %zd largest: %zd), free: %zd (num: %zd largest: %zd)\n",
                                   total_alloc_size, allocated_buffers,
                                   largest_alloc_size, total_free_size,
                                   free_buffers, largest_free_size);
                return ERR_PTR(-ENOSPC);
        }
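        /*
         * n == NULL means the search above ended without an exact match,
         * so best_fit points at the smallest free buffer that is still
         * large enough; load it here.
         */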
        if (n == NULL) {
                buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
                buffer_size = binder_alloc_buffer_size(alloc, buffer);
        }

        binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                     "%d: binder_alloc_buf size %zd got buffer %pK size %zd\n",
                      alloc->pid, size, buffer, buffer_size);

        has_page_addr = (void __user *)
                (((uintptr_t)buffer->user_data + buffer_size) & PAGE_MASK);
        WARN_ON(n && buffer_size != size);
        end_page_addr =
                (void __user *)PAGE_ALIGN((uintptr_t)buffer->user_data + size);
        if (end_page_addr > has_page_addr)
                end_page_addr = has_page_addr;
        ret = binder_update_page_range(alloc, 1, (void __user *)
                PAGE_ALIGN((uintptr_t)buffer->user_data), end_page_addr);
        if (ret)
                return ERR_PTR(ret);

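        /*
         * If the buffer found is larger than requested, carve the tail off
         * into a new free buffer so the remainder stays allocatable.
         */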
        if (buffer_size != size) {
                struct binder_buffer *new_buffer;

                new_buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
                if (!new_buffer) {
                        pr_err("%s: %d failed to alloc new buffer struct\n",
                               __func__, alloc->pid);
                        goto err_alloc_buf_struct_failed;
                }
                new_buffer->user_data = (u8 __user *)buffer->user_data + size;
                list_add(&new_buffer->entry, &buffer->entry);
                new_buffer->free = 1;
                binder_insert_free_buffer(alloc, new_buffer);
        }

        rb_erase(best_fit, &alloc->free_buffers);
        buffer->free = 0;
        buffer->allow_user_free = 0;
        binder_insert_allocated_buffer_locked(alloc, buffer);
        binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                     "%d: binder_alloc_buf size %zd got %pK\n",
                      alloc->pid, size, buffer);
        buffer->data_size = data_size;
        buffer->offsets_size = offsets_size;
        buffer->async_transaction = is_async;
        buffer->extra_buffers_size = extra_buffers_size;
        if (is_async) {
                alloc->free_async_space -= size + sizeof(struct binder_buffer);
                binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
                             "%d: binder_alloc_buf size %zd async free %zd\n",
                              alloc->pid, size, alloc->free_async_space);
        }
        return buffer;

err_alloc_buf_struct_failed:
        binder_update_page_range(alloc, 0, (void __user *)
                                 PAGE_ALIGN((uintptr_t)buffer->user_data),
                                 end_page_addr);
        return ERR_PTR(-ENOMEM);
}

/**
 * binder_alloc_new_buf() - Allocate a new binder buffer
 * @alloc:              binder_alloc for this proc
 * @data_size:          size of user data buffer
 * @offsets_size:       user specified buffer offset
 * @extra_buffers_size: size of extra space for meta-data (eg, security context)
 * @is_async:           buffer for async transaction
 *
 * Allocate a new buffer given the requested sizes. Returns
 * the kernel version of the buffer pointer. The size allocated
 * is the sum of the three given sizes (each rounded up to a
 * pointer-sized boundary)
 *
 * Return:      The allocated buffer or %ERR_PTR(-errno) on error
 */
struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
                                           size_t data_size,
                                           size_t offsets_size,
                                           size_t extra_buffers_size,
                                           int is_async)
{
        struct binder_buffer *buffer;

        mutex_lock(&alloc->mutex);
        buffer = binder_alloc_new_buf_locked(alloc, data_size, offsets_size,
                                             extra_buffers_size, is_async);
        mutex_unlock(&alloc->mutex);
        return buffer;
}

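/*
 * Helpers for deciding whether two adjacent buffers share a physical page:
 * buffer_start_page() gives the page containing the first byte of a buffer,
 * prev_buffer_end_page() the page containing the byte just before this
 * buffer's start, i.e. the last byte of the previous buffer.
 */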
static void __user *buffer_start_page(struct binder_buffer *buffer)
{
        return (void __user *)((uintptr_t)buffer->user_data & PAGE_MASK);
}

static void __user *prev_buffer_end_page(struct binder_buffer *buffer)
{
        return (void __user *)
                (((uintptr_t)(buffer->user_data) - 1) & PAGE_MASK);
}

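/*
 * Unlink and kfree() a free buffer that is being merged away. The page
 * containing its start is released only when no neighbour still uses it:
 * not when the previous buffer ends on that page, not when the next
 * buffer starts on it, and not when the buffer starts page-aligned.
 */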
static void binder_delete_free_buffer(struct binder_alloc *alloc,
                                      struct binder_buffer *buffer)
{
        struct binder_buffer *prev, *next = NULL;
        bool to_free = true;

        BUG_ON(alloc->buffers.next == &buffer->entry);
        prev = binder_buffer_prev(buffer);
        BUG_ON(!prev->free);
        if (prev_buffer_end_page(prev) == buffer_start_page(buffer)) {
                to_free = false;
                binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                                   "%d: merge free, buffer %pK share page with %pK\n",
                                   alloc->pid, buffer->user_data,
                                   prev->user_data);
        }

        if (!list_is_last(&buffer->entry, &alloc->buffers)) {
                next = binder_buffer_next(buffer);
                if (buffer_start_page(next) == buffer_start_page(buffer)) {
                        to_free = false;
                        binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                                           "%d: merge free, buffer %pK share page with %pK\n",
                                           alloc->pid,
                                           buffer->user_data,
                                           next->user_data);
                }
        }

        if (PAGE_ALIGNED(buffer->user_data)) {
                binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                                   "%d: merge free, buffer start %pK is page aligned\n",
                                   alloc->pid, buffer->user_data);
                to_free = false;
        }

        if (to_free) {
                binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                                   "%d: merge free, buffer %pK do not share page with %pK or %pK\n",
                                   alloc->pid, buffer->user_data,
                                   prev->user_data,
                                   next ? next->user_data : NULL);
                binder_update_page_range(alloc, 0, buffer_start_page(buffer),
                                         buffer_start_page(buffer) + PAGE_SIZE);
        }
        list_del(&buffer->entry);
        kfree(buffer);
}

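/*
 * Return the buffer's pages to the lru and mark it free, coalescing it
 * with a free next and/or previous neighbour so the list never holds two
 * adjacent free buffers. Caller must hold alloc->mutex.
 */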
static void binder_free_buf_locked(struct binder_alloc *alloc,
                                   struct binder_buffer *buffer)
{
        size_t size, buffer_size;

        buffer_size = binder_alloc_buffer_size(alloc, buffer);

        size = ALIGN(buffer->data_size, sizeof(void *)) +
                ALIGN(buffer->offsets_size, sizeof(void *)) +
                ALIGN(buffer->extra_buffers_size, sizeof(void *));

        binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                     "%d: binder_free_buf %pK size %zd buffer_size %zd\n",
                      alloc->pid, buffer, size, buffer_size);

        BUG_ON(buffer->free);
        BUG_ON(size > buffer_size);
        BUG_ON(buffer->transaction != NULL);
        BUG_ON(buffer->user_data < alloc->buffer);
        BUG_ON(buffer->user_data > alloc->buffer + alloc->buffer_size);

        if (buffer->async_transaction) {
                alloc->free_async_space += size + sizeof(struct binder_buffer);

                binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
                             "%d: binder_free_buf size %zd async free %zd\n",
                              alloc->pid, size, alloc->free_async_space);
        }

        binder_update_page_range(alloc, 0,
                (void __user *)PAGE_ALIGN((uintptr_t)buffer->user_data),
                (void __user *)(((uintptr_t)
                          buffer->user_data + buffer_size) & PAGE_MASK));

        rb_erase(&buffer->rb_node, &alloc->allocated_buffers);
        buffer->free = 1;
        if (!list_is_last(&buffer->entry, &alloc->buffers)) {
                struct binder_buffer *next = binder_buffer_next(buffer);

                if (next->free) {
                        rb_erase(&next->rb_node, &alloc->free_buffers);
                        binder_delete_free_buffer(alloc, next);
                }
        }
        if (alloc->buffers.next != &buffer->entry) {
                struct binder_buffer *prev = binder_buffer_prev(buffer);

                if (prev->free) {
                        binder_delete_free_buffer(alloc, buffer);
                        rb_erase(&prev->rb_node, &alloc->free_buffers);
                        buffer = prev;
                }
        }
        binder_insert_free_buffer(alloc, buffer);
}

/**
 * binder_alloc_free_buf() - free a binder buffer
 * @alloc:      binder_alloc for this proc
 * @buffer:     kernel pointer to buffer
 *
 * Free the buffer allocated via binder_alloc_new_buf()
 */
void binder_alloc_free_buf(struct binder_alloc *alloc,
                            struct binder_buffer *buffer)
{
        mutex_lock(&alloc->mutex);
        binder_free_buf_locked(alloc, buffer);
        mutex_unlock(&alloc->mutex);
}

/**
 * binder_alloc_mmap_handler() - map virtual address space for proc
 * @alloc:      alloc structure for this proc
 * @vma:        vma passed to mmap()
 *
 * Called by binder_mmap() to initialize the space specified in
 * vma for allocating binder buffers
 *
 * Return:
 *      0 = success
 *      -EBUSY = address space already mapped
 *      -ENOMEM = failed to map memory to given address space
 */
int binder_alloc_mmap_handler(struct binder_alloc *alloc,
                              struct vm_area_struct *vma)
{
        int ret;
        const char *failure_string;
        struct binder_buffer *buffer;

        mutex_lock(&binder_alloc_mmap_lock);
        if (alloc->buffer) {
                ret = -EBUSY;
                failure_string = "already mapped";
                goto err_already_mapped;
        }

        alloc->buffer = (void __user *)vma->vm_start;
        mutex_unlock(&binder_alloc_mmap_lock);

        alloc->buffer_size = min_t(unsigned long, vma->vm_end - vma->vm_start,
                                   SZ_4M);
        alloc->pages = kcalloc(alloc->buffer_size / PAGE_SIZE,
                               sizeof(alloc->pages[0]),
                               GFP_KERNEL);
        if (alloc->pages == NULL) {
                ret = -ENOMEM;
                failure_string = "alloc page array";
                goto err_alloc_pages_failed;
        }

        buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
        if (!buffer) {
                ret = -ENOMEM;
                failure_string = "alloc buffer struct";
                goto err_alloc_buf_struct_failed;
        }

        buffer->user_data = alloc->buffer;
        list_add(&buffer->entry, &alloc->buffers);
        buffer->free = 1;
        binder_insert_free_buffer(alloc, buffer);
        alloc->free_async_space = alloc->buffer_size / 2;
        binder_alloc_set_vma(alloc, vma);
        mmgrab(alloc->vma_vm_mm);

        return 0;

err_alloc_buf_struct_failed:
        kfree(alloc->pages);
        alloc->pages = NULL;
err_alloc_pages_failed:
        mutex_lock(&binder_alloc_mmap_lock);
        alloc->buffer = NULL;
err_already_mapped:
        mutex_unlock(&binder_alloc_mmap_lock);
        binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
                           "%s: %d %lx-%lx %s failed %d\n", __func__,
                           alloc->pid, vma->vm_start, vma->vm_end,
                           failure_string, ret);
        return ret;
}

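/**
 * binder_alloc_deferred_release() - release all remaining buffers and pages
 * @alloc:      binder_alloc for this proc
 *
 * Frees every allocated buffer (none of which may still be attached to a
 * transaction), the remaining free-buffer bookkeeping, the page array and
 * the pages themselves, then drops the reference on the owning mm. The
 * vma must already have been closed (alloc->vma is required to be NULL).
 */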
void binder_alloc_deferred_release(struct binder_alloc *alloc)
{
        struct rb_node *n;
        int buffers, page_count;
        struct binder_buffer *buffer;

        buffers = 0;
        mutex_lock(&alloc->mutex);
        BUG_ON(alloc->vma);

        while ((n = rb_first(&alloc->allocated_buffers))) {
                buffer = rb_entry(n, struct binder_buffer, rb_node);

                /* Transaction should already have been freed */
                BUG_ON(buffer->transaction);

                binder_free_buf_locked(alloc, buffer);
                buffers++;
        }

        while (!list_empty(&alloc->buffers)) {
                buffer = list_first_entry(&alloc->buffers,
                                          struct binder_buffer, entry);
                WARN_ON(!buffer->free);

                list_del(&buffer->entry);
                WARN_ON_ONCE(!list_empty(&alloc->buffers));
                kfree(buffer);
        }

        page_count = 0;
        if (alloc->pages) {
                int i;

                for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
                        void __user *page_addr;
                        bool on_lru;

                        if (!alloc->pages[i].page_ptr)
                                continue;

                        on_lru = list_lru_del(&binder_alloc_lru,
                                              &alloc->pages[i].lru);
                        page_addr = alloc->buffer + i * PAGE_SIZE;
                        binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                                     "%s: %d: page %d at %pK %s\n",
                                     __func__, alloc->pid, i, page_addr,
                                     on_lru ? "on lru" : "active");
                        __free_page(alloc->pages[i].page_ptr);
                        page_count++;
                }
                kfree(alloc->pages);
        }
        mutex_unlock(&alloc->mutex);
        if (alloc->vma_vm_mm)
                mmdrop(alloc->vma_vm_mm);

        binder_alloc_debug(BINDER_DEBUG_OPEN_CLOSE,
                     "%s: %d buffers %d, pages %d\n",
                     __func__, alloc->pid, buffers, page_count);
}

static void print_binder_buffer(struct seq_file *m, const char *prefix,
                                struct binder_buffer *buffer)
{
        seq_printf(m, "%s %d: %pK size %zd:%zd:%zd %s\n",
                   prefix, buffer->debug_id, buffer->user_data,
                   buffer->data_size, buffer->offsets_size,
                   buffer->extra_buffers_size,
                   buffer->transaction ? "active" : "delivered");
}

/**
 * binder_alloc_print_allocated() - print buffer info
 * @m:     seq_file for output via seq_printf()
 * @alloc: binder_alloc for this proc
 *
 * Prints information about every buffer associated with
 * the binder_alloc state to the given seq_file
 */
void binder_alloc_print_allocated(struct seq_file *m,
                                  struct binder_alloc *alloc)
{
        struct rb_node *n;

        mutex_lock(&alloc->mutex);
        for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
                print_binder_buffer(m, "  buffer",
                                    rb_entry(n, struct binder_buffer, rb_node));
        mutex_unlock(&alloc->mutex);
}

/**
 * binder_alloc_print_pages() - print page usage
 * @m:     seq_file for output via seq_printf()
 * @alloc: binder_alloc for this proc
 */
void binder_alloc_print_pages(struct seq_file *m,
                              struct binder_alloc *alloc)
{
        struct binder_lru_page *page;
        int i;
        int active = 0;
        int lru = 0;
        int free = 0;

        mutex_lock(&alloc->mutex);
        for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
                page = &alloc->pages[i];
                if (!page->page_ptr)
                        free++;
                else if (list_empty(&page->lru))
                        active++;
                else
                        lru++;
        }
        mutex_unlock(&alloc->mutex);
        seq_printf(m, "  pages: %d:%d:%d\n", active, lru, free);
        seq_printf(m, "  pages high watermark: %zu\n", alloc->pages_high);
}

/**
 * binder_alloc_get_allocated_count() - return count of buffers
 * @alloc: binder_alloc for this proc
 *
 * Return: count of allocated buffers
 */
int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
{
        struct rb_node *n;
        int count = 0;

        mutex_lock(&alloc->mutex);
        for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
                count++;
        mutex_unlock(&alloc->mutex);
        return count;
}

/**
 * binder_alloc_vma_close() - invalidate address space
 * @alloc: binder_alloc for this proc
 *
 * Called from binder_vma_close() when releasing address space.
 * Clears alloc->vma to prevent new incoming transactions from
 * allocating more buffers.
 */
void binder_alloc_vma_close(struct binder_alloc *alloc)
{
        binder_alloc_set_vma(alloc, NULL);
}

/**
 * binder_alloc_free_page() - shrinker callback to free pages
 * @item:   item to free
 * @lru:    list_lru instance of the item
 * @lock:   lock protecting the item
 * @cb_arg: callback argument
 *
 * Called from list_lru_walk() in binder_shrink_scan() to free
 * up pages when the system is under memory pressure.
 */
enum lru_status binder_alloc_free_page(struct list_head *item,
                                       struct list_lru_one *lru,
                                       spinlock_t *lock,
                                       void *cb_arg)
        __must_hold(lock)
{
        struct mm_struct *mm = NULL;
        struct binder_lru_page *page = container_of(item,
                                                    struct binder_lru_page,
                                                    lru);
        struct binder_alloc *alloc;
        uintptr_t page_addr;
        size_t index;
        struct vm_area_struct *vma;

        alloc = page->alloc;
        if (!mutex_trylock(&alloc->mutex))
                goto err_get_alloc_mutex_failed;

        if (!page->page_ptr)
                goto err_page_already_freed;

        index = page - alloc->pages;
        page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;

        mm = alloc->vma_vm_mm;
        if (!mmget_not_zero(mm))
                goto err_mmget;
        if (!down_read_trylock(&mm->mmap_sem))
                goto err_down_read_mmap_sem_failed;
        vma = binder_alloc_get_vma(alloc);

        list_lru_isolate(lru, item);
        spin_unlock(lock);

        if (vma) {
                trace_binder_unmap_user_start(alloc, index);

                zap_page_range(vma, page_addr, PAGE_SIZE);

                trace_binder_unmap_user_end(alloc, index);
        }
        up_read(&mm->mmap_sem);
        mmput(mm);

        trace_binder_unmap_kernel_start(alloc, index);

        __free_page(page->page_ptr);
        page->page_ptr = NULL;

        trace_binder_unmap_kernel_end(alloc, index);

        spin_lock(lock);
        mutex_unlock(&alloc->mutex);
        return LRU_REMOVED_RETRY;

err_down_read_mmap_sem_failed:
        mmput_async(mm);
err_mmget:
err_page_already_freed:
        mutex_unlock(&alloc->mutex);
err_get_alloc_mutex_failed:
        return LRU_SKIP;
}

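/*
 * Shrinker integration: count_objects() reports how many binder pages are
 * currently parked on the lru and therefore reclaimable; scan_objects()
 * walks the lru and unmaps/frees up to nr_to_scan of them via
 * binder_alloc_free_page().
 */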
static unsigned long
binder_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
        unsigned long ret = list_lru_count(&binder_alloc_lru);

        return ret;
}

static unsigned long
binder_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
        unsigned long ret;

        ret = list_lru_walk(&binder_alloc_lru, binder_alloc_free_page,
                            NULL, sc->nr_to_scan);
        return ret;
}

static struct shrinker binder_shrinker = {
        .count_objects = binder_shrink_count,
        .scan_objects = binder_shrink_scan,
        .seeks = DEFAULT_SEEKS,
};

/**
 * binder_alloc_init() - called by binder_open() for per-proc initialization
 * @alloc: binder_alloc for this proc
 *
 * Called from binder_open() to initialize binder_alloc fields for
 * new binder proc
 */
void binder_alloc_init(struct binder_alloc *alloc)
{
        alloc->pid = current->group_leader->pid;
        mutex_init(&alloc->mutex);
        INIT_LIST_HEAD(&alloc->buffers);
}

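/*
 * One-time global initialization: set up the lru of reclaimable pages and
 * register the shrinker that feeds on it. If shrinker registration fails,
 * the lru is torn down again and the error is propagated to the caller.
 */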
int binder_alloc_shrinker_init(void)
{
        int ret = list_lru_init(&binder_alloc_lru);

        if (ret == 0) {
                ret = register_shrinker(&binder_shrinker);
                if (ret)
                        list_lru_destroy(&binder_alloc_lru);
        }
        return ret;
}

/**
 * check_buffer() - verify that buffer/offset is safe to access
 * @alloc: binder_alloc for this proc
 * @buffer: binder buffer to be accessed
 * @offset: offset into @buffer data
 * @bytes: bytes to access from offset
 *
 * Check that the @offset/@bytes are within the size of the given
 * @buffer and that the buffer is currently active and not freeable.
 * Offsets must also be multiples of sizeof(u32). The kernel is
 * allowed to touch the buffer in two cases:
 *
 * 1) when the buffer is being created:
 *     (buffer->free == 0 && buffer->allow_user_free == 0)
 * 2) when the buffer is being torn down:
 *     (buffer->free == 0 && buffer->transaction == NULL).
 *
 * Return: true if the buffer is safe to access
 */
static inline bool check_buffer(struct binder_alloc *alloc,
                                struct binder_buffer *buffer,
                                binder_size_t offset, size_t bytes)
{
        size_t buffer_size = binder_alloc_buffer_size(alloc, buffer);

        return buffer_size >= bytes &&
                offset <= buffer_size - bytes &&
                IS_ALIGNED(offset, sizeof(u32)) &&
                !buffer->free &&
                (!buffer->allow_user_free || !buffer->transaction);
}

/**
 * binder_alloc_get_page() - get kernel pointer for given buffer offset
 * @alloc: binder_alloc for this proc
 * @buffer: binder buffer to be accessed
 * @buffer_offset: offset into @buffer data
 * @pgoffp: address to copy final page offset to
 *
 * Lookup the struct page corresponding to the address
 * at @buffer_offset into @buffer->user_data. The byte offset
 * into that page is written to @pgoffp.
 *
 * The caller is responsible for ensuring that the offset points
 * to a valid address within the @buffer and that @buffer is
 * not freeable by the user. Since it can't be freed, we are
 * guaranteed that the corresponding elements of @alloc->pages[]
 * cannot change.
 *
 * Return: struct page
 */
static struct page *binder_alloc_get_page(struct binder_alloc *alloc,
                                          struct binder_buffer *buffer,
                                          binder_size_t buffer_offset,
                                          pgoff_t *pgoffp)
{
        binder_size_t buffer_space_offset = buffer_offset +
                (buffer->user_data - alloc->buffer);
        pgoff_t pgoff = buffer_space_offset & ~PAGE_MASK;
        size_t index = buffer_space_offset >> PAGE_SHIFT;
        struct binder_lru_page *lru_page;

        lru_page = &alloc->pages[index];
        *pgoffp = pgoff;
        return lru_page->page_ptr;
}

/**
 * binder_alloc_copy_user_to_buffer() - copy src user to tgt user
 * @alloc: binder_alloc for this proc
 * @buffer: binder buffer to be accessed
 * @buffer_offset: offset into @buffer data
 * @from: userspace pointer to source buffer
 * @bytes: bytes to copy
 *
 * Copy bytes from source userspace to target buffer.
 *
 * Return: bytes remaining to be copied
 */
unsigned long
binder_alloc_copy_user_to_buffer(struct binder_alloc *alloc,
                                 struct binder_buffer *buffer,
                                 binder_size_t buffer_offset,
                                 const void __user *from,
                                 size_t bytes)
{
        if (!check_buffer(alloc, buffer, buffer_offset, bytes))
                return bytes;

        while (bytes) {
                unsigned long size;
                unsigned long ret;
                struct page *page;
                pgoff_t pgoff;
                void *kptr;

                page = binder_alloc_get_page(alloc, buffer,
                                             buffer_offset, &pgoff);
                size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
                kptr = kmap(page) + pgoff;
                ret = copy_from_user(kptr, from, size);
                kunmap(page);
                if (ret)
                        return bytes - size + ret;
                bytes -= size;
                from += size;
                buffer_offset += size;
        }
        return 0;
}

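/*
 * Copy between a kernel buffer and a binder buffer, page by page.
 * @to_buffer selects the direction: true copies from @ptr into the binder
 * buffer, false copies out of the binder buffer into @ptr. Pages are
 * mapped with kmap_atomic(), so the copy must not sleep.
 */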
static int binder_alloc_do_buffer_copy(struct binder_alloc *alloc,
                                       bool to_buffer,
                                       struct binder_buffer *buffer,
                                       binder_size_t buffer_offset,
                                       void *ptr,
                                       size_t bytes)
{
        /* All copies must be 32-bit aligned and 32-bit size */
        if (!check_buffer(alloc, buffer, buffer_offset, bytes))
                return -EINVAL;

        while (bytes) {
                unsigned long size;
                struct page *page;
                pgoff_t pgoff;
                void *tmpptr;
                void *base_ptr;

                page = binder_alloc_get_page(alloc, buffer,
                                             buffer_offset, &pgoff);
                size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
                base_ptr = kmap_atomic(page);
                tmpptr = base_ptr + pgoff;
                if (to_buffer)
                        memcpy(tmpptr, ptr, size);
                else
                        memcpy(ptr, tmpptr, size);
                /*
                 * kunmap_atomic() takes care of flushing the cache
                 * if this device has VIVT cache arch
                 */
                kunmap_atomic(base_ptr);
                bytes -= size;
                pgoff = 0;
                ptr = ptr + size;
                buffer_offset += size;
        }
        return 0;
}

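/*
 * Direction-specific wrappers around binder_alloc_do_buffer_copy():
 * binder_alloc_copy_to_buffer() writes @src into the binder buffer,
 * binder_alloc_copy_from_buffer() reads from it into @dest.
 */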
int binder_alloc_copy_to_buffer(struct binder_alloc *alloc,
                                struct binder_buffer *buffer,
                                binder_size_t buffer_offset,
                                void *src,
                                size_t bytes)
{
        return binder_alloc_do_buffer_copy(alloc, true, buffer, buffer_offset,
                                           src, bytes);
}

int binder_alloc_copy_from_buffer(struct binder_alloc *alloc,
                                  void *dest,
                                  struct binder_buffer *buffer,
                                  binder_size_t buffer_offset,
                                  size_t bytes)
{
        return binder_alloc_do_buffer_copy(alloc, false, buffer, buffer_offset,
                                           dest, bytes);
}