// SPDX-License-Identifier: GPL-2.0-only
/* binder_alloc.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2017 Google, Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/list.h>
#include <linux/sched/mm.h>
#include <linux/module.h>
#include <linux/rtmutex.h>
#include <linux/rbtree.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/list_lru.h>
#include <linux/ratelimit.h>
#include <asm/cacheflush.h>
#include <linux/uaccess.h>
#include <linux/highmem.h>
#include <linux/sizes.h>
#include "binder_alloc.h"
#include "binder_trace.h"

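/*
 * Global LRU of binder pages that are currently unused; the shrinker
 * walks this list to reclaim pages under memory pressure.
 */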
struct list_lru binder_alloc_lru;

static DEFINE_MUTEX(binder_alloc_mmap_lock);

enum {
        BINDER_DEBUG_USER_ERROR             = 1U << 0,
        BINDER_DEBUG_OPEN_CLOSE             = 1U << 1,
        BINDER_DEBUG_BUFFER_ALLOC           = 1U << 2,
        BINDER_DEBUG_BUFFER_ALLOC_ASYNC     = 1U << 3,
};
static uint32_t binder_alloc_debug_mask = BINDER_DEBUG_USER_ERROR;

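/*
 * debug_mask selects which binder_alloc_debug() classes are printed.
 * Because the parameter is declared with mode 0644, it can usually be
 * changed at runtime via sysfs without rebuilding, e.g. (the exact
 * path depends on how the driver is built):
 *
 *   echo 0x7 > /sys/module/binder_alloc/parameters/debug_mask
 */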
module_param_named(debug_mask, binder_alloc_debug_mask,
                   uint, 0644);

#define binder_alloc_debug(mask, x...) \
        do { \
                if (binder_alloc_debug_mask & mask) \
                        pr_info_ratelimited(x); \
        } while (0)

static struct binder_buffer *binder_buffer_next(struct binder_buffer *buffer)
{
        return list_entry(buffer->entry.next, struct binder_buffer, entry);
}

static struct binder_buffer *binder_buffer_prev(struct binder_buffer *buffer)
{
        return list_entry(buffer->entry.prev, struct binder_buffer, entry);
}

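/*
 * A buffer's size is not stored in struct binder_buffer: it is implied
 * by the gap between its user_data and the next buffer's user_data in
 * the alloc->buffers list (or the end of the mapping for the last
 * buffer).
 */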
static size_t binder_alloc_buffer_size(struct binder_alloc *alloc,
                                       struct binder_buffer *buffer)
{
        if (list_is_last(&buffer->entry, &alloc->buffers))
                return alloc->buffer + alloc->buffer_size - buffer->user_data;
        return binder_buffer_next(buffer)->user_data - buffer->user_data;
}

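/*
 * Free buffers live in an rb-tree keyed by size, which makes the
 * best-fit lookup in binder_alloc_new_buf_locked() a simple tree walk.
 * Equal-sized buffers are inserted to the right, so duplicates are
 * allowed.
 */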
static void binder_insert_free_buffer(struct binder_alloc *alloc,
                                      struct binder_buffer *new_buffer)
{
        struct rb_node **p = &alloc->free_buffers.rb_node;
        struct rb_node *parent = NULL;
        struct binder_buffer *buffer;
        size_t buffer_size;
        size_t new_buffer_size;

        BUG_ON(!new_buffer->free);

        new_buffer_size = binder_alloc_buffer_size(alloc, new_buffer);

        binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                     "%d: add free buffer, size %zd, at %pK\n",
                      alloc->pid, new_buffer_size, new_buffer);

        while (*p) {
                parent = *p;
                buffer = rb_entry(parent, struct binder_buffer, rb_node);
                BUG_ON(!buffer->free);

                buffer_size = binder_alloc_buffer_size(alloc, buffer);

                if (new_buffer_size < buffer_size)
                        p = &parent->rb_left;
                else
                        p = &parent->rb_right;
        }
        rb_link_node(&new_buffer->rb_node, parent, p);
        rb_insert_color(&new_buffer->rb_node, &alloc->free_buffers);
}

static void binder_insert_allocated_buffer_locked(
                struct binder_alloc *alloc, struct binder_buffer *new_buffer)
{
        struct rb_node **p = &alloc->allocated_buffers.rb_node;
        struct rb_node *parent = NULL;
        struct binder_buffer *buffer;

        BUG_ON(new_buffer->free);

        while (*p) {
                parent = *p;
                buffer = rb_entry(parent, struct binder_buffer, rb_node);
                BUG_ON(buffer->free);

                if (new_buffer->user_data < buffer->user_data)
                        p = &parent->rb_left;
                else if (new_buffer->user_data > buffer->user_data)
                        p = &parent->rb_right;
                else
                        BUG();
        }
        rb_link_node(&new_buffer->rb_node, parent, p);
        rb_insert_color(&new_buffer->rb_node, &alloc->allocated_buffers);
}

static struct binder_buffer *binder_alloc_prepare_to_free_locked(
                struct binder_alloc *alloc,
                uintptr_t user_ptr)
{
        struct rb_node *n = alloc->allocated_buffers.rb_node;
        struct binder_buffer *buffer;
        void __user *uptr;

        uptr = (void __user *)user_ptr;

        while (n) {
                buffer = rb_entry(n, struct binder_buffer, rb_node);
                BUG_ON(buffer->free);

                if (uptr < buffer->user_data)
                        n = n->rb_left;
                else if (uptr > buffer->user_data)
                        n = n->rb_right;
                else {
                        /*
                         * Guard against user threads attempting to
                         * free the buffer when in use by kernel or
                         * after it's already been freed.
                         */
                        if (!buffer->allow_user_free)
                                return ERR_PTR(-EPERM);
                        buffer->allow_user_free = 0;
                        return buffer;
                }
        }
        return NULL;
}

/**
 * binder_alloc_prepare_to_free() - get buffer given user ptr
 * @alloc:      binder_alloc for this proc
 * @user_ptr:   User pointer to buffer data
 *
 * Validate the userspace pointer to buffer data and return the buffer
 * corresponding to that user pointer by searching the rb tree of
 * allocated buffers.
 *
 * Return:      Pointer to the buffer, NULL if no buffer matches
 *              @user_ptr, or ERR_PTR(-EPERM) if the buffer may not be
 *              freed by userspace at this time
 */
struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc,
                                                   uintptr_t user_ptr)
{
        struct binder_buffer *buffer;

        mutex_lock(&alloc->mutex);
        buffer = binder_alloc_prepare_to_free_locked(alloc, user_ptr);
        mutex_unlock(&alloc->mutex);
        return buffer;
}

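/*
 * Allocate or free the physical pages backing the range [start, end)
 * of the binder buffer space. Newly allocated pages are mapped into
 * the owning process's VMA with vm_insert_page(); freed pages are not
 * released immediately but parked on binder_alloc_lru so the shrinker
 * can reclaim them lazily.
 */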
static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
                                    void __user *start, void __user *end)
{
        void __user *page_addr;
        unsigned long user_page_addr;
        struct binder_lru_page *page;
        struct vm_area_struct *vma = NULL;
        struct mm_struct *mm = NULL;
        bool need_mm = false;

        binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                     "%d: %s pages %pK-%pK\n", alloc->pid,
                     allocate ? "allocate" : "free", start, end);

        if (end <= start)
                return 0;

        trace_binder_update_page_range(alloc, allocate, start, end);

        if (allocate == 0)
                goto free_range;

        for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
                page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE];
                if (!page->page_ptr) {
                        need_mm = true;
                        break;
                }
        }

        if (need_mm && mmget_not_zero(alloc->vma_vm_mm))
                mm = alloc->vma_vm_mm;

        if (mm) {
                mmap_read_lock(mm);
                vma = alloc->vma;
        }

        if (!vma && need_mm) {
                binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
                                   "%d: binder_alloc_buf failed to map pages in userspace, no vma\n",
                                   alloc->pid);
                goto err_no_vma;
        }

        for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
                int ret;
                bool on_lru;
                size_t index;

                index = (page_addr - alloc->buffer) / PAGE_SIZE;
                page = &alloc->pages[index];

                if (page->page_ptr) {
                        trace_binder_alloc_lru_start(alloc, index);

                        on_lru = list_lru_del(&binder_alloc_lru, &page->lru);
                        WARN_ON(!on_lru);

                        trace_binder_alloc_lru_end(alloc, index);
                        continue;
                }

                if (WARN_ON(!vma))
                        goto err_page_ptr_cleared;

                trace_binder_alloc_page_start(alloc, index);
                page->page_ptr = alloc_page(GFP_KERNEL |
                                            __GFP_HIGHMEM |
                                            __GFP_ZERO);
                if (!page->page_ptr) {
                        pr_err("%d: binder_alloc_buf failed for page at %pK\n",
                                alloc->pid, page_addr);
                        goto err_alloc_page_failed;
                }
                page->alloc = alloc;
                INIT_LIST_HEAD(&page->lru);

                user_page_addr = (uintptr_t)page_addr;
                ret = vm_insert_page(vma, user_page_addr, page[0].page_ptr);
                if (ret) {
                        pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
                               alloc->pid, user_page_addr);
                        goto err_vm_insert_page_failed;
                }

                if (index + 1 > alloc->pages_high)
                        alloc->pages_high = index + 1;

                trace_binder_alloc_page_end(alloc, index);
        }
        if (mm) {
                mmap_read_unlock(mm);
                mmput(mm);
        }
        return 0;

free_range:
        for (page_addr = end - PAGE_SIZE; 1; page_addr -= PAGE_SIZE) {
                bool ret;
                size_t index;

                index = (page_addr - alloc->buffer) / PAGE_SIZE;
                page = &alloc->pages[index];

                trace_binder_free_lru_start(alloc, index);

                ret = list_lru_add(&binder_alloc_lru, &page->lru);
                WARN_ON(!ret);

                trace_binder_free_lru_end(alloc, index);
                if (page_addr == start)
                        break;
                continue;

err_vm_insert_page_failed:
                __free_page(page->page_ptr);
                page->page_ptr = NULL;
err_alloc_page_failed:
err_page_ptr_cleared:
                if (page_addr == start)
                        break;
        }
err_no_vma:
        if (mm) {
                mmap_read_unlock(mm);
                mmput(mm);
        }
        return vma ? -ENOMEM : -ESRCH;
}

static inline void binder_alloc_set_vma(struct binder_alloc *alloc,
                struct vm_area_struct *vma)
{
        if (vma)
                alloc->vma_vm_mm = vma->vm_mm;
        /*
         * If alloc->vma is observed to be non-NULL, the buffer data
         * structures have been fully set up; see the pairing smp_rmb()
         * in binder_alloc_get_vma(). We also want to guarantee the new
         * alloc->vma_vm_mm is always visible if alloc->vma is set.
         */
        smp_wmb();
        alloc->vma = vma;
}

static inline struct vm_area_struct *binder_alloc_get_vma(
                struct binder_alloc *alloc)
{
        struct vm_area_struct *vma = NULL;

        if (alloc->vma) {
                /* Look at description in binder_alloc_set_vma */
                smp_rmb();
                vma = alloc->vma;
        }
        return vma;
}

static void debug_low_async_space_locked(struct binder_alloc *alloc, int pid)
{
        /*
         * Find the number and total size of buffers allocated by the current
         * caller. The idea is that once we cross the threshold, whoever is
         * responsible for the low async space is likely to try to send
         * another async txn, and at some point we'll catch them in the act.
         * This is more efficient than keeping a map per pid.
         */
        struct rb_node *n;
        struct binder_buffer *buffer;
        size_t total_alloc_size = 0;
        size_t num_buffers = 0;

        for (n = rb_first(&alloc->allocated_buffers); n != NULL;
                 n = rb_next(n)) {
                buffer = rb_entry(n, struct binder_buffer, rb_node);
                if (buffer->pid != pid)
                        continue;
                if (!buffer->async_transaction)
                        continue;
                total_alloc_size += binder_alloc_buffer_size(alloc, buffer)
                        + sizeof(struct binder_buffer);
                num_buffers++;
        }

        /*
         * Warn if this pid has more than 50 transactions, or more than 50% of
         * async space (which is 25% of total buffer size).
         */
        if (num_buffers > 50 || total_alloc_size > alloc->buffer_size / 4) {
                binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
                             "%d: pid %d spamming oneway? %zd buffers allocated for a total size of %zd\n",
                              alloc->pid, pid, num_buffers, total_alloc_size);
        }
}

static struct binder_buffer *binder_alloc_new_buf_locked(
                                struct binder_alloc *alloc,
                                size_t data_size,
                                size_t offsets_size,
                                size_t extra_buffers_size,
                                int is_async,
                                int pid)
{
        struct rb_node *n = alloc->free_buffers.rb_node;
        struct binder_buffer *buffer;
        size_t buffer_size;
        struct rb_node *best_fit = NULL;
        void __user *has_page_addr;
        void __user *end_page_addr;
        size_t size, data_offsets_size;
        int ret;

        if (!binder_alloc_get_vma(alloc)) {
                binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
                                   "%d: binder_alloc_buf, no vma\n",
                                   alloc->pid);
                return ERR_PTR(-ESRCH);
        }

        data_offsets_size = ALIGN(data_size, sizeof(void *)) +
                ALIGN(offsets_size, sizeof(void *));

        if (data_offsets_size < data_size || data_offsets_size < offsets_size) {
                binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                                "%d: got transaction with invalid size %zd-%zd\n",
                                alloc->pid, data_size, offsets_size);
                return ERR_PTR(-EINVAL);
        }
        size = data_offsets_size + ALIGN(extra_buffers_size, sizeof(void *));
        if (size < data_offsets_size || size < extra_buffers_size) {
                binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                                "%d: got transaction with invalid extra_buffers_size %zd\n",
                                alloc->pid, extra_buffers_size);
                return ERR_PTR(-EINVAL);
        }
        if (is_async &&
            alloc->free_async_space < size + sizeof(struct binder_buffer)) {
                binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                             "%d: binder_alloc_buf size %zd failed, no async space left\n",
                              alloc->pid, size);
                return ERR_PTR(-ENOSPC);
        }

        /* Pad 0-size buffers so they get assigned unique addresses */
        size = max(size, sizeof(void *));

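        /*
         * Best-fit search: walk the size-ordered free tree, remembering
         * the smallest buffer that is still large enough, and stop early
         * on an exact size match.
         */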
        while (n) {
                buffer = rb_entry(n, struct binder_buffer, rb_node);
                BUG_ON(!buffer->free);
                buffer_size = binder_alloc_buffer_size(alloc, buffer);

                if (size < buffer_size) {
                        best_fit = n;
                        n = n->rb_left;
                } else if (size > buffer_size)
                        n = n->rb_right;
                else {
                        best_fit = n;
                        break;
                }
        }
        if (best_fit == NULL) {
                size_t allocated_buffers = 0;
                size_t largest_alloc_size = 0;
                size_t total_alloc_size = 0;
                size_t free_buffers = 0;
                size_t largest_free_size = 0;
                size_t total_free_size = 0;

                for (n = rb_first(&alloc->allocated_buffers); n != NULL;
                     n = rb_next(n)) {
                        buffer = rb_entry(n, struct binder_buffer, rb_node);
                        buffer_size = binder_alloc_buffer_size(alloc, buffer);
                        allocated_buffers++;
                        total_alloc_size += buffer_size;
                        if (buffer_size > largest_alloc_size)
                                largest_alloc_size = buffer_size;
                }
                for (n = rb_first(&alloc->free_buffers); n != NULL;
                     n = rb_next(n)) {
                        buffer = rb_entry(n, struct binder_buffer, rb_node);
                        buffer_size = binder_alloc_buffer_size(alloc, buffer);
                        free_buffers++;
                        total_free_size += buffer_size;
                        if (buffer_size > largest_free_size)
                                largest_free_size = buffer_size;
                }
                binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
                                   "%d: binder_alloc_buf size %zd failed, no address space\n",
                                   alloc->pid, size);
                binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
                                   "allocated: %zd (num: %zd largest: %zd), free: %zd (num: %zd largest: %zd)\n",
                                   total_alloc_size, allocated_buffers,
                                   largest_alloc_size, total_free_size,
                                   free_buffers, largest_free_size);
                return ERR_PTR(-ENOSPC);
        }
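        /*
         * n is only non-NULL when the loop broke on an exact size match;
         * otherwise pick up the smallest free buffer that was big enough.
         */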
        if (n == NULL) {
                buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
                buffer_size = binder_alloc_buffer_size(alloc, buffer);
        }

        binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                     "%d: binder_alloc_buf size %zd got buffer %pK size %zd\n",
                      alloc->pid, size, buffer, buffer_size);

        has_page_addr = (void __user *)
                (((uintptr_t)buffer->user_data + buffer_size) & PAGE_MASK);
        WARN_ON(n && buffer_size != size);
        end_page_addr =
                (void __user *)PAGE_ALIGN((uintptr_t)buffer->user_data + size);
        if (end_page_addr > has_page_addr)
                end_page_addr = has_page_addr;
        ret = binder_update_page_range(alloc, 1, (void __user *)
                PAGE_ALIGN((uintptr_t)buffer->user_data), end_page_addr);
        if (ret)
                return ERR_PTR(ret);

        if (buffer_size != size) {
                struct binder_buffer *new_buffer;

                new_buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
                if (!new_buffer) {
                        pr_err("%s: %d failed to alloc new buffer struct\n",
                               __func__, alloc->pid);
                        goto err_alloc_buf_struct_failed;
                }
                new_buffer->user_data = (u8 __user *)buffer->user_data + size;
                list_add(&new_buffer->entry, &buffer->entry);
                new_buffer->free = 1;
                binder_insert_free_buffer(alloc, new_buffer);
        }

        rb_erase(best_fit, &alloc->free_buffers);
        buffer->free = 0;
        buffer->allow_user_free = 0;
        binder_insert_allocated_buffer_locked(alloc, buffer);
        binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                     "%d: binder_alloc_buf size %zd got %pK\n",
                      alloc->pid, size, buffer);
        buffer->data_size = data_size;
        buffer->offsets_size = offsets_size;
        buffer->async_transaction = is_async;
        buffer->extra_buffers_size = extra_buffers_size;
        buffer->pid = pid;
        if (is_async) {
                alloc->free_async_space -= size + sizeof(struct binder_buffer);
                binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
                             "%d: binder_alloc_buf size %zd async free %zd\n",
                              alloc->pid, size, alloc->free_async_space);
                if (alloc->free_async_space < alloc->buffer_size / 10) {
                        /*
                         * Start detecting spammers once we have less than 20%
                         * of async space left (which is less than 10% of total
                         * buffer size).
                         */
                        debug_low_async_space_locked(alloc, pid);
                }
        }
        return buffer;

err_alloc_buf_struct_failed:
        binder_update_page_range(alloc, 0, (void __user *)
                                 PAGE_ALIGN((uintptr_t)buffer->user_data),
                                 end_page_addr);
        return ERR_PTR(-ENOMEM);
}

/**
 * binder_alloc_new_buf() - Allocate a new binder buffer
 * @alloc:              binder_alloc for this proc
 * @data_size:          size of user data buffer
 * @offsets_size:       size of the array of object offsets in the buffer
 * @extra_buffers_size: size of extra space for meta-data (eg, security context)
 * @is_async:           buffer for async transaction
 * @pid:                pid to attribute allocation to (used for debugging)
 *
 * Allocate a new buffer given the requested sizes. Returns
 * the kernel version of the buffer pointer. The size allocated
 * is the sum of the three given sizes (each rounded up to a
 * pointer-sized boundary).
 *
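 * A minimal caller sketch (assuming a struct binder_transaction_data
 * @tr, roughly as in binder_transaction()):
 *
 *   buffer = binder_alloc_new_buf(alloc, tr->data_size, tr->offsets_size,
 *                                 0, !!(tr->flags & TF_ONE_WAY), pid);
 *   if (IS_ERR(buffer))
 *           return PTR_ERR(buffer);
 *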
 * Return:      The allocated buffer or ERR_PTR(-errno) on error
 */
struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
                                           size_t data_size,
                                           size_t offsets_size,
                                           size_t extra_buffers_size,
                                           int is_async,
                                           int pid)
{
        struct binder_buffer *buffer;

        mutex_lock(&alloc->mutex);
        buffer = binder_alloc_new_buf_locked(alloc, data_size, offsets_size,
                                             extra_buffers_size, is_async, pid);
        mutex_unlock(&alloc->mutex);
        return buffer;
}

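/*
 * Helpers for binder_delete_free_buffer(): a backing page may only be
 * released once no neighbouring buffer still starts or ends on it.
 */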
static void __user *buffer_start_page(struct binder_buffer *buffer)
{
        return (void __user *)((uintptr_t)buffer->user_data & PAGE_MASK);
}

static void __user *prev_buffer_end_page(struct binder_buffer *buffer)
{
        return (void __user *)
                (((uintptr_t)(buffer->user_data) - 1) & PAGE_MASK);
}

static void binder_delete_free_buffer(struct binder_alloc *alloc,
                                      struct binder_buffer *buffer)
{
        struct binder_buffer *prev, *next = NULL;
        bool to_free = true;

        BUG_ON(alloc->buffers.next == &buffer->entry);
        prev = binder_buffer_prev(buffer);
        BUG_ON(!prev->free);
        if (prev_buffer_end_page(prev) == buffer_start_page(buffer)) {
                to_free = false;
                binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                                   "%d: merge free, buffer %pK share page with %pK\n",
                                   alloc->pid, buffer->user_data,
                                   prev->user_data);
        }

        if (!list_is_last(&buffer->entry, &alloc->buffers)) {
                next = binder_buffer_next(buffer);
                if (buffer_start_page(next) == buffer_start_page(buffer)) {
                        to_free = false;
                        binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                                           "%d: merge free, buffer %pK share page with %pK\n",
                                           alloc->pid,
                                           buffer->user_data,
                                           next->user_data);
                }
        }

        if (PAGE_ALIGNED(buffer->user_data)) {
                binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                                   "%d: merge free, buffer start %pK is page aligned\n",
                                   alloc->pid, buffer->user_data);
                to_free = false;
        }

        if (to_free) {
                binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                                   "%d: merge free, buffer %pK do not share page with %pK or %pK\n",
                                   alloc->pid, buffer->user_data,
                                   prev->user_data,
                                   next ? next->user_data : NULL);
                binder_update_page_range(alloc, 0, buffer_start_page(buffer),
                                         buffer_start_page(buffer) + PAGE_SIZE);
        }
        list_del(&buffer->entry);
        kfree(buffer);
}

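/*
 * Return a buffer to the free tree, merging it with free neighbours in
 * the address-ordered list so that adjacent free space coalesces into a
 * single buffer.
 */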
static void binder_free_buf_locked(struct binder_alloc *alloc,
                                   struct binder_buffer *buffer)
{
        size_t size, buffer_size;

        buffer_size = binder_alloc_buffer_size(alloc, buffer);

        size = ALIGN(buffer->data_size, sizeof(void *)) +
                ALIGN(buffer->offsets_size, sizeof(void *)) +
                ALIGN(buffer->extra_buffers_size, sizeof(void *));

        binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                     "%d: binder_free_buf %pK size %zd buffer_size %zd\n",
                      alloc->pid, buffer, size, buffer_size);

        BUG_ON(buffer->free);
        BUG_ON(size > buffer_size);
        BUG_ON(buffer->transaction != NULL);
        BUG_ON(buffer->user_data < alloc->buffer);
        BUG_ON(buffer->user_data > alloc->buffer + alloc->buffer_size);

        if (buffer->async_transaction) {
                alloc->free_async_space += size + sizeof(struct binder_buffer);

                binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
                             "%d: binder_free_buf size %zd async free %zd\n",
                              alloc->pid, size, alloc->free_async_space);
        }

        binder_update_page_range(alloc, 0,
                (void __user *)PAGE_ALIGN((uintptr_t)buffer->user_data),
                (void __user *)(((uintptr_t)
                          buffer->user_data + buffer_size) & PAGE_MASK));

        rb_erase(&buffer->rb_node, &alloc->allocated_buffers);
        buffer->free = 1;
        if (!list_is_last(&buffer->entry, &alloc->buffers)) {
                struct binder_buffer *next = binder_buffer_next(buffer);

                if (next->free) {
                        rb_erase(&next->rb_node, &alloc->free_buffers);
                        binder_delete_free_buffer(alloc, next);
                }
        }
        if (alloc->buffers.next != &buffer->entry) {
                struct binder_buffer *prev = binder_buffer_prev(buffer);

                if (prev->free) {
                        binder_delete_free_buffer(alloc, buffer);
                        rb_erase(&prev->rb_node, &alloc->free_buffers);
                        buffer = prev;
                }
        }
        binder_insert_free_buffer(alloc, buffer);
}

static void binder_alloc_clear_buf(struct binder_alloc *alloc,
                                   struct binder_buffer *buffer);
/**
 * binder_alloc_free_buf() - free a binder buffer
 * @alloc:      binder_alloc for this proc
 * @buffer:     kernel pointer to buffer
 *
 * Free the buffer allocated via binder_alloc_new_buf()
 */
void binder_alloc_free_buf(struct binder_alloc *alloc,
                            struct binder_buffer *buffer)
{
        /*
         * We could eliminate the call to binder_alloc_clear_buf()
         * from binder_alloc_deferred_release() by moving this to
         * binder_alloc_free_buf_locked(). However, that could
         * increase contention for the alloc mutex if clear_on_free
         * is used frequently for large buffers. The mutex is not
         * needed for correctness here.
         */
        if (buffer->clear_on_free) {
                binder_alloc_clear_buf(alloc, buffer);
                buffer->clear_on_free = false;
        }
        mutex_lock(&alloc->mutex);
        binder_free_buf_locked(alloc, buffer);
        mutex_unlock(&alloc->mutex);
}

/**
 * binder_alloc_mmap_handler() - map virtual address space for proc
 * @alloc:      alloc structure for this proc
 * @vma:        vma passed to mmap()
 *
 * Called by binder_mmap() to initialize the space specified in
 * vma for allocating binder buffers
 *
 * Return:
 *      0 = success
 *      -EBUSY = address space already mapped
 *      -ENOMEM = failed to map memory to given address space
 */
int binder_alloc_mmap_handler(struct binder_alloc *alloc,
                              struct vm_area_struct *vma)
{
        int ret;
        const char *failure_string;
        struct binder_buffer *buffer;

        mutex_lock(&binder_alloc_mmap_lock);
        if (alloc->buffer_size) {
                ret = -EBUSY;
                failure_string = "already mapped";
                goto err_already_mapped;
        }
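        /* The binder buffer space is capped at 4 MB even if the vma is larger. */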
        alloc->buffer_size = min_t(unsigned long, vma->vm_end - vma->vm_start,
                                   SZ_4M);
        mutex_unlock(&binder_alloc_mmap_lock);

        alloc->buffer = (void __user *)vma->vm_start;

        alloc->pages = kcalloc(alloc->buffer_size / PAGE_SIZE,
                               sizeof(alloc->pages[0]),
                               GFP_KERNEL);
        if (alloc->pages == NULL) {
                ret = -ENOMEM;
                failure_string = "alloc page array";
                goto err_alloc_pages_failed;
        }

        buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
        if (!buffer) {
                ret = -ENOMEM;
                failure_string = "alloc buffer struct";
                goto err_alloc_buf_struct_failed;
        }

        buffer->user_data = alloc->buffer;
        list_add(&buffer->entry, &alloc->buffers);
        buffer->free = 1;
        binder_insert_free_buffer(alloc, buffer);
        alloc->free_async_space = alloc->buffer_size / 2;
        binder_alloc_set_vma(alloc, vma);
        mmgrab(alloc->vma_vm_mm);

        return 0;

err_alloc_buf_struct_failed:
        kfree(alloc->pages);
        alloc->pages = NULL;
err_alloc_pages_failed:
        alloc->buffer = NULL;
        mutex_lock(&binder_alloc_mmap_lock);
        alloc->buffer_size = 0;
err_already_mapped:
        mutex_unlock(&binder_alloc_mmap_lock);
        binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
                           "%s: %d %lx-%lx %s failed %d\n", __func__,
                           alloc->pid, vma->vm_start, vma->vm_end,
                           failure_string, ret);
        return ret;
}

void binder_alloc_deferred_release(struct binder_alloc *alloc)
{
        struct rb_node *n;
        int buffers, page_count;
        struct binder_buffer *buffer;

        buffers = 0;
        mutex_lock(&alloc->mutex);
        BUG_ON(alloc->vma);

        while ((n = rb_first(&alloc->allocated_buffers))) {
                buffer = rb_entry(n, struct binder_buffer, rb_node);

                /* Transaction should already have been freed */
                BUG_ON(buffer->transaction);

                if (buffer->clear_on_free) {
                        binder_alloc_clear_buf(alloc, buffer);
                        buffer->clear_on_free = false;
                }
                binder_free_buf_locked(alloc, buffer);
                buffers++;
        }

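        /* Only the single free buffer covering the whole mapping should remain. */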
        while (!list_empty(&alloc->buffers)) {
                buffer = list_first_entry(&alloc->buffers,
                                          struct binder_buffer, entry);
                WARN_ON(!buffer->free);

                list_del(&buffer->entry);
                WARN_ON_ONCE(!list_empty(&alloc->buffers));
                kfree(buffer);
        }

        page_count = 0;
        if (alloc->pages) {
                int i;

                for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
                        void __user *page_addr;
                        bool on_lru;

                        if (!alloc->pages[i].page_ptr)
                                continue;

                        on_lru = list_lru_del(&binder_alloc_lru,
                                              &alloc->pages[i].lru);
                        page_addr = alloc->buffer + i * PAGE_SIZE;
                        binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                                     "%s: %d: page %d at %pK %s\n",
                                     __func__, alloc->pid, i, page_addr,
                                     on_lru ? "on lru" : "active");
                        __free_page(alloc->pages[i].page_ptr);
                        page_count++;
                }
                kfree(alloc->pages);
        }
        mutex_unlock(&alloc->mutex);
        if (alloc->vma_vm_mm)
                mmdrop(alloc->vma_vm_mm);

        binder_alloc_debug(BINDER_DEBUG_OPEN_CLOSE,
                     "%s: %d buffers %d, pages %d\n",
                     __func__, alloc->pid, buffers, page_count);
}

static void print_binder_buffer(struct seq_file *m, const char *prefix,
                                struct binder_buffer *buffer)
{
        seq_printf(m, "%s %d: %pK size %zd:%zd:%zd %s\n",
                   prefix, buffer->debug_id, buffer->user_data,
                   buffer->data_size, buffer->offsets_size,
                   buffer->extra_buffers_size,
                   buffer->transaction ? "active" : "delivered");
}

/**
 * binder_alloc_print_allocated() - print buffer info
 * @m:     seq_file for output via seq_printf()
 * @alloc: binder_alloc for this proc
 *
 * Prints information about every buffer associated with
 * the binder_alloc state to the given seq_file
 */
void binder_alloc_print_allocated(struct seq_file *m,
                                  struct binder_alloc *alloc)
{
        struct rb_node *n;

        mutex_lock(&alloc->mutex);
        for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
                print_binder_buffer(m, "  buffer",
                                    rb_entry(n, struct binder_buffer, rb_node));
        mutex_unlock(&alloc->mutex);
}

/**
 * binder_alloc_print_pages() - print page usage
 * @m:     seq_file for output via seq_printf()
 * @alloc: binder_alloc for this proc
 */
void binder_alloc_print_pages(struct seq_file *m,
                              struct binder_alloc *alloc)
{
        struct binder_lru_page *page;
        int i;
        int active = 0;
        int lru = 0;
        int free = 0;

        mutex_lock(&alloc->mutex);
        /*
         * Make sure the binder_alloc is fully initialized, otherwise we might
         * read inconsistent state.
         */
        if (binder_alloc_get_vma(alloc) != NULL) {
                for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
                        page = &alloc->pages[i];
                        if (!page->page_ptr)
                                free++;
                        else if (list_empty(&page->lru))
                                active++;
                        else
                                lru++;
                }
        }
        mutex_unlock(&alloc->mutex);
        seq_printf(m, "  pages: %d:%d:%d\n", active, lru, free);
        seq_printf(m, "  pages high watermark: %zu\n", alloc->pages_high);
}

/**
 * binder_alloc_get_allocated_count() - return count of buffers
 * @alloc: binder_alloc for this proc
 *
 * Return: count of allocated buffers
 */
int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
{
        struct rb_node *n;
        int count = 0;

        mutex_lock(&alloc->mutex);
        for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
                count++;
        mutex_unlock(&alloc->mutex);
        return count;
}

/**
 * binder_alloc_vma_close() - invalidate address space
 * @alloc: binder_alloc for this proc
 *
 * Called from binder_vma_close() when releasing address space.
 * Clears alloc->vma to prevent new incoming transactions from
 * allocating more buffers.
 */
void binder_alloc_vma_close(struct binder_alloc *alloc)
{
        binder_alloc_set_vma(alloc, NULL);
}

/**
 * binder_alloc_free_page() - shrinker callback to free pages
 * @item:   item to free
 * @lru:    list_lru instance of the item
 * @lock:   lock protecting the item
 * @cb_arg: callback argument
 *
 * Called from list_lru_walk() in binder_shrink_scan() to free
 * up pages when the system is under memory pressure.
 */
enum lru_status binder_alloc_free_page(struct list_head *item,
                                       struct list_lru_one *lru,
                                       spinlock_t *lock,
                                       void *cb_arg)
        __must_hold(lock)
{
        struct mm_struct *mm = NULL;
        struct binder_lru_page *page = container_of(item,
                                                    struct binder_lru_page,
                                                    lru);
        struct binder_alloc *alloc;
        uintptr_t page_addr;
        size_t index;
        struct vm_area_struct *vma;

        alloc = page->alloc;
        if (!mutex_trylock(&alloc->mutex))
                goto err_get_alloc_mutex_failed;

        if (!page->page_ptr)
                goto err_page_already_freed;

        index = page - alloc->pages;
        page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;

        mm = alloc->vma_vm_mm;
        if (!mmget_not_zero(mm))
                goto err_mmget;
        if (!mmap_read_trylock(mm))
                goto err_mmap_read_lock_failed;
        vma = binder_alloc_get_vma(alloc);

        list_lru_isolate(lru, item);
        spin_unlock(lock);

        if (vma) {
                trace_binder_unmap_user_start(alloc, index);

                zap_page_range(vma, page_addr, PAGE_SIZE);

                trace_binder_unmap_user_end(alloc, index);
        }
        mmap_read_unlock(mm);
        mmput_async(mm);

        trace_binder_unmap_kernel_start(alloc, index);

        __free_page(page->page_ptr);
        page->page_ptr = NULL;

        trace_binder_unmap_kernel_end(alloc, index);

        spin_lock(lock);
        mutex_unlock(&alloc->mutex);
        return LRU_REMOVED_RETRY;

err_mmap_read_lock_failed:
        mmput_async(mm);
err_mmget:
err_page_already_freed:
        mutex_unlock(&alloc->mutex);
err_get_alloc_mutex_failed:
        return LRU_SKIP;
}

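/*
 * Shrinker hooks: count reports how many pages sit on binder_alloc_lru
 * and are therefore reclaimable; scan walks the lru and frees up to
 * sc->nr_to_scan of them via binder_alloc_free_page().
 */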
static unsigned long
binder_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
        unsigned long ret = list_lru_count(&binder_alloc_lru);
        return ret;
}

static unsigned long
binder_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
        unsigned long ret;

        ret = list_lru_walk(&binder_alloc_lru, binder_alloc_free_page,
                            NULL, sc->nr_to_scan);
        return ret;
}

static struct shrinker binder_shrinker = {
        .count_objects = binder_shrink_count,
        .scan_objects = binder_shrink_scan,
        .seeks = DEFAULT_SEEKS,
};

/**
 * binder_alloc_init() - called by binder_open() for per-proc initialization
 * @alloc: binder_alloc for this proc
 *
 * Called from binder_open() to initialize binder_alloc fields for
 * new binder proc
 */
void binder_alloc_init(struct binder_alloc *alloc)
{
        alloc->pid = current->group_leader->pid;
        mutex_init(&alloc->mutex);
        INIT_LIST_HEAD(&alloc->buffers);
}

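/*
 * One-time setup: initialize the global page lru and register the
 * shrinker; the lru is torn down again if shrinker registration fails.
 */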
int binder_alloc_shrinker_init(void)
{
        int ret = list_lru_init(&binder_alloc_lru);

        if (ret == 0) {
                ret = register_shrinker(&binder_shrinker);
                if (ret)
                        list_lru_destroy(&binder_alloc_lru);
        }
        return ret;
}

/**
 * check_buffer() - verify that buffer/offset is safe to access
 * @alloc: binder_alloc for this proc
 * @buffer: binder buffer to be accessed
 * @offset: offset into @buffer data
 * @bytes: bytes to access from offset
 *
 * Check that the @offset/@bytes are within the size of the given
 * @buffer and that the buffer is currently active and not freeable.
 * Offsets must also be multiples of sizeof(u32). The kernel is
 * allowed to touch the buffer in two cases:
 *
 * 1) when the buffer is being created:
 *     (buffer->free == 0 && buffer->allow_user_free == 0)
 * 2) when the buffer is being torn down:
 *     (buffer->free == 0 && buffer->transaction == NULL).
 *
 * Return: true if the buffer is safe to access
 */
static inline bool check_buffer(struct binder_alloc *alloc,
                                struct binder_buffer *buffer,
                                binder_size_t offset, size_t bytes)
{
        size_t buffer_size = binder_alloc_buffer_size(alloc, buffer);

        return buffer_size >= bytes &&
                offset <= buffer_size - bytes &&
                IS_ALIGNED(offset, sizeof(u32)) &&
                !buffer->free &&
                (!buffer->allow_user_free || !buffer->transaction);
}

/**
 * binder_alloc_get_page() - get kernel pointer for given buffer offset
 * @alloc: binder_alloc for this proc
 * @buffer: binder buffer to be accessed
 * @buffer_offset: offset into @buffer data
 * @pgoffp: address to copy final page offset to
 *
 * Lookup the struct page corresponding to the address
 * at @buffer_offset into @buffer->user_data. If @pgoffp is not
 * NULL, the byte-offset into the page is written there.
 *
 * The caller is responsible for ensuring that the offset points
 * to a valid address within the @buffer and that @buffer is
 * not freeable by the user. Since it can't be freed, we are
 * guaranteed that the corresponding elements of @alloc->pages[]
 * cannot change.
 *
 * Return: struct page
 */
static struct page *binder_alloc_get_page(struct binder_alloc *alloc,
                                          struct binder_buffer *buffer,
                                          binder_size_t buffer_offset,
                                          pgoff_t *pgoffp)
{
        binder_size_t buffer_space_offset = buffer_offset +
                (buffer->user_data - alloc->buffer);
        pgoff_t pgoff = buffer_space_offset & ~PAGE_MASK;
        size_t index = buffer_space_offset >> PAGE_SHIFT;
        struct binder_lru_page *lru_page;

        lru_page = &alloc->pages[index];
        *pgoffp = pgoff;
        return lru_page->page_ptr;
}

/**
 * binder_alloc_clear_buf() - zero out buffer
 * @alloc: binder_alloc for this proc
 * @buffer: binder buffer to be cleared
 *
 * memset the given buffer to 0
 */
static void binder_alloc_clear_buf(struct binder_alloc *alloc,
                                   struct binder_buffer *buffer)
{
        size_t bytes = binder_alloc_buffer_size(alloc, buffer);
        binder_size_t buffer_offset = 0;

        while (bytes) {
                unsigned long size;
                struct page *page;
                pgoff_t pgoff;
                void *kptr;

                page = binder_alloc_get_page(alloc, buffer,
                                             buffer_offset, &pgoff);
                size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
                kptr = kmap(page) + pgoff;
                memset(kptr, 0, size);
                kunmap(page);
                bytes -= size;
                buffer_offset += size;
        }
}

/**
 * binder_alloc_copy_user_to_buffer() - copy src user to tgt user
 * @alloc: binder_alloc for this proc
 * @buffer: binder buffer to be accessed
 * @buffer_offset: offset into @buffer data
 * @from: userspace pointer to source buffer
 * @bytes: bytes to copy
 *
 * Copy bytes from source userspace to target buffer.
 *
 * Return: bytes remaining to be copied
 */
unsigned long
binder_alloc_copy_user_to_buffer(struct binder_alloc *alloc,
                                 struct binder_buffer *buffer,
                                 binder_size_t buffer_offset,
                                 const void __user *from,
                                 size_t bytes)
{
        if (!check_buffer(alloc, buffer, buffer_offset, bytes))
                return bytes;

        while (bytes) {
                unsigned long size;
                unsigned long ret;
                struct page *page;
                pgoff_t pgoff;
                void *kptr;

                page = binder_alloc_get_page(alloc, buffer,
                                             buffer_offset, &pgoff);
                size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
                kptr = kmap(page) + pgoff;
                ret = copy_from_user(kptr, from, size);
                kunmap(page);
                if (ret)
                        return bytes - size + ret;
                bytes -= size;
                from += size;
                buffer_offset += size;
        }
        return 0;
}

static int binder_alloc_do_buffer_copy(struct binder_alloc *alloc,
                                       bool to_buffer,
                                       struct binder_buffer *buffer,
                                       binder_size_t buffer_offset,
                                       void *ptr,
                                       size_t bytes)
{
        /* All copies must be 32-bit aligned and 32-bit size */
        if (!check_buffer(alloc, buffer, buffer_offset, bytes))
                return -EINVAL;

        while (bytes) {
                unsigned long size;
                struct page *page;
                pgoff_t pgoff;
                void *tmpptr;
                void *base_ptr;

                page = binder_alloc_get_page(alloc, buffer,
                                             buffer_offset, &pgoff);
                size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
                base_ptr = kmap_atomic(page);
                tmpptr = base_ptr + pgoff;
                if (to_buffer)
                        memcpy(tmpptr, ptr, size);
                else
                        memcpy(ptr, tmpptr, size);
                /*
                 * kunmap_atomic() takes care of flushing the cache
                 * if this device has VIVT cache arch
                 */
                kunmap_atomic(base_ptr);
                bytes -= size;
                pgoff = 0;
                ptr = ptr + size;
                buffer_offset += size;
        }
        return 0;
}

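/*
 * Thin wrapper around binder_alloc_do_buffer_copy(): copy @bytes from
 * the kernel buffer @src into @buffer at @buffer_offset. Returns 0 on
 * success or -EINVAL if the range fails check_buffer().
 */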
int binder_alloc_copy_to_buffer(struct binder_alloc *alloc,
                                struct binder_buffer *buffer,
                                binder_size_t buffer_offset,
                                void *src,
                                size_t bytes)
{
        return binder_alloc_do_buffer_copy(alloc, true, buffer, buffer_offset,
                                           src, bytes);
}

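/*
 * Counterpart of binder_alloc_copy_to_buffer(): copy @bytes from
 * @buffer at @buffer_offset into the kernel buffer @dest. Returns 0 on
 * success or -EINVAL if the range fails check_buffer().
 */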
int binder_alloc_copy_from_buffer(struct binder_alloc *alloc,
                                  void *dest,
                                  struct binder_buffer *buffer,
                                  binder_size_t buffer_offset,
                                  size_t bytes)
{
        return binder_alloc_do_buffer_copy(alloc, false, buffer, buffer_offset,
                                           dest, bytes);
}