Merge tag 'pstore-v5.11-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/kees...
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index 6960969..7caf74a 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -338,12 +338,50 @@ static inline struct vm_area_struct *binder_alloc_get_vma(
        return vma;
 }
 
+static void debug_low_async_space_locked(struct binder_alloc *alloc, int pid)
+{
+       /*
+        * Find the number and size of buffers allocated by the current caller.
+        * The idea is that once we cross the threshold, whoever is responsible
+        * for the low async space is likely to try to send another async txn,
+        * and at some point we'll catch them in the act. This is more efficient
+        * than keeping a map per pid.
+        */
+       struct rb_node *n;
+       struct binder_buffer *buffer;
+       size_t total_alloc_size = 0;
+       size_t num_buffers = 0;
+
+       for (n = rb_first(&alloc->allocated_buffers); n != NULL;
+                n = rb_next(n)) {
+               buffer = rb_entry(n, struct binder_buffer, rb_node);
+               if (buffer->pid != pid)
+                       continue;
+               if (!buffer->async_transaction)
+                       continue;
+               total_alloc_size += binder_alloc_buffer_size(alloc, buffer)
+                       + sizeof(struct binder_buffer);
+               num_buffers++;
+       }
+
+       /*
+        * Warn if this pid has more than 50 transactions, or more than 50% of
+        * async space (which is 25% of total buffer size).
+        */
+       if (num_buffers > 50 || total_alloc_size > alloc->buffer_size / 4) {
+               binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
+                            "%d: pid %d spamming oneway? %zd buffers allocated for a total size of %zd\n",
+                             alloc->pid, pid, num_buffers, total_alloc_size);
+       }
+}
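
Aside, not part of the diff: the "25% of total buffer size" figure follows from the async budget being half of the mapped region; binder_alloc_mmap_handler() initializes free_async_space to buffer_size / 2. A minimal userspace sketch of the arithmetic, with the 1 MiB mapping size chosen purely for illustration:

/* Illustrative arithmetic only; the 1 MiB mapping size is an assumption. */
#include <stdio.h>
#include <stddef.h>

int main(void)
{
	size_t buffer_size = 1024 * 1024;       /* example binder mmap size */
	size_t async_budget = buffer_size / 2;  /* free_async_space at mmap time */
	size_t per_pid_warn = buffer_size / 4;  /* threshold checked above */

	printf("async budget:       %zu bytes\n", async_budget);
	printf("per-pid warn level: %zu bytes (%zu%% of the budget)\n",
	       per_pid_warn, per_pid_warn * 100 / async_budget);
	return 0;
}

The detection trigger added further down in this hunk (free_async_space < buffer_size / 10) corresponds to 20% of the same budget.
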
+
 static struct binder_buffer *binder_alloc_new_buf_locked(
                                struct binder_alloc *alloc,
                                size_t data_size,
                                size_t offsets_size,
                                size_t extra_buffers_size,
-                               int is_async)
+                               int is_async,
+                               int pid)
 {
        struct rb_node *n = alloc->free_buffers.rb_node;
        struct binder_buffer *buffer;
@@ -486,11 +524,20 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
        buffer->offsets_size = offsets_size;
        buffer->async_transaction = is_async;
        buffer->extra_buffers_size = extra_buffers_size;
+       buffer->pid = pid;
        if (is_async) {
                alloc->free_async_space -= size + sizeof(struct binder_buffer);
                binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
                             "%d: binder_alloc_buf size %zd async free %zd\n",
                              alloc->pid, size, alloc->free_async_space);
+               if (alloc->free_async_space < alloc->buffer_size / 10) {
+                       /*
+                        * Start detecting spammers once we have less than 20%
+                        * of async space left (which is less than 10% of total
+                        * buffer size).
+                        */
+                       debug_low_async_space_locked(alloc, pid);
+               }
        }
        return buffer;
 
@@ -508,6 +555,7 @@ err_alloc_buf_struct_failed:
  * @offsets_size:       user specified buffer offset
  * @extra_buffers_size: size of extra space for meta-data (eg, security context)
  * @is_async:           buffer for async transaction
+ * @pid:                pid to attribute allocation to (used for debugging)
  *
  * Allocate a new buffer given the requested sizes. Returns
  * the kernel version of the buffer pointer. The size allocated
@@ -520,13 +568,14 @@ struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
                                           size_t data_size,
                                           size_t offsets_size,
                                           size_t extra_buffers_size,
-                                          int is_async)
+                                          int is_async,
+                                          int pid)
 {
        struct binder_buffer *buffer;
 
        mutex_lock(&alloc->mutex);
        buffer = binder_alloc_new_buf_locked(alloc, data_size, offsets_size,
-                                            extra_buffers_size, is_async);
+                                            extra_buffers_size, is_async, pid);
        mutex_unlock(&alloc->mutex);
        return buffer;
 }
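
For context (not shown in this blobdiff): every caller of binder_alloc_new_buf() must now supply a pid to attribute the allocation to. A hedged sketch of the matching call-site update in binder.c's transaction path; identifiers such as target_proc, tr, t and reply come from the existing binder_transaction() code, and current->tgid is assumed as the attributed pid:

	t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
					 tr->offsets_size, extra_buffers_size,
					 !reply && (t->flags & TF_ONE_WAY),
					 current->tgid);
	if (IS_ERR(t->buffer)) {
		/* ... existing error handling, unchanged ... */
	}

The buffer->pid assignment above likewise presumes a companion binder_alloc.h change adding an int pid member to struct binder_buffer.
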
@@ -647,16 +696,30 @@ static void binder_free_buf_locked(struct binder_alloc *alloc,
        binder_insert_free_buffer(alloc, buffer);
 }
 
+static void binder_alloc_clear_buf(struct binder_alloc *alloc,
+                                  struct binder_buffer *buffer);
 /**
  * binder_alloc_free_buf() - free a binder buffer
  * @alloc:     binder_alloc for this proc
  * @buffer:    kernel pointer to buffer
  *
- * Free the buffer allocated via binder_alloc_new_buffer()
+ * Free the buffer allocated via binder_alloc_new_buf()
  */
 void binder_alloc_free_buf(struct binder_alloc *alloc,
                            struct binder_buffer *buffer)
 {
+       /*
+        * We could eliminate the call to binder_alloc_clear_buf()
+        * from binder_alloc_deferred_release() by moving this to
+        * binder_alloc_free_buf_locked(). However, that could
+        * increase contention for the alloc mutex if clear_on_free
+        * is used frequently for large buffers. The mutex is not
+        * needed for correctness here.
+        */
+       if (buffer->clear_on_free) {
+               binder_alloc_clear_buf(alloc, buffer);
+               buffer->clear_on_free = false;
+       }
        mutex_lock(&alloc->mutex);
        binder_free_buf_locked(alloc, buffer);
        mutex_unlock(&alloc->mutex);
@@ -753,6 +816,10 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
                /* Transaction should already have been freed */
                BUG_ON(buffer->transaction);
 
+               if (buffer->clear_on_free) {
+                       binder_alloc_clear_buf(alloc, buffer);
+                       buffer->clear_on_free = false;
+               }
                binder_free_buf_locked(alloc, buffer);
                buffers++;
        }
@@ -1086,6 +1153,36 @@ static struct page *binder_alloc_get_page(struct binder_alloc *alloc,
        return lru_page->page_ptr;
 }
 
+/**
+ * binder_alloc_clear_buf() - zero out buffer
+ * @alloc: binder_alloc for this proc
+ * @buffer: binder buffer to be cleared
+ *
+ * memset the given buffer to 0
+ */
+static void binder_alloc_clear_buf(struct binder_alloc *alloc,
+                                  struct binder_buffer *buffer)
+{
+       size_t bytes = binder_alloc_buffer_size(alloc, buffer);
+       binder_size_t buffer_offset = 0;
+
+       while (bytes) {
+               unsigned long size;
+               struct page *page;
+               pgoff_t pgoff;
+               void *kptr;
+
+               page = binder_alloc_get_page(alloc, buffer,
+                                            buffer_offset, &pgoff);
+               size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
+               kptr = kmap(page) + pgoff;
+               memset(kptr, 0, size);
+               kunmap(page);
+               bytes -= size;
+               buffer_offset += size;
+       }
+}
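
Design note, not part of the diff: the loop above zeroes the payload one physical page at a time because the buffer is only virtually contiguous in the target's mapping, and kmap()/kunmap() may sleep. Where a non-sleeping mapping is preferred, an equivalent body could use the atomic helpers (newer kernels favor kmap_local_page() for short-lived mappings like this). A hedged alternative sketch, not something this commit does:

static void binder_alloc_clear_buf_atomic(struct binder_alloc *alloc,
					  struct binder_buffer *buffer)
{
	size_t bytes = binder_alloc_buffer_size(alloc, buffer);
	binder_size_t buffer_offset = 0;

	while (bytes) {
		unsigned long size;
		struct page *page;
		pgoff_t pgoff;
		void *kptr;

		page = binder_alloc_get_page(alloc, buffer,
					     buffer_offset, &pgoff);
		size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
		kptr = kmap_atomic(page);	/* non-sleeping per-page mapping */
		memset(kptr + pgoff, 0, size);
		kunmap_atomic(kptr);
		bytes -= size;
		buffer_offset += size;
	}
}
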
+
 /**
  * binder_alloc_copy_user_to_buffer() - copy src user to tgt user
  * @alloc: binder_alloc for this proc