binder: reverse locking order in shrinker callback
author Carlos Llamas <cmllamas@google.com>
Fri, 1 Dec 2023 17:21:56 +0000 (17:21 +0000)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 5 Dec 2023 00:23:41 +0000 (09:23 +0900)
The locking order currently requires the alloc->mutex to be acquired
first, followed by the mmap lock. However, the alloc->mutex will be
converted into a spinlock in subsequent commits, so the order needs to
be reversed to avoid nesting the sleeping mmap lock under the spinlock.
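
For illustration, a minimal before/after sketch of the ordering
(written for this description, not taken from the tree; blocking lock
calls stand in for the trylocks used in the shrinker path):

  /* before: alloc->mutex is the outer lock; once it becomes a
   * spinlock, taking the sleeping mmap lock inside it would mean
   * sleeping with a spinlock held
   */
  mutex_lock(&alloc->mutex);
  mmap_read_lock(mm);
  ...
  mmap_read_unlock(mm);
  mutex_unlock(&alloc->mutex);

  /* after: the mmap lock is the outer lock, so the future spinlock
   * is innermost and nothing sleeps while it is held
   */
  mmap_read_lock(mm);
  mutex_lock(&alloc->mutex);
  ...
  mutex_unlock(&alloc->mutex);
  mmap_read_unlock(mm);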

The shrinker's callback binder_alloc_free_page() is the only place
that needs to be reordered, since the other functions have already
been refactored and no longer nest these locks.
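
Condensed from the diff below (failure paths and the vma lookup
omitted, so not compilable on its own), the callback's sequence
becomes:

  mmget_not_zero(mm);                /* pin the address space */
  mmap_read_trylock(mm);             /* outer: sleeping mmap lock */
  mutex_trylock(&alloc->mutex);      /* inner: becomes a spinlock */

  page_to_free = page->page_ptr;     /* detach page under both locks */
  page->page_ptr = NULL;
  list_lru_isolate(lru, item);

  mutex_unlock(&alloc->mutex);       /* inner lock released first */
  spin_unlock(lock);                 /* drop the lru list lock */
  ...                                /* zap the user mapping, if any */
  mmap_read_unlock(mm);
  mmput_async(mm);
  __free_page(page_to_free);         /* free with no locks held */
  spin_lock(lock);                   /* retake lru lock to return */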

Some minor cosmetic changes are also included in this patch.

Signed-off-by: Carlos Llamas <cmllamas@google.com>
Link: https://lore.kernel.org/r/20231201172212.1813387-28-cmllamas@google.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
drivers/android/binder_alloc.c

index 5783675..a3e5663 100644
@@ -1061,35 +1061,39 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
                                       void *cb_arg)
        __must_hold(lock)
 {
-       struct mm_struct *mm = NULL;
-       struct binder_lru_page *page = container_of(item,
-                                                   struct binder_lru_page,
-                                                   lru);
-       struct binder_alloc *alloc;
+       struct binder_lru_page *page = container_of(item, typeof(*page), lru);
+       struct binder_alloc *alloc = page->alloc;
+       struct mm_struct *mm = alloc->mm;
+       struct vm_area_struct *vma;
+       struct page *page_to_free;
        unsigned long page_addr;
        size_t index;
-       struct vm_area_struct *vma;
 
-       alloc = page->alloc;
+       if (!mmget_not_zero(mm))
+               goto err_mmget;
+       if (!mmap_read_trylock(mm))
+               goto err_mmap_read_lock_failed;
        if (!mutex_trylock(&alloc->mutex))
                goto err_get_alloc_mutex_failed;
-
        if (!page->page_ptr)
                goto err_page_already_freed;
 
        index = page - alloc->pages;
        page_addr = alloc->buffer + index * PAGE_SIZE;
 
-       mm = alloc->mm;
-       if (!mmget_not_zero(mm))
-               goto err_mmget;
-       if (!mmap_read_trylock(mm))
-               goto err_mmap_read_lock_failed;
        vma = vma_lookup(mm, page_addr);
        if (vma && vma != binder_alloc_get_vma(alloc))
                goto err_invalid_vma;
 
+       trace_binder_unmap_kernel_start(alloc, index);
+
+       page_to_free = page->page_ptr;
+       page->page_ptr = NULL;
+
+       trace_binder_unmap_kernel_end(alloc, index);
+
        list_lru_isolate(lru, item);
+       mutex_unlock(&alloc->mutex);
        spin_unlock(lock);
 
        if (vma) {
@@ -1099,28 +1103,22 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
 
                trace_binder_unmap_user_end(alloc, index);
        }
+
        mmap_read_unlock(mm);
        mmput_async(mm);
-
-       trace_binder_unmap_kernel_start(alloc, index);
-
-       __free_page(page->page_ptr);
-       page->page_ptr = NULL;
-
-       trace_binder_unmap_kernel_end(alloc, index);
+       __free_page(page_to_free);
 
        spin_lock(lock);
-       mutex_unlock(&alloc->mutex);
        return LRU_REMOVED_RETRY;
 
 err_invalid_vma:
+err_page_already_freed:
+       mutex_unlock(&alloc->mutex);
+err_get_alloc_mutex_failed:
        mmap_read_unlock(mm);
 err_mmap_read_lock_failed:
        mmput_async(mm);
 err_mmget:
-err_page_already_freed:
-       mutex_unlock(&alloc->mutex);
-err_get_alloc_mutex_failed:
        return LRU_SKIP;
 }