io_uring: hold mmap_sem for mm->locked_vm manipulation
author Jens Axboe <axboe@kernel.dk>
Thu, 17 Dec 2020 14:53:33 +0000 (07:53 -0700)
committer Jens Axboe <axboe@kernel.dk>
Thu, 17 Dec 2020 14:53:33 +0000 (07:53 -0700)
The kernel doesn't seem to have clear rules around this, but various
spots use the mmap_sem to serialize modifications to the locked_vm
count. Play it safe and lock the mm for write when accounting or
unaccounting locked memory.
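
As a reference for the pattern, here is a minimal standalone sketch,
assuming a caller that already holds a reference on the mm (as io_uring
does via mm_account). account_pages() is a hypothetical helper used
only for illustration, not an io_uring function; the mm fields and
locking helpers are the real ones:

    #include <linux/mm.h>       /* struct mm_struct, mmap_write_lock() */
    #include <linux/atomic.h>   /* atomic64_add() */

    /* Illustrative only: mirrors the ACCT_LOCKED vs ACCT_PINNED split. */
    static void account_pages(struct mm_struct *mm, unsigned long nr_pages,
                              bool locked)
    {
            if (locked) {
                    /* locked_vm is a plain unsigned long; writers take the
                     * mmap_sem for write to serialize updates. */
                    mmap_write_lock(mm);
                    mm->locked_vm += nr_pages;
                    mmap_write_unlock(mm);
            } else {
                    /* pinned_vm is an atomic64_t, no extra locking needed */
                    atomic64_add(nr_pages, &mm->pinned_vm);
            }
    }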

Signed-off-by: Jens Axboe <axboe@kernel.dk>
fs/io_uring.c

index 6a4560c..2d07d35 100644
@@ -8157,10 +8157,13 @@ static void io_unaccount_mem(struct io_ring_ctx *ctx, unsigned long nr_pages,
                __io_unaccount_mem(ctx->user, nr_pages);
 
        if (ctx->mm_account) {
-               if (acct == ACCT_LOCKED)
+               if (acct == ACCT_LOCKED) {
+                       mmap_write_lock(ctx->mm_account);
                        ctx->mm_account->locked_vm -= nr_pages;
-               else if (acct == ACCT_PINNED)
+                       mmap_write_unlock(ctx->mm_account);
+               } else if (acct == ACCT_PINNED) {
                        atomic64_sub(nr_pages, &ctx->mm_account->pinned_vm);
+               }
        }
 }
 
@@ -8176,10 +8179,13 @@ static int io_account_mem(struct io_ring_ctx *ctx, unsigned long nr_pages,
        }
 
        if (ctx->mm_account) {
-               if (acct == ACCT_LOCKED)
+               if (acct == ACCT_LOCKED) {
+                       mmap_write_lock(ctx->mm_account);
                        ctx->mm_account->locked_vm += nr_pages;
-               else if (acct == ACCT_PINNED)
+                       mmap_write_unlock(ctx->mm_account);
+               } else if (acct == ACCT_PINNED) {
                        atomic64_add(nr_pages, &ctx->mm_account->pinned_vm);
+               }
        }
 
        return 0;