bcachefs: GFP_NOIO -> GFP_NOFS
author Kent Overstreet <kent.overstreet@linux.dev>
Sun, 28 May 2023 22:02:38 +0000 (18:02 -0400)
committer Kent Overstreet <kent.overstreet@linux.dev>
Sun, 22 Oct 2023 21:10:03 +0000 (17:10 -0400)
GFP_NOIO dates from the bcache days, when we operated under the block
layer. Now that bcachefs runs as a filesystem, GFP_NOFS is the appropriate
annotation: it still keeps reclaim from recursing into filesystem code,
but no longer forbids block I/O. Switch all GFP_NOIO uses to GFP_NOFS.
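For illustration only (not part of this patch), here is a minimal sketch of
the distinction, using standard gfp flags and the scoped-allocation helpers
already visible in the btree_io.c hunk below; the example_* function names
are hypothetical:

/*
 * Illustrative sketch, not from the patch: two ways to keep memory
 * reclaim from recursing into filesystem code while still allowing
 * block I/O (GFP_NOIO would forbid the I/O as well).
 */
#include <linux/gfp.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>

/* Per-call-site flag: this one allocation must not recurse into the FS. */
static void *example_alloc_nofs(size_t size)
{
	return kmalloc(size, GFP_NOFS);
}

/*
 * Scoped API: every allocation inside the section behaves as if GFP_NOFS
 * had been passed, even GFP_KERNEL allocations in callees -- the same
 * memalloc_nofs_save()/restore() pattern seen in btree_bounce_alloc().
 */
static void *example_alloc_nofs_scoped(size_t size)
{
	unsigned int flags = memalloc_nofs_save();
	void *p = kmalloc(size, GFP_KERNEL);

	memalloc_nofs_restore(flags);
	return p;
}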

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
fs/bcachefs/btree_io.c
fs/bcachefs/btree_update_interior.c
fs/bcachefs/buckets.c
fs/bcachefs/compress.c
fs/bcachefs/debug.c
fs/bcachefs/ec.c
fs/bcachefs/io.c
fs/bcachefs/journal_io.c
fs/bcachefs/journal_reclaim.c
fs/bcachefs/keylist.c

diff --git a/fs/bcachefs/btree_io.c b/fs/bcachefs/btree_io.c
index 0a7a18e..27a2a7b 100644
@@ -117,7 +117,7 @@ static void *btree_bounce_alloc(struct bch_fs *c, size_t size,
        p = vpmalloc(size, __GFP_NOWARN|GFP_NOWAIT);
        if (!p) {
                *used_mempool = true;
-               p = mempool_alloc(&c->btree_bounce_pool, GFP_NOIO);
+               p = mempool_alloc(&c->btree_bounce_pool, GFP_NOFS);
        }
        memalloc_nofs_restore(flags);
        return p;
@@ -937,7 +937,7 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
        /* We might get called multiple times on read retry: */
        b->written = 0;
 
-       iter = mempool_alloc(&c->fill_iter, GFP_NOIO);
+       iter = mempool_alloc(&c->fill_iter, GFP_NOFS);
        sort_iter_init(iter, b);
        iter->size = (btree_blocks(c) + 1) * 2;
 
@@ -1580,7 +1580,7 @@ void bch2_btree_node_read(struct bch_fs *c, struct btree *b,
        bio = bio_alloc_bioset(NULL,
                               buf_pages(b->data, btree_bytes(c)),
                               REQ_OP_READ|REQ_SYNC|REQ_META,
-                              GFP_NOIO,
+                              GFP_NOFS,
                               &c->btree_bio);
        rb = container_of(bio, struct btree_read_bio, bio);
        rb->c                   = c;
@@ -2077,7 +2077,7 @@ do_write:
        wbio = container_of(bio_alloc_bioset(NULL,
                                buf_pages(data, sectors_to_write << 9),
                                REQ_OP_WRITE|REQ_META,
-                               GFP_NOIO,
+                               GFP_NOFS,
                                &c->btree_bio),
                            struct btree_write_bio, wbio.bio);
        wbio_init(&wbio->wbio.bio);
diff --git a/fs/bcachefs/btree_update_interior.c b/fs/bcachefs/btree_update_interior.c
index 1319337..db0d09b 100644
@@ -1092,7 +1092,7 @@ bch2_btree_update_start(struct btree_trans *trans, struct btree_path *path,
                }
        }
 
-       as = mempool_alloc(&c->btree_interior_update_pool, GFP_NOIO);
+       as = mempool_alloc(&c->btree_interior_update_pool, GFP_NOFS);
        memset(as, 0, sizeof(*as));
        closure_init(&as->cl, NULL);
        as->c           = c;
diff --git a/fs/bcachefs/buckets.c b/fs/bcachefs/buckets.c
index adf3bd0..405c532 100644
@@ -433,12 +433,12 @@ replicas_deltas_realloc(struct btree_trans *trans, unsigned more)
        WARN_ON_ONCE(alloc_size > REPLICAS_DELTA_LIST_MAX);
 
        if (!d || d->used + more > d->size) {
-               d = krealloc(d, alloc_size, GFP_NOIO|__GFP_ZERO);
+               d = krealloc(d, alloc_size, GFP_NOFS|__GFP_ZERO);
 
                BUG_ON(!d && alloc_size > REPLICAS_DELTA_LIST_MAX);
 
                if (!d) {
-                       d = mempool_alloc(&trans->c->replicas_delta_pool, GFP_NOIO);
+                       d = mempool_alloc(&trans->c->replicas_delta_pool, GFP_NOFS);
                        memset(d, 0, REPLICAS_DELTA_LIST_MAX);
 
                        if (trans->fs_usage_deltas)
diff --git a/fs/bcachefs/compress.c b/fs/bcachefs/compress.c
index 6bec384..38a3475 100644
@@ -28,11 +28,11 @@ static struct bbuf __bounce_alloc(struct bch_fs *c, unsigned size, int rw)
 
        BUG_ON(size > c->opts.encoded_extent_max);
 
-       b = kmalloc(size, GFP_NOIO|__GFP_NOWARN);
+       b = kmalloc(size, GFP_NOFS|__GFP_NOWARN);
        if (b)
                return (struct bbuf) { .b = b, .type = BB_KMALLOC, .rw = rw };
 
-       b = mempool_alloc(&c->compression_bounce[rw], GFP_NOIO);
+       b = mempool_alloc(&c->compression_bounce[rw], GFP_NOFS);
        if (b)
                return (struct bbuf) { .b = b, .type = BB_MEMPOOL, .rw = rw };
 
@@ -94,7 +94,7 @@ static struct bbuf __bio_map_or_bounce(struct bch_fs *c, struct bio *bio,
        BUG_ON(DIV_ROUND_UP(start.bi_size, PAGE_SIZE) > nr_pages);
 
        pages = nr_pages > ARRAY_SIZE(stack_pages)
-               ? kmalloc_array(nr_pages, sizeof(struct page *), GFP_NOIO)
+               ? kmalloc_array(nr_pages, sizeof(struct page *), GFP_NOFS)
                : stack_pages;
        if (!pages)
                goto bounce;
@@ -177,7 +177,7 @@ static int __bio_uncompress(struct bch_fs *c, struct bio *src,
                        .avail_out      = dst_len,
                };
 
-               workspace = mempool_alloc(&c->decompress_workspace, GFP_NOIO);
+               workspace = mempool_alloc(&c->decompress_workspace, GFP_NOFS);
 
                zlib_set_workspace(&strm, workspace);
                zlib_inflateInit2(&strm, -MAX_WBITS);
@@ -196,7 +196,7 @@ static int __bio_uncompress(struct bch_fs *c, struct bio *src,
                if (real_src_len > src_len - 4)
                        goto err;
 
-               workspace = mempool_alloc(&c->decompress_workspace, GFP_NOIO);
+               workspace = mempool_alloc(&c->decompress_workspace, GFP_NOFS);
                ctx = zstd_init_dctx(workspace, zstd_dctx_workspace_bound());
 
                ret = zstd_decompress_dctx(ctx,
@@ -382,7 +382,7 @@ static unsigned __bio_compress(struct bch_fs *c,
        dst_data = bio_map_or_bounce(c, dst, WRITE);
        src_data = bio_map_or_bounce(c, src, READ);
 
-       workspace = mempool_alloc(&c->compress_workspace[compression_type], GFP_NOIO);
+       workspace = mempool_alloc(&c->compress_workspace[compression_type], GFP_NOFS);
 
        *src_len = src->bi_iter.bi_size;
        *dst_len = dst->bi_iter.bi_size;
diff --git a/fs/bcachefs/debug.c b/fs/bcachefs/debug.c
index d1563ca..8981acc 100644
@@ -47,7 +47,7 @@ static bool bch2_btree_verify_replica(struct bch_fs *c, struct btree *b,
        bio = bio_alloc_bioset(ca->disk_sb.bdev,
                               buf_pages(n_sorted, btree_bytes(c)),
                               REQ_OP_READ|REQ_META,
-                              GFP_NOIO,
+                              GFP_NOFS,
                               &c->btree_bio);
        bio->bi_iter.bi_sector  = pick.ptr.offset;
        bch2_bio_map(bio, n_sorted, btree_bytes(c));
@@ -211,7 +211,7 @@ void bch2_btree_node_ondisk_to_text(struct printbuf *out, struct bch_fs *c,
        bio = bio_alloc_bioset(ca->disk_sb.bdev,
                               buf_pages(n_ondisk, btree_bytes(c)),
                               REQ_OP_READ|REQ_META,
-                              GFP_NOIO,
+                              GFP_NOFS,
                               &c->btree_bio);
        bio->bi_iter.bi_sector  = pick.ptr.offset;
        bch2_bio_map(bio, n_ondisk, btree_bytes(c));
diff --git a/fs/bcachefs/ec.c b/fs/bcachefs/ec.c
index 439fa54..1c35fa1 100644
@@ -485,7 +485,7 @@ int bch2_ec_read_extent(struct bch_fs *c, struct bch_read_bio *rbio)
 
        BUG_ON(!rbio->pick.has_ec);
 
-       buf = kzalloc(sizeof(*buf), GFP_NOIO);
+       buf = kzalloc(sizeof(*buf), GFP_NOFS);
        if (!buf)
                return -BCH_ERR_ENOMEM_ec_read_extent;
 
diff --git a/fs/bcachefs/io.c b/fs/bcachefs/io.c
index 11ed864..0f8d529 100644
@@ -163,7 +163,7 @@ static struct page *__bio_alloc_page_pool(struct bch_fs *c, bool *using_mempool)
        struct page *page;
 
        if (likely(!*using_mempool)) {
-               page = alloc_page(GFP_NOIO);
+               page = alloc_page(GFP_NOFS);
                if (unlikely(!page)) {
                        mutex_lock(&c->bio_bounce_pages_lock);
                        *using_mempool = true;
@@ -172,7 +172,7 @@ static struct page *__bio_alloc_page_pool(struct bch_fs *c, bool *using_mempool)
                }
        } else {
 pool_alloc:
-               page = mempool_alloc(&c->bio_bounce_pages, GFP_NOIO);
+               page = mempool_alloc(&c->bio_bounce_pages, GFP_NOFS);
        }
 
        return page;
@@ -660,7 +660,7 @@ void bch2_submit_wbio_replicas(struct bch_write_bio *wbio, struct bch_fs *c,
 
                if (to_entry(ptr + 1) < ptrs.end) {
                        n = to_wbio(bio_alloc_clone(NULL, &wbio->bio,
-                                               GFP_NOIO, &ca->replica_set));
+                                               GFP_NOFS, &ca->replica_set));
 
                        n->bio.bi_end_io        = wbio->bio.bi_end_io;
                        n->bio.bi_private       = wbio->bio.bi_private;
@@ -976,7 +976,7 @@ static struct bio *bch2_write_bio_alloc(struct bch_fs *c,
        pages = min(pages, BIO_MAX_VECS);
 
        bio = bio_alloc_bioset(NULL, pages, 0,
-                              GFP_NOIO, &c->bio_write);
+                              GFP_NOFS, &c->bio_write);
        wbio                    = wbio_init(bio);
        wbio->put_bio           = true;
        /* copy WRITE_SYNC flag */
@@ -1314,7 +1314,7 @@ static int bch2_write_extent(struct bch_write_op *op, struct write_point *wp,
                BUG_ON(total_output != total_input);
 
                dst = bio_split(src, total_input >> 9,
-                               GFP_NOIO, &c->bio_write);
+                               GFP_NOFS, &c->bio_write);
                wbio_init(dst)->put_bio = true;
                /* copy WRITE_SYNC flag */
                dst->bi_opf             = src->bi_opf;
@@ -2013,7 +2013,7 @@ static struct promote_op *__promote_alloc(struct btree_trans *trans,
        if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_promote))
                return NULL;
 
-       op = kzalloc(sizeof(*op) + sizeof(struct bio_vec) * pages, GFP_NOIO);
+       op = kzalloc(sizeof(*op) + sizeof(struct bio_vec) * pages, GFP_NOFS);
        if (!op)
                goto err;
 
@@ -2026,7 +2026,7 @@ static struct promote_op *__promote_alloc(struct btree_trans *trans,
         */
        *rbio = kzalloc(sizeof(struct bch_read_bio) +
                        sizeof(struct bio_vec) * pages,
-                       GFP_NOIO);
+                       GFP_NOFS);
        if (!*rbio)
                goto err;
 
@@ -2034,7 +2034,7 @@ static struct promote_op *__promote_alloc(struct btree_trans *trans,
        bio_init(&(*rbio)->bio, NULL, (*rbio)->bio.bi_inline_vecs, pages, 0);
 
        if (bch2_bio_alloc_pages(&(*rbio)->bio, sectors << 9,
-                                GFP_NOIO))
+                                GFP_NOFS))
                goto err;
 
        (*rbio)->bounce         = true;
@@ -2746,7 +2746,7 @@ get_bio:
                rbio = rbio_init(bio_alloc_bioset(NULL,
                                                  DIV_ROUND_UP(sectors, PAGE_SECTORS),
                                                  0,
-                                                 GFP_NOIO,
+                                                 GFP_NOFS,
                                                  &c->bio_read_split),
                                 orig->opts);
 
@@ -2762,7 +2762,7 @@ get_bio:
                 * from the whole bio, in which case we don't want to retry and
                 * lose the error)
                 */
-               rbio = rbio_init(bio_alloc_clone(NULL, &orig->bio, GFP_NOIO,
+               rbio = rbio_init(bio_alloc_clone(NULL, &orig->bio, GFP_NOFS,
                                                 &c->bio_read_split),
                                 orig->opts);
                rbio->bio.bi_iter = iter;
diff --git a/fs/bcachefs/journal_io.c b/fs/bcachefs/journal_io.c
index b455ef0..8dc3786 100644
@@ -1438,7 +1438,7 @@ static void journal_buf_realloc(struct journal *j, struct journal_buf *buf)
        if (buf->buf_size >= new_size)
                return;
 
-       new_buf = kvpmalloc(new_size, GFP_NOIO|__GFP_NOWARN);
+       new_buf = kvpmalloc(new_size, GFP_NOFS|__GFP_NOWARN);
        if (!new_buf)
                return;
 
diff --git a/fs/bcachefs/journal_reclaim.c b/fs/bcachefs/journal_reclaim.c
index 29d843e..2c7f8ac 100644
@@ -271,7 +271,7 @@ void bch2_journal_do_discards(struct journal *j)
                                blkdev_issue_discard(ca->disk_sb.bdev,
                                        bucket_to_sector(ca,
                                                ja->buckets[ja->discard_idx]),
-                                       ca->mi.bucket_size, GFP_NOIO);
+                                       ca->mi.bucket_size, GFP_NOFS);
 
                        spin_lock(&j->lock);
                        ja->discard_idx = (ja->discard_idx + 1) % ja->nr;
diff --git a/fs/bcachefs/keylist.c b/fs/bcachefs/keylist.c
index cf5998e..5699cd4 100644
@@ -18,7 +18,7 @@ int bch2_keylist_realloc(struct keylist *l, u64 *inline_u64s,
            (old_buf && roundup_pow_of_two(oldsize) == newsize))
                return 0;
 
-       new_keys = krealloc(old_buf, sizeof(u64) * newsize, GFP_NOIO);
+       new_keys = krealloc(old_buf, sizeof(u64) * newsize, GFP_NOFS);
        if (!new_keys)
                return -ENOMEM;