// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/ext4/readpage.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 * Copyright (C) 2015, Google, Inc.
 *
 * This was originally taken from fs/mpage.c
 *
 * The ext4_mpage_readpages() function here is intended to
 * replace mpage_readahead() in the general case, not just for
 * encrypted files.  It has some limitations (see below), where it
 * will fall back to block_read_full_folio(), but these limitations
 * should only be hit when page_size != block_size.
 *
 * This will allow us to attach a callback function to support ext4
 * encryption.
 *
 * If anything unusual happens, such as:
 *
 * - encountering a page which has buffers
 * - encountering a page which has a non-hole after a hole
 * - encountering a page with non-contiguous blocks
 *
 * then this code just gives up and calls the buffer_head-based read function.
 * It does handle a page which has holes at the end - that is a common case:
 * the end-of-file on blocksize < PAGE_SIZE setups.
 *
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/kdev_t.h>
#include <linux/gfp.h>
#include <linux/bio.h>
#include <linux/fs.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/highmem.h>
#include <linux/prefetch.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>

#include "ext4.h"

#define NUM_PREALLOC_POST_READ_CTXS	128

static struct kmem_cache *bio_post_read_ctx_cache;
static mempool_t *bio_post_read_ctx_pool;

/* postprocessing steps for read bios */
enum bio_post_read_step {
        STEP_INITIAL = 0,
        STEP_DECRYPT,
        STEP_VERITY,
        STEP_MAX,
};

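/*
 * A bio_post_read_ctx hangs off bio->bi_private and records which
 * post-read steps (decryption and/or verity verification) are enabled
 * for that bio, which step is currently running, and the work item
 * used to run them.
 */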
struct bio_post_read_ctx {
        struct bio *bio;
        struct work_struct work;
        unsigned int cur_step;
        unsigned int enabled_steps;
};

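/*
 * Complete the read: mark each folio uptodate iff the bio succeeded and
 * unlock it, free any post-read context, and drop the bio reference.
 */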
static void __read_end_io(struct bio *bio)
{
        struct folio_iter fi;

        bio_for_each_folio_all(fi, bio)
                folio_end_read(fi.folio, bio->bi_status == 0);
        if (bio->bi_private)
                mempool_free(bio->bi_private, bio_post_read_ctx_pool);
        bio_put(bio);
}

static void bio_post_read_processing(struct bio_post_read_ctx *ctx);

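/*
 * STEP_DECRYPT work item, run from the fscrypt workqueue: decrypt the
 * bio's data in place, then continue with the remaining post-read steps
 * on success, or end the read immediately on failure.
 */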
static void decrypt_work(struct work_struct *work)
{
        struct bio_post_read_ctx *ctx =
                container_of(work, struct bio_post_read_ctx, work);
        struct bio *bio = ctx->bio;

        if (fscrypt_decrypt_bio(bio))
                bio_post_read_processing(ctx);
        else
                __read_end_io(bio);
}

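/*
 * STEP_VERITY work item, run from the fsverity workqueue: verify the
 * bio's data against the file's Merkle tree, then end the read.
 */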
static void verity_work(struct work_struct *work)
{
        struct bio_post_read_ctx *ctx =
                container_of(work, struct bio_post_read_ctx, work);
        struct bio *bio = ctx->bio;

        /*
         * fsverity_verify_bio() may call readahead() again, and although verity
         * will be disabled for that, decryption may still be needed, causing
         * another bio_post_read_ctx to be allocated.  So to guarantee that
         * mempool_alloc() never deadlocks we must free the current ctx first.
         * This is safe because verity is the last post-read step.
         */
        BUILD_BUG_ON(STEP_VERITY + 1 != STEP_MAX);
        mempool_free(ctx, bio_post_read_ctx_pool);
        bio->bi_private = NULL;

        fsverity_verify_bio(bio);

        __read_end_io(bio);
}

static void bio_post_read_processing(struct bio_post_read_ctx *ctx)
{
        /*
         * We use different work queues for decryption and for verity because
         * verity may require reading metadata pages that need decryption, and
         * we shouldn't recurse to the same workqueue.
         */
        switch (++ctx->cur_step) {
        case STEP_DECRYPT:
                if (ctx->enabled_steps & (1 << STEP_DECRYPT)) {
                        INIT_WORK(&ctx->work, decrypt_work);
                        fscrypt_enqueue_decrypt_work(&ctx->work);
                        return;
                }
                ctx->cur_step++;
                fallthrough;
        case STEP_VERITY:
                if (ctx->enabled_steps & (1 << STEP_VERITY)) {
                        INIT_WORK(&ctx->work, verity_work);
                        fsverity_enqueue_verify_work(&ctx->work);
                        return;
                }
                ctx->cur_step++;
                fallthrough;
        default:
                __read_end_io(ctx->bio);
        }
}

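/* Post-read processing is needed only for successful reads that carry a ctx. */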
static bool bio_post_read_required(struct bio *bio)
{
        return bio->bi_private && !bio->bi_status;
}

/*
 * I/O completion handler for multipage BIOs.
 *
 * The mpage code never puts partial pages into a BIO (except for end-of-file).
 * If a page does not map to a contiguous run of blocks then it simply falls
 * back to block_read_full_folio().
 *
 * Why is this?  If a page's completion depends on a number of different BIOs
 * which can complete in any order (or at the same time) then determining the
 * status of that page is hard.  See end_buffer_async_read() for the details.
 * There is no point in duplicating all that complexity.
 */
static void mpage_end_io(struct bio *bio)
{
        if (bio_post_read_required(bio)) {
                struct bio_post_read_ctx *ctx = bio->bi_private;

                ctx->cur_step = STEP_INITIAL;
                bio_post_read_processing(ctx);
                return;
        }
        __read_end_io(bio);
}

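/*
 * Verity verification is only needed for data pages, i.e. pages below
 * i_size.  Pages past i_size of a verity file hold the file's verity
 * metadata and are read without verification.
 */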
static inline bool ext4_need_verity(const struct inode *inode, pgoff_t idx)
{
        return fsverity_active(inode) &&
               idx < DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
}

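/*
 * Work out which post-read steps the bio will need (decryption when the
 * file uses filesystem-layer fscrypt crypto, verification when it uses
 * fs-verity) and, if any are needed, attach a context via bio->bi_private.
 */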
static void ext4_set_bio_post_read_ctx(struct bio *bio,
                                       const struct inode *inode,
                                       pgoff_t first_idx)
{
        unsigned int post_read_steps = 0;

        if (fscrypt_inode_uses_fs_layer_crypto(inode))
                post_read_steps |= 1 << STEP_DECRYPT;

        if (ext4_need_verity(inode, first_idx))
                post_read_steps |= 1 << STEP_VERITY;

        if (post_read_steps) {
                /* Due to the mempool, this never fails. */
                struct bio_post_read_ctx *ctx =
                        mempool_alloc(bio_post_read_ctx_pool, GFP_NOFS);

                ctx->bio = bio;
                ctx->enabled_steps = post_read_steps;
                bio->bi_private = ctx;
        }
}

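/*
 * Reads are normally limited to i_size.  For verity files the limit is
 * raised to s_maxbytes so that the verity metadata stored past EOF can
 * also be read in through the page cache.
 */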
static inline loff_t ext4_readpage_limit(struct inode *inode)
{
        if (IS_ENABLED(CONFIG_FS_VERITY) && IS_VERITY(inode))
                return inode->i_sb->s_maxbytes;

        return i_size_read(inode);
}

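/*
 * Read folios through the page cache, building large bios out of folios
 * whose blocks are contiguous on disk.  Used both for ->read_folio
 * (rac == NULL, a single locked folio is passed in) and for ->readahead
 * (rac != NULL, folios are taken from the readahead control).  Folios
 * that hit one of the "confused" cases listed at the top of this file
 * fall back to block_read_full_folio().
 */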
int ext4_mpage_readpages(struct inode *inode,
                struct readahead_control *rac, struct folio *folio)
{
        struct bio *bio = NULL;
        sector_t last_block_in_bio = 0;

        const unsigned blkbits = inode->i_blkbits;
        const unsigned blocks_per_page = PAGE_SIZE >> blkbits;
        const unsigned blocksize = 1 << blkbits;
        sector_t next_block;
        sector_t block_in_file;
        sector_t last_block;
        sector_t last_block_in_file;
        sector_t blocks[MAX_BUF_PER_PAGE];
        unsigned page_block;
        struct block_device *bdev = inode->i_sb->s_bdev;
        int length;
        unsigned relative_block = 0;
        struct ext4_map_blocks map;
        unsigned int nr_pages = rac ? readahead_count(rac) : 1;

        map.m_pblk = 0;
        map.m_lblk = 0;
        map.m_len = 0;
        map.m_flags = 0;

        for (; nr_pages; nr_pages--) {
                int fully_mapped = 1;
                unsigned first_hole = blocks_per_page;

                if (rac)
                        folio = readahead_folio(rac);
                prefetchw(&folio->flags);

                if (folio_buffers(folio))
                        goto confused;

                block_in_file = next_block =
                        (sector_t)folio->index << (PAGE_SHIFT - blkbits);
                last_block = block_in_file + nr_pages * blocks_per_page;
                last_block_in_file = (ext4_readpage_limit(inode) +
                                      blocksize - 1) >> blkbits;
                if (last_block > last_block_in_file)
                        last_block = last_block_in_file;
                page_block = 0;

                /*
                 * Map blocks using the previous result first.
                 */
                if ((map.m_flags & EXT4_MAP_MAPPED) &&
                    block_in_file > map.m_lblk &&
                    block_in_file < (map.m_lblk + map.m_len)) {
                        unsigned map_offset = block_in_file - map.m_lblk;
                        unsigned last = map.m_len - map_offset;

                        for (relative_block = 0; ; relative_block++) {
                                if (relative_block == last) {
                                        /* needed? */
                                        map.m_flags &= ~EXT4_MAP_MAPPED;
                                        break;
                                }
                                if (page_block == blocks_per_page)
                                        break;
                                blocks[page_block] = map.m_pblk + map_offset +
                                        relative_block;
                                page_block++;
                                block_in_file++;
                        }
                }

                /*
                 * Then do more ext4_map_blocks() calls until we are
                 * done with this folio.
                 */
                while (page_block < blocks_per_page) {
                        if (block_in_file < last_block) {
                                map.m_lblk = block_in_file;
                                map.m_len = last_block - block_in_file;

                                if (ext4_map_blocks(NULL, inode, &map, 0) < 0) {
                                set_error_page:
                                        folio_set_error(folio);
                                        folio_zero_segment(folio, 0,
                                                          folio_size(folio));
                                        folio_unlock(folio);
                                        goto next_page;
                                }
                        }
                        if ((map.m_flags & EXT4_MAP_MAPPED) == 0) {
                                fully_mapped = 0;
                                if (first_hole == blocks_per_page)
                                        first_hole = page_block;
                                page_block++;
                                block_in_file++;
                                continue;
                        }
                        if (first_hole != blocks_per_page)
                                goto confused;          /* hole -> non-hole */

                        /* Contiguous blocks? */
                        if (page_block && blocks[page_block-1] != map.m_pblk-1)
                                goto confused;
                        for (relative_block = 0; ; relative_block++) {
                                if (relative_block == map.m_len) {
                                        /* needed? */
                                        map.m_flags &= ~EXT4_MAP_MAPPED;
                                        break;
                                } else if (page_block == blocks_per_page)
                                        break;
                                blocks[page_block] = map.m_pblk+relative_block;
                                page_block++;
                                block_in_file++;
                        }
                }
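                /*
                 * Zero the part of the folio past the last mapped block.  A
                 * folio that is a hole from the start needs no I/O at all:
                 * it can be verified (if needed) and completed right here.
                 */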
                if (first_hole != blocks_per_page) {
                        folio_zero_segment(folio, first_hole << blkbits,
                                          folio_size(folio));
                        if (first_hole == 0) {
                                if (ext4_need_verity(inode, folio->index) &&
                                    !fsverity_verify_folio(folio))
                                        goto set_error_page;
                                folio_end_read(folio, true);
                                continue;
                        }
                } else if (fully_mapped) {
                        folio_set_mappedtodisk(folio);
                }

                /*
                 * This folio will go to BIO.  Do we need to send this
                 * BIO off first?
                 */
                if (bio && (last_block_in_bio != blocks[0] - 1 ||
                            !fscrypt_mergeable_bio(bio, inode, next_block))) {
                submit_and_realloc:
                        submit_bio(bio);
                        bio = NULL;
                }
                if (bio == NULL) {
                        /*
                         * bio_alloc will _always_ be able to allocate a bio if
                         * __GFP_DIRECT_RECLAIM is set, see bio_alloc_bioset().
                         */
                        bio = bio_alloc(bdev, bio_max_segs(nr_pages),
                                        REQ_OP_READ, GFP_KERNEL);
                        fscrypt_set_bio_crypt_ctx(bio, inode, next_block,
                                                  GFP_KERNEL);
                        ext4_set_bio_post_read_ctx(bio, inode, folio->index);
                        bio->bi_iter.bi_sector = blocks[0] << (blkbits - 9);
                        bio->bi_end_io = mpage_end_io;
                        if (rac)
                                bio->bi_opf |= REQ_RAHEAD;
                }

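                /*
                 * Add only the mapped prefix of the folio (up to the first
                 * hole) to the bio; if it doesn't fit, submit the bio and
                 * retry with a freshly allocated one.
                 */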
                length = first_hole << blkbits;
                if (!bio_add_folio(bio, folio, length, 0))
                        goto submit_and_realloc;

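                /*
                 * Submit now if the whole mapping was consumed and is flagged
                 * as a boundary, or if the folio ends in a hole; otherwise
                 * remember the last block so the next folio can be merged
                 * into the same bio.
                 */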
                if (((map.m_flags & EXT4_MAP_BOUNDARY) &&
                     (relative_block == map.m_len)) ||
                    (first_hole != blocks_per_page)) {
                        submit_bio(bio);
                        bio = NULL;
                } else
                        last_block_in_bio = blocks[blocks_per_page - 1];
                continue;
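        /*
         * Fallback for the unusual cases listed at the top of the file:
         * flush any bio built so far and read this folio with the
         * buffer_head-based helper instead.
         */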
        confused:
                if (bio) {
                        submit_bio(bio);
                        bio = NULL;
                }
                if (!folio_test_uptodate(folio))
                        block_read_full_folio(folio, ext4_get_block);
                else
                        folio_unlock(folio);
next_page:
                ; /* A label shall be followed by a statement until C23 */
        }
        if (bio)
                submit_bio(bio);
        return 0;
}

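/*
 * Set up the slab cache and mempool backing bio_post_read_ctx allocations.
 * The mempool keeps NUM_PREALLOC_POST_READ_CTXS contexts in reserve so that
 * reads needing post-processing can make progress under memory pressure.
 */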
int __init ext4_init_post_read_processing(void)
{
        bio_post_read_ctx_cache = KMEM_CACHE(bio_post_read_ctx, SLAB_RECLAIM_ACCOUNT);

        if (!bio_post_read_ctx_cache)
                goto fail;
        bio_post_read_ctx_pool =
                mempool_create_slab_pool(NUM_PREALLOC_POST_READ_CTXS,
                                         bio_post_read_ctx_cache);
        if (!bio_post_read_ctx_pool)
                goto fail_free_cache;
        return 0;

fail_free_cache:
        kmem_cache_destroy(bio_post_read_ctx_cache);
fail:
        return -ENOMEM;
}

void ext4_exit_post_read_processing(void)
{
        mempool_destroy(bio_post_read_ctx_pool);
        kmem_cache_destroy(bio_post_read_ctx_cache);
}