// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/ext4/readpage.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 * Copyright (C) 2015, Google, Inc.
 *
 * This was originally taken from fs/mpage.c
 *
 * The ext4_mpage_readpages() function here is intended to replace
 * mpage_readpages() in the general case, not just for encrypted
 * files.  It has some limitations (see below), where it will fall
 * back to block_read_full_page(), but these limitations should only
 * be hit when page_size != block_size.
 *
 * This will allow us to attach a callback function to support ext4
 * encryption.
 *
 * If anything unusual happens, such as:
 *
 * - encountering a page which has buffers
 * - encountering a page which has a non-hole after a hole
 * - encountering a page with non-contiguous blocks
 *
 * then this code just gives up and calls the buffer_head-based read function.
 * It does handle a page which has holes at the end - that is a common case:
 * the end-of-file on blocksize < PAGE_SIZE setups.
 *
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/kdev_t.h>
#include <linux/gfp.h>
#include <linux/bio.h>
#include <linux/fs.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/highmem.h>
#include <linux/prefetch.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/cleancache.h>

#include "ext4.h"

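/* Number of post-read contexts kept preallocated in the mempool below. */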
#define NUM_PREALLOC_POST_READ_CTXS     128

static struct kmem_cache *bio_post_read_ctx_cache;
static mempool_t *bio_post_read_ctx_pool;

/* postprocessing steps for read bios */
enum bio_post_read_step {
        STEP_INITIAL = 0,
        STEP_DECRYPT,
        STEP_VERITY,
};

struct bio_post_read_ctx {
        struct bio *bio;
        struct work_struct work;
        unsigned int cur_step;
        unsigned int enabled_steps;
};

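/*
 * Finish read completion for all pages in @bio: mark each page uptodate
 * only if both the I/O and every post-read step succeeded, otherwise
 * leave it !Uptodate so it will be re-read later.  Then unlock the pages
 * and release the post-read context (if one was attached) and the bio.
 */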
static void __read_end_io(struct bio *bio)
{
        struct page *page;
        struct bio_vec *bv;
        struct bvec_iter_all iter_all;

        bio_for_each_segment_all(bv, bio, iter_all) {
                page = bv->bv_page;

                /* PG_error was set if any post_read step failed */
                if (bio->bi_status || PageError(page)) {
                        ClearPageUptodate(page);
                        /* will re-read again later */
                        ClearPageError(page);
                } else {
                        SetPageUptodate(page);
                }
                unlock_page(page);
        }
        if (bio->bi_private)
                mempool_free(bio->bi_private, bio_post_read_ctx_pool);
        bio_put(bio);
}

static void bio_post_read_processing(struct bio_post_read_ctx *ctx);

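/* Decrypt the bio's pages from a workqueue, then run the next post-read step. */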
static void decrypt_work(struct work_struct *work)
{
        struct bio_post_read_ctx *ctx =
                container_of(work, struct bio_post_read_ctx, work);

        fscrypt_decrypt_bio(ctx->bio);

        bio_post_read_processing(ctx);
}

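/* Verify the bio's pages with fs-verity, then run the next post-read step. */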
static void verity_work(struct work_struct *work)
{
        struct bio_post_read_ctx *ctx =
                container_of(work, struct bio_post_read_ctx, work);

        fsverity_verify_bio(ctx->bio);

        bio_post_read_processing(ctx);
}

static void bio_post_read_processing(struct bio_post_read_ctx *ctx)
{
        /*
         * We use different work queues for decryption and for verity because
         * verity may require reading metadata pages that need decryption, and
         * we shouldn't recurse to the same workqueue.
         */
        switch (++ctx->cur_step) {
        case STEP_DECRYPT:
                if (ctx->enabled_steps & (1 << STEP_DECRYPT)) {
                        INIT_WORK(&ctx->work, decrypt_work);
                        fscrypt_enqueue_decrypt_work(&ctx->work);
                        return;
                }
                ctx->cur_step++;
                /* fall-through */
        case STEP_VERITY:
                if (ctx->enabled_steps & (1 << STEP_VERITY)) {
                        INIT_WORK(&ctx->work, verity_work);
                        fsverity_enqueue_verify_work(&ctx->work);
                        return;
                }
                ctx->cur_step++;
                /* fall-through */
        default:
                __read_end_io(ctx->bio);
        }
}

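/*
 * A post-read context is attached (via ->bi_private) only when decryption
 * and/or verity is needed; skip post-processing if the I/O itself failed.
 */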
static bool bio_post_read_required(struct bio *bio)
{
        return bio->bi_private && !bio->bi_status;
}

/*
 * I/O completion handler for multipage BIOs.
 *
 * The mpage code never puts partial pages into a BIO (except for end-of-file).
 * If a page does not map to a contiguous run of blocks then it simply falls
 * back to block_read_full_page().
 *
 * Why is this?  If a page's completion depends on a number of different BIOs
 * which can complete in any order (or at the same time) then determining the
 * status of that page is hard.  See end_buffer_async_read() for the details.
 * There is no point in duplicating all that complexity.
 */
static void mpage_end_io(struct bio *bio)
{
        if (bio_post_read_required(bio)) {
                struct bio_post_read_ctx *ctx = bio->bi_private;

                ctx->cur_step = STEP_INITIAL;
                bio_post_read_processing(ctx);
                return;
        }
        __read_end_io(bio);
}

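/*
 * Only data pages within i_size are covered by the verity hash tree, so
 * pages entirely beyond EOF need no verification.
 */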
static inline bool ext4_need_verity(const struct inode *inode, pgoff_t idx)
{
        return fsverity_active(inode) &&
               idx < DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
}

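/*
 * Allocate and attach a post-read context to @bio if the inode needs
 * decryption and/or verity verification; return NULL if neither applies.
 */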
static struct bio_post_read_ctx *get_bio_post_read_ctx(struct inode *inode,
                                                       struct bio *bio,
                                                       pgoff_t first_idx)
{
        unsigned int post_read_steps = 0;
        struct bio_post_read_ctx *ctx = NULL;

        if (IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode))
                post_read_steps |= 1 << STEP_DECRYPT;

        if (ext4_need_verity(inode, first_idx))
                post_read_steps |= 1 << STEP_VERITY;

        if (post_read_steps) {
                ctx = mempool_alloc(bio_post_read_ctx_pool, GFP_NOFS);
                if (!ctx)
                        return ERR_PTR(-ENOMEM);
                ctx->bio = bio;
                ctx->enabled_steps = post_read_steps;
                bio->bi_private = ctx;
        }
        return ctx;
}

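/*
 * ext4 stores verity metadata past EOF in the file, and a file being
 * converted to verity may need reads past i_size while the conversion
 * is in progress, so don't clamp such reads to i_size.
 */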
static inline loff_t ext4_readpage_limit(struct inode *inode)
{
        if (IS_ENABLED(CONFIG_FS_VERITY) &&
            (IS_VERITY(inode) || ext4_verity_in_progress(inode)))
                return inode->i_sb->s_maxbytes;

        return i_size_read(inode);
}

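/*
 * Read pages for @mapping: either a single locked @page (with @pages NULL),
 * or, for readahead, @nr_pages pages on the @pages list which are added to
 * the page cache here.  Contiguously-mapped runs of blocks are batched into
 * large bios; anything this fast path can't handle falls back to the
 * buffer_head code at the "confused" label below.
 */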
int ext4_mpage_readpages(struct address_space *mapping,
                         struct list_head *pages, struct page *page,
                         unsigned nr_pages, bool is_readahead)
{
        struct bio *bio = NULL;
        sector_t last_block_in_bio = 0;

        struct inode *inode = mapping->host;
        const unsigned blkbits = inode->i_blkbits;
        const unsigned blocks_per_page = PAGE_SIZE >> blkbits;
        const unsigned blocksize = 1 << blkbits;
        sector_t block_in_file;
        sector_t last_block;
        sector_t last_block_in_file;
        sector_t blocks[MAX_BUF_PER_PAGE];
        unsigned page_block;
        struct block_device *bdev = inode->i_sb->s_bdev;
        int length;
        unsigned relative_block = 0;
        struct ext4_map_blocks map;

        map.m_pblk = 0;
        map.m_lblk = 0;
        map.m_len = 0;
        map.m_flags = 0;

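        /*
         * One iteration per page: map the page's blocks, then either add
         * the page to the current bio (extending a contiguous run) or
         * punt to the buffer_head path via the "confused" label.
         */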
        for (; nr_pages; nr_pages--) {
                int fully_mapped = 1;
                unsigned first_hole = blocks_per_page;

                if (pages) {
                        page = lru_to_page(pages);

                        prefetchw(&page->flags);
                        list_del(&page->lru);
                        if (add_to_page_cache_lru(page, mapping, page->index,
                                  readahead_gfp_mask(mapping)))
                                goto next_page;
                }

                if (page_has_buffers(page))
                        goto confused;

                block_in_file = (sector_t)page->index << (PAGE_SHIFT - blkbits);
                last_block = block_in_file + nr_pages * blocks_per_page;
                last_block_in_file = (ext4_readpage_limit(inode) +
                                      blocksize - 1) >> blkbits;
                if (last_block > last_block_in_file)
                        last_block = last_block_in_file;
                page_block = 0;

                /*
                 * Map blocks using the previous result first.
                 */
                if ((map.m_flags & EXT4_MAP_MAPPED) &&
                    block_in_file > map.m_lblk &&
                    block_in_file < (map.m_lblk + map.m_len)) {
                        unsigned map_offset = block_in_file - map.m_lblk;
                        unsigned last = map.m_len - map_offset;

                        for (relative_block = 0; ; relative_block++) {
                                if (relative_block == last) {
                                        /* needed? */
                                        map.m_flags &= ~EXT4_MAP_MAPPED;
                                        break;
                                }
                                if (page_block == blocks_per_page)
                                        break;
                                blocks[page_block] = map.m_pblk + map_offset +
                                        relative_block;
                                page_block++;
                                block_in_file++;
                        }
                }

                /*
                 * Then do more ext4_map_blocks() calls until we are
                 * done with this page.
                 */
                while (page_block < blocks_per_page) {
                        if (block_in_file < last_block) {
                                map.m_lblk = block_in_file;
                                map.m_len = last_block - block_in_file;

                                if (ext4_map_blocks(NULL, inode, &map, 0) < 0) {
                                set_error_page:
                                        SetPageError(page);
                                        zero_user_segment(page, 0,
                                                          PAGE_SIZE);
                                        unlock_page(page);
                                        goto next_page;
                                }
                        }
                        if ((map.m_flags & EXT4_MAP_MAPPED) == 0) {
                                fully_mapped = 0;
                                if (first_hole == blocks_per_page)
                                        first_hole = page_block;
                                page_block++;
                                block_in_file++;
                                continue;
                        }
                        if (first_hole != blocks_per_page)
                                goto confused;          /* hole -> non-hole */

                        /* Contiguous blocks? */
                        if (page_block && blocks[page_block-1] != map.m_pblk-1)
                                goto confused;
                        for (relative_block = 0; ; relative_block++) {
                                if (relative_block == map.m_len) {
                                        /* needed? */
                                        map.m_flags &= ~EXT4_MAP_MAPPED;
                                        break;
                                } else if (page_block == blocks_per_page)
                                        break;
                                blocks[page_block] = map.m_pblk+relative_block;
                                page_block++;
                                block_in_file++;
                        }
                }
                if (first_hole != blocks_per_page) {
                        zero_user_segment(page, first_hole << blkbits,
                                          PAGE_SIZE);
                        if (first_hole == 0) {
                                if (ext4_need_verity(inode, page->index) &&
                                    !fsverity_verify_page(page))
                                        goto set_error_page;
                                SetPageUptodate(page);
                                unlock_page(page);
                                goto next_page;
                        }
                } else if (fully_mapped) {
                        SetPageMappedToDisk(page);
                }
                if (fully_mapped && blocks_per_page == 1 &&
                    !PageUptodate(page) && cleancache_get_page(page) == 0) {
                        SetPageUptodate(page);
                        goto confused;
                }

                /*
                 * This page will go to BIO.  Do we need to send this
                 * BIO off first?
                 */
                if (bio && (last_block_in_bio != blocks[0] - 1)) {
                submit_and_realloc:
                        submit_bio(bio);
                        bio = NULL;
                }
                if (bio == NULL) {
                        struct bio_post_read_ctx *ctx;

                        /*
                         * bio_alloc will _always_ be able to allocate a bio if
                         * __GFP_DIRECT_RECLAIM is set, see bio_alloc_bioset().
                         */
                        bio = bio_alloc(GFP_KERNEL,
                                min_t(int, nr_pages, BIO_MAX_PAGES));
                        ctx = get_bio_post_read_ctx(inode, bio, page->index);
                        if (IS_ERR(ctx)) {
                                bio_put(bio);
                                bio = NULL;
                                goto set_error_page;
                        }
                        bio_set_dev(bio, bdev);
                        bio->bi_iter.bi_sector = blocks[0] << (blkbits - 9);
                        bio->bi_end_io = mpage_end_io;
                        bio->bi_private = ctx;
                        bio_set_op_attrs(bio, REQ_OP_READ,
                                                is_readahead ? REQ_RAHEAD : 0);
                }

                length = first_hole << blkbits;
                if (bio_add_page(bio, page, length, 0) < length)
                        goto submit_and_realloc;

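                /*
                 * Submit the bio now if the mapping ends at an extent
                 * boundary (EXT4_MAP_BOUNDARY) or if this page ends in a
                 * hole, since the next page can't continue this bio.
                 */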
                if (((map.m_flags & EXT4_MAP_BOUNDARY) &&
                     (relative_block == map.m_len)) ||
                    (first_hole != blocks_per_page)) {
                        submit_bio(bio);
                        bio = NULL;
                } else
                        last_block_in_bio = blocks[blocks_per_page - 1];
                goto next_page;
        confused:
                if (bio) {
                        submit_bio(bio);
                        bio = NULL;
                }
                if (!PageUptodate(page))
                        block_read_full_page(page, ext4_get_block);
                else
                        unlock_page(page);
        next_page:
                if (pages)
                        put_page(page);
        }
        BUG_ON(pages && !list_empty(pages));
        if (bio)
                submit_bio(bio);
        return 0;
}

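/*
 * Create the slab cache and mempool for post-read contexts; the mempool
 * keeps NUM_PREALLOC_POST_READ_CTXS contexts in reserve so that read
 * completion can make progress under memory pressure.
 */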
int __init ext4_init_post_read_processing(void)
{
        bio_post_read_ctx_cache =
                kmem_cache_create("ext4_bio_post_read_ctx",
                                  sizeof(struct bio_post_read_ctx), 0, 0, NULL);
        if (!bio_post_read_ctx_cache)
                goto fail;
        bio_post_read_ctx_pool =
                mempool_create_slab_pool(NUM_PREALLOC_POST_READ_CTXS,
                                         bio_post_read_ctx_cache);
        if (!bio_post_read_ctx_pool)
                goto fail_free_cache;
        return 0;

fail_free_cache:
        kmem_cache_destroy(bio_post_read_ctx_cache);
fail:
        return -ENOMEM;
}

void ext4_exit_post_read_processing(void)
{
        mempool_destroy(bio_post_read_ctx_pool);
        kmem_cache_destroy(bio_post_read_ctx_cache);
}