// SPDX-License-Identifier: GPL-2.0+
/*
 * page.c - buffer/page management specific to NILFS
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * Written by Ryusuke Konishi and Seiji Kihara.
 */

#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/bitops.h>
#include <linux/page-flags.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/pagevec.h>
#include <linux/gfp.h>
#include "nilfs.h"
#include "page.h"
#include "mdt.h"


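/*
 * Buffer state bits that a copied buffer inherits from its source
 * (see nilfs_copy_buffer() and nilfs_copy_page() below).
 */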
#define NILFS_BUFFER_INHERENT_BITS                                      \
        (BIT(BH_Uptodate) | BIT(BH_Mapped) | BIT(BH_NILFS_Node) |       \
         BIT(BH_NILFS_Volatile) | BIT(BH_NILFS_Checked))

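/*
 * __nilfs_get_page_block - get the buffer head for a block within a page
 *
 * Creates empty buffers on @page if it has none, then looks up the
 * buffer head corresponding to block number @block.  The returned
 * buffer head holds an extra reference, is marked accessed, and has no
 * I/O in flight on it.
 */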
static struct buffer_head *
__nilfs_get_page_block(struct page *page, unsigned long block, pgoff_t index,
                       int blkbits, unsigned long b_state)
{
        unsigned long first_block;
        struct buffer_head *bh;

        if (!page_has_buffers(page))
                create_empty_buffers(page, 1 << blkbits, b_state);

        first_block = (unsigned long)index << (PAGE_SHIFT - blkbits);
        bh = nilfs_page_get_nth_block(page, block - first_block);

        touch_buffer(bh);
        wait_on_buffer(bh);
        return bh;
}

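/**
 * nilfs_grab_buffer - get or create a buffer for a given block offset
 * @inode: inode that owns the block
 * @mapping: page cache in which the block resides
 * @blkoff: block offset within the mapping
 * @b_state: buffer state bits to set on newly created buffers
 *
 * Returns the buffer head for block @blkoff, with the page containing
 * it locked and both the page and the buffer referenced, or NULL if
 * the page could not be obtained.  The caller is responsible for
 * unlocking the page and dropping the references.
 */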
struct buffer_head *nilfs_grab_buffer(struct inode *inode,
                                      struct address_space *mapping,
                                      unsigned long blkoff,
                                      unsigned long b_state)
{
        int blkbits = inode->i_blkbits;
        pgoff_t index = blkoff >> (PAGE_SHIFT - blkbits);
        struct page *page;
        struct buffer_head *bh;

        page = grab_cache_page(mapping, index);
        if (unlikely(!page))
                return NULL;

        bh = __nilfs_get_page_block(page, blkoff, index, blkbits, b_state);
        if (unlikely(!bh)) {
                unlock_page(page);
                put_page(page);
                return NULL;
        }
        return bh;
}

/**
 * nilfs_forget_buffer - discard dirty state
 * @bh: buffer head of the buffer to be discarded
 */
void nilfs_forget_buffer(struct buffer_head *bh)
{
        struct page *page = bh->b_page;
        const unsigned long clear_bits =
                (BIT(BH_Uptodate) | BIT(BH_Dirty) | BIT(BH_Mapped) |
                 BIT(BH_Async_Write) | BIT(BH_NILFS_Volatile) |
                 BIT(BH_NILFS_Checked) | BIT(BH_NILFS_Redirected));

        lock_buffer(bh);
        set_mask_bits(&bh->b_state, clear_bits, 0);
        if (nilfs_page_buffers_clean(page))
                __nilfs_clear_page_dirty(page);

        bh->b_blocknr = -1;
        ClearPageUptodate(page);
        ClearPageMappedToDisk(page);
        unlock_buffer(bh);
        brelse(bh);
}

/**
 * nilfs_copy_buffer - copy buffer data and flags
 * @dbh: destination buffer
 * @sbh: source buffer
 */
void nilfs_copy_buffer(struct buffer_head *dbh, struct buffer_head *sbh)
{
        void *kaddr0, *kaddr1;
        unsigned long bits;
        struct page *spage = sbh->b_page, *dpage = dbh->b_page;
        struct buffer_head *bh;

        kaddr0 = kmap_atomic(spage);
        kaddr1 = kmap_atomic(dpage);
        memcpy(kaddr1 + bh_offset(dbh), kaddr0 + bh_offset(sbh), sbh->b_size);
        kunmap_atomic(kaddr1);
        kunmap_atomic(kaddr0);

        dbh->b_state = sbh->b_state & NILFS_BUFFER_INHERENT_BITS;
        dbh->b_blocknr = sbh->b_blocknr;
        dbh->b_bdev = sbh->b_bdev;

        bh = dbh;
        bits = sbh->b_state & (BIT(BH_Uptodate) | BIT(BH_Mapped));
        while ((bh = bh->b_this_page) != dbh) {
                lock_buffer(bh);
                bits &= bh->b_state;
                unlock_buffer(bh);
        }
        if (bits & BIT(BH_Uptodate))
                SetPageUptodate(dpage);
        else
                ClearPageUptodate(dpage);
        if (bits & BIT(BH_Mapped))
                SetPageMappedToDisk(dpage);
        else
                ClearPageMappedToDisk(dpage);
}

/**
 * nilfs_page_buffers_clean - check if a page has dirty buffers
 * @page: page to be checked
 *
 * nilfs_page_buffers_clean() returns zero if the page has dirty buffers.
 * Otherwise, it returns a non-zero value.
 */
int nilfs_page_buffers_clean(struct page *page)
{
        struct buffer_head *bh, *head;

        bh = head = page_buffers(page);
        do {
                if (buffer_dirty(bh))
                        return 0;
                bh = bh->b_this_page;
        } while (bh != head);
        return 1;
}

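/**
 * nilfs_page_bug - dump diagnostic information about a broken page
 * @page: page to examine (may be NULL)
 *
 * Prints the reference count, index, flags, mapping, and owner inode
 * number of @page, followed by the state of each of its buffer heads,
 * to the kernel log.
 */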
void nilfs_page_bug(struct page *page)
{
        struct address_space *m;
        unsigned long ino;

        if (unlikely(!page)) {
                printk(KERN_CRIT "NILFS_PAGE_BUG(NULL)\n");
                return;
        }

        m = page->mapping;
        ino = m ? m->host->i_ino : 0;

        printk(KERN_CRIT "NILFS_PAGE_BUG(%p): cnt=%d index#=%llu flags=0x%lx "
               "mapping=%p ino=%lu\n",
               page, page_ref_count(page),
               (unsigned long long)page->index, page->flags, m, ino);

        if (page_has_buffers(page)) {
                struct buffer_head *bh, *head;
                int i = 0;

                bh = head = page_buffers(page);
                do {
                        printk(KERN_CRIT
                               " BH[%d] %p: cnt=%d block#=%llu state=0x%lx\n",
                               i++, bh, atomic_read(&bh->b_count),
                               (unsigned long long)bh->b_blocknr, bh->b_state);
                        bh = bh->b_this_page;
                } while (bh != head);
        }
}

/**
 * nilfs_copy_page - copy the page with buffers
 * @dst: destination page
 * @src: source page
 * @copy_dirty: flag whether to copy dirty states on the page's buffer heads.
 *
 * This function is for both data pages and btnode pages.  The dirty flag
 * must be handled by the caller.  The page must not be under I/O.
 * Both @src and @dst must be locked.
 */
static void nilfs_copy_page(struct page *dst, struct page *src, int copy_dirty)
{
        struct buffer_head *dbh, *dbufs, *sbh, *sbufs;
        unsigned long mask = NILFS_BUFFER_INHERENT_BITS;

        BUG_ON(PageWriteback(dst));

        sbh = sbufs = page_buffers(src);
        if (!page_has_buffers(dst))
                create_empty_buffers(dst, sbh->b_size, 0);

        if (copy_dirty)
                mask |= BIT(BH_Dirty);

        dbh = dbufs = page_buffers(dst);
        do {
                lock_buffer(sbh);
                lock_buffer(dbh);
                dbh->b_state = sbh->b_state & mask;
                dbh->b_blocknr = sbh->b_blocknr;
                dbh->b_bdev = sbh->b_bdev;
                sbh = sbh->b_this_page;
                dbh = dbh->b_this_page;
        } while (dbh != dbufs);

        copy_highpage(dst, src);

        if (PageUptodate(src) && !PageUptodate(dst))
                SetPageUptodate(dst);
        else if (!PageUptodate(src) && PageUptodate(dst))
                ClearPageUptodate(dst);
        if (PageMappedToDisk(src) && !PageMappedToDisk(dst))
                SetPageMappedToDisk(dst);
        else if (!PageMappedToDisk(src) && PageMappedToDisk(dst))
                ClearPageMappedToDisk(dst);

        do {
                unlock_buffer(sbh);
                unlock_buffer(dbh);
                sbh = sbh->b_this_page;
                dbh = dbh->b_this_page;
        } while (dbh != dbufs);
}

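/**
 * nilfs_copy_dirty_pages - copy dirty pages to another page cache
 * @dmap: destination page cache
 * @smap: source page cache
 *
 * Copies each dirty page in @smap, together with its buffers, to a
 * page at the same index in @dmap, and marks the copy dirty.  Returns
 * zero on success, or -ENOMEM if a destination page could not be
 * grabbed.
 */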
int nilfs_copy_dirty_pages(struct address_space *dmap,
                           struct address_space *smap)
{
        struct pagevec pvec;
        unsigned int i;
        pgoff_t index = 0;
        int err = 0;

        pagevec_init(&pvec);
repeat:
        if (!pagevec_lookup_tag(&pvec, smap, &index, PAGECACHE_TAG_DIRTY))
                return 0;

        for (i = 0; i < pagevec_count(&pvec); i++) {
                struct page *page = pvec.pages[i], *dpage;

                lock_page(page);
                if (unlikely(!PageDirty(page)))
                        NILFS_PAGE_BUG(page, "inconsistent dirty state");

                dpage = grab_cache_page(dmap, page->index);
                if (unlikely(!dpage)) {
                        /* No empty page is added to the page cache */
                        err = -ENOMEM;
                        unlock_page(page);
                        break;
                }
                if (unlikely(!page_has_buffers(page)))
                        NILFS_PAGE_BUG(page,
                                       "found empty page in dat page cache");

                nilfs_copy_page(dpage, page, 1);
                __set_page_dirty_nobuffers(dpage);

                unlock_page(dpage);
                put_page(dpage);
                unlock_page(page);
        }
        pagevec_release(&pvec);
        cond_resched();

        if (likely(!err))
                goto repeat;
        return err;
}

/**
 * nilfs_copy_back_pages - copy back pages to original cache from shadow cache
 * @dmap: destination page cache
 * @smap: source page cache
 *
 * Pages must not be added to the cache while this function runs.
 * This must be ensured by the caller.
 */
void nilfs_copy_back_pages(struct address_space *dmap,
                           struct address_space *smap)
{
        struct pagevec pvec;
        unsigned int i, n;
        pgoff_t index = 0;

        pagevec_init(&pvec);
repeat:
        n = pagevec_lookup(&pvec, smap, &index);
        if (!n)
                return;

        for (i = 0; i < pagevec_count(&pvec); i++) {
                struct page *page = pvec.pages[i], *dpage;
                pgoff_t offset = page->index;

                lock_page(page);
                dpage = find_lock_page(dmap, offset);
                if (dpage) {
                        /* overwrite existing page in the destination cache */
                        WARN_ON(PageDirty(dpage));
                        nilfs_copy_page(dpage, page, 0);
                        unlock_page(dpage);
                        put_page(dpage);
                        /* Do we not need to remove page from smap here? */
                } else {
                        struct page *p;

                        /* move the page to the destination cache */
                        xa_lock_irq(&smap->i_pages);
                        p = __xa_erase(&smap->i_pages, offset);
                        WARN_ON(page != p);
                        smap->nrpages--;
                        xa_unlock_irq(&smap->i_pages);

                        xa_lock_irq(&dmap->i_pages);
                        p = __xa_store(&dmap->i_pages, offset, page, GFP_NOFS);
                        if (unlikely(p)) {
                                /* Probably -ENOMEM */
                                page->mapping = NULL;
                                put_page(page);
                        } else {
                                page->mapping = dmap;
                                dmap->nrpages++;
                                if (PageDirty(page))
                                        __xa_set_mark(&dmap->i_pages, offset,
                                                        PAGECACHE_TAG_DIRTY);
                        }
                        xa_unlock_irq(&dmap->i_pages);
                }
                unlock_page(page);
        }
        pagevec_release(&pvec);
        cond_resched();

        goto repeat;
}

/**
 * nilfs_clear_dirty_pages - discard dirty pages in address space
 * @mapping: address space with dirty pages for discarding
 * @silent: suppress [true] or print [false] warning messages
 */
void nilfs_clear_dirty_pages(struct address_space *mapping, bool silent)
{
        struct pagevec pvec;
        unsigned int i;
        pgoff_t index = 0;

        pagevec_init(&pvec);

        while (pagevec_lookup_tag(&pvec, mapping, &index,
                                        PAGECACHE_TAG_DIRTY)) {
                for (i = 0; i < pagevec_count(&pvec); i++) {
                        struct page *page = pvec.pages[i];

                        lock_page(page);
                        nilfs_clear_dirty_page(page, silent);
                        unlock_page(page);
                }
                pagevec_release(&pvec);
                cond_resched();
        }
}

/**
 * nilfs_clear_dirty_page - discard dirty page
 * @page: dirty page that will be discarded
 * @silent: suppress [true] or print [false] warning messages
 */
void nilfs_clear_dirty_page(struct page *page, bool silent)
{
        struct inode *inode = page->mapping->host;
        struct super_block *sb = inode->i_sb;

        BUG_ON(!PageLocked(page));

        if (!silent)
                nilfs_msg(sb, KERN_WARNING,
                          "discard dirty page: offset=%lld, ino=%lu",
                          page_offset(page), inode->i_ino);

        ClearPageUptodate(page);
        ClearPageMappedToDisk(page);

        if (page_has_buffers(page)) {
                struct buffer_head *bh, *head;
                const unsigned long clear_bits =
                        (BIT(BH_Uptodate) | BIT(BH_Dirty) | BIT(BH_Mapped) |
                         BIT(BH_Async_Write) | BIT(BH_NILFS_Volatile) |
                         BIT(BH_NILFS_Checked) | BIT(BH_NILFS_Redirected));

                bh = head = page_buffers(page);
                do {
                        lock_buffer(bh);
                        if (!silent)
                                nilfs_msg(sb, KERN_WARNING,
                                          "discard dirty block: blocknr=%llu, size=%zu",
                                          (u64)bh->b_blocknr, bh->b_size);

                        set_mask_bits(&bh->b_state, clear_bits, 0);
                        unlock_buffer(bh);
                } while (bh = bh->b_this_page, bh != head);
        }

        __nilfs_clear_page_dirty(page);
}

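/**
 * nilfs_page_count_clean_buffers - count clean buffers in a page range
 * @page: page to be checked
 * @from: start offset in the page, in bytes
 * @to: end offset in the page, in bytes
 *
 * nilfs_page_count_clean_buffers() returns the number of buffers on
 * @page that overlap the byte range [@from, @to) and are not dirty.
 */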
unsigned int nilfs_page_count_clean_buffers(struct page *page,
                                            unsigned int from, unsigned int to)
{
        unsigned int block_start, block_end;
        struct buffer_head *bh, *head;
        unsigned int nc = 0;

        for (bh = head = page_buffers(page), block_start = 0;
             bh != head || !block_start;
             block_start = block_end, bh = bh->b_this_page) {
                block_end = block_start + bh->b_size;
                if (block_end > from && block_start < to && !buffer_dirty(bh))
                        nc++;
        }
        return nc;
}

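/**
 * nilfs_mapping_init - initialize an address space
 * @mapping: address space to be initialized
 * @inode: host inode of the address space
 *
 * Initializes @mapping with a GFP_NOFS allocation mask and empty
 * address space operations.
 */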
void nilfs_mapping_init(struct address_space *mapping, struct inode *inode)
{
        mapping->host = inode;
        mapping->flags = 0;
        mapping_set_gfp_mask(mapping, GFP_NOFS);
        mapping->private_data = NULL;
        mapping->a_ops = &empty_aops;
}

/*
 * NILFS2 needs __nilfs_clear_page_dirty() in the following two cases:
 *
 * 1) For B-tree node pages and data pages of the dat/gcdat, NILFS2 clears
 *    page dirty flags when it copies back pages from the shadow cache
 *    (gcdat->{i_mapping,i_btnode_cache}) to its original cache
 *    (dat->{i_mapping,i_btnode_cache}).
 *
 * 2) Some B-tree operations like insertion or deletion may dispose of
 *    buffers in a dirty state, and this needs to cancel the dirty state
 *    of their pages.
 */
int __nilfs_clear_page_dirty(struct page *page)
{
        struct address_space *mapping = page->mapping;

        if (mapping) {
                xa_lock_irq(&mapping->i_pages);
                if (test_bit(PG_dirty, &page->flags)) {
                        __xa_clear_mark(&mapping->i_pages, page_index(page),
                                             PAGECACHE_TAG_DIRTY);
                        xa_unlock_irq(&mapping->i_pages);
                        return clear_page_dirty_for_io(page);
                }
                xa_unlock_irq(&mapping->i_pages);
                return 0;
        }
        return TestClearPageDirty(page);
}

/**
 * nilfs_find_uncommitted_extent - find extent of uncommitted data
 * @inode: inode
 * @start_blk: start block offset (in)
 * @blkoff: start offset of the found extent (out)
 *
 * This function searches for an extent of buffers marked "delayed" which
 * starts from a block offset equal to or larger than @start_blk.  If
 * such an extent is found, this function stores the start offset in
 * @blkoff and returns its length in blocks.  Otherwise, zero is
 * returned.
 */
unsigned long nilfs_find_uncommitted_extent(struct inode *inode,
                                            sector_t start_blk,
                                            sector_t *blkoff)
{
        unsigned int i;
        pgoff_t index;
        unsigned int nblocks_in_page;
        unsigned long length = 0;
        sector_t b;
        struct pagevec pvec;
        struct page *page;

        if (inode->i_mapping->nrpages == 0)
                return 0;

        index = start_blk >> (PAGE_SHIFT - inode->i_blkbits);
        nblocks_in_page = 1U << (PAGE_SHIFT - inode->i_blkbits);

        pagevec_init(&pvec);

repeat:
        pvec.nr = find_get_pages_contig(inode->i_mapping, index, PAGEVEC_SIZE,
                                        pvec.pages);
        if (pvec.nr == 0)
                return length;

        if (length > 0 && pvec.pages[0]->index > index)
                goto out;

        b = pvec.pages[0]->index << (PAGE_SHIFT - inode->i_blkbits);
        i = 0;
        do {
                page = pvec.pages[i];

                lock_page(page);
                if (page_has_buffers(page)) {
                        struct buffer_head *bh, *head;

                        bh = head = page_buffers(page);
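                        /*
                         * Note: a "continue" in the loop below jumps to
                         * the comma expression in the while condition,
                         * so @b and @bh still advance when blocks below
                         * @start_blk are skipped.
                         */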
                        do {
                                if (b < start_blk)
                                        continue;
                                if (buffer_delay(bh)) {
                                        if (length == 0)
                                                *blkoff = b;
                                        length++;
                                } else if (length > 0) {
                                        goto out_locked;
                                }
                        } while (++b, bh = bh->b_this_page, bh != head);
                } else {
                        if (length > 0)
                                goto out_locked;

                        b += nblocks_in_page;
                }
                unlock_page(page);

        } while (++i < pagevec_count(&pvec));

        index = page->index + 1;
        pagevec_release(&pvec);
        cond_resched();
        goto repeat;

out_locked:
        unlock_page(page);
out:
        pagevec_release(&pvec);
        return length;
}