btrfs: convert submit_eb_subpage() to take a folio
authorLi Zetao <lizetao1@huawei.com>
Wed, 28 Aug 2024 18:29:00 +0000 (02:29 +0800)
committerDavid Sterba <dsterba@suse.com>
Tue, 10 Sep 2024 14:51:21 +0000 (16:51 +0200)
The old page API is being gradually replaced with the folio API to improve
code readability and to avoid repeated conversions between page and
folio. In addition, use folio_pos() instead of page_offset(), which is
more consistent with folio usage.

Signed-off-by: Li Zetao <lizetao1@huawei.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
fs/btrfs/extent_io.c

index f8b0010..f16c5be 100644 (file)
@@ -1735,12 +1735,11 @@ static noinline_for_stack void write_one_eb(struct extent_buffer *eb,
  * Return >=0 for the number of submitted extent buffers.
  * Return <0 for fatal error.
  */
-static int submit_eb_subpage(struct page *page, struct writeback_control *wbc)
+static int submit_eb_subpage(struct folio *folio, struct writeback_control *wbc)
 {
-       struct btrfs_fs_info *fs_info = page_to_fs_info(page);
-       struct folio *folio = page_folio(page);
+       struct btrfs_fs_info *fs_info = folio_to_fs_info(folio);
        int submitted = 0;
-       u64 page_start = page_offset(page);
+       u64 folio_start = folio_pos(folio);
        int bit_start = 0;
        int sectors_per_node = fs_info->nodesize >> fs_info->sectorsize_bits;
 
@@ -1755,21 +1754,21 @@ static int submit_eb_subpage(struct page *page, struct writeback_control *wbc)
                 * Take private lock to ensure the subpage won't be detached
                 * in the meantime.
                 */
-               spin_lock(&page->mapping->i_private_lock);
+               spin_lock(&folio->mapping->i_private_lock);
                if (!folio_test_private(folio)) {
-                       spin_unlock(&page->mapping->i_private_lock);
+                       spin_unlock(&folio->mapping->i_private_lock);
                        break;
                }
                spin_lock_irqsave(&subpage->lock, flags);
                if (!test_bit(bit_start + btrfs_bitmap_nr_dirty * fs_info->sectors_per_page,
                              subpage->bitmaps)) {
                        spin_unlock_irqrestore(&subpage->lock, flags);
-                       spin_unlock(&page->mapping->i_private_lock);
+                       spin_unlock(&folio->mapping->i_private_lock);
                        bit_start++;
                        continue;
                }
 
-               start = page_start + bit_start * fs_info->sectorsize;
+               start = folio_start + bit_start * fs_info->sectorsize;
                bit_start += sectors_per_node;
 
                /*
@@ -1778,7 +1777,7 @@ static int submit_eb_subpage(struct page *page, struct writeback_control *wbc)
                 */
                eb = find_extent_buffer_nolock(fs_info, start);
                spin_unlock_irqrestore(&subpage->lock, flags);
-               spin_unlock(&page->mapping->i_private_lock);
+               spin_unlock(&folio->mapping->i_private_lock);
 
                /*
                 * The eb has already reached 0 refs thus find_extent_buffer()
@@ -1829,7 +1828,7 @@ static int submit_eb_page(struct page *page, struct btrfs_eb_write_context *ctx)
                return 0;
 
        if (page_to_fs_info(page)->nodesize < PAGE_SIZE)
-               return submit_eb_subpage(page, wbc);
+               return submit_eb_subpage(folio, wbc);
 
        spin_lock(&mapping->i_private_lock);
        if (!folio_test_private(folio)) {