Merge tag 'pm-5.12-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm
[linux-2.6-microblaze.git] / fs / btrfs / subpage.c
1 // SPDX-License-Identifier: GPL-2.0
2
3 #include <linux/slab.h>
4 #include "ctree.h"
5 #include "subpage.h"
6
/*
 * Attach a btrfs_subpage structure to @page as page private.
 *
 * No-op (returning 0) for regular sectorsize (== PAGE_SIZE) or when the page
 * already has private data attached.
 *
 * Returns 0 on success, or a negative errno propagated from
 * btrfs_alloc_subpage() (-ENOMEM).
 */
int btrfs_attach_subpage(const struct btrfs_fs_info *fs_info,
                         struct page *page, enum btrfs_subpage_type type)
{
        struct btrfs_subpage *subpage = NULL;
        int ret;

        /*
         * We have cases like a dummy extent buffer page, which is not mapped
         * and doesn't need to be locked.
         */
        if (page->mapping)
                ASSERT(PageLocked(page));
        /* Either not subpage, or the page already has private attached */
        if (fs_info->sectorsize == PAGE_SIZE || PagePrivate(page))
                return 0;

        ret = btrfs_alloc_subpage(fs_info, &subpage, type);
        if (ret < 0)
                return ret;
        attach_page_private(page, subpage);
        return 0;
}
29
30 void btrfs_detach_subpage(const struct btrfs_fs_info *fs_info,
31                           struct page *page)
32 {
33         struct btrfs_subpage *subpage;
34
35         /* Either not subpage, or already detached */
36         if (fs_info->sectorsize == PAGE_SIZE || !PagePrivate(page))
37                 return;
38
39         subpage = (struct btrfs_subpage *)detach_page_private(page);
40         ASSERT(subpage);
41         btrfs_free_subpage(subpage);
42 }
43
/*
 * Allocate and initialize a btrfs_subpage structure into *@ret.
 *
 * No-op for regular sectorsize (== PAGE_SIZE); in that case *@ret is left
 * untouched.  Only the counter matching @type is initialized: eb_refs for
 * metadata, readers otherwise.
 *
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
int btrfs_alloc_subpage(const struct btrfs_fs_info *fs_info,
                        struct btrfs_subpage **ret,
                        enum btrfs_subpage_type type)
{
        if (fs_info->sectorsize == PAGE_SIZE)
                return 0;

        /* GFP_NOFS: don't recurse into the filesystem during reclaim */
        *ret = kzalloc(sizeof(struct btrfs_subpage), GFP_NOFS);
        if (!*ret)
                return -ENOMEM;
        spin_lock_init(&(*ret)->lock);
        if (type == BTRFS_SUBPAGE_METADATA)
                atomic_set(&(*ret)->eb_refs, 0);
        else
                atomic_set(&(*ret)->readers, 0);
        return 0;
}
61
/* Free a structure allocated by btrfs_alloc_subpage() */
void btrfs_free_subpage(struct btrfs_subpage *subpage)
{
        /* kfree(NULL) is a no-op, no NULL check needed */
        kfree(subpage);
}
66
/*
 * Increase the eb_refs of current subpage.
 *
 * This is important for eb allocation, to prevent race with last eb freeing
 * of the same page.
 * With the eb_refs increased before the eb inserted into radix tree,
 * detach_extent_buffer_page() won't detach the page private while we're still
 * allocating the extent buffer.
 *
 * Caller must hold page->mapping->private_lock (enforced via lockdep below)
 * and the page must have subpage private attached.
 */
void btrfs_page_inc_eb_refs(const struct btrfs_fs_info *fs_info,
                            struct page *page)
{
        struct btrfs_subpage *subpage;

        /* Regular sectorsize has no subpage structure to track */
        if (fs_info->sectorsize == PAGE_SIZE)
                return;

        ASSERT(PagePrivate(page) && page->mapping);
        lockdep_assert_held(&page->mapping->private_lock);

        subpage = (struct btrfs_subpage *)page->private;
        atomic_inc(&subpage->eb_refs);
}
90
91 void btrfs_page_dec_eb_refs(const struct btrfs_fs_info *fs_info,
92                             struct page *page)
93 {
94         struct btrfs_subpage *subpage;
95
96         if (fs_info->sectorsize == PAGE_SIZE)
97                 return;
98
99         ASSERT(PagePrivate(page) && page->mapping);
100         lockdep_assert_held(&page->mapping->private_lock);
101
102         subpage = (struct btrfs_subpage *)page->private;
103         ASSERT(atomic_read(&subpage->eb_refs));
104         atomic_dec(&subpage->eb_refs);
105 }
106
/*
 * Sanity checks shared by all subpage range helpers: the page must have
 * subpage private attached, and [@start, @start + @len) must be sector
 * aligned and (for mapped pages) fully contained within @page.
 */
static void btrfs_subpage_assert(const struct btrfs_fs_info *fs_info,
                struct page *page, u64 start, u32 len)
{
        /* Basic checks */
        ASSERT(PagePrivate(page) && page->private);
        ASSERT(IS_ALIGNED(start, fs_info->sectorsize) &&
               IS_ALIGNED(len, fs_info->sectorsize));
        /*
         * The range check only works for mapped page, we can still have
         * unmapped page like dummy extent buffer pages.
         */
        if (page->mapping)
                ASSERT(page_offset(page) <= start &&
                       start + len <= page_offset(page) + PAGE_SIZE);
}
122
/*
 * Account the sectors of [@start, @start + @len) as being read, one reader
 * per sector.  Paired with btrfs_subpage_end_reader().
 */
void btrfs_subpage_start_reader(const struct btrfs_fs_info *fs_info,
                struct page *page, u64 start, u32 len)
{
        struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
        const int nbits = len >> fs_info->sectorsize_bits;
        int ret;

        btrfs_subpage_assert(fs_info, page, start, len);

        ret = atomic_add_return(nbits, &subpage->readers);
        /*
         * NOTE(review): this assert implies readers was 0 before the add,
         * i.e. only one read range is started per page at a time — confirm
         * against callers.
         */
        ASSERT(ret == nbits);
}
135
/*
 * Release the reader counts for [@start, @start + @len), and unlock the page
 * once the last reader of the whole page is gone.
 */
void btrfs_subpage_end_reader(const struct btrfs_fs_info *fs_info,
                struct page *page, u64 start, u32 len)
{
        struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
        const int nbits = len >> fs_info->sectorsize_bits;

        btrfs_subpage_assert(fs_info, page, start, len);
        /* Catch underflow before subtracting */
        ASSERT(atomic_read(&subpage->readers) >= nbits);
        if (atomic_sub_and_test(nbits, &subpage->readers))
                unlock_page(page);
}
147
/*
 * Convert the [start, start + len) range into a u16 bitmap
 *
 * Each bit represents one sector of the page, lowest bit first.
 * For example: if start == page_offset() + 16K, len = 16K, we get 0x00f0.
 */
static u16 btrfs_subpage_calc_bitmap(const struct btrfs_fs_info *fs_info,
                struct page *page, u64 start, u32 len)
{
        const int bit_start = offset_in_page(start) >> fs_info->sectorsize_bits;
        const int nbits = len >> fs_info->sectorsize_bits;

        btrfs_subpage_assert(fs_info, page, start, len);

        /*
         * Here nbits can be 16, thus can go beyond u16 range. We make the
         * first left shift to be calculated in unsigned long (at least u32),
         * then truncate the result to u16.
         */
        return (u16)(((1UL << nbits) - 1) << bit_start);
}
168
169 void btrfs_subpage_set_uptodate(const struct btrfs_fs_info *fs_info,
170                 struct page *page, u64 start, u32 len)
171 {
172         struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
173         const u16 tmp = btrfs_subpage_calc_bitmap(fs_info, page, start, len);
174         unsigned long flags;
175
176         spin_lock_irqsave(&subpage->lock, flags);
177         subpage->uptodate_bitmap |= tmp;
178         if (subpage->uptodate_bitmap == U16_MAX)
179                 SetPageUptodate(page);
180         spin_unlock_irqrestore(&subpage->lock, flags);
181 }
182
/*
 * Clear the uptodate status of [@start, @start + @len).
 *
 * Note the asymmetry with btrfs_subpage_set_uptodate(): clearing ANY sector
 * immediately clears the page-level Uptodate flag, since the page is no
 * longer fully uptodate.
 */
void btrfs_subpage_clear_uptodate(const struct btrfs_fs_info *fs_info,
                struct page *page, u64 start, u32 len)
{
        struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
        const u16 tmp = btrfs_subpage_calc_bitmap(fs_info, page, start, len);
        unsigned long flags;

        spin_lock_irqsave(&subpage->lock, flags);
        subpage->uptodate_bitmap &= ~tmp;
        ClearPageUptodate(page);
        spin_unlock_irqrestore(&subpage->lock, flags);
}
195
/*
 * Mark the sectors of [@start, @start + @len) as having an error.
 * Any sector error also sets the page-level Error flag.
 */
void btrfs_subpage_set_error(const struct btrfs_fs_info *fs_info,
                struct page *page, u64 start, u32 len)
{
        struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
        const u16 tmp = btrfs_subpage_calc_bitmap(fs_info, page, start, len);
        unsigned long flags;

        spin_lock_irqsave(&subpage->lock, flags);
        subpage->error_bitmap |= tmp;
        SetPageError(page);
        spin_unlock_irqrestore(&subpage->lock, flags);
}
208
209 void btrfs_subpage_clear_error(const struct btrfs_fs_info *fs_info,
210                 struct page *page, u64 start, u32 len)
211 {
212         struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
213         const u16 tmp = btrfs_subpage_calc_bitmap(fs_info, page, start, len);
214         unsigned long flags;
215
216         spin_lock_irqsave(&subpage->lock, flags);
217         subpage->error_bitmap &= ~tmp;
218         if (subpage->error_bitmap == 0)
219                 ClearPageError(page);
220         spin_unlock_irqrestore(&subpage->lock, flags);
221 }
222
/*
 * Unlike set/clear which is dependent on each page status, for test all bits
 * are tested in the same way.
 *
 * Generates btrfs_subpage_test_<name>(), which returns true only when ALL
 * sectors in [start, start + len) have their <name> bit set.
 */
#define IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(name)                           \
bool btrfs_subpage_test_##name(const struct btrfs_fs_info *fs_info,     \
                struct page *page, u64 start, u32 len)                  \
{                                                                       \
        struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private; \
        const u16 tmp = btrfs_subpage_calc_bitmap(fs_info, page, start, len); \
        unsigned long flags;                                            \
        bool ret;                                                       \
                                                                        \
        spin_lock_irqsave(&subpage->lock, flags);                       \
        ret = ((subpage->name##_bitmap & tmp) == tmp);                  \
        spin_unlock_irqrestore(&subpage->lock, flags);                  \
        return ret;                                                     \
}
IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(uptodate);
IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(error);
243
/*
 * Note that, in selftests (extent-io-tests), we can have empty fs_info passed
 * in.  We only test sectorsize == PAGE_SIZE cases so far, thus we can fall
 * back to regular sectorsize branch.
 *
 * Generates the btrfs_page_{set,clear,test}_<name>() wrappers, which
 * dispatch between the full-page flag helpers and the subpage bitmap
 * helpers based on fs_info->sectorsize.
 */
#define IMPLEMENT_BTRFS_PAGE_OPS(name, set_page_func, clear_page_func,  \
                               test_page_func)                          \
void btrfs_page_set_##name(const struct btrfs_fs_info *fs_info,         \
                struct page *page, u64 start, u32 len)                  \
{                                                                       \
        if (unlikely(!fs_info) || fs_info->sectorsize == PAGE_SIZE) {   \
                set_page_func(page);                                    \
                return;                                                 \
        }                                                               \
        btrfs_subpage_set_##name(fs_info, page, start, len);            \
}                                                                       \
void btrfs_page_clear_##name(const struct btrfs_fs_info *fs_info,       \
                struct page *page, u64 start, u32 len)                  \
{                                                                       \
        if (unlikely(!fs_info) || fs_info->sectorsize == PAGE_SIZE) {   \
                clear_page_func(page);                                  \
                return;                                                 \
        }                                                               \
        btrfs_subpage_clear_##name(fs_info, page, start, len);          \
}                                                                       \
bool btrfs_page_test_##name(const struct btrfs_fs_info *fs_info,        \
                struct page *page, u64 start, u32 len)                  \
{                                                                       \
        if (unlikely(!fs_info) || fs_info->sectorsize == PAGE_SIZE)     \
                return test_page_func(page);                            \
        return btrfs_subpage_test_##name(fs_info, page, start, len);    \
}
IMPLEMENT_BTRFS_PAGE_OPS(uptodate, SetPageUptodate, ClearPageUptodate,
                         PageUptodate);
IMPLEMENT_BTRFS_PAGE_OPS(error, SetPageError, ClearPageError, PageError);