/* SPDX-License-Identifier: GPL-2.0 */
/*
 * include/linux/buffer_head.h
 *
 * Everything to do with buffer_heads.
 */

#ifndef _LINUX_BUFFER_HEAD_H
#define _LINUX_BUFFER_HEAD_H

#include <linux/types.h>
#include <linux/fs.h>
#include <linux/linkage.h>
#include <linux/pagemap.h>
#include <linux/wait.h>
#include <linux/atomic.h>

#ifdef CONFIG_BLOCK

enum bh_state_bits {
        BH_Uptodate,    /* Contains valid data */
        BH_Dirty,       /* Is dirty */
        BH_Lock,        /* Is locked */
        BH_Req,         /* Has been submitted for I/O */

        BH_Mapped,      /* Has a disk mapping */
        BH_New,         /* Disk mapping was newly created by get_block */
        BH_Async_Read,  /* Is under end_buffer_async_read I/O */
        BH_Async_Write, /* Is under end_buffer_async_write I/O */
        BH_Delay,       /* Buffer is not yet allocated on disk */
        BH_Boundary,    /* Block is followed by a discontiguity */
        BH_Write_EIO,   /* I/O error on write */
        BH_Unwritten,   /* Buffer is allocated on disk but not written */
        BH_Quiet,       /* Buffer error printks to be quiet */
        BH_Meta,        /* Buffer contains metadata */
        BH_Prio,        /* Buffer should be submitted with REQ_PRIO */
        BH_Defer_Completion, /* Defer AIO completion to workqueue */

        BH_PrivateStart,/* not a state bit, but the first bit available
                         * for private allocation by other entities
                         */
};
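
/*
 * Filesystems can layer their own state bits on top of BH_PrivateStart.
 * An illustrative sketch of the idiom (jbd2 does something similar with
 * BH_JBD; the names below are hypothetical, not defined by this header):
 *
 *	enum myfs_bh_state_bits {
 *		BH_MyFS_Pinned = BH_PrivateStart,
 *		BH_MyFS_Shadow,
 *	};
 *
 *	static inline int buffer_myfs_pinned(struct buffer_head *bh)
 *	{
 *		return test_bit(BH_MyFS_Pinned, &bh->b_state);
 *	}
 */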

#define MAX_BUF_PER_PAGE (PAGE_SIZE / 512)

struct page;
struct buffer_head;
struct address_space;
typedef void (bh_end_io_t)(struct buffer_head *bh, int uptodate);

/*
 * Historically, a buffer_head was used to map a single block
 * within a page, and of course as the unit of I/O through the
 * filesystem and block layers.  Nowadays the basic I/O unit
 * is the bio, and buffer_heads are used for extracting block
 * mappings (via a get_block_t call), for tracking state within
 * a page (via a page_mapping) and for wrapping bio submission
 * for backward compatibility reasons (e.g. submit_bh).
 */
struct buffer_head {
        unsigned long b_state;          /* buffer state bitmap (see above) */
        struct buffer_head *b_this_page;/* circular list of page's buffers */
        struct page *b_page;            /* the page this bh is mapped to */

        sector_t b_blocknr;             /* start block number */
        size_t b_size;                  /* size of mapping */
        char *b_data;                   /* pointer to data within the page */

        struct block_device *b_bdev;
        bh_end_io_t *b_end_io;          /* I/O completion */
        void *b_private;                /* reserved for b_end_io */
        struct list_head b_assoc_buffers; /* associated with another mapping */
        struct address_space *b_assoc_map;      /* mapping this buffer is
                                                   associated with */
        atomic_t b_count;               /* users using this buffer_head */
        spinlock_t b_uptodate_lock;     /* Used by the first bh in a page, to
                                         * serialise IO completion of other
                                         * buffers in the page */
};
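
/*
 * b_this_page links all buffers of a page into a circular list, so a
 * typical walk looks like this (an illustrative sketch; page_buffers()
 * is defined further down in this header):
 *
 *	struct buffer_head *bh, *head;
 *
 *	bh = head = page_buffers(page);
 *	do {
 *		... operate on bh ...
 *		bh = bh->b_this_page;
 *	} while (bh != head);
 */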

/*
 * macro tricks to expand the set_buffer_foo(), clear_buffer_foo()
 * and buffer_foo() functions.
 * To avoid resetting buffer flags that are already set, because that
 * causes a costly cache line transition, check the flag first.
 */
#define BUFFER_FNS(bit, name)                                           \
static __always_inline void set_buffer_##name(struct buffer_head *bh)   \
{                                                                       \
        if (!test_bit(BH_##bit, &(bh)->b_state))                        \
                set_bit(BH_##bit, &(bh)->b_state);                      \
}                                                                       \
static __always_inline void clear_buffer_##name(struct buffer_head *bh) \
{                                                                       \
        clear_bit(BH_##bit, &(bh)->b_state);                            \
}                                                                       \
static __always_inline int buffer_##name(const struct buffer_head *bh)  \
{                                                                       \
        return test_bit(BH_##bit, &(bh)->b_state);                      \
}
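
/*
 * For example, BUFFER_FNS(Uptodate, uptodate) expands (roughly) to:
 *
 *	static __always_inline void set_buffer_uptodate(struct buffer_head *bh)
 *	{
 *		if (!test_bit(BH_Uptodate, &bh->b_state))
 *			set_bit(BH_Uptodate, &bh->b_state);
 *	}
 *
 * plus the matching clear_buffer_uptodate() and buffer_uptodate().
 */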

/*
 * test_set_buffer_foo() and test_clear_buffer_foo()
 */
#define TAS_BUFFER_FNS(bit, name)                                       \
static __always_inline int test_set_buffer_##name(struct buffer_head *bh) \
{                                                                       \
        return test_and_set_bit(BH_##bit, &(bh)->b_state);              \
}                                                                       \
static __always_inline int test_clear_buffer_##name(struct buffer_head *bh) \
{                                                                       \
        return test_and_clear_bit(BH_##bit, &(bh)->b_state);            \
}                                                                       \

/*
 * Emit the buffer bitops functions.  Note that there are also functions
 * of the form "mark_buffer_foo()".  These are higher-level functions which
 * do something in addition to setting a b_state bit.
 */
BUFFER_FNS(Uptodate, uptodate)
BUFFER_FNS(Dirty, dirty)
TAS_BUFFER_FNS(Dirty, dirty)
BUFFER_FNS(Lock, locked)
BUFFER_FNS(Req, req)
TAS_BUFFER_FNS(Req, req)
BUFFER_FNS(Mapped, mapped)
BUFFER_FNS(New, new)
BUFFER_FNS(Async_Read, async_read)
BUFFER_FNS(Async_Write, async_write)
BUFFER_FNS(Delay, delay)
BUFFER_FNS(Boundary, boundary)
BUFFER_FNS(Write_EIO, write_io_error)
BUFFER_FNS(Unwritten, unwritten)
BUFFER_FNS(Meta, meta)
BUFFER_FNS(Prio, prio)
BUFFER_FNS(Defer_Completion, defer_completion)
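
/*
 * A hedged illustration of the "mark_buffer_foo()" distinction mentioned
 * above (a sketch, not the exact implementation): set_buffer_dirty(bh)
 * only sets BH_Dirty, while mark_buffer_dirty(bh), declared below, also
 * dirties the backing page and its mapping so writeback will find it:
 *
 *	set_buffer_dirty(bh);		// b_state bit only
 *	mark_buffer_dirty(bh);		// bit + page/mapping accounting
 */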

#define bh_offset(bh)           ((unsigned long)(bh)->b_data & ~PAGE_MASK)

/* If we *know* page->private refers to buffer_heads */
#define page_buffers(page)                                      \
        ({                                                      \
                BUG_ON(!PagePrivate(page));                     \
                ((struct buffer_head *)page_private(page));     \
        })
#define page_has_buffers(page)  PagePrivate(page)
#define folio_buffers(folio)            folio_get_private(folio)
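
/*
 * Callers that do not *know* the page has buffers attached should test
 * first, since page_buffers() BUG()s on pages without them.  A minimal
 * sketch:
 *
 *	if (page_has_buffers(page))
 *		bh = page_buffers(page);
 */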

void buffer_check_dirty_writeback(struct folio *folio,
                                     bool *dirty, bool *writeback);

/*
 * Declarations
 */

void mark_buffer_dirty(struct buffer_head *bh);
void mark_buffer_write_io_error(struct buffer_head *bh);
void touch_buffer(struct buffer_head *bh);
void set_bh_page(struct buffer_head *bh,
                struct page *page, unsigned long offset);
bool try_to_free_buffers(struct folio *);
struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
                bool retry);
void create_empty_buffers(struct page *, unsigned long,
                        unsigned long b_state);
void end_buffer_read_sync(struct buffer_head *bh, int uptodate);
void end_buffer_write_sync(struct buffer_head *bh, int uptodate);
void end_buffer_async_write(struct buffer_head *bh, int uptodate);

/* Things to do with buffers at mapping->private_list */
void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode);
int inode_has_buffers(struct inode *);
void invalidate_inode_buffers(struct inode *);
int remove_inode_buffers(struct inode *inode);
int sync_mapping_buffers(struct address_space *mapping);
void clean_bdev_aliases(struct block_device *bdev, sector_t block,
                        sector_t len);
static inline void clean_bdev_bh_alias(struct buffer_head *bh)
{
        clean_bdev_aliases(bh->b_bdev, bh->b_blocknr, 1);
}

void mark_buffer_async_write(struct buffer_head *bh);
void __wait_on_buffer(struct buffer_head *);
wait_queue_head_t *bh_waitq_head(struct buffer_head *bh);
struct buffer_head *__find_get_block(struct block_device *bdev, sector_t block,
                        unsigned size);
struct buffer_head *__getblk_gfp(struct block_device *bdev, sector_t block,
                                  unsigned size, gfp_t gfp);
void __brelse(struct buffer_head *);
void __bforget(struct buffer_head *);
void __breadahead(struct block_device *, sector_t block, unsigned int size);
void __breadahead_gfp(struct block_device *, sector_t block, unsigned int size,
                  gfp_t gfp);
struct buffer_head *__bread_gfp(struct block_device *,
                                sector_t block, unsigned size, gfp_t gfp);
void invalidate_bh_lrus(void);
void invalidate_bh_lrus_cpu(void);
bool has_bh_in_lru(int cpu, void *dummy);
struct buffer_head *alloc_buffer_head(gfp_t gfp_flags);
void free_buffer_head(struct buffer_head * bh);
void unlock_buffer(struct buffer_head *bh);
void __lock_buffer(struct buffer_head *bh);
void ll_rw_block(int, int, int, struct buffer_head * bh[]);
int sync_dirty_buffer(struct buffer_head *bh);
int __sync_dirty_buffer(struct buffer_head *bh, int op_flags);
void write_dirty_buffer(struct buffer_head *bh, int op_flags);
int submit_bh(int, int, struct buffer_head *);
void write_boundary_block(struct block_device *bdev,
                        sector_t bblock, unsigned blocksize);
int bh_uptodate_or_lock(struct buffer_head *bh);
int bh_submit_read(struct buffer_head *bh);

extern int buffer_heads_over_limit;

/*
 * Generic address_space_operations implementations for buffer_head-backed
 * address_spaces.
 */
void block_invalidate_folio(struct folio *folio, size_t offset, size_t length);
int block_write_full_page(struct page *page, get_block_t *get_block,
                                struct writeback_control *wbc);
int __block_write_full_page(struct inode *inode, struct page *page,
                        get_block_t *get_block, struct writeback_control *wbc,
                        bh_end_io_t *handler);
int block_read_full_folio(struct folio *, get_block_t *);
bool block_is_partially_uptodate(struct folio *, size_t from, size_t count);
int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
                struct page **pagep, get_block_t *get_block);
int __block_write_begin(struct page *page, loff_t pos, unsigned len,
                get_block_t *get_block);
int block_write_end(struct file *, struct address_space *,
                                loff_t, unsigned, unsigned,
                                struct page *, void *);
int generic_write_end(struct file *, struct address_space *,
                                loff_t, unsigned, unsigned,
                                struct page *, void *);
void page_zero_new_buffers(struct page *page, unsigned from, unsigned to);
void clean_page_buffers(struct page *page);
int cont_write_begin(struct file *, struct address_space *, loff_t,
                        unsigned, struct page **, void **,
                        get_block_t *, loff_t *);
int generic_cont_expand_simple(struct inode *inode, loff_t size);
int block_commit_write(struct page *page, unsigned from, unsigned to);
int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
                                get_block_t get_block);
/* Convert errno to return value from ->page_mkwrite() call */
static inline vm_fault_t block_page_mkwrite_return(int err)
{
        if (err == 0)
                return VM_FAULT_LOCKED;
        if (err == -EFAULT || err == -EAGAIN)
                return VM_FAULT_NOPAGE;
        if (err == -ENOMEM)
                return VM_FAULT_OOM;
        /* -ENOSPC, -EDQUOT, -EIO ... */
        return VM_FAULT_SIGBUS;
}
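
/*
 * A typical ->page_mkwrite() built on these two helpers (an illustrative
 * sketch; myfs_page_mkwrite and myfs_get_block are hypothetical names):
 *
 *	static vm_fault_t myfs_page_mkwrite(struct vm_fault *vmf)
 *	{
 *		int err = block_page_mkwrite(vmf->vma, vmf, myfs_get_block);
 *
 *		return block_page_mkwrite_return(err);
 *	}
 */
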
sector_t generic_block_bmap(struct address_space *, sector_t, get_block_t *);
int block_truncate_page(struct address_space *, loff_t, get_block_t *);
int nobh_write_begin(struct address_space *, loff_t, unsigned len,
                                struct page **, void **, get_block_t*);
int nobh_write_end(struct file *, struct address_space *,
                                loff_t, unsigned, unsigned,
                                struct page *, void *);
int nobh_truncate_page(struct address_space *, loff_t, get_block_t *);
int nobh_writepage(struct page *page, get_block_t *get_block,
                        struct writeback_control *wbc);

void buffer_init(void);

/*
 * inline definitions
 */

static inline void get_bh(struct buffer_head *bh)
{
        atomic_inc(&bh->b_count);
}

static inline void put_bh(struct buffer_head *bh)
{
        smp_mb__before_atomic();
        atomic_dec(&bh->b_count);
}

static inline void brelse(struct buffer_head *bh)
{
        if (bh)
                __brelse(bh);
}

static inline void bforget(struct buffer_head *bh)
{
        if (bh)
                __bforget(bh);
}

static inline struct buffer_head *
sb_bread(struct super_block *sb, sector_t block)
{
        return __bread_gfp(sb->s_bdev, block, sb->s_blocksize, __GFP_MOVABLE);
}
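
/*
 * Typical metadata read in a filesystem (illustrative sketch; the error
 * handling shown is the caller's responsibility, not sb_bread()'s):
 *
 *	struct buffer_head *bh = sb_bread(sb, block);
 *
 *	if (!bh)
 *		return -EIO;	// block could not be read
 *	// ... parse bh->b_data, sb->s_blocksize bytes ...
 *	brelse(bh);
 */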

static inline struct buffer_head *
sb_bread_unmovable(struct super_block *sb, sector_t block)
{
        return __bread_gfp(sb->s_bdev, block, sb->s_blocksize, 0);
}

static inline void
sb_breadahead(struct super_block *sb, sector_t block)
{
        __breadahead(sb->s_bdev, block, sb->s_blocksize);
}

static inline void
sb_breadahead_unmovable(struct super_block *sb, sector_t block)
{
        __breadahead_gfp(sb->s_bdev, block, sb->s_blocksize, 0);
}

static inline struct buffer_head *
sb_getblk(struct super_block *sb, sector_t block)
{
        return __getblk_gfp(sb->s_bdev, block, sb->s_blocksize, __GFP_MOVABLE);
}

static inline struct buffer_head *
sb_getblk_gfp(struct super_block *sb, sector_t block, gfp_t gfp)
{
        return __getblk_gfp(sb->s_bdev, block, sb->s_blocksize, gfp);
}

static inline struct buffer_head *
sb_find_get_block(struct super_block *sb, sector_t block)
{
        return __find_get_block(sb->s_bdev, block, sb->s_blocksize);
}

static inline void
map_bh(struct buffer_head *bh, struct super_block *sb, sector_t block)
{
        set_buffer_mapped(bh);
        bh->b_bdev = sb->s_bdev;
        bh->b_blocknr = block;
        bh->b_size = sb->s_blocksize;
}
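
/*
 * map_bh() is what a get_block_t implementation typically calls once it
 * has resolved a logical block to a physical one.  A minimal sketch
 * (myfs_get_block and myfs_lookup_block are hypothetical helpers):
 *
 *	static int myfs_get_block(struct inode *inode, sector_t iblock,
 *				  struct buffer_head *bh_result, int create)
 *	{
 *		sector_t phys = myfs_lookup_block(inode, iblock, create);
 *
 *		if (!phys)
 *			return -EIO;
 *		map_bh(bh_result, inode->i_sb, phys);
 *		return 0;
 *	}
 */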

static inline void wait_on_buffer(struct buffer_head *bh)
{
        might_sleep();
        if (buffer_locked(bh))
                __wait_on_buffer(bh);
}

static inline int trylock_buffer(struct buffer_head *bh)
{
        return likely(!test_and_set_bit_lock(BH_Lock, &bh->b_state));
}

static inline void lock_buffer(struct buffer_head *bh)
{
        might_sleep();
        if (!trylock_buffer(bh))
                __lock_buffer(bh);
}
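
/*
 * These primitives combine into the classic synchronous-read idiom
 * (an illustrative sketch using submit_bh() and end_buffer_read_sync()
 * declared above; error handling is the caller's):
 *
 *	lock_buffer(bh);
 *	if (buffer_uptodate(bh)) {
 *		unlock_buffer(bh);
 *	} else {
 *		get_bh(bh);
 *		bh->b_end_io = end_buffer_read_sync;
 *		submit_bh(REQ_OP_READ, 0, bh);
 *		wait_on_buffer(bh);	// end_io unlocks and drops the ref
 *		if (!buffer_uptodate(bh))
 *			return -EIO;
 *	}
 */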

static inline struct buffer_head *getblk_unmovable(struct block_device *bdev,
                                                   sector_t block,
                                                   unsigned size)
{
        return __getblk_gfp(bdev, block, size, 0);
}

static inline struct buffer_head *__getblk(struct block_device *bdev,
                                           sector_t block,
                                           unsigned size)
{
        return __getblk_gfp(bdev, block, size, __GFP_MOVABLE);
}

/**
 *  __bread() - reads a specified block and returns the bh
 *  @bdev: the block_device to read from
 *  @block: number of block
 *  @size: size (in bytes) to read
 *
 *  Reads a specified block, and returns the buffer head that contains it.
 *  The page cache is allocated from the movable area so that it can be
 *  migrated.  It returns NULL if the block was unreadable.
 */
static inline struct buffer_head *
__bread(struct block_device *bdev, sector_t block, unsigned size)
{
        return __bread_gfp(bdev, block, size, __GFP_MOVABLE);
}
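
/*
 * Example (illustrative): reading one block straight from a block device
 * and releasing it when done:
 *
 *	struct buffer_head *bh = __bread(bdev, blocknr, 512);
 *
 *	if (bh) {
 *		// bh->b_data holds the block contents
 *		brelse(bh);
 *	}
 */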

bool block_dirty_folio(struct address_space *mapping, struct folio *folio);

#else /* CONFIG_BLOCK */

static inline void buffer_init(void) {}
static inline bool try_to_free_buffers(struct folio *folio) { return true; }
static inline int inode_has_buffers(struct inode *inode) { return 0; }
static inline void invalidate_inode_buffers(struct inode *inode) {}
static inline int remove_inode_buffers(struct inode *inode) { return 1; }
static inline int sync_mapping_buffers(struct address_space *mapping) { return 0; }
static inline void invalidate_bh_lrus_cpu(void) {}
static inline bool has_bh_in_lru(int cpu, void *dummy) { return false; }
#define buffer_heads_over_limit 0

#endif /* CONFIG_BLOCK */
#endif /* _LINUX_BUFFER_HEAD_H */