// SPDX-License-Identifier: GPL-2.0-or-later
/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * buffer_head_io.c
 *
 * Buffer cache handling
 *
 * Copyright (C) 2002, 2004 Oracle.  All rights reserved.
 */

#include <linux/fs.h>
#include <linux/types.h>
#include <linux/highmem.h>
#include <linux/bio.h>

#include <cluster/masklog.h>

#include "ocfs2.h"

#include "alloc.h"
#include "inode.h"
#include "journal.h"
#include "uptodate.h"
#include "buffer_head_io.h"
#include "ocfs2_trace.h"

/*
 * Bits on bh->b_state used by ocfs2.
 *
 * These MUST be after the JBD2 bits.  Hence, we use BH_JBDPrivateStart.
 */
enum ocfs2_state_bits {
        BH_NeedsValidate = BH_JBDPrivateStart,
};

/* Expand the magic b_state functions */
BUFFER_FNS(NeedsValidate, needs_validate);
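
/*
 * Note (added commentary, not part of the original source): the
 * BUFFER_FNS(NeedsValidate, needs_validate) line above expands, via
 * <linux/buffer_head.h>, into the set_buffer_needs_validate(),
 * clear_buffer_needs_validate() and buffer_needs_validate() helpers that
 * the read path below uses to mark buffers still awaiting their validate()
 * callback.
 */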

int ocfs2_write_block(struct ocfs2_super *osb, struct buffer_head *bh,
                      struct ocfs2_caching_info *ci)
{
        int ret = 0;

        trace_ocfs2_write_block((unsigned long long)bh->b_blocknr, ci);

        BUG_ON(bh->b_blocknr < OCFS2_SUPER_BLOCK_BLKNO);
        BUG_ON(buffer_jbd(bh));

        /* No need to check for a soft read-only file system here -
         * non-journalled writes are only ever done on system files,
         * which can get modified during recovery even if read-only. */
        if (ocfs2_is_hard_readonly(osb)) {
                ret = -EROFS;
                mlog_errno(ret);
                goto out;
        }

        ocfs2_metadata_cache_io_lock(ci);

        lock_buffer(bh);
        set_buffer_uptodate(bh);

        /* remove from dirty list before I/O. */
        clear_buffer_dirty(bh);

        get_bh(bh); /* for end_buffer_write_sync() */
        bh->b_end_io = end_buffer_write_sync;
        submit_bh(REQ_OP_WRITE, 0, bh);

        wait_on_buffer(bh);

        if (buffer_uptodate(bh)) {
                ocfs2_set_buffer_uptodate(ci, bh);
        } else {
                /* We don't need to remove the clustered uptodate
                 * information for this bh as it's not marked locally
                 * uptodate. */
                ret = -EIO;
                mlog_errno(ret);
        }

        ocfs2_metadata_cache_io_unlock(ci);
out:
        return ret;
}

/* Caller must provide a bhs[] whose entries are either all NULL or all
 * non-NULL, so that read failures are easier to handle.
 */
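/*
 * Illustrative use (added sketch, not part of the original source): read a
 * single block synchronously, letting this helper allocate the buffer_head.
 * "blkno" is a placeholder for whatever block number the caller computed.
 *
 *        struct buffer_head *bh = NULL;
 *        int status;
 *
 *        status = ocfs2_read_blocks_sync(osb, blkno, 1, &bh);
 *        if (!status) {
 *                ... use bh->b_data ...
 *                brelse(bh);
 *        }
 */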
int ocfs2_read_blocks_sync(struct ocfs2_super *osb, u64 block,
                           unsigned int nr, struct buffer_head *bhs[])
{
        int status = 0;
        unsigned int i;
        struct buffer_head *bh;
        int new_bh = 0;

        trace_ocfs2_read_blocks_sync((unsigned long long)block, nr);

        if (!nr)
                goto bail;

        /* Don't put a buffer head and re-assign it to NULL if it was
         * allocated outside, since the caller can't be aware of this
         * alteration!
         */
        new_bh = (bhs[0] == NULL);

        for (i = 0 ; i < nr ; i++) {
                if (bhs[i] == NULL) {
                        bhs[i] = sb_getblk(osb->sb, block++);
                        if (bhs[i] == NULL) {
                                status = -ENOMEM;
                                mlog_errno(status);
                                break;
                        }
                }
                bh = bhs[i];

                if (buffer_jbd(bh)) {
                        trace_ocfs2_read_blocks_sync_jbd(
                                        (unsigned long long)bh->b_blocknr);
                        continue;
                }

                if (buffer_dirty(bh)) {
                        /* This should probably be a BUG, or
                         * at least return an error. */
                        mlog(ML_ERROR,
                             "trying to sync read a dirty "
                             "buffer! (blocknr = %llu), skipping\n",
                             (unsigned long long)bh->b_blocknr);
                        continue;
                }

                lock_buffer(bh);
                if (buffer_jbd(bh)) {
#ifdef CATCH_BH_JBD_RACES
                        mlog(ML_ERROR,
                             "block %llu had the JBD bit set "
                             "while I was in lock_buffer!",
                             (unsigned long long)bh->b_blocknr);
                        BUG();
#else
                        unlock_buffer(bh);
                        continue;
#endif
                }

                get_bh(bh); /* for end_buffer_read_sync() */
                bh->b_end_io = end_buffer_read_sync;
                submit_bh(REQ_OP_READ, 0, bh);
        }

read_failure:
        for (i = nr; i > 0; i--) {
                bh = bhs[i - 1];

                if (unlikely(status)) {
                        if (new_bh && bh) {
                                /* If a middle bh fails, let the previous bh
                                 * finish its read and then put it to
                                 * avoid a bh leak
                                 */
                                if (!buffer_jbd(bh))
                                        wait_on_buffer(bh);
                                put_bh(bh);
                                bhs[i - 1] = NULL;
                        } else if (bh && buffer_uptodate(bh)) {
                                clear_buffer_uptodate(bh);
                        }
                        continue;
                }

                /* No need to wait on the buffer if it's managed by JBD. */
                if (!buffer_jbd(bh))
                        wait_on_buffer(bh);

                if (!buffer_uptodate(bh)) {
                        /* Status won't be cleared from here on out,
                         * so we can safely record this and loop back
                         * to cleanup the other buffers. */
                        status = -EIO;
                        goto read_failure;
                }
        }

bail:
        return status;
}

/* Caller must provide a bhs[] whose entries are either all NULL or all
 * non-NULL, so that read failures are easier to handle.
 */
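/*
 * Illustrative use (added sketch, not part of the original source): read one
 * cached metadata block and have it checked on its first read from disk.
 * "blkno" and "my_validate_fn" are placeholders for the caller's block
 * number and validation callback.
 *
 *        struct buffer_head *bh = NULL;
 *        int status;
 *
 *        status = ocfs2_read_blocks(ci, blkno, 1, &bh, 0, my_validate_fn);
 *        if (!status) {
 *                ... use bh->b_data ...
 *                brelse(bh);
 *        }
 */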
int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr,
                      struct buffer_head *bhs[], int flags,
                      int (*validate)(struct super_block *sb,
                                      struct buffer_head *bh))
{
        int status = 0;
        int i, ignore_cache = 0;
        struct buffer_head *bh;
        struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
        int new_bh = 0;

        trace_ocfs2_read_blocks_begin(ci, (unsigned long long)block, nr, flags);

        BUG_ON(!ci);
        BUG_ON((flags & OCFS2_BH_READAHEAD) &&
               (flags & OCFS2_BH_IGNORE_CACHE));

        if (bhs == NULL) {
                status = -EINVAL;
                mlog_errno(status);
                goto bail;
        }

        if (nr < 0) {
                mlog(ML_ERROR, "asked to read %d blocks!\n", nr);
                status = -EINVAL;
                mlog_errno(status);
                goto bail;
        }

        if (nr == 0) {
                status = 0;
                goto bail;
        }

        /* Don't put a buffer head and re-assign it to NULL if it was
         * allocated outside, since the caller can't be aware of this
         * alteration!
         */
        new_bh = (bhs[0] == NULL);

        ocfs2_metadata_cache_io_lock(ci);
        for (i = 0 ; i < nr ; i++) {
                if (bhs[i] == NULL) {
                        bhs[i] = sb_getblk(sb, block++);
                        if (bhs[i] == NULL) {
                                ocfs2_metadata_cache_io_unlock(ci);
                                status = -ENOMEM;
                                mlog_errno(status);
                                /* Don't forget to put previous bh! */
                                break;
                        }
                }
                bh = bhs[i];
                ignore_cache = (flags & OCFS2_BH_IGNORE_CACHE);

                /* There are three read-ahead cases here which we need to
                 * be concerned with. All three assume a buffer has
                 * previously been submitted with OCFS2_BH_READAHEAD
                 * and it hasn't yet completed I/O.
                 *
                 * 1) The current request is sync to disk. This rarely
                 *    happens these days, and never when performance
                 *    matters - the code can just wait on the buffer
                 *    lock and re-submit.
                 *
                 * 2) The current request is cached, but not
                 *    readahead. ocfs2_buffer_uptodate() will return
                 *    false anyway, so we'll wind up waiting on the
                 *    buffer lock to do I/O. We re-check the request
                 *    after getting the lock to avoid a re-submit.
                 *
                 * 3) The current request is readahead (and so must
                 *    also be a caching one). We short circuit if the
                 *    buffer is locked (under I/O) and if it's in the
                 *    uptodate cache. The re-check from #2 catches the
                 *    case that the previous read-ahead completes just
                 *    before our is-it-in-flight check.
                 */

                if (!ignore_cache && !ocfs2_buffer_uptodate(ci, bh)) {
                        trace_ocfs2_read_blocks_from_disk(
                             (unsigned long long)bh->b_blocknr,
                             (unsigned long long)ocfs2_metadata_cache_owner(ci));
                        /* We're using ignore_cache here to say
                         * "go to disk" */
                        ignore_cache = 1;
                }

                trace_ocfs2_read_blocks_bh((unsigned long long)bh->b_blocknr,
                        ignore_cache, buffer_jbd(bh), buffer_dirty(bh));

                if (buffer_jbd(bh)) {
                        continue;
                }

                if (ignore_cache) {
                        if (buffer_dirty(bh)) {
                                /* This should probably be a BUG, or
                                 * at least return an error. */
                                continue;
                        }

                        /* A read-ahead request was made - if the
                         * buffer is already under read-ahead from a
                         * previously submitted request then we are
                         * done here. */
                        if ((flags & OCFS2_BH_READAHEAD)
                            && ocfs2_buffer_read_ahead(ci, bh))
                                continue;

                        lock_buffer(bh);
                        if (buffer_jbd(bh)) {
#ifdef CATCH_BH_JBD_RACES
                                mlog(ML_ERROR, "block %llu had the JBD bit set "
                                               "while I was in lock_buffer!",
                                     (unsigned long long)bh->b_blocknr);
                                BUG();
#else
                                unlock_buffer(bh);
                                continue;
#endif
                        }

                        /* Re-check ocfs2_buffer_uptodate() as a
                         * previously read-ahead buffer may have
                         * completed I/O while we were waiting for the
                         * buffer lock. */
                        if (!(flags & OCFS2_BH_IGNORE_CACHE)
                            && !(flags & OCFS2_BH_READAHEAD)
                            && ocfs2_buffer_uptodate(ci, bh)) {
                                unlock_buffer(bh);
                                continue;
                        }

                        get_bh(bh); /* for end_buffer_read_sync() */
                        if (validate)
                                set_buffer_needs_validate(bh);
                        bh->b_end_io = end_buffer_read_sync;
                        submit_bh(REQ_OP_READ, 0, bh);
                        continue;
                }
        }

read_failure:
        for (i = (nr - 1); i >= 0; i--) {
                bh = bhs[i];

                if (!(flags & OCFS2_BH_READAHEAD)) {
                        if (unlikely(status)) {
                                /* Clear the buffers on error, including
                                 * those that were read successfully
                                 */
                                if (new_bh && bh) {
                                        /* If a middle bh fails, let the
                                         * previous bh finish its read and
                                         * then put it to avoid a bh leak
                                         */
                                        if (!buffer_jbd(bh))
                                                wait_on_buffer(bh);
                                        put_bh(bh);
                                        bhs[i] = NULL;
                                } else if (bh && buffer_uptodate(bh)) {
                                        clear_buffer_uptodate(bh);
                                }
                                continue;
                        }
                        /* We know this can't have changed as we hold the
                         * owner sem. Avoid doing any work on the bh if the
                         * journal has it. */
                        if (!buffer_jbd(bh))
                                wait_on_buffer(bh);

                        if (!buffer_uptodate(bh)) {
                                /* Status won't be cleared from here on out,
                                 * so we can safely record this and loop back
                                 * to cleanup the other buffers. Don't need to
                                 * remove the clustered uptodate information
                                 * for this bh as it's not marked locally
                                 * uptodate. */
                                status = -EIO;
                                clear_buffer_needs_validate(bh);
                                goto read_failure;
                        }

                        if (buffer_needs_validate(bh)) {
                                /* We never set NeedsValidate if the
                                 * buffer was held by the journal, so
                                 * that better not have changed */
                                BUG_ON(buffer_jbd(bh));
                                clear_buffer_needs_validate(bh);
                                status = validate(sb, bh);
                                if (status)
                                        goto read_failure;
                        }
                }

                /* Always set the buffer in the cache, even if it was
                 * a forced read, or read-ahead which hasn't yet
                 * completed. */
                ocfs2_set_buffer_uptodate(ci, bh);
        }
        ocfs2_metadata_cache_io_unlock(ci);

        trace_ocfs2_read_blocks_end((unsigned long long)block, nr,
                                    flags, ignore_cache);

bail:

        return status;
}

/* Check whether the blkno is the super block or one of the backups. */
static void ocfs2_check_super_or_backup(struct super_block *sb,
                                        sector_t blkno)
{
        int i;
        u64 backup_blkno;

        if (blkno == OCFS2_SUPER_BLOCK_BLKNO)
                return;

        for (i = 0; i < OCFS2_MAX_BACKUP_SUPERBLOCKS; i++) {
                backup_blkno = ocfs2_backup_super_blkno(sb, i);
                if (backup_blkno == blkno)
                        return;
        }

        BUG();
}

/*
 * Writing the super block and its backups doesn't need to collaborate with
 * the journal, so we don't need to lock ip_io_mutex and ci doesn't need to
 * be passed into this function.
 */
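/*
 * Illustrative call (added sketch, not part of the original source); "bh" is
 * assumed to already contain the updated super block or backup super block:
 *
 *        ret = ocfs2_write_super_or_backup(osb, bh);
 *        if (ret)
 *                mlog_errno(ret);
 */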
int ocfs2_write_super_or_backup(struct ocfs2_super *osb,
                                struct buffer_head *bh)
{
        int ret = 0;
        struct ocfs2_dinode *di = (struct ocfs2_dinode *)bh->b_data;

        BUG_ON(buffer_jbd(bh));
        ocfs2_check_super_or_backup(osb->sb, bh->b_blocknr);

        if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb)) {
                ret = -EROFS;
                mlog_errno(ret);
                goto out;
        }

        lock_buffer(bh);
        set_buffer_uptodate(bh);

        /* remove from dirty list before I/O. */
        clear_buffer_dirty(bh);

        get_bh(bh); /* for end_buffer_write_sync() */
        bh->b_end_io = end_buffer_write_sync;
        ocfs2_compute_meta_ecc(osb->sb, bh->b_data, &di->i_check);
        submit_bh(REQ_OP_WRITE, 0, bh);

        wait_on_buffer(bh);

        if (!buffer_uptodate(bh)) {
                ret = -EIO;
                mlog_errno(ret);
        }

out:
        return ret;
}