1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
4 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
7 #include <linux/spinlock.h>
8 #include <linux/completion.h>
9 #include <linux/buffer_head.h>
10 #include <linux/blkdev.h>
11 #include <linux/gfs2_ondisk.h>
12 #include <linux/crc32.h>
13 #include <linux/iomap.h>
14 #include <linux/ktime.h>
30 #include "trace_gfs2.h"
32 /* This doesn't need to be that large as the maximum number of 64-bit pointers
33  * in a 4k block is 512, so __u16 is fine for that. It saves stack space to keep it small. */
37 struct buffer_head *mp_bh[GFS2_MAX_META_HEIGHT];
38 __u16 mp_list[GFS2_MAX_META_HEIGHT];
39 int mp_fheight; /* find_metapath height */
40 int mp_aheight; /* actual height (lookup height) */
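/*
 * Illustrative note (not from the original source): for the height 3 lookup
 * worked through in the find_metapath() comment below, mp_fheight would be 3,
 * mp_list would hold { 0, 48, 165 }, and mp_bh[] would hold the dinode buffer
 * plus the two indirect-block buffers read on the way down.
 */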
43 static int punch_hole(struct gfs2_inode *ip, u64 offset, u64 length);
46 * gfs2_unstuffer_page - unstuff a stuffed inode into a block cached by a page
48 * @dibh: the dinode buffer
49 * @block: the block number that was allocated
50 * @page: The (optional) page. This is looked up if @page is NULL
55 static int gfs2_unstuffer_page(struct gfs2_inode *ip, struct buffer_head *dibh,
56 u64 block, struct page *page)
58 struct inode *inode = &ip->i_inode;
61 if (!page || page->index) {
62 page = find_or_create_page(inode->i_mapping, 0, GFP_NOFS);
68 if (!PageUptodate(page)) {
69 void *kaddr = kmap(page);
70 u64 dsize = i_size_read(inode);
72 if (dsize > gfs2_max_stuffed_size(ip))
73 dsize = gfs2_max_stuffed_size(ip);
75 memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize);
76 memset(kaddr + dsize, 0, PAGE_SIZE - dsize);
79 SetPageUptodate(page);
82 if (gfs2_is_jdata(ip)) {
83 struct buffer_head *bh;
85 if (!page_has_buffers(page))
86 create_empty_buffers(page, BIT(inode->i_blkbits),
89 bh = page_buffers(page);
90 if (!buffer_mapped(bh))
91 map_bh(bh, inode->i_sb, block);
93 set_buffer_uptodate(bh);
94 gfs2_trans_add_data(ip->i_gl, bh);
97 gfs2_ordered_add_inode(ip);
109 * gfs2_unstuff_dinode - Unstuff a dinode when the data has grown too big
110 * @ip: The GFS2 inode to unstuff
111 * @page: The (optional) page. This is looked up if the @page is NULL
113 * This routine unstuffs a dinode and returns it to a "normal" state such
114 * that the height can be grown in the traditional way.
119 int gfs2_unstuff_dinode(struct gfs2_inode *ip, struct page *page)
121 struct buffer_head *bh, *dibh;
122 struct gfs2_dinode *di;
124 int isdir = gfs2_is_dir(ip);
127 down_write(&ip->i_rw_mutex);
129 error = gfs2_meta_inode_buffer(ip, &dibh);
133 if (i_size_read(&ip->i_inode)) {
134 /* Get a free block, fill it with the stuffed data,
135 and write it out to disk */
138 error = gfs2_alloc_blocks(ip, &block, &n, 0, NULL);
142 gfs2_trans_remove_revoke(GFS2_SB(&ip->i_inode), block, 1);
143 error = gfs2_dir_get_new_buffer(ip, block, &bh);
146 gfs2_buffer_copy_tail(bh, sizeof(struct gfs2_meta_header),
147 dibh, sizeof(struct gfs2_dinode));
150 error = gfs2_unstuffer_page(ip, dibh, block, page);
156 /* Set up the pointer to the new block */
158 gfs2_trans_add_meta(ip->i_gl, dibh);
159 di = (struct gfs2_dinode *)dibh->b_data;
160 gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode));
162 if (i_size_read(&ip->i_inode)) {
163 *(__be64 *)(di + 1) = cpu_to_be64(block);
164 gfs2_add_inode_blocks(&ip->i_inode, 1);
165 di->di_blocks = cpu_to_be64(gfs2_get_inode_blocks(&ip->i_inode));
169 di->di_height = cpu_to_be16(1);
174 up_write(&ip->i_rw_mutex);
180 * find_metapath - Find path through the metadata tree
181 * @sdp: The superblock
182 * @block: The disk block to look up
183 * @mp: The metapath to return the result in
184 * @height: The pre-calculated height of the metadata tree
186 * This routine returns a struct metapath structure that defines a path
187 * through the metadata of inode "ip" to get to block "block".
190 * Given: "ip" is a height 3 file, "offset" is 101342453, and this is a
191 * filesystem with a blocksize of 4096.
193 * find_metapath() would return a struct metapath structure set to:
194 * mp_fheight = 3, mp_list[0] = 0, mp_list[1] = 48, and mp_list[2] = 165.
196 * That means that in order to get to the block containing the byte at
197 * offset 101342453, we would load the indirect block pointed to by pointer
198 * 0 in the dinode. We would then load the indirect block pointed to by
199 * pointer 48 in that indirect block. We would then load the data block
200 * pointed to by pointer 165 in that indirect block.
202  * (ASCII diagram omitted: it shows the dinode -> indirect block ->
203  *  indirect block -> data block chain for the example above, ending in
204  *  the data block containing offset 101342453.)
205  */
238 static void find_metapath(const struct gfs2_sbd *sdp, u64 block,
239 struct metapath *mp, unsigned int height)
243 mp->mp_fheight = height;
244 for (i = height; i--;)
245 mp->mp_list[i] = do_div(block, sdp->sd_inptrs);
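/*
 * Worked example (a sketch, not part of GFS2): how the mp_list values in the
 * comment above are derived. The hypothetical helper below assumes the
 * simplified figure of 512 pointers per 4k indirect block used by that
 * example; the kernel code divides by sdp->sd_inptrs instead.
 */
static inline void example_find_metapath(unsigned int list[3])
{
	unsigned long long block = 101342453ULL / 4096;	/* logical block 24741 */
	int i;

	for (i = 3; i--;) {
		list[i] = block % 512;	/* 165, then 48, then 0 */
		block /= 512;
	}
	/* Result: list[0] = 0, list[1] = 48, list[2] = 165, matching above. */
}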
248 static inline unsigned int metapath_branch_start(const struct metapath *mp)
250 if (mp->mp_list[0] == 0)
256 * metaptr1 - Return the first possible metadata pointer in a metapath buffer
257 * @height: The metadata height (0 = dinode)
260 static inline __be64 *metaptr1(unsigned int height, const struct metapath *mp)
262 struct buffer_head *bh = mp->mp_bh[height];
264 return ((__be64 *)(bh->b_data + sizeof(struct gfs2_dinode)));
265 return ((__be64 *)(bh->b_data + sizeof(struct gfs2_meta_header)));
269 * metapointer - Return pointer to start of metadata in a buffer
270 * @height: The metadata height (0 = dinode)
273 * Return a pointer to the block number of the next height of the metadata
274 * tree given a buffer containing the pointer to the current height of the
278 static inline __be64 *metapointer(unsigned int height, const struct metapath *mp)
280 __be64 *p = metaptr1(height, mp);
281 return p + mp->mp_list[height];
284 static inline const __be64 *metaend(unsigned int height, const struct metapath *mp)
286 const struct buffer_head *bh = mp->mp_bh[height];
287 return (const __be64 *)(bh->b_data + bh->b_size);
290 static void clone_metapath(struct metapath *clone, struct metapath *mp)
295 for (hgt = 0; hgt < mp->mp_aheight; hgt++)
296 get_bh(clone->mp_bh[hgt]);
299 static void gfs2_metapath_ra(struct gfs2_glock *gl, __be64 *start, __be64 *end)
303 for (t = start; t < end; t++) {
304 struct buffer_head *rabh;
309 rabh = gfs2_getbuf(gl, be64_to_cpu(*t), CREATE);
310 if (trylock_buffer(rabh)) {
311 if (!buffer_uptodate(rabh)) {
312 rabh->b_end_io = end_buffer_read_sync;
313 submit_bh(REQ_OP_READ,
314 REQ_RAHEAD | REQ_META | REQ_PRIO,
324 static int __fillup_metapath(struct gfs2_inode *ip, struct metapath *mp,
325 unsigned int x, unsigned int h)
328 __be64 *ptr = metapointer(x, mp);
329 u64 dblock = be64_to_cpu(*ptr);
334 ret = gfs2_meta_indirect_buffer(ip, x + 1, dblock, &mp->mp_bh[x + 1]);
338 mp->mp_aheight = x + 1;
343 * lookup_metapath - Walk the metadata tree to a specific point
347 * Assumes that the inode's buffer has already been looked up and
348 * hooked onto mp->mp_bh[0] and that the metapath has been initialised
349 * by find_metapath().
351 * If this function encounters part of the tree which has not been
352 * allocated, it returns the current height of the tree at the point
353 * at which it found the unallocated block. Blocks which are found are
354 * added to the mp->mp_bh[] list.
359 static int lookup_metapath(struct gfs2_inode *ip, struct metapath *mp)
361 return __fillup_metapath(ip, mp, 0, ip->i_height - 1);
365 * fillup_metapath - fill up buffers for the metadata path to a specific height
368 * @h: The height to which it should be mapped
370 * Similar to lookup_metapath, but does lookups for a range of heights
372 * Returns: error or the number of buffers filled
375 static int fillup_metapath(struct gfs2_inode *ip, struct metapath *mp, int h)
381 /* find the first buffer we need to look up. */
382 for (x = h - 1; x > 0; x--) {
387 ret = __fillup_metapath(ip, mp, x, h);
390 return mp->mp_aheight - x - 1;
393 static sector_t metapath_to_block(struct gfs2_sbd *sdp, struct metapath *mp)
395 sector_t factor = 1, block = 0;
398 for (hgt = mp->mp_fheight - 1; hgt >= 0; hgt--) {
399 if (hgt < mp->mp_aheight)
400 block += mp->mp_list[hgt] * factor;
401 factor *= sdp->sd_inptrs;
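/*
 * Sketch (illustrative, not kernel code): metapath_to_block() is the inverse
 * of find_metapath(). With the simplified 512-pointers-per-block figure from
 * the example further up, { 0, 48, 165 } maps back to logical block
 * 0 * 512 * 512 + 48 * 512 + 165 = 24741.
 */
static inline unsigned long long example_metapath_to_block(const unsigned int list[3])
{
	unsigned long long factor = 1, block = 0;
	int hgt;

	for (hgt = 2; hgt >= 0; hgt--) {
		block += (unsigned long long)list[hgt] * factor;
		factor *= 512;
	}
	return block;
}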
406 static void release_metapath(struct metapath *mp)
410 for (i = 0; i < GFS2_MAX_META_HEIGHT; i++) {
411 if (mp->mp_bh[i] == NULL)
413 brelse(mp->mp_bh[i]);
419 * gfs2_extent_length - Returns length of an extent of blocks
420 * @bh: The metadata block
421 * @ptr: Current position in @bh
422 * @limit: Max extent length to return
423 * @eob: Set to 1 if we hit "end of block"
425 * Returns: The length of the extent (minimum of one block)
428 static inline unsigned int gfs2_extent_length(struct buffer_head *bh, __be64 *ptr, size_t limit, int *eob)
430 const __be64 *end = (__be64 *)(bh->b_data + bh->b_size);
431 const __be64 *first = ptr;
432 u64 d = be64_to_cpu(*ptr);
440 } while (be64_to_cpu(*ptr) == d);
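/*
 * Sketch (illustrative, not the function above): the loop in
 * gfs2_extent_length() counts how many consecutive pointers refer to
 * consecutive disk blocks, stopping at @limit or at the end of the buffer.
 * The simplified idea, with host-endian values and the end-of-block flag
 * omitted, is:
 */
static inline unsigned int example_extent_length(const unsigned long long *ptr,
						 unsigned int nptrs,
						 unsigned int limit)
{
	unsigned long long d = ptr[0];
	unsigned int len = 1;

	while (len < nptrs && len < limit && ptr[len] == ++d)
		len++;
	return len;	/* always at least one block, as documented above */
}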
446 enum walker_status { WALK_STOP, WALK_FOLLOW, WALK_CONTINUE };
449 * gfs2_metadata_walker - walk an indirect block
450 * @mp: Metapath to indirect block
451 * @ptrs: Number of pointers to look at
453 * When returning WALK_FOLLOW, the walker must update @mp to point at the right
454 * indirect block to follow.
456 typedef enum walker_status (*gfs2_metadata_walker)(struct metapath *mp,
460 * gfs2_walk_metadata - walk a tree of indirect blocks
462 * @mp: Starting point of walk
463 * @max_len: Maximum number of blocks to walk
464 * @walker: Called during the walk
466 * Returns 1 if the walk was stopped by @walker, 0 if we went past @max_len or
467 * past the end of metadata, and a negative error code otherwise.
470 static int gfs2_walk_metadata(struct inode *inode, struct metapath *mp,
471 u64 max_len, gfs2_metadata_walker walker)
473 struct gfs2_inode *ip = GFS2_I(inode);
474 struct gfs2_sbd *sdp = GFS2_SB(inode);
480 * The walk starts in the lowest allocated indirect block, which may be
481 * before the position indicated by @mp. Adjust @max_len accordingly
482 * to avoid a short walk.
484 for (hgt = mp->mp_fheight - 1; hgt >= mp->mp_aheight; hgt--) {
485 max_len += mp->mp_list[hgt] * factor;
486 mp->mp_list[hgt] = 0;
487 factor *= sdp->sd_inptrs;
491 u16 start = mp->mp_list[hgt];
492 enum walker_status status;
496 /* Walk indirect block. */
497 ptrs = (hgt >= 1 ? sdp->sd_inptrs : sdp->sd_diptrs) - start;
500 ptrs = DIV_ROUND_UP_ULL(max_len, factor);
501 status = walker(mp, ptrs);
506 BUG_ON(mp->mp_aheight == mp->mp_fheight);
507 ptrs = mp->mp_list[hgt] - start;
516 if (status == WALK_FOLLOW)
517 goto fill_up_metapath;
520 /* Decrease height of metapath. */
521 brelse(mp->mp_bh[hgt]);
522 mp->mp_bh[hgt] = NULL;
523 mp->mp_list[hgt] = 0;
527 factor *= sdp->sd_inptrs;
529 /* Advance in metadata tree. */
530 (mp->mp_list[hgt])++;
532 if (mp->mp_list[hgt] >= sdp->sd_inptrs)
535 if (mp->mp_list[hgt] >= sdp->sd_diptrs)
540 /* Increase height of metapath. */
541 ret = fillup_metapath(ip, mp, ip->i_height - 1);
546 do_div(factor, sdp->sd_inptrs);
547 mp->mp_aheight = hgt + 1;
552 static enum walker_status gfs2_hole_walker(struct metapath *mp,
555 const __be64 *start, *ptr, *end;
558 hgt = mp->mp_aheight - 1;
559 start = metapointer(hgt, mp);
562 for (ptr = start; ptr < end; ptr++) {
564 mp->mp_list[hgt] += ptr - start;
565 if (mp->mp_aheight == mp->mp_fheight)
570 return WALK_CONTINUE;
574 * gfs2_hole_size - figure out the size of a hole
576 * @lblock: The logical starting block number
577 * @len: How far to look (in blocks)
578 * @mp: The metapath at lblock
579 * @iomap: The iomap to store the hole size in
581 * This function modifies @mp.
583 * Returns: errno on error
585 static int gfs2_hole_size(struct inode *inode, sector_t lblock, u64 len,
586 struct metapath *mp, struct iomap *iomap)
588 struct metapath clone;
592 clone_metapath(&clone, mp);
593 ret = gfs2_walk_metadata(inode, &clone, len, gfs2_hole_walker);
598 hole_size = metapath_to_block(GFS2_SB(inode), &clone) - lblock;
601 iomap->length = hole_size << inode->i_blkbits;
605 release_metapath(&clone);
609 static inline __be64 *gfs2_indirect_init(struct metapath *mp,
610 struct gfs2_glock *gl, unsigned int i,
611 unsigned offset, u64 bn)
613 __be64 *ptr = (__be64 *)(mp->mp_bh[i - 1]->b_data +
614 ((i > 1) ? sizeof(struct gfs2_meta_header) :
615 sizeof(struct gfs2_dinode)));
617 BUG_ON(mp->mp_bh[i] != NULL);
618 mp->mp_bh[i] = gfs2_meta_new(gl, bn);
619 gfs2_trans_add_meta(gl, mp->mp_bh[i]);
620 gfs2_metatype_set(mp->mp_bh[i], GFS2_METATYPE_IN, GFS2_FORMAT_IN);
621 gfs2_buffer_clear_tail(mp->mp_bh[i], sizeof(struct gfs2_meta_header));
623 *ptr = cpu_to_be64(bn);
629 ALLOC_GROW_DEPTH = 1,
630 ALLOC_GROW_HEIGHT = 2,
631 /* ALLOC_UNSTUFF = 3, TBD and rather complicated */
635 * gfs2_iomap_alloc - Build a metadata tree of the requested height
636 * @inode: The GFS2 inode
637 * @iomap: The iomap structure
638 * @mp: The metapath, with proper height information calculated
640 * In this routine we may have to alloc:
641 * i) Indirect blocks to grow the metadata tree height
642 *  ii) Indirect blocks to fill in lower part of the metadata tree
643 * iii) Data blocks
645 * This function is called after gfs2_iomap_get, which works out the
646 * total number of blocks which we need via gfs2_alloc_size.
648 * We then do the actual allocation asking for an extent at a time (if
649 * enough contiguous free blocks are available, there will only be one
650 * allocation request per call) and use the state machine to initialise
651 * the blocks in order.
653 * Right now, this function will allocate at most one indirect block
654 * worth of data -- with a default block size of 4K, that's slightly
655 * less than 2M. If this limitation is ever removed to allow huge
656 * allocations, we would probably still want to limit the iomap size we
657 * return to avoid stalling other tasks during huge writes; the next
658 * iomap iteration would then find the blocks already allocated.
660 * Returns: errno on error
663 static int gfs2_iomap_alloc(struct inode *inode, struct iomap *iomap,
666 struct gfs2_inode *ip = GFS2_I(inode);
667 struct gfs2_sbd *sdp = GFS2_SB(inode);
668 struct buffer_head *dibh = mp->mp_bh[0];
670 unsigned n, i, blks, alloced = 0, iblks = 0, branch_start = 0;
671 size_t dblks = iomap->length >> inode->i_blkbits;
672 const unsigned end_of_metadata = mp->mp_fheight - 1;
674 enum alloc_state state;
678 BUG_ON(mp->mp_aheight < 1);
679 BUG_ON(dibh == NULL);
682 gfs2_trans_add_meta(ip->i_gl, dibh);
684 down_write(&ip->i_rw_mutex);
686 if (mp->mp_fheight == mp->mp_aheight) {
687 /* Bottom indirect block exists */
690 /* Need to allocate indirect blocks */
691 if (mp->mp_fheight == ip->i_height) {
692 /* Writing into existing tree, extend tree down */
693 iblks = mp->mp_fheight - mp->mp_aheight;
694 state = ALLOC_GROW_DEPTH;
696 /* Building up tree height */
697 state = ALLOC_GROW_HEIGHT;
698 iblks = mp->mp_fheight - ip->i_height;
699 branch_start = metapath_branch_start(mp);
700 iblks += (mp->mp_fheight - branch_start);
704 /* start of the second part of the function (state machine) */
706 blks = dblks + iblks;
710 ret = gfs2_alloc_blocks(ip, &bn, &n, 0, NULL);
714 if (state != ALLOC_DATA || gfs2_is_jdata(ip))
715 gfs2_trans_remove_revoke(sdp, bn, n);
717 /* Growing height of tree */
718 case ALLOC_GROW_HEIGHT:
720 ptr = (__be64 *)(dibh->b_data +
721 sizeof(struct gfs2_dinode));
724 for (; i - 1 < mp->mp_fheight - ip->i_height && n > 0;
726 gfs2_indirect_init(mp, ip->i_gl, i, 0, bn++);
727 if (i - 1 == mp->mp_fheight - ip->i_height) {
729 gfs2_buffer_copy_tail(mp->mp_bh[i],
730 sizeof(struct gfs2_meta_header),
731 dibh, sizeof(struct gfs2_dinode));
732 gfs2_buffer_clear_tail(dibh,
733 sizeof(struct gfs2_dinode) +
735 ptr = (__be64 *)(mp->mp_bh[i]->b_data +
736 sizeof(struct gfs2_meta_header));
738 state = ALLOC_GROW_DEPTH;
739 for (i = branch_start; i < mp->mp_fheight; i++) {
740 if (mp->mp_bh[i] == NULL)
742 brelse(mp->mp_bh[i]);
749 fallthrough; /* To branching from existing tree */
750 case ALLOC_GROW_DEPTH:
751 if (i > 1 && i < mp->mp_fheight)
752 gfs2_trans_add_meta(ip->i_gl, mp->mp_bh[i-1]);
753 for (; i < mp->mp_fheight && n > 0; i++, n--)
754 gfs2_indirect_init(mp, ip->i_gl, i,
755 mp->mp_list[i-1], bn++);
756 if (i == mp->mp_fheight)
760 fallthrough; /* To tree complete, adding data blocks */
763 BUG_ON(mp->mp_bh[end_of_metadata] == NULL);
764 gfs2_trans_add_meta(ip->i_gl, mp->mp_bh[end_of_metadata]);
766 ptr = metapointer(end_of_metadata, mp);
767 iomap->addr = bn << inode->i_blkbits;
768 iomap->flags |= IOMAP_F_MERGED | IOMAP_F_NEW;
770 *ptr++ = cpu_to_be64(bn++);
773 } while (iomap->addr == IOMAP_NULL_ADDR);
775 iomap->type = IOMAP_MAPPED;
776 iomap->length = (u64)dblks << inode->i_blkbits;
777 ip->i_height = mp->mp_fheight;
778 gfs2_add_inode_blocks(&ip->i_inode, alloced);
779 gfs2_dinode_out(ip, dibh->b_data);
781 up_write(&ip->i_rw_mutex);
785 #define IOMAP_F_GFS2_BOUNDARY IOMAP_F_PRIVATE
788 * gfs2_alloc_size - Compute the maximum allocation size
791 * @size: Requested size in blocks
793 * Compute the maximum size of the next allocation at @mp.
795 * Returns: size in blocks
797 static u64 gfs2_alloc_size(struct inode *inode, struct metapath *mp, u64 size)
799 struct gfs2_inode *ip = GFS2_I(inode);
800 struct gfs2_sbd *sdp = GFS2_SB(inode);
801 const __be64 *first, *ptr, *end;
804 * For writes to stuffed files, this function is called twice via
805 * gfs2_iomap_get, before and after unstuffing. The size we return the
806 * first time needs to be large enough to get the reservation and
807 * allocation sizes right. The size we return the second time must
808 * be exact or else gfs2_iomap_alloc won't do the right thing.
811 if (gfs2_is_stuffed(ip) || mp->mp_fheight != mp->mp_aheight) {
812 unsigned int maxsize = mp->mp_fheight > 1 ?
813 sdp->sd_inptrs : sdp->sd_diptrs;
814 maxsize -= mp->mp_list[mp->mp_fheight - 1];
820 first = metapointer(ip->i_height - 1, mp);
821 end = metaend(ip->i_height - 1, mp);
822 if (end - first > size)
824 for (ptr = first; ptr < end; ptr++) {
832 * gfs2_iomap_get - Map blocks from an inode to disk blocks
834 * @pos: Starting position in bytes
835 * @length: Length to map, in bytes
836 * @flags: iomap flags
837 * @iomap: The iomap structure
842 static int gfs2_iomap_get(struct inode *inode, loff_t pos, loff_t length,
843 unsigned flags, struct iomap *iomap,
846 struct gfs2_inode *ip = GFS2_I(inode);
847 struct gfs2_sbd *sdp = GFS2_SB(inode);
848 loff_t size = i_size_read(inode);
851 sector_t lblock_stop;
855 struct buffer_head *dibh = NULL, *bh;
861 down_read(&ip->i_rw_mutex);
863 ret = gfs2_meta_inode_buffer(ip, &dibh);
868 if (gfs2_is_stuffed(ip)) {
869 if (flags & IOMAP_WRITE) {
870 loff_t max_size = gfs2_max_stuffed_size(ip);
872 if (pos + length > max_size)
874 iomap->length = max_size;
877 if (flags & IOMAP_REPORT) {
882 iomap->length = length;
886 iomap->length = size;
888 iomap->addr = (ip->i_no_addr << inode->i_blkbits) +
889 sizeof(struct gfs2_dinode);
890 iomap->type = IOMAP_INLINE;
891 iomap->inline_data = dibh->b_data + sizeof(struct gfs2_dinode);
896 lblock = pos >> inode->i_blkbits;
897 iomap->offset = lblock << inode->i_blkbits;
898 lblock_stop = (pos + length - 1) >> inode->i_blkbits;
899 len = lblock_stop - lblock + 1;
900 iomap->length = len << inode->i_blkbits;
902 height = ip->i_height;
903 while ((lblock + 1) * sdp->sd_sb.sb_bsize > sdp->sd_heightsize[height])
905 find_metapath(sdp, lblock, mp, height);
906 if (height > ip->i_height || gfs2_is_stuffed(ip))
909 ret = lookup_metapath(ip, mp);
913 if (mp->mp_aheight != ip->i_height)
916 ptr = metapointer(ip->i_height - 1, mp);
920 bh = mp->mp_bh[ip->i_height - 1];
921 len = gfs2_extent_length(bh, ptr, len, &eob);
923 iomap->addr = be64_to_cpu(*ptr) << inode->i_blkbits;
924 iomap->length = len << inode->i_blkbits;
925 iomap->type = IOMAP_MAPPED;
926 iomap->flags |= IOMAP_F_MERGED;
928 iomap->flags |= IOMAP_F_GFS2_BOUNDARY;
931 iomap->bdev = inode->i_sb->s_bdev;
933 up_read(&ip->i_rw_mutex);
937 if (flags & IOMAP_REPORT) {
940 else if (height == ip->i_height)
941 ret = gfs2_hole_size(inode, lblock, len, mp, iomap);
943 iomap->length = size - pos;
944 } else if (flags & IOMAP_WRITE) {
947 if (flags & IOMAP_DIRECT)
948 goto out; /* (see gfs2_file_direct_write) */
950 len = gfs2_alloc_size(inode, mp, len);
951 alloc_size = len << inode->i_blkbits;
952 if (alloc_size < iomap->length)
953 iomap->length = alloc_size;
955 if (pos < size && height == ip->i_height)
956 ret = gfs2_hole_size(inode, lblock, len, mp, iomap);
959 iomap->addr = IOMAP_NULL_ADDR;
960 iomap->type = IOMAP_HOLE;
965 * gfs2_lblk_to_dblk - convert logical block to disk block
966 * @inode: the inode of the file we're mapping
967 * @lblock: the block relative to the start of the file
968 * @dblock: the returned dblock, if no error
970 * This function maps a single block from a file logical block (relative to
971 * the start of the file) to a file system absolute block using iomap.
973 * Returns: the absolute file system block, or an error
975 int gfs2_lblk_to_dblk(struct inode *inode, u32 lblock, u64 *dblock)
977 struct iomap iomap = { };
978 struct metapath mp = { .mp_aheight = 1, };
979 loff_t pos = (loff_t)lblock << inode->i_blkbits;
982 ret = gfs2_iomap_get(inode, pos, i_blocksize(inode), 0, &iomap, &mp);
983 release_metapath(&mp);
985 *dblock = iomap.addr >> inode->i_blkbits;
990 static int gfs2_write_lock(struct inode *inode)
992 struct gfs2_inode *ip = GFS2_I(inode);
993 struct gfs2_sbd *sdp = GFS2_SB(inode);
996 gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &ip->i_gh);
997 error = gfs2_glock_nq(&ip->i_gh);
1000 if (&ip->i_inode == sdp->sd_rindex) {
1001 struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
1003 error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE,
1004 GL_NOCACHE, &m_ip->i_gh);
1011 gfs2_glock_dq(&ip->i_gh);
1013 gfs2_holder_uninit(&ip->i_gh);
1017 static void gfs2_write_unlock(struct inode *inode)
1019 struct gfs2_inode *ip = GFS2_I(inode);
1020 struct gfs2_sbd *sdp = GFS2_SB(inode);
1022 if (&ip->i_inode == sdp->sd_rindex) {
1023 struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
1025 gfs2_glock_dq_uninit(&m_ip->i_gh);
1027 gfs2_glock_dq_uninit(&ip->i_gh);
1030 static int gfs2_iomap_page_prepare(struct inode *inode, loff_t pos,
1031 unsigned len, struct iomap *iomap)
1033 unsigned int blockmask = i_blocksize(inode) - 1;
1034 struct gfs2_sbd *sdp = GFS2_SB(inode);
1035 unsigned int blocks;
1037 blocks = ((pos & blockmask) + len + blockmask) >> inode->i_blkbits;
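	/*
	 * Example (illustrative): with a 4k block size, pos == 4100 and
	 * len == 8000 give (4 + 8000 + 4095) >> 12 == 2, i.e. the write
	 * touches two filesystem blocks.
	 */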
1038 return gfs2_trans_begin(sdp, RES_DINODE + blocks, 0);
1041 static void gfs2_iomap_page_done(struct inode *inode, loff_t pos,
1042 unsigned copied, struct page *page,
1043 struct iomap *iomap)
1045 struct gfs2_trans *tr = current->journal_info;
1046 struct gfs2_inode *ip = GFS2_I(inode);
1047 struct gfs2_sbd *sdp = GFS2_SB(inode);
1049 if (page && !gfs2_is_stuffed(ip))
1050 gfs2_page_add_databufs(ip, page, offset_in_page(pos), copied);
1052 if (tr->tr_num_buf_new)
1053 __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
1055 gfs2_trans_end(sdp);
1058 static const struct iomap_page_ops gfs2_iomap_page_ops = {
1059 .page_prepare = gfs2_iomap_page_prepare,
1060 .page_done = gfs2_iomap_page_done,
1063 static int gfs2_iomap_begin_write(struct inode *inode, loff_t pos,
1064 loff_t length, unsigned flags,
1065 struct iomap *iomap,
1066 struct metapath *mp)
1068 struct gfs2_inode *ip = GFS2_I(inode);
1069 struct gfs2_sbd *sdp = GFS2_SB(inode);
1073 unstuff = gfs2_is_stuffed(ip) &&
1074 pos + length > gfs2_max_stuffed_size(ip);
1076 if (unstuff || iomap->type == IOMAP_HOLE) {
1077 unsigned int data_blocks, ind_blocks;
1078 struct gfs2_alloc_parms ap = {};
1079 unsigned int rblocks;
1080 struct gfs2_trans *tr;
1082 gfs2_write_calc_reserv(ip, iomap->length, &data_blocks,
1084 ap.target = data_blocks + ind_blocks;
1085 ret = gfs2_quota_lock_check(ip, &ap);
1089 ret = gfs2_inplace_reserve(ip, &ap);
1093 rblocks = RES_DINODE + ind_blocks;
1094 if (gfs2_is_jdata(ip))
1095 rblocks += data_blocks;
1096 if (ind_blocks || data_blocks)
1097 rblocks += RES_STATFS + RES_QUOTA;
1098 if (inode == sdp->sd_rindex)
1099 rblocks += 2 * RES_STATFS;
1100 rblocks += gfs2_rg_blocks(ip, data_blocks + ind_blocks);
1102 ret = gfs2_trans_begin(sdp, rblocks,
1103 iomap->length >> inode->i_blkbits);
1105 goto out_trans_fail;
1108 ret = gfs2_unstuff_dinode(ip, NULL);
1111 release_metapath(mp);
1112 ret = gfs2_iomap_get(inode, iomap->offset,
1113 iomap->length, flags, iomap, mp);
1118 if (iomap->type == IOMAP_HOLE) {
1119 ret = gfs2_iomap_alloc(inode, iomap, mp);
1121 gfs2_trans_end(sdp);
1122 gfs2_inplace_release(ip);
1123 punch_hole(ip, iomap->offset, iomap->length);
1128 tr = current->journal_info;
1129 if (tr->tr_num_buf_new)
1130 __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
1132 gfs2_trans_end(sdp);
1135 if (gfs2_is_stuffed(ip) || gfs2_is_jdata(ip))
1136 iomap->page_ops = &gfs2_iomap_page_ops;
1140 gfs2_trans_end(sdp);
1142 gfs2_inplace_release(ip);
1144 gfs2_quota_unlock(ip);
1148 static inline bool gfs2_iomap_need_write_lock(unsigned flags)
1150 return (flags & IOMAP_WRITE) && !(flags & IOMAP_DIRECT);
1153 static int gfs2_iomap_begin(struct inode *inode, loff_t pos, loff_t length,
1154 unsigned flags, struct iomap *iomap,
1155 struct iomap *srcmap)
1157 struct gfs2_inode *ip = GFS2_I(inode);
1158 struct metapath mp = { .mp_aheight = 1, };
1161 if (gfs2_is_jdata(ip))
1162 iomap->flags |= IOMAP_F_BUFFER_HEAD;
1164 trace_gfs2_iomap_start(ip, pos, length, flags);
1165 if (gfs2_iomap_need_write_lock(flags)) {
1166 ret = gfs2_write_lock(inode);
1171 ret = gfs2_iomap_get(inode, pos, length, flags, iomap, &mp);
1175 switch(flags & (IOMAP_WRITE | IOMAP_ZERO)) {
1177 if (flags & IOMAP_DIRECT) {
1179 * Silently fall back to buffered I/O for stuffed files
1180 * or if we've got a hole (see gfs2_file_direct_write).
1182 if (iomap->type != IOMAP_MAPPED)
1188 if (iomap->type == IOMAP_HOLE)
1195 ret = gfs2_iomap_begin_write(inode, pos, length, flags, iomap, &mp);
1198 if (ret && gfs2_iomap_need_write_lock(flags))
1199 gfs2_write_unlock(inode);
1200 release_metapath(&mp);
1202 trace_gfs2_iomap_end(ip, iomap, ret);
1206 static int gfs2_iomap_end(struct inode *inode, loff_t pos, loff_t length,
1207 ssize_t written, unsigned flags, struct iomap *iomap)
1209 struct gfs2_inode *ip = GFS2_I(inode);
1210 struct gfs2_sbd *sdp = GFS2_SB(inode);
1212 switch (flags & (IOMAP_WRITE | IOMAP_ZERO)) {
1214 if (flags & IOMAP_DIRECT)
1218 if (iomap->type == IOMAP_HOLE)
1225 if (!gfs2_is_stuffed(ip))
1226 gfs2_ordered_add_inode(ip);
1228 if (inode == sdp->sd_rindex)
1229 adjust_fs_space(inode);
1231 gfs2_inplace_release(ip);
1233 if (length != written && (iomap->flags & IOMAP_F_NEW)) {
1234 /* Deallocate blocks that were just allocated. */
1235 loff_t blockmask = i_blocksize(inode) - 1;
1236 loff_t end = (pos + length) & ~blockmask;
1238 pos = (pos + written + blockmask) & ~blockmask;
1240 truncate_pagecache_range(inode, pos, end - 1);
1241 punch_hole(ip, pos, end - pos);
1245 if (ip->i_qadata && ip->i_qadata->qa_qd_num)
1246 gfs2_quota_unlock(ip);
1248 if (unlikely(!written))
1251 if (iomap->flags & IOMAP_F_SIZE_CHANGED)
1252 mark_inode_dirty(inode);
1253 set_bit(GLF_DIRTY, &ip->i_gl->gl_flags);
1256 if (gfs2_iomap_need_write_lock(flags))
1257 gfs2_write_unlock(inode);
1261 const struct iomap_ops gfs2_iomap_ops = {
1262 .iomap_begin = gfs2_iomap_begin,
1263 .iomap_end = gfs2_iomap_end,
1267 * gfs2_block_map - Map one or more blocks of an inode to a disk block
1269 * @lblock: The logical block number
1270 * @bh_map: The bh to be mapped
1271 * @create: True if it's ok to alloc blocks to satisfy the request
1273 * The size of the requested mapping is defined in bh_map->b_size.
1275 * Clears buffer_mapped(bh_map) and leaves bh_map->b_size unchanged
1276 * when @lblock is not mapped. Sets buffer_mapped(bh_map) and
1277 * bh_map->b_size to indicate the size of the mapping when @lblock and
1278 * successive blocks are mapped, up to the requested size.
1280 * Sets buffer_boundary() if a read of metadata will be required
1281 * before the next block can be mapped. Sets buffer_new() if new
1282 * blocks were allocated.
1287 int gfs2_block_map(struct inode *inode, sector_t lblock,
1288 struct buffer_head *bh_map, int create)
1290 struct gfs2_inode *ip = GFS2_I(inode);
1291 loff_t pos = (loff_t)lblock << inode->i_blkbits;
1292 loff_t length = bh_map->b_size;
1293 struct metapath mp = { .mp_aheight = 1, };
1294 struct iomap iomap = { };
1295 int flags = create ? IOMAP_WRITE : 0;
1298 clear_buffer_mapped(bh_map);
1299 clear_buffer_new(bh_map);
1300 clear_buffer_boundary(bh_map);
1301 trace_gfs2_bmap(ip, bh_map, lblock, create, 1);
1303 ret = gfs2_iomap_get(inode, pos, length, flags, &iomap, &mp);
1304 if (create && !ret && iomap.type == IOMAP_HOLE)
1305 ret = gfs2_iomap_alloc(inode, &iomap, &mp);
1306 release_metapath(&mp);
1310 if (iomap.length > bh_map->b_size) {
1311 iomap.length = bh_map->b_size;
1312 iomap.flags &= ~IOMAP_F_GFS2_BOUNDARY;
1314 if (iomap.addr != IOMAP_NULL_ADDR)
1315 map_bh(bh_map, inode->i_sb, iomap.addr >> inode->i_blkbits);
1316 bh_map->b_size = iomap.length;
1317 if (iomap.flags & IOMAP_F_GFS2_BOUNDARY)
1318 set_buffer_boundary(bh_map);
1319 if (iomap.flags & IOMAP_F_NEW)
1320 set_buffer_new(bh_map);
1323 trace_gfs2_bmap(ip, bh_map, lblock, create, ret);
1328 * Deprecated: do not use in new code
1330 int gfs2_extent_map(struct inode *inode, u64 lblock, int *new, u64 *dblock, unsigned *extlen)
1332 struct buffer_head bh = { .b_state = 0, .b_blocknr = 0 };
1340 bh.b_size = BIT(inode->i_blkbits + (create ? 0 : 5));
1341 ret = gfs2_block_map(inode, lblock, &bh, create);
1342 *extlen = bh.b_size >> inode->i_blkbits;
1343 *dblock = bh.b_blocknr;
1344 if (buffer_new(&bh))
1352 * NOTE: Never call gfs2_block_zero_range with an open transaction because it
1353 * uses iomap write to perform its actions, which begin their own transactions
1354 * (iomap_begin, page_prepare, etc.)
1356 static int gfs2_block_zero_range(struct inode *inode, loff_t from,
1357 unsigned int length)
1359 BUG_ON(current->journal_info);
1360 return iomap_zero_range(inode, from, length, NULL, &gfs2_iomap_ops);
1363 #define GFS2_JTRUNC_REVOKES 8192
1366 * gfs2_journaled_truncate - Wrapper for truncate_pagecache for jdata files
1367 * @inode: The inode being truncated
1368 * @oldsize: The original (larger) size
1369 * @newsize: The new smaller size
1371 * With jdata files, we have to journal a revoke for each block which is
1372 * truncated. As a result, we need to split this into separate transactions
1373 * if the number of pages being truncated gets too large.
1376 static int gfs2_journaled_truncate(struct inode *inode, u64 oldsize, u64 newsize)
1378 struct gfs2_sbd *sdp = GFS2_SB(inode);
1379 u64 max_chunk = GFS2_JTRUNC_REVOKES * sdp->sd_vfs->s_blocksize;
1383 while (oldsize != newsize) {
1384 struct gfs2_trans *tr;
1387 chunk = oldsize - newsize;
1388 if (chunk > max_chunk)
1391 offs = oldsize & ~PAGE_MASK;
1392 if (offs && chunk > PAGE_SIZE)
1393 chunk = offs + ((chunk - offs) & PAGE_MASK);
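		/*
		 * Example (illustrative): with 4k pages, oldsize == 10000 and
		 * chunk == 9000 give offs == 1808 and chunk == 1808 + 4096 ==
		 * 5904, so the truncation below stops at 4096, a page
		 * boundary.
		 */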
1395 truncate_pagecache(inode, oldsize - chunk);
1398 tr = current->journal_info;
1399 if (!test_bit(TR_TOUCHED, &tr->tr_flags))
1402 gfs2_trans_end(sdp);
1403 error = gfs2_trans_begin(sdp, RES_DINODE, GFS2_JTRUNC_REVOKES);
1411 static int trunc_start(struct inode *inode, u64 newsize)
1413 struct gfs2_inode *ip = GFS2_I(inode);
1414 struct gfs2_sbd *sdp = GFS2_SB(inode);
1415 struct buffer_head *dibh = NULL;
1416 int journaled = gfs2_is_jdata(ip);
1417 u64 oldsize = inode->i_size;
1420 if (!gfs2_is_stuffed(ip)) {
1421 unsigned int blocksize = i_blocksize(inode);
1422 unsigned int offs = newsize & (blocksize - 1);
1424 error = gfs2_block_zero_range(inode, newsize,
1431 error = gfs2_trans_begin(sdp, RES_DINODE + RES_JDATA, GFS2_JTRUNC_REVOKES);
1433 error = gfs2_trans_begin(sdp, RES_DINODE, 0);
1437 error = gfs2_meta_inode_buffer(ip, &dibh);
1441 gfs2_trans_add_meta(ip->i_gl, dibh);
1443 if (gfs2_is_stuffed(ip))
1444 gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode) + newsize);
1446 ip->i_diskflags |= GFS2_DIF_TRUNC_IN_PROG;
1448 i_size_write(inode, newsize);
1449 ip->i_inode.i_mtime = ip->i_inode.i_ctime = current_time(&ip->i_inode);
1450 gfs2_dinode_out(ip, dibh->b_data);
1453 error = gfs2_journaled_truncate(inode, oldsize, newsize);
1455 truncate_pagecache(inode, newsize);
1459 if (current->journal_info)
1460 gfs2_trans_end(sdp);
1464 int gfs2_iomap_get_alloc(struct inode *inode, loff_t pos, loff_t length,
1465 struct iomap *iomap)
1467 struct metapath mp = { .mp_aheight = 1, };
1470 ret = gfs2_iomap_get(inode, pos, length, IOMAP_WRITE, iomap, &mp);
1471 if (!ret && iomap->type == IOMAP_HOLE)
1472 ret = gfs2_iomap_alloc(inode, iomap, &mp);
1473 release_metapath(&mp);
1478 * sweep_bh_for_rgrps - find an rgrp in a meta buffer and free blocks therein
1480 * @rg_gh: holder of resource group glock
1481 * @bh: buffer head to sweep
1482 * @start: starting point in bh
1483 * @end: end point in bh
1484 * @meta: true if bh points to metadata (rather than data)
1485 * @btotal: place to keep count of total blocks freed
1487 * We sweep a metadata buffer (provided by the metapath) for blocks we need to
1488 * free, and free them all. However, we do it one rgrp at a time. If this
1489 * block has references to multiple rgrps, we break it into individual
1490 * transactions. This allows other processes to use the rgrps while we're
1491 * focused on a single one, for better concurrency / performance.
1492 * At every transaction boundary, we rewrite the inode into the journal.
1493 * That way the bitmaps are kept consistent with the inode and we can recover
1494 * if we're interrupted by power-outages.
1496 * Returns: 0, or return code if an error occurred.
1497 * *btotal has the total number of blocks freed
1499 static int sweep_bh_for_rgrps(struct gfs2_inode *ip, struct gfs2_holder *rd_gh,
1500 struct buffer_head *bh, __be64 *start, __be64 *end,
1501 bool meta, u32 *btotal)
1503 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1504 struct gfs2_rgrpd *rgd;
1505 struct gfs2_trans *tr;
1507 int blks_outside_rgrp;
1508 u64 bn, bstart, isize_blks;
1509 s64 blen; /* needs to be s64 or gfs2_add_inode_blocks breaks */
1511 bool buf_in_tr = false; /* buffer was added to transaction */
1515 if (gfs2_holder_initialized(rd_gh)) {
1516 rgd = gfs2_glock2rgrp(rd_gh->gh_gl);
1517 gfs2_assert_withdraw(sdp,
1518 gfs2_glock_is_locked_by_me(rd_gh->gh_gl));
1520 blks_outside_rgrp = 0;
1524 for (p = start; p < end; p++) {
1527 bn = be64_to_cpu(*p);
1530 if (!rgrp_contains_block(rgd, bn)) {
1531 blks_outside_rgrp++;
1535 rgd = gfs2_blk2rgrpd(sdp, bn, true);
1536 if (unlikely(!rgd)) {
1540 ret = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE,
1545 /* Must be done with the rgrp glock held: */
1546 if (gfs2_rs_active(&ip->i_res) &&
1547 rgd == ip->i_res.rs_rbm.rgd)
1548 gfs2_rs_deltree(&ip->i_res);
1551 /* The size of our transactions will be unknown until we
1552 actually process all the metadata blocks that relate to
1553 the rgrp. So we estimate. We know it can't be more than
1554 the dinode's i_blocks and we don't want to exceed the
1555 journal flush threshold, sd_log_thresh2. */
1556 if (current->journal_info == NULL) {
1557 unsigned int jblocks_rqsted, revokes;
1559 jblocks_rqsted = rgd->rd_length + RES_DINODE +
1561 isize_blks = gfs2_get_inode_blocks(&ip->i_inode);
1562 if (isize_blks > atomic_read(&sdp->sd_log_thresh2))
1564 atomic_read(&sdp->sd_log_thresh2);
1566 jblocks_rqsted += isize_blks;
1567 revokes = jblocks_rqsted;
1569 revokes += end - start;
1570 else if (ip->i_depth)
1571 revokes += sdp->sd_inptrs;
1572 ret = gfs2_trans_begin(sdp, jblocks_rqsted, revokes);
1575 down_write(&ip->i_rw_mutex);
1577 /* check if we will exceed the transaction blocks requested */
1578 tr = current->journal_info;
1579 if (tr->tr_num_buf_new + RES_STATFS +
1580 RES_QUOTA >= atomic_read(&sdp->sd_log_thresh2)) {
1581 /* We set blks_outside_rgrp to ensure the loop will
1582 be repeated for the same rgrp, but with a new transaction. */
1584 blks_outside_rgrp++;
1585 /* This next part is tricky. If the buffer was added
1586 to the transaction, we've already set some block
1587 pointers to 0, so we better follow through and free
1588 them, or we will introduce corruption (so break).
1589 This may be impossible, or at least rare, but I
1590 decided to cover the case regardless.
1592 If the buffer was not added to the transaction
1593 (this call), doing so would exceed our transaction
1594 size, so we need to end the transaction and start a
1595 new one (so goto). */
1602 gfs2_trans_add_meta(ip->i_gl, bh);
1605 if (bstart + blen == bn) {
1610 __gfs2_free_blocks(ip, rgd, bstart, (u32)blen, meta);
1612 gfs2_add_inode_blocks(&ip->i_inode, -blen);
1618 __gfs2_free_blocks(ip, rgd, bstart, (u32)blen, meta);
1620 gfs2_add_inode_blocks(&ip->i_inode, -blen);
1623 if (!ret && blks_outside_rgrp) { /* If buffer still has non-zero blocks
1624 outside the rgrp we just processed,
1625 do it all over again. */
1626 if (current->journal_info) {
1627 struct buffer_head *dibh;
1629 ret = gfs2_meta_inode_buffer(ip, &dibh);
1633 /* Every transaction boundary, we rewrite the dinode
1634 to keep its di_blocks current in case of failure. */
1635 ip->i_inode.i_mtime = ip->i_inode.i_ctime =
1636 current_time(&ip->i_inode);
1637 gfs2_trans_add_meta(ip->i_gl, dibh);
1638 gfs2_dinode_out(ip, dibh->b_data);
1640 up_write(&ip->i_rw_mutex);
1641 gfs2_trans_end(sdp);
1644 gfs2_glock_dq_uninit(rd_gh);
1652 static bool mp_eq_to_hgt(struct metapath *mp, __u16 *list, unsigned int h)
1654 if (memcmp(mp->mp_list, list, h * sizeof(mp->mp_list[0])))
1660 * find_nonnull_ptr - find a non-null pointer given a metapath and height
1661 * @mp: starting metapath
1662 * @h: desired height to search
1664 * Assumes the metapath is valid (with buffers) out to height h.
1665 * Returns: true if a non-null pointer was found in the metapath buffer,
1666 *          false if all remaining pointers are NULL in the buffer
1668 static bool find_nonnull_ptr(struct gfs2_sbd *sdp, struct metapath *mp,
1670 __u16 *end_list, unsigned int end_aligned)
1672 struct buffer_head *bh = mp->mp_bh[h];
1673 __be64 *first, *ptr, *end;
1675 first = metaptr1(h, mp);
1676 ptr = first + mp->mp_list[h];
1677 end = (__be64 *)(bh->b_data + bh->b_size);
1678 if (end_list && mp_eq_to_hgt(mp, end_list, h)) {
1679 bool keep_end = h < end_aligned;
1680 end = first + end_list[h] + keep_end;
1684 if (*ptr) { /* if we have a non-null pointer */
1685 mp->mp_list[h] = ptr - first;
1687 if (h < GFS2_MAX_META_HEIGHT)
1696 enum dealloc_states {
1697 DEALLOC_MP_FULL = 0, /* Strip a metapath with all buffers read in */
1698 DEALLOC_MP_LOWER = 1, /* lower the metapath strip height */
1699 DEALLOC_FILL_MP = 2, /* Fill in the metapath to the given height. */
1700 DEALLOC_DONE = 3, /* process complete */
1704 metapointer_range(struct metapath *mp, int height,
1705 __u16 *start_list, unsigned int start_aligned,
1706 __u16 *end_list, unsigned int end_aligned,
1707 __be64 **start, __be64 **end)
1709 struct buffer_head *bh = mp->mp_bh[height];
1712 first = metaptr1(height, mp);
1714 if (mp_eq_to_hgt(mp, start_list, height)) {
1715 bool keep_start = height < start_aligned;
1716 *start = first + start_list[height] + keep_start;
1718 *end = (__be64 *)(bh->b_data + bh->b_size);
1719 if (end_list && mp_eq_to_hgt(mp, end_list, height)) {
1720 bool keep_end = height < end_aligned;
1721 *end = first + end_list[height] + keep_end;
1725 static inline bool walk_done(struct gfs2_sbd *sdp,
1726 struct metapath *mp, int height,
1727 __u16 *end_list, unsigned int end_aligned)
1732 bool keep_end = height < end_aligned;
1733 if (!mp_eq_to_hgt(mp, end_list, height))
1735 end = end_list[height] + keep_end;
1737 end = (height > 0) ? sdp->sd_inptrs : sdp->sd_diptrs;
1738 return mp->mp_list[height] >= end;
1742 * punch_hole - deallocate blocks in a file
1743 * @ip: inode to truncate
1744 * @offset: the start of the hole
1745 * @length: the size of the hole (or 0 for truncate)
1747 * Punch a hole into a file or truncate a file at a given position. This
1748 * function operates in whole blocks (@offset and @length are rounded
1749 * accordingly); partially filled blocks must be cleared otherwise.
1751 * This function works from the bottom up, and from the right to the left. In
1752 * other words, it strips off the highest layer (data) before stripping any of
1753 * the metadata. Doing it this way is best in case the operation is interrupted
1754 * by power failure, etc. The dinode is rewritten in every transaction to
1755 * guarantee integrity.
1757 static int punch_hole(struct gfs2_inode *ip, u64 offset, u64 length)
1759 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1760 u64 maxsize = sdp->sd_heightsize[ip->i_height];
1761 struct metapath mp = {};
1762 struct buffer_head *dibh, *bh;
1763 struct gfs2_holder rd_gh;
1764 unsigned int bsize_shift = sdp->sd_sb.sb_bsize_shift;
1765 u64 lblock = (offset + (1 << bsize_shift) - 1) >> bsize_shift;
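	/*
	 * Example (illustrative): with 4k blocks, offset == 5000 rounds up to
	 * lblock == 2, since the partial block containing offset 5000 must be
	 * cleared by the caller rather than deallocated here.
	 */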
1766 __u16 start_list[GFS2_MAX_META_HEIGHT];
1767 __u16 __end_list[GFS2_MAX_META_HEIGHT], *end_list = NULL;
1768 unsigned int start_aligned, end_aligned;
1769 unsigned int strip_h = ip->i_height - 1;
1772 int mp_h; /* metapath buffers are read in to this height */
1774 __be64 *start, *end;
1776 if (offset >= maxsize) {
1778 * The starting point lies beyond the allocated metadata;
1779 * there are no blocks to deallocate.
1785 * The start position of the hole is defined by lblock, start_list, and
1786 * start_aligned. The end position of the hole is defined by lend,
1787 * end_list, and end_aligned.
1789 * start_aligned and end_aligned define down to which height the start
1790 * and end positions are aligned to the metadata tree (i.e., the
1791 * position is a multiple of the metadata granularity at the height
1792 * above). This determines at which heights additional meta pointers
1793 * need to be preserved for the remaining data.
1797 u64 end_offset = offset + length;
1801 * Clip the end at the maximum file size for the given height:
1802 * that's how far the metadata goes; files bigger than that
1803 * will have additional layers of indirection.
1805 if (end_offset > maxsize)
1806 end_offset = maxsize;
1807 lend = end_offset >> bsize_shift;
1812 find_metapath(sdp, lend, &mp, ip->i_height);
1813 end_list = __end_list;
1814 memcpy(end_list, mp.mp_list, sizeof(mp.mp_list));
1816 for (mp_h = ip->i_height - 1; mp_h > 0; mp_h--) {
1823 find_metapath(sdp, lblock, &mp, ip->i_height);
1824 memcpy(start_list, mp.mp_list, sizeof(start_list));
1826 for (mp_h = ip->i_height - 1; mp_h > 0; mp_h--) {
1827 if (start_list[mp_h])
1830 start_aligned = mp_h;
1832 ret = gfs2_meta_inode_buffer(ip, &dibh);
1837 ret = lookup_metapath(ip, &mp);
1841 /* issue read-ahead on metadata */
1842 for (mp_h = 0; mp_h < mp.mp_aheight - 1; mp_h++) {
1843 metapointer_range(&mp, mp_h, start_list, start_aligned,
1844 end_list, end_aligned, &start, &end);
1845 gfs2_metapath_ra(ip->i_gl, start, end);
1848 if (mp.mp_aheight == ip->i_height)
1849 state = DEALLOC_MP_FULL; /* We have a complete metapath */
1851 state = DEALLOC_FILL_MP; /* deal with partial metapath */
1853 ret = gfs2_rindex_update(sdp);
1857 ret = gfs2_quota_hold(ip, NO_UID_QUOTA_CHANGE, NO_GID_QUOTA_CHANGE);
1860 gfs2_holder_mark_uninitialized(&rd_gh);
1864 while (state != DEALLOC_DONE) {
1866 /* Truncate a full metapath at the given strip height.
1867 * Note that strip_h == mp_h in order to be in this state. */
1868 case DEALLOC_MP_FULL:
1869 bh = mp.mp_bh[mp_h];
1870 gfs2_assert_withdraw(sdp, bh);
1871 if (gfs2_assert_withdraw(sdp,
1872 prev_bnr != bh->b_blocknr)) {
1873 fs_emerg(sdp, "inode %llu, block:%llu, i_h:%u,"
1874 "s_h:%u, mp_h:%u\n",
1875 (unsigned long long)ip->i_no_addr,
1876 prev_bnr, ip->i_height, strip_h, mp_h);
1878 prev_bnr = bh->b_blocknr;
1880 if (gfs2_metatype_check(sdp, bh,
1881 (mp_h ? GFS2_METATYPE_IN :
1882 GFS2_METATYPE_DI))) {
1888 * Below, passing end_aligned as 0 gives us the
1889 * metapointer range excluding the end point: the end
1890 * point is the first metapath we must not deallocate!
1893 metapointer_range(&mp, mp_h, start_list, start_aligned,
1894 end_list, 0 /* end_aligned */,
1896 ret = sweep_bh_for_rgrps(ip, &rd_gh, mp.mp_bh[mp_h],
1898 mp_h != ip->i_height - 1,
1901 /* If we hit an error or just swept the dinode buffer, we're done. */
1904 state = DEALLOC_DONE;
1907 state = DEALLOC_MP_LOWER;
1910 /* lower the metapath strip height */
1911 case DEALLOC_MP_LOWER:
1912 /* We're done with the current buffer, so release it,
1913 unless it's the dinode buffer. Then back up to the
1914 previous pointer. */
1916 brelse(mp.mp_bh[mp_h]);
1917 mp.mp_bh[mp_h] = NULL;
1919 /* If we can't get any lower in height, we've stripped
1920 off all we can. Next step is to back up and start
1921 stripping the previous level of metadata. */
1924 memcpy(mp.mp_list, start_list, sizeof(start_list));
1926 state = DEALLOC_FILL_MP;
1929 mp.mp_list[mp_h] = 0;
1930 mp_h--; /* search one metadata height down */
1932 if (walk_done(sdp, &mp, mp_h, end_list, end_aligned))
1934 /* Here we've found a part of the metapath that is not
1935 * allocated. We need to search at that height for the
1936 * next non-null pointer. */
1937 if (find_nonnull_ptr(sdp, &mp, mp_h, end_list, end_aligned)) {
1938 state = DEALLOC_FILL_MP;
1941 /* No more non-null pointers at this height. Back up
1942 to the previous height and try again. */
1943 break; /* loop around in the same state */
1945 /* Fill the metapath with buffers to the given height. */
1946 case DEALLOC_FILL_MP:
1947 /* Fill the buffers out to the current height. */
1948 ret = fillup_metapath(ip, &mp, mp_h);
1952 /* On the first pass, issue read-ahead on metadata. */
1953 if (mp.mp_aheight > 1 && strip_h == ip->i_height - 1) {
1954 unsigned int height = mp.mp_aheight - 1;
1956 /* No read-ahead for data blocks. */
1957 if (mp.mp_aheight - 1 == strip_h)
1960 for (; height >= mp.mp_aheight - ret; height--) {
1961 metapointer_range(&mp, height,
1962 start_list, start_aligned,
1963 end_list, end_aligned,
1965 gfs2_metapath_ra(ip->i_gl, start, end);
1969 /* If buffers found for the entire strip height */
1970 if (mp.mp_aheight - 1 == strip_h) {
1971 state = DEALLOC_MP_FULL;
1974 if (mp.mp_aheight < ip->i_height) /* We have a partial height */
1975 mp_h = mp.mp_aheight - 1;
1977 /* If we find a non-null block pointer, crawl a bit
1978 higher up in the metapath and try again, otherwise
1979 we need to look lower for a new starting point. */
1980 if (find_nonnull_ptr(sdp, &mp, mp_h, end_list, end_aligned))
1983 state = DEALLOC_MP_LOWER;
1989 if (current->journal_info == NULL) {
1990 ret = gfs2_trans_begin(sdp, RES_DINODE + RES_STATFS +
1994 down_write(&ip->i_rw_mutex);
1996 gfs2_statfs_change(sdp, 0, +btotal, 0);
1997 gfs2_quota_change(ip, -(s64)btotal, ip->i_inode.i_uid,
1999 ip->i_inode.i_mtime = ip->i_inode.i_ctime = current_time(&ip->i_inode);
2000 gfs2_trans_add_meta(ip->i_gl, dibh);
2001 gfs2_dinode_out(ip, dibh->b_data);
2002 up_write(&ip->i_rw_mutex);
2003 gfs2_trans_end(sdp);
2007 if (gfs2_holder_initialized(&rd_gh))
2008 gfs2_glock_dq_uninit(&rd_gh);
2009 if (current->journal_info) {
2010 up_write(&ip->i_rw_mutex);
2011 gfs2_trans_end(sdp);
2014 gfs2_quota_unhold(ip);
2016 release_metapath(&mp);
2020 static int trunc_end(struct gfs2_inode *ip)
2022 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
2023 struct buffer_head *dibh;
2026 error = gfs2_trans_begin(sdp, RES_DINODE, 0);
2030 down_write(&ip->i_rw_mutex);
2032 error = gfs2_meta_inode_buffer(ip, &dibh);
2036 if (!i_size_read(&ip->i_inode)) {
2038 ip->i_goal = ip->i_no_addr;
2039 gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode));
2040 gfs2_ordered_del_inode(ip);
2042 ip->i_inode.i_mtime = ip->i_inode.i_ctime = current_time(&ip->i_inode);
2043 ip->i_diskflags &= ~GFS2_DIF_TRUNC_IN_PROG;
2045 gfs2_trans_add_meta(ip->i_gl, dibh);
2046 gfs2_dinode_out(ip, dibh->b_data);
2050 up_write(&ip->i_rw_mutex);
2051 gfs2_trans_end(sdp);
2056 * do_shrink - make a file smaller
2058 * @newsize: the size to make the file
2060 * Called with an exclusive lock on @inode. The @newsize must
2061 * be equal to or smaller than the current inode size.
2066 static int do_shrink(struct inode *inode, u64 newsize)
2068 struct gfs2_inode *ip = GFS2_I(inode);
2071 error = trunc_start(inode, newsize);
2074 if (gfs2_is_stuffed(ip))
2077 error = punch_hole(ip, newsize, 0);
2079 error = trunc_end(ip);
2084 void gfs2_trim_blocks(struct inode *inode)
2088 ret = do_shrink(inode, inode->i_size);
2093 * do_grow - Touch and update inode size
2095 * @size: The new size
2097 * This function updates the timestamps on the inode and
2098 * may also increase the size of the inode. This function
2099 * must not be called with @size any smaller than the current
2102 * Although it is not strictly required to unstuff files here,
2103 * earlier versions of GFS2 have a bug in the stuffed file reading
2104 * code which will result in a buffer overrun if the size is larger
2105 * than the max stuffed file size. In order to prevent this from
2106 * occurring, such files are unstuffed, but in other cases we can
2107 * just update the inode size directly.
2109 * Returns: 0 on success, or -ve on error
2112 static int do_grow(struct inode *inode, u64 size)
2114 struct gfs2_inode *ip = GFS2_I(inode);
2115 struct gfs2_sbd *sdp = GFS2_SB(inode);
2116 struct gfs2_alloc_parms ap = { .target = 1, };
2117 struct buffer_head *dibh;
2121 if (gfs2_is_stuffed(ip) && size > gfs2_max_stuffed_size(ip)) {
2122 error = gfs2_quota_lock_check(ip, &ap);
2126 error = gfs2_inplace_reserve(ip, &ap);
2128 goto do_grow_qunlock;
2132 error = gfs2_trans_begin(sdp, RES_DINODE + RES_STATFS + RES_RG_BIT +
2134 gfs2_is_jdata(ip) ? RES_JDATA : 0) +
2135 (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF ?
2138 goto do_grow_release;
2141 error = gfs2_unstuff_dinode(ip, NULL);
2146 error = gfs2_meta_inode_buffer(ip, &dibh);
2150 truncate_setsize(inode, size);
2151 ip->i_inode.i_mtime = ip->i_inode.i_ctime = current_time(&ip->i_inode);
2152 gfs2_trans_add_meta(ip->i_gl, dibh);
2153 gfs2_dinode_out(ip, dibh->b_data);
2157 gfs2_trans_end(sdp);
2160 gfs2_inplace_release(ip);
2162 gfs2_quota_unlock(ip);
2168 * gfs2_setattr_size - make a file a given size
2170 * @newsize: the size to make the file
2172 * The file size can grow, shrink, or stay the same size. This
2173 * is called holding i_rwsem and an exclusive glock on the inode
2179 int gfs2_setattr_size(struct inode *inode, u64 newsize)
2181 struct gfs2_inode *ip = GFS2_I(inode);
2184 BUG_ON(!S_ISREG(inode->i_mode));
2186 ret = inode_newsize_ok(inode, newsize);
2190 inode_dio_wait(inode);
2192 ret = gfs2_qa_get(ip);
2196 if (newsize >= inode->i_size) {
2197 ret = do_grow(inode, newsize);
2201 ret = do_shrink(inode, newsize);
2203 gfs2_rs_delete(ip, NULL);
2208 int gfs2_truncatei_resume(struct gfs2_inode *ip)
2211 error = punch_hole(ip, i_size_read(&ip->i_inode), 0);
2213 error = trunc_end(ip);
2217 int gfs2_file_dealloc(struct gfs2_inode *ip)
2219 return punch_hole(ip, 0, 0);
2223 * gfs2_free_journal_extents - Free cached journal bmap info
2228 void gfs2_free_journal_extents(struct gfs2_jdesc *jd)
2230 struct gfs2_journal_extent *jext;
2232 while(!list_empty(&jd->extent_list)) {
2233 jext = list_first_entry(&jd->extent_list, struct gfs2_journal_extent, list);
2234 list_del(&jext->list);
2240 * gfs2_add_jextent - Add or merge a new extent to extent cache
2241 * @jd: The journal descriptor
2242 * @lblock: The logical block at start of new extent
2243 * @dblock: The physical block at start of new extent
2244 * @blocks: Size of extent in fs blocks
2246 * Returns: 0 on success or -ENOMEM
2249 static int gfs2_add_jextent(struct gfs2_jdesc *jd, u64 lblock, u64 dblock, u64 blocks)
2251 struct gfs2_journal_extent *jext;
2253 if (!list_empty(&jd->extent_list)) {
2254 jext = list_last_entry(&jd->extent_list, struct gfs2_journal_extent, list);
2255 if ((jext->dblock + jext->blocks) == dblock) {
2256 jext->blocks += blocks;
2261 jext = kzalloc(sizeof(struct gfs2_journal_extent), GFP_NOFS);
2264 jext->dblock = dblock;
2265 jext->lblock = lblock;
2266 jext->blocks = blocks;
2267 list_add_tail(&jext->list, &jd->extent_list);
2273 * gfs2_map_journal_extents - Cache journal bmap info
2274 * @sdp: The super block
2275 * @jd: The journal to map
2277 * Create a reusable "extent" mapping from all logical
2278 * blocks to all physical blocks for the given journal. This will save
2279 * us time when writing journal blocks. Most journals will have only one
2280 * extent that maps all their logical blocks. That's because gfs2.mkfs
2281 * arranges the journal blocks sequentially to maximize performance.
2282 * So the extent would map the first block for the entire file length.
2283 * However, gfs2_jadd can happen while file activity is happening, so
2284 * those journals may not be sequential. Less likely is the case where
2285 * the users created their own journals by mounting the metafs and
2286 * laying it out. But it's still possible. These journals might have blocks in several extents.
2289 * Returns: 0 on success, or error on failure
2292 int gfs2_map_journal_extents(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd)
2296 struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
2297 struct buffer_head bh;
2298 unsigned int shift = sdp->sd_sb.sb_bsize_shift;
2303 start = ktime_get();
2304 lblock_stop = i_size_read(jd->jd_inode) >> shift;
2305 size = (lblock_stop - lblock) << shift;
2307 WARN_ON(!list_empty(&jd->extent_list));
2313 rc = gfs2_block_map(jd->jd_inode, lblock, &bh, 0);
2314 if (rc || !buffer_mapped(&bh))
2316 rc = gfs2_add_jextent(jd, lblock, bh.b_blocknr, bh.b_size >> shift);
2320 lblock += (bh.b_size >> ip->i_inode.i_blkbits);
2324 fs_info(sdp, "journal %d mapped with %u extents in %lldms\n", jd->jd_jid,
2325 jd->nr_extents, ktime_ms_delta(end, start));
2329 fs_warn(sdp, "error %d mapping journal %u at offset %llu (extent %u)\n",
2331 (unsigned long long)(i_size_read(jd->jd_inode) - size),
2333 fs_warn(sdp, "bmap=%d lblock=%llu block=%llu, state=0x%08lx, size=%llu\n",
2334 rc, (unsigned long long)lblock, (unsigned long long)bh.b_blocknr,
2335 bh.b_state, (unsigned long long)bh.b_size);
2336 gfs2_free_journal_extents(jd);
2341 * gfs2_write_alloc_required - figure out if a write will require an allocation
2342 * @ip: the file being written to
2343 * @offset: the offset to write to
2344 * @len: the number of bytes being written
2346 * Returns: 1 if an alloc is required, 0 otherwise
2349 int gfs2_write_alloc_required(struct gfs2_inode *ip, u64 offset,
2352 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
2353 struct buffer_head bh;
2355 u64 lblock, lblock_stop, size;
2361 if (gfs2_is_stuffed(ip)) {
2362 if (offset + len > gfs2_max_stuffed_size(ip))
2367 shift = sdp->sd_sb.sb_bsize_shift;
2368 BUG_ON(gfs2_is_dir(ip));
2369 end_of_file = (i_size_read(&ip->i_inode) + sdp->sd_sb.sb_bsize - 1) >> shift;
2370 lblock = offset >> shift;
2371 lblock_stop = (offset + len + sdp->sd_sb.sb_bsize - 1) >> shift;
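	/*
	 * Example (illustrative): with 4k blocks, offset == 5000 and len ==
	 * 10000 give lblock == 1 and lblock_stop == 4, so blocks 1..3 are
	 * checked for existing mappings below.
	 */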
2372 if (lblock_stop > end_of_file && ip != GFS2_I(sdp->sd_rindex))
2375 size = (lblock_stop - lblock) << shift;
2379 gfs2_block_map(&ip->i_inode, lblock, &bh, 0);
2380 if (!buffer_mapped(&bh))
2383 lblock += (bh.b_size >> ip->i_inode.i_blkbits);
2389 static int stuffed_zero_range(struct inode *inode, loff_t offset, loff_t length)
2391 struct gfs2_inode *ip = GFS2_I(inode);
2392 struct buffer_head *dibh;
2395 if (offset >= inode->i_size)
2397 if (offset + length > inode->i_size)
2398 length = inode->i_size - offset;
2400 error = gfs2_meta_inode_buffer(ip, &dibh);
2403 gfs2_trans_add_meta(ip->i_gl, dibh);
2404 memset(dibh->b_data + sizeof(struct gfs2_dinode) + offset, 0,
2410 static int gfs2_journaled_truncate_range(struct inode *inode, loff_t offset,
2413 struct gfs2_sbd *sdp = GFS2_SB(inode);
2414 loff_t max_chunk = GFS2_JTRUNC_REVOKES * sdp->sd_vfs->s_blocksize;
2418 struct gfs2_trans *tr;
2423 if (chunk > max_chunk)
2426 offs = offset & ~PAGE_MASK;
2427 if (offs && chunk > PAGE_SIZE)
2428 chunk = offs + ((chunk - offs) & PAGE_MASK);
2430 truncate_pagecache_range(inode, offset, chunk);
2434 tr = current->journal_info;
2435 if (!test_bit(TR_TOUCHED, &tr->tr_flags))
2438 gfs2_trans_end(sdp);
2439 error = gfs2_trans_begin(sdp, RES_DINODE, GFS2_JTRUNC_REVOKES);
2446 int __gfs2_punch_hole(struct file *file, loff_t offset, loff_t length)
2448 struct inode *inode = file_inode(file);
2449 struct gfs2_inode *ip = GFS2_I(inode);
2450 struct gfs2_sbd *sdp = GFS2_SB(inode);
2451 unsigned int blocksize = i_blocksize(inode);
2455 if (!gfs2_is_stuffed(ip)) {
2456 unsigned int start_off, end_len;
2458 start_off = offset & (blocksize - 1);
2459 end_len = (offset + length) & (blocksize - 1);
2461 unsigned int len = length;
2462 if (length > blocksize - start_off)
2463 len = blocksize - start_off;
2464 error = gfs2_block_zero_range(inode, offset, len);
2467 if (start_off + length < blocksize)
2471 error = gfs2_block_zero_range(inode,
2472 offset + length - end_len, end_len);
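		/*
		 * Example (illustrative): with a 4k block size, offset == 5000
		 * and length == 10000 zero bytes 5000..8191 of the first
		 * partial block and 12288..14999 of the last one; the fully
		 * covered block in between is left to punch_hole() below.
		 */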
2478 start = round_down(offset, blocksize);
2479 end = round_up(offset + length, blocksize) - 1;
2480 error = filemap_write_and_wait_range(inode->i_mapping, start, end);
2484 if (gfs2_is_jdata(ip))
2485 error = gfs2_trans_begin(sdp, RES_DINODE + 2 * RES_JDATA,
2486 GFS2_JTRUNC_REVOKES);
2488 error = gfs2_trans_begin(sdp, RES_DINODE, 0);
2492 if (gfs2_is_stuffed(ip)) {
2493 error = stuffed_zero_range(inode, offset, length);
2498 if (gfs2_is_jdata(ip)) {
2499 BUG_ON(!current->journal_info);
2500 gfs2_journaled_truncate_range(inode, offset, length);
2502 truncate_pagecache_range(inode, offset, offset + length - 1);
2504 file_update_time(file);
2505 mark_inode_dirty(inode);
2507 if (current->journal_info)
2508 gfs2_trans_end(sdp);
2510 if (!gfs2_is_stuffed(ip))
2511 error = punch_hole(ip, offset, length);
2514 if (current->journal_info)
2515 gfs2_trans_end(sdp);
2519 static int gfs2_map_blocks(struct iomap_writepage_ctx *wpc, struct inode *inode,
2522 struct metapath mp = { .mp_aheight = 1, };
2525 if (WARN_ON_ONCE(gfs2_is_stuffed(GFS2_I(inode))))
2528 if (offset >= wpc->iomap.offset &&
2529 offset < wpc->iomap.offset + wpc->iomap.length)
2532 memset(&wpc->iomap, 0, sizeof(wpc->iomap));
2533 ret = gfs2_iomap_get(inode, offset, INT_MAX, 0, &wpc->iomap, &mp);
2534 release_metapath(&mp);
2538 const struct iomap_writeback_ops gfs2_writeback_ops = {
2539 .map_blocks = gfs2_map_blocks,