1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
4 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
7 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
9 #include <linux/slab.h>
10 #include <linux/spinlock.h>
11 #include <linux/completion.h>
12 #include <linux/buffer_head.h>
14 #include <linux/gfs2_ondisk.h>
15 #include <linux/prefetch.h>
16 #include <linux/blkdev.h>
17 #include <linux/rbtree.h>
18 #include <linux/random.h>
33 #include "trace_gfs2.h"
36 #define BFITNOENT ((u32)~0)
37 #define NO_BLOCK ((u64)~0)
40 struct gfs2_rgrpd *rgd;
41 u32 offset; /* The offset is bitmap relative */
42 int bii; /* Bitmap index */
45 static inline struct gfs2_bitmap *rbm_bi(const struct gfs2_rbm *rbm)
47 return rbm->rgd->rd_bits + rbm->bii;
50 static inline u64 gfs2_rbm_to_block(const struct gfs2_rbm *rbm)
52 BUG_ON(rbm->offset >= rbm->rgd->rd_data);
53 return rbm->rgd->rd_data0 + (rbm_bi(rbm)->bi_start * GFS2_NBBY) +
58 * These routines are used by the resource group routines (rgrp.c)
59 * to keep track of block allocation. Each block is represented by two
60 * bits. So, each byte represents GFS2_NBBY (i.e. 4) blocks.
63 * 1 = Used (not metadata)
64 * 2 = Unlinked (still in use) inode
73 static const char valid_change[16] = {
81 static int gfs2_rbm_find(struct gfs2_rbm *rbm, u8 state, u32 *minext,
82 struct gfs2_blkreserv *rs, bool nowrap);
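/*
 * Illustrative sketch, not part of the on-disk format definition: with two
 * bits per block and GFS2_NBBY == 4, one bitmap byte describes four blocks.
 * For example the byte 0x64 (binary 01 10 01 00) encodes, starting from the
 * least significant bit pair:
 *   block n+0: 00 = free
 *   block n+1: 01 = used (data)
 *   block n+2: 10 = unlinked dinode
 *   block n+3: 01 = used (data)
 * A hypothetical helper (demo_block_state is not a real GFS2 function) that
 * extracts one such state could look like:
 *
 *	static inline u8 demo_block_state(u8 byte, unsigned int blk_in_byte)
 *	{
 *		unsigned int bit = (blk_in_byte % GFS2_NBBY) * GFS2_BIT_SIZE;
 *
 *		return (byte >> bit) & GFS2_BIT_MASK;
 *	}
 */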
86 * gfs2_setbit - Set a bit in the bitmaps
87 * @rbm: The position of the bit to set
88 * @do_clone: Also set the clone bitmap, if it exists
89 * @new_state: the new state of the block
93 static inline void gfs2_setbit(const struct gfs2_rbm *rbm, bool do_clone,
94 unsigned char new_state)
96 unsigned char *byte1, *byte2, *end, cur_state;
97 struct gfs2_bitmap *bi = rbm_bi(rbm);
98 unsigned int buflen = bi->bi_bytes;
99 const unsigned int bit = (rbm->offset % GFS2_NBBY) * GFS2_BIT_SIZE;
101 byte1 = bi->bi_bh->b_data + bi->bi_offset + (rbm->offset / GFS2_NBBY);
102 end = bi->bi_bh->b_data + bi->bi_offset + buflen;
104 BUG_ON(byte1 >= end);
106 cur_state = (*byte1 >> bit) & GFS2_BIT_MASK;
108 if (unlikely(!valid_change[new_state * 4 + cur_state])) {
109 struct gfs2_sbd *sdp = rbm->rgd->rd_sbd;
111 fs_warn(sdp, "buf_blk = 0x%x old_state=%d, new_state=%d\n",
112 rbm->offset, cur_state, new_state);
113 fs_warn(sdp, "rgrp=0x%llx bi_start=0x%x biblk: 0x%llx\n",
114 (unsigned long long)rbm->rgd->rd_addr, bi->bi_start,
115 (unsigned long long)bi->bi_bh->b_blocknr);
116 fs_warn(sdp, "bi_offset=0x%x bi_bytes=0x%x block=0x%llx\n",
117 bi->bi_offset, bi->bi_bytes,
118 (unsigned long long)gfs2_rbm_to_block(rbm));
120 gfs2_consist_rgrpd(rbm->rgd);
123 *byte1 ^= (cur_state ^ new_state) << bit;
125 if (do_clone && bi->bi_clone) {
126 byte2 = bi->bi_clone + bi->bi_offset + (rbm->offset / GFS2_NBBY);
127 cur_state = (*byte2 >> bit) & GFS2_BIT_MASK;
128 *byte2 ^= (cur_state ^ new_state) << bit;
133 * gfs2_testbit - test a bit in the bitmaps
134 * @rbm: The bit to test
135 * @use_clone: If true, test the clone bitmap, not the official bitmap.
137 * Some callers like gfs2_unaligned_extlen need to test the clone bitmaps,
138 * not the "real" bitmaps, to avoid allocating recently freed blocks.
140 * Returns: The two bit block state of the requested bit
143 static inline u8 gfs2_testbit(const struct gfs2_rbm *rbm, bool use_clone)
145 struct gfs2_bitmap *bi = rbm_bi(rbm);
150 if (use_clone && bi->bi_clone)
151 buffer = bi->bi_clone;
153 buffer = bi->bi_bh->b_data;
154 buffer += bi->bi_offset;
155 byte = buffer + (rbm->offset / GFS2_NBBY);
156 bit = (rbm->offset % GFS2_NBBY) * GFS2_BIT_SIZE;
158 return (*byte >> bit) & GFS2_BIT_MASK;
163 * @ptr: Pointer to bitmap data
164 * @mask: Mask to use (normally 0x55555.... but adjusted for search start)
165 * @state: The state we are searching for
167 * We xor the bitmap data with a pattern which is the bitwise opposite
168 * of what we are looking for; this gives rise to a pattern of ones
169 * wherever there is a match. Since we have two bits per entry, we
170 * take this pattern, shift it down by one place and then AND it with
171 * the original. All the even bit positions (0,2,4, etc) then represent
172 * successful matches, so we mask with 0x55555..... to remove the unwanted
175 * This allows searching of a whole u64 at once (32 blocks) with a
176 * single test (on 64 bit arches).
179 static inline u64 gfs2_bit_search(const __le64 *ptr, u64 mask, u8 state)
182 static const u64 search[] = {
183 [0] = 0xffffffffffffffffULL,
184 [1] = 0xaaaaaaaaaaaaaaaaULL,
185 [2] = 0x5555555555555555ULL,
186 [3] = 0x0000000000000000ULL,
188 tmp = le64_to_cpu(*ptr) ^ search[state];
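/*
 * Worked example of the search above, using a single byte for clarity (the
 * real code operates on whole u64s): to find GFS2_BLKST_USED (binary 01)
 * pairs in the byte 0x19, whose pairs from the least significant end are
 * 01, 10, 01, 00:
 *
 *	tmp  = 0x19 ^ 0xaa;	pairs that matched become 11	 -> 0xb3
 *	tmp &= tmp >> 1;	only 11 pairs keep their low bit -> 0x11
 *	tmp &= 0x55;		keep even bit positions only	 -> 0x11
 *
 * Bits 0 and 4 are set, so blocks 0 and 2 of this byte hold the requested
 * state.
 */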
195 * rs_cmp - multi-block reservation range compare
196 * @start: start of the new reservation
197 * @len: number of blocks in the new reservation
198 * @rs: existing reservation to compare against
200 * returns: 1 if the block range is beyond the reach of the reservation
201 * -1 if the block range is before the start of the reservation
202 * 0 if the block range overlaps with the reservation
204 static inline int rs_cmp(u64 start, u32 len, struct gfs2_blkreserv *rs)
206 if (start >= rs->rs_start + rs->rs_requested)
208 if (rs->rs_start >= start + len)
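/*
 * Example of the convention above, with illustrative values: for a
 * reservation where rs_start == 1000 and rs_requested == 50,
 *
 *	rs_cmp(1050, 10, rs) returns  1	(range starts at or past the end)
 *	rs_cmp( 990, 10, rs) returns -1	(range ends at or before the start)
 *	rs_cmp( 995, 10, rs) returns  0	(range overlaps the reservation)
 */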
214 * gfs2_bitfit - Search an rgrp's bitmap buffer to find a bit-pair representing
215 * a block in a given allocation state.
216 * @buf: the buffer that holds the bitmaps
217 * @len: the length (in bytes) of the buffer
218 * @goal: start search at this block's bit-pair (within @buf)
219 * @state: GFS2_BLKST_XXX the state of the block we're looking for.
221 * Scope of @goal and returned block number is only within this bitmap buffer,
222 * not entire rgrp or filesystem. @buf will be offset from the actual
223 * beginning of a bitmap block buffer, skipping any header structures, but
224 * headers are always a multiple of 64 bits long so that the buffer is
225 * always aligned to a 64 bit boundary.
227 * The size of the buffer is in bytes, but it is assumed that it is
228 * always ok to read a complete multiple of 64 bits at the end
229 * of the block in case the end is not aligned to a natural boundary.
231 * Return: the block number (bitmap buffer scope) that was found
234 static u32 gfs2_bitfit(const u8 *buf, const unsigned int len,
237 u32 spoint = (goal << 1) & ((8*sizeof(u64)) - 1);
238 const __le64 *ptr = ((__le64 *)buf) + (goal >> 5);
239 const __le64 *end = (__le64 *)(buf + ALIGN(len, sizeof(u64)));
241 u64 mask = 0x5555555555555555ULL;
244 /* Mask off bits we don't care about at the start of the search */
246 tmp = gfs2_bit_search(ptr, mask, state);
248 while(tmp == 0 && ptr < end) {
249 tmp = gfs2_bit_search(ptr, 0x5555555555555555ULL, state);
252 /* Mask off any bits which are more than len bytes from the start */
253 if (ptr == end && (len & (sizeof(u64) - 1)))
254 tmp &= (((u64)~0) >> (64 - 8*(len & (sizeof(u64) - 1))));
255 /* Didn't find anything, so return */
260 bit /= 2; /* two bits per entry in the bitmap */
261 return (((const unsigned char *)ptr - buf) * GFS2_NBBY) + bit;
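/*
 * Index arithmetic sketch for the search above, with an illustrative goal:
 * for goal == 70 the bit pair of interest lives at bit 140 of the buffer, so
 *
 *	ptr    = (__le64 *)buf + (70 >> 5)	third u64, blocks 64..95
 *	spoint = (70 << 1) & 63			= 12
 *
 * and the bits below spoint (blocks 64..69) are masked off at the start of
 * the search, so the scan effectively begins at block 70 of that word.
 */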
265 * gfs2_rbm_from_block - Set the rbm based upon rgd and block number
266 * @rbm: The rbm with rgd already set correctly
267 * @block: The block number (filesystem relative)
269 * This sets the bi and offset members of an rbm based on a
270 * resource group and a filesystem relative block number. The
271 * resource group must be set in the rbm on entry, the bi and
272 * offset members will be set by this function.
274 * Returns: 0 on success, or an error code
277 static int gfs2_rbm_from_block(struct gfs2_rbm *rbm, u64 block)
279 if (!rgrp_contains_block(rbm->rgd, block))
282 rbm->offset = block - rbm->rgd->rd_data0;
283 /* Check if the block is within the first bitmap block */
284 if (rbm->offset < rbm_bi(rbm)->bi_blocks)
287 /* Adjust for the size diff between gfs2_meta_header and gfs2_rgrp */
288 rbm->offset += (sizeof(struct gfs2_rgrp) -
289 sizeof(struct gfs2_meta_header)) * GFS2_NBBY;
290 rbm->bii = rbm->offset / rbm->rgd->rd_sbd->sd_blocks_per_bitmap;
291 rbm->offset -= rbm->bii * rbm->rgd->rd_sbd->sd_blocks_per_bitmap;
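/*
 * Sketch of the conversion above, using made-up sizes: suppose the header
 * block's bitmap holds 90 blocks (bi_blocks of bitmap 0) while every later
 * bitmap block holds 100 (sd_blocks_per_bitmap). For an rgrp-relative offset
 * of 250, the adjustment adds the 10-block header difference:
 *
 *	offset = 250 + 10 = 260
 *	bii    = 260 / 100 = 2
 *	offset = 260 - 2 * 100 = 60
 *
 * which agrees with counting directly: blocks 0..89 live in bitmap 0,
 * 90..189 in bitmap 1 and 190..289 in bitmap 2, so block 250 sits at
 * offset 60 of bitmap 2.
 */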
296 * gfs2_rbm_add - add a number of blocks to an rbm
297 * @rbm: The rbm with rgd already set correctly
298 * @blocks: The number of blocks to add to rbm
300 * This function takes an existing rbm structure and adds a number of blocks to
303 * Returns: True if the new rbm would point past the end of the rgrp.
306 static bool gfs2_rbm_add(struct gfs2_rbm *rbm, u32 blocks)
308 struct gfs2_rgrpd *rgd = rbm->rgd;
309 struct gfs2_bitmap *bi = rgd->rd_bits + rbm->bii;
311 if (rbm->offset + blocks < bi->bi_blocks) {
312 rbm->offset += blocks;
315 blocks -= bi->bi_blocks - rbm->offset;
319 if (bi == rgd->rd_bits + rgd->rd_length)
321 if (blocks < bi->bi_blocks) {
322 rbm->offset = blocks;
323 rbm->bii = bi - rgd->rd_bits;
326 blocks -= bi->bi_blocks;
331 * gfs2_unaligned_extlen - Look for free blocks which are not byte aligned
332 * @rbm: Position to search (value/result)
333 * @n_unaligned: Number of unaligned blocks to check
334 * @len: Decremented for each block found (terminate on zero)
336 * Returns: true if a non-free block is encountered or the end of the resource group is reached.
340 static bool gfs2_unaligned_extlen(struct gfs2_rbm *rbm, u32 n_unaligned, u32 *len)
345 for (n = 0; n < n_unaligned; n++) {
346 res = gfs2_testbit(rbm, true);
347 if (res != GFS2_BLKST_FREE)
352 if (gfs2_rbm_add(rbm, 1))
360 * gfs2_free_extlen - Return extent length of free blocks
361 * @rrbm: Starting position
362 * @len: Max length to check
364 * Starting at the block specified by the rbm, see how many free blocks
365 * there are, not reading more than len blocks ahead. This can be done
366 * using memchr_inv when the blocks are byte aligned, but has to be done
367 * on a block by block basis in case of unaligned blocks. Also this
368 * function can cope with bitmap boundaries (although it must stop on
369 * a resource group boundary)
371 * Returns: Number of free blocks in the extent
374 static u32 gfs2_free_extlen(const struct gfs2_rbm *rrbm, u32 len)
376 struct gfs2_rbm rbm = *rrbm;
377 u32 n_unaligned = rbm.offset & 3;
381 u8 *ptr, *start, *end;
383 struct gfs2_bitmap *bi;
386 gfs2_unaligned_extlen(&rbm, 4 - n_unaligned, &len))
389 n_unaligned = len & 3;
390 /* Start is now byte aligned */
393 start = bi->bi_bh->b_data;
395 start = bi->bi_clone;
396 start += bi->bi_offset;
397 end = start + bi->bi_bytes;
398 BUG_ON(rbm.offset & 3);
399 start += (rbm.offset / GFS2_NBBY);
400 bytes = min_t(u32, len / GFS2_NBBY, (end - start));
401 ptr = memchr_inv(start, 0, bytes);
402 chunk_size = ((ptr == NULL) ? bytes : (ptr - start));
403 chunk_size *= GFS2_NBBY;
404 BUG_ON(len < chunk_size);
406 block = gfs2_rbm_to_block(&rbm);
407 if (gfs2_rbm_from_block(&rbm, block + chunk_size)) {
415 n_unaligned = len & 3;
418 /* Deal with any bits left over at the end */
420 gfs2_unaligned_extlen(&rbm, n_unaligned, &len);
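/*
 * Shape of the scan above, with illustrative numbers: starting at
 * rbm.offset == 6 with len == 21, n_unaligned is 6 & 3 == 2, so two blocks
 * are first tested one by one to reach a byte boundary (offset 8, len 19).
 * The bulk of the extent is then checked a byte at a time with memchr_inv()
 * (a zero byte means four free blocks), and the remaining len & 3 == 3
 * blocks are again handled bit by bit at the end.
 */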
426 * gfs2_bitcount - count the number of bits in a certain state
427 * @rgd: the resource group descriptor
428 * @buffer: the buffer that holds the bitmaps
429 * @buflen: the length (in bytes) of the buffer
430 * @state: the state of the block we're looking for
432 * Returns: The number of bits
435 static u32 gfs2_bitcount(struct gfs2_rgrpd *rgd, const u8 *buffer,
436 unsigned int buflen, u8 state)
438 const u8 *byte = buffer;
439 const u8 *end = buffer + buflen;
440 const u8 state1 = state << 2;
441 const u8 state2 = state << 4;
442 const u8 state3 = state << 6;
445 for (; byte < end; byte++) {
446 if (((*byte) & 0x03) == state)
448 if (((*byte) & 0x0C) == state1)
450 if (((*byte) & 0x30) == state2)
452 if (((*byte) & 0xC0) == state3)
460 * gfs2_rgrp_verify - Verify that a resource group is consistent
465 void gfs2_rgrp_verify(struct gfs2_rgrpd *rgd)
467 struct gfs2_sbd *sdp = rgd->rd_sbd;
468 struct gfs2_bitmap *bi = NULL;
469 u32 length = rgd->rd_length;
473 memset(count, 0, 4 * sizeof(u32));
475 /* Count # blocks in each of 4 possible allocation states */
476 for (buf = 0; buf < length; buf++) {
477 bi = rgd->rd_bits + buf;
478 for (x = 0; x < 4; x++)
479 count[x] += gfs2_bitcount(rgd,
485 if (count[0] != rgd->rd_free) {
486 gfs2_lm(sdp, "free data mismatch: %u != %u\n",
487 count[0], rgd->rd_free);
488 gfs2_consist_rgrpd(rgd);
492 tmp = rgd->rd_data - rgd->rd_free - rgd->rd_dinodes;
493 if (count[1] != tmp) {
494 gfs2_lm(sdp, "used data mismatch: %u != %u\n",
496 gfs2_consist_rgrpd(rgd);
500 if (count[2] + count[3] != rgd->rd_dinodes) {
501 gfs2_lm(sdp, "used metadata mismatch: %u != %u\n",
502 count[2] + count[3], rgd->rd_dinodes);
503 gfs2_consist_rgrpd(rgd);
509 * gfs2_blk2rgrpd - Find resource group for a given data/meta block number
510 * @sdp: The GFS2 superblock
511 * @blk: The data block number
512 * @exact: True if this needs to be an exact match
514 * The @exact argument should be set to true by most callers. The exception
515 * is when we need to match blocks which are not represented by the rgrp
516 * bitmap, but which are part of the rgrp (i.e. padding blocks) which are
517 * there for alignment purposes. Another way of looking at it is that @exact
518 * matches only valid data/metadata blocks, but with @exact false, it will
519 * match any block within the extent of the rgrp.
521 * Returns: The resource group, or NULL if not found
524 struct gfs2_rgrpd *gfs2_blk2rgrpd(struct gfs2_sbd *sdp, u64 blk, bool exact)
526 struct rb_node *n, *next;
527 struct gfs2_rgrpd *cur;
529 spin_lock(&sdp->sd_rindex_spin);
530 n = sdp->sd_rindex_tree.rb_node;
532 cur = rb_entry(n, struct gfs2_rgrpd, rd_node);
534 if (blk < cur->rd_addr)
536 else if (blk >= cur->rd_data0 + cur->rd_data)
539 spin_unlock(&sdp->sd_rindex_spin);
541 if (blk < cur->rd_addr)
543 if (blk >= cur->rd_data0 + cur->rd_data)
550 spin_unlock(&sdp->sd_rindex_spin);
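/*
 * Usage sketch (the call sites shown are illustrative): a lookup of a block
 * that must be a real data/metadata block passes an exact match,
 *
 *	rgd = gfs2_blk2rgrpd(sdp, blk, true);
 *
 * whereas code such as gfs2_fitrim(), which may pass an address that lands
 * in an rgrp's padding, uses false so that any block within the rgrp's
 * extent matches.
 */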
556 * gfs2_rgrpd_get_first - get the first Resource Group in the filesystem
557 * @sdp: The GFS2 superblock
559 * Returns: The first rgrp in the filesystem
562 struct gfs2_rgrpd *gfs2_rgrpd_get_first(struct gfs2_sbd *sdp)
564 const struct rb_node *n;
565 struct gfs2_rgrpd *rgd;
567 spin_lock(&sdp->sd_rindex_spin);
568 n = rb_first(&sdp->sd_rindex_tree);
569 rgd = rb_entry(n, struct gfs2_rgrpd, rd_node);
570 spin_unlock(&sdp->sd_rindex_spin);
576 * gfs2_rgrpd_get_next - get the next RG
577 * @rgd: the resource group descriptor
579 * Returns: The next rgrp
582 struct gfs2_rgrpd *gfs2_rgrpd_get_next(struct gfs2_rgrpd *rgd)
584 struct gfs2_sbd *sdp = rgd->rd_sbd;
585 const struct rb_node *n;
587 spin_lock(&sdp->sd_rindex_spin);
588 n = rb_next(&rgd->rd_node);
590 n = rb_first(&sdp->sd_rindex_tree);
592 if (unlikely(&rgd->rd_node == n)) {
593 spin_unlock(&sdp->sd_rindex_spin);
596 rgd = rb_entry(n, struct gfs2_rgrpd, rd_node);
597 spin_unlock(&sdp->sd_rindex_spin);
601 void check_and_update_goal(struct gfs2_inode *ip)
603 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
604 if (!ip->i_goal || gfs2_blk2rgrpd(sdp, ip->i_goal, 1) == NULL)
605 ip->i_goal = ip->i_no_addr;
608 void gfs2_free_clones(struct gfs2_rgrpd *rgd)
612 for (x = 0; x < rgd->rd_length; x++) {
613 struct gfs2_bitmap *bi = rgd->rd_bits + x;
619 static void dump_rs(struct seq_file *seq, const struct gfs2_blkreserv *rs,
620 const char *fs_id_buf)
622 struct gfs2_inode *ip = container_of(rs, struct gfs2_inode, i_res);
624 gfs2_print_dbg(seq, "%s B: n:%llu s:%llu f:%u\n",
626 (unsigned long long)ip->i_no_addr,
627 (unsigned long long)rs->rs_start,
632 * __rs_deltree - remove a multi-block reservation from the rgd tree
633 * @rs: The reservation to remove
636 static void __rs_deltree(struct gfs2_blkreserv *rs)
638 struct gfs2_rgrpd *rgd;
640 if (!gfs2_rs_active(rs))
644 trace_gfs2_rs(rs, TRACE_RS_TREEDEL);
645 rb_erase(&rs->rs_node, &rgd->rd_rstree);
646 RB_CLEAR_NODE(&rs->rs_node);
648 if (rs->rs_requested) {
649 /* return requested blocks to the rgrp */
650 BUG_ON(rs->rs_rgd->rd_requested < rs->rs_requested);
651 rs->rs_rgd->rd_requested -= rs->rs_requested;
653 /* The rgrp extent failure point is likely not to increase;
654 it will only do so if the freed blocks are somehow
655 contiguous with a span of free blocks that follows. Still,
656 it will force the number to be recalculated later. */
657 rgd->rd_extfail_pt += rs->rs_requested;
658 rs->rs_requested = 0;
663 * gfs2_rs_deltree - remove a multi-block reservation from the rgd tree
664 * @rs: The reservation to remove
667 void gfs2_rs_deltree(struct gfs2_blkreserv *rs)
669 struct gfs2_rgrpd *rgd;
673 spin_lock(&rgd->rd_rsspin);
675 BUG_ON(rs->rs_requested);
676 spin_unlock(&rgd->rd_rsspin);
681 * gfs2_rs_delete - delete a multi-block reservation
682 * @ip: The inode for this reservation
683 * @wcount: The inode's write count, or NULL
686 void gfs2_rs_delete(struct gfs2_inode *ip, atomic_t *wcount)
688 down_write(&ip->i_rw_mutex);
689 if ((wcount == NULL) || (atomic_read(wcount) <= 1))
690 gfs2_rs_deltree(&ip->i_res);
691 up_write(&ip->i_rw_mutex);
695 * return_all_reservations - return all reserved blocks back to the rgrp.
696 * @rgd: the rgrp that needs its space back
698 * We previously reserved a bunch of blocks for allocation. Now we need to
699 * give them back. This leaves the reservation structures intact, but removes
700 * all of their corresponding "no-fly zones".
702 static void return_all_reservations(struct gfs2_rgrpd *rgd)
705 struct gfs2_blkreserv *rs;
707 spin_lock(&rgd->rd_rsspin);
708 while ((n = rb_first(&rgd->rd_rstree))) {
709 rs = rb_entry(n, struct gfs2_blkreserv, rs_node);
712 spin_unlock(&rgd->rd_rsspin);
715 void gfs2_clear_rgrpd(struct gfs2_sbd *sdp)
718 struct gfs2_rgrpd *rgd;
719 struct gfs2_glock *gl;
721 while ((n = rb_first(&sdp->sd_rindex_tree))) {
722 rgd = rb_entry(n, struct gfs2_rgrpd, rd_node);
725 rb_erase(n, &sdp->sd_rindex_tree);
728 if (gl->gl_state != LM_ST_UNLOCKED) {
729 gfs2_glock_cb(gl, LM_ST_UNLOCKED);
730 flush_delayed_work(&gl->gl_work);
732 gfs2_rgrp_brelse(rgd);
733 glock_clear_object(gl, rgd);
737 gfs2_free_clones(rgd);
738 return_all_reservations(rgd);
741 kmem_cache_free(gfs2_rgrpd_cachep, rgd);
746 * compute_bitstructs - Compute the bitmap sizes
747 * @rgd: The resource group descriptor
749 * Calculates bitmap descriptors, one for each block that contains bitmap data
754 static int compute_bitstructs(struct gfs2_rgrpd *rgd)
756 struct gfs2_sbd *sdp = rgd->rd_sbd;
757 struct gfs2_bitmap *bi;
758 u32 length = rgd->rd_length; /* # blocks in hdr & bitmap */
759 u32 bytes_left, bytes;
765 rgd->rd_bits = kcalloc(length, sizeof(struct gfs2_bitmap), GFP_NOFS);
769 bytes_left = rgd->rd_bitbytes;
771 for (x = 0; x < length; x++) {
772 bi = rgd->rd_bits + x;
775 /* small rgrp; bitmap stored completely in header block */
778 bi->bi_offset = sizeof(struct gfs2_rgrp);
780 bi->bi_bytes = bytes;
781 bi->bi_blocks = bytes * GFS2_NBBY;
784 bytes = sdp->sd_sb.sb_bsize - sizeof(struct gfs2_rgrp);
785 bi->bi_offset = sizeof(struct gfs2_rgrp);
787 bi->bi_bytes = bytes;
788 bi->bi_blocks = bytes * GFS2_NBBY;
790 } else if (x + 1 == length) {
792 bi->bi_offset = sizeof(struct gfs2_meta_header);
793 bi->bi_start = rgd->rd_bitbytes - bytes_left;
794 bi->bi_bytes = bytes;
795 bi->bi_blocks = bytes * GFS2_NBBY;
798 bytes = sdp->sd_sb.sb_bsize -
799 sizeof(struct gfs2_meta_header);
800 bi->bi_offset = sizeof(struct gfs2_meta_header);
801 bi->bi_start = rgd->rd_bitbytes - bytes_left;
802 bi->bi_bytes = bytes;
803 bi->bi_blocks = bytes * GFS2_NBBY;
810 gfs2_consist_rgrpd(rgd);
813 bi = rgd->rd_bits + (length - 1);
814 if ((bi->bi_start + bi->bi_bytes) * GFS2_NBBY != rgd->rd_data) {
821 "start=%u len=%u offset=%u\n",
822 (unsigned long long)rgd->rd_addr,
824 (unsigned long long)rgd->rd_data0,
827 bi->bi_start, bi->bi_bytes, bi->bi_offset);
828 gfs2_consist_rgrpd(rgd);
836 * gfs2_ri_total - Total up the file system space, according to the rindex.
837 * @sdp: the filesystem
840 u64 gfs2_ri_total(struct gfs2_sbd *sdp)
843 struct inode *inode = sdp->sd_rindex;
844 struct gfs2_inode *ip = GFS2_I(inode);
845 char buf[sizeof(struct gfs2_rindex)];
848 for (rgrps = 0;; rgrps++) {
849 loff_t pos = rgrps * sizeof(struct gfs2_rindex);
851 if (pos + sizeof(struct gfs2_rindex) > i_size_read(inode))
853 error = gfs2_internal_read(ip, buf, &pos,
854 sizeof(struct gfs2_rindex));
855 if (error != sizeof(struct gfs2_rindex))
857 total_data += be32_to_cpu(((struct gfs2_rindex *)buf)->ri_data);
862 static int rgd_insert(struct gfs2_rgrpd *rgd)
864 struct gfs2_sbd *sdp = rgd->rd_sbd;
865 struct rb_node **newn = &sdp->sd_rindex_tree.rb_node, *parent = NULL;
867 /* Figure out where to put new node */
869 struct gfs2_rgrpd *cur = rb_entry(*newn, struct gfs2_rgrpd,
873 if (rgd->rd_addr < cur->rd_addr)
874 newn = &((*newn)->rb_left);
875 else if (rgd->rd_addr > cur->rd_addr)
876 newn = &((*newn)->rb_right);
881 rb_link_node(&rgd->rd_node, parent, newn);
882 rb_insert_color(&rgd->rd_node, &sdp->sd_rindex_tree);
888 * read_rindex_entry - Pull in a new resource index entry from the disk
889 * @ip: Pointer to the rindex inode
891 * Returns: 0 on success, > 0 on EOF, error code otherwise
894 static int read_rindex_entry(struct gfs2_inode *ip)
896 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
897 loff_t pos = sdp->sd_rgrps * sizeof(struct gfs2_rindex);
898 struct gfs2_rindex buf;
900 struct gfs2_rgrpd *rgd;
902 if (pos >= i_size_read(&ip->i_inode))
905 error = gfs2_internal_read(ip, (char *)&buf, &pos,
906 sizeof(struct gfs2_rindex));
908 if (error != sizeof(struct gfs2_rindex))
909 return (error == 0) ? 1 : error;
911 rgd = kmem_cache_zalloc(gfs2_rgrpd_cachep, GFP_NOFS);
917 rgd->rd_addr = be64_to_cpu(buf.ri_addr);
918 rgd->rd_length = be32_to_cpu(buf.ri_length);
919 rgd->rd_data0 = be64_to_cpu(buf.ri_data0);
920 rgd->rd_data = be32_to_cpu(buf.ri_data);
921 rgd->rd_bitbytes = be32_to_cpu(buf.ri_bitbytes);
922 spin_lock_init(&rgd->rd_rsspin);
923 mutex_init(&rgd->rd_mutex);
925 error = compute_bitstructs(rgd);
929 error = gfs2_glock_get(sdp, rgd->rd_addr,
930 &gfs2_rgrp_glops, CREATE, &rgd->rd_gl);
934 rgd->rd_rgl = (struct gfs2_rgrp_lvb *)rgd->rd_gl->gl_lksb.sb_lvbptr;
935 rgd->rd_flags &= ~GFS2_RDF_PREFERRED;
936 if (rgd->rd_data > sdp->sd_max_rg_data)
937 sdp->sd_max_rg_data = rgd->rd_data;
938 spin_lock(&sdp->sd_rindex_spin);
939 error = rgd_insert(rgd);
940 spin_unlock(&sdp->sd_rindex_spin);
942 glock_set_object(rgd->rd_gl, rgd);
946 error = 0; /* someone else read in the rgrp; free it and ignore it */
947 gfs2_glock_put(rgd->rd_gl);
952 kmem_cache_free(gfs2_rgrpd_cachep, rgd);
957 * set_rgrp_preferences - Run all the rgrps, selecting some we prefer to use
958 * @sdp: the GFS2 superblock
960 * The purpose of this function is to select a subset of the resource groups
961 * and mark them as PREFERRED. We do it in such a way that each node prefers
962 * to use a unique set of rgrps to minimize glock contention.
964 static void set_rgrp_preferences(struct gfs2_sbd *sdp)
966 struct gfs2_rgrpd *rgd, *first;
969 /* Skip an initial number of rgrps, based on this node's journal ID.
970 That should start each node out on its own set. */
971 rgd = gfs2_rgrpd_get_first(sdp);
972 for (i = 0; i < sdp->sd_lockstruct.ls_jid; i++)
973 rgd = gfs2_rgrpd_get_next(rgd);
977 rgd->rd_flags |= GFS2_RDF_PREFERRED;
978 for (i = 0; i < sdp->sd_journals; i++) {
979 rgd = gfs2_rgrpd_get_next(rgd);
980 if (!rgd || rgd == first)
983 } while (rgd && rgd != first);
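/*
 * Effect of the loop above, sketched for a hypothetical three-journal
 * cluster: the node with journal ID 1 skips one rgrp and then marks every
 * third one as preferred, i.e. rgrps 1, 4, 7, ..., while jid 0 prefers
 * 0, 3, 6, ... and jid 2 prefers 2, 5, 8, ..., so each node gravitates
 * towards its own set of resource groups.
 */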
987 * gfs2_ri_update - Pull in a new resource index from the disk
988 * @ip: pointer to the rindex inode
990 * Returns: 0 on successful update, error code otherwise
993 static int gfs2_ri_update(struct gfs2_inode *ip)
995 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
999 error = read_rindex_entry(ip);
1000 } while (error == 0);
1005 if (RB_EMPTY_ROOT(&sdp->sd_rindex_tree)) {
1006 fs_err(sdp, "no resource groups found in the file system.\n");
1009 set_rgrp_preferences(sdp);
1011 sdp->sd_rindex_uptodate = 1;
1016 * gfs2_rindex_update - Update the rindex if required
1017 * @sdp: The GFS2 superblock
1019 * We grab a lock on the rindex inode to make sure that it doesn't
1020 * change whilst we are performing an operation. We keep this lock
1021 * for quite long periods of time compared to other locks. This
1022 * doesn't matter, since it is shared and it is very, very rarely
1023 * accessed in the exclusive mode (i.e. only when expanding the filesystem).
1025 * This makes sure that we're using the latest copy of the resource index
1026 * special file, which might have been updated if someone expanded the
1027 * filesystem (via gfs2_grow utility), which adds new resource groups.
1029 * Returns: 0 on success, error code otherwise
1032 int gfs2_rindex_update(struct gfs2_sbd *sdp)
1034 struct gfs2_inode *ip = GFS2_I(sdp->sd_rindex);
1035 struct gfs2_glock *gl = ip->i_gl;
1036 struct gfs2_holder ri_gh;
1038 int unlock_required = 0;
1040 /* Read new copy from disk if we don't have the latest */
1041 if (!sdp->sd_rindex_uptodate) {
1042 if (!gfs2_glock_is_locked_by_me(gl)) {
1043 error = gfs2_glock_nq_init(gl, LM_ST_SHARED, 0, &ri_gh);
1046 unlock_required = 1;
1048 if (!sdp->sd_rindex_uptodate)
1049 error = gfs2_ri_update(ip);
1050 if (unlock_required)
1051 gfs2_glock_dq_uninit(&ri_gh);
1057 static void gfs2_rgrp_in(struct gfs2_rgrpd *rgd, const void *buf)
1059 const struct gfs2_rgrp *str = buf;
1062 rg_flags = be32_to_cpu(str->rg_flags);
1063 rg_flags &= ~GFS2_RDF_MASK;
1064 rgd->rd_flags &= GFS2_RDF_MASK;
1065 rgd->rd_flags |= rg_flags;
1066 rgd->rd_free = be32_to_cpu(str->rg_free);
1067 rgd->rd_dinodes = be32_to_cpu(str->rg_dinodes);
1068 rgd->rd_igeneration = be64_to_cpu(str->rg_igeneration);
1069 /* rd_data0, rd_data and rd_bitbytes already set from rindex */
1072 static void gfs2_rgrp_ondisk2lvb(struct gfs2_rgrp_lvb *rgl, const void *buf)
1074 const struct gfs2_rgrp *str = buf;
1076 rgl->rl_magic = cpu_to_be32(GFS2_MAGIC);
1077 rgl->rl_flags = str->rg_flags;
1078 rgl->rl_free = str->rg_free;
1079 rgl->rl_dinodes = str->rg_dinodes;
1080 rgl->rl_igeneration = str->rg_igeneration;
1084 static void gfs2_rgrp_out(struct gfs2_rgrpd *rgd, void *buf)
1086 struct gfs2_rgrpd *next = gfs2_rgrpd_get_next(rgd);
1087 struct gfs2_rgrp *str = buf;
1090 str->rg_flags = cpu_to_be32(rgd->rd_flags & ~GFS2_RDF_MASK);
1091 str->rg_free = cpu_to_be32(rgd->rd_free);
1092 str->rg_dinodes = cpu_to_be32(rgd->rd_dinodes);
1095 else if (next->rd_addr > rgd->rd_addr)
1096 str->rg_skip = cpu_to_be32(next->rd_addr - rgd->rd_addr);
1097 str->rg_igeneration = cpu_to_be64(rgd->rd_igeneration);
1098 str->rg_data0 = cpu_to_be64(rgd->rd_data0);
1099 str->rg_data = cpu_to_be32(rgd->rd_data);
1100 str->rg_bitbytes = cpu_to_be32(rgd->rd_bitbytes);
1102 crc = gfs2_disk_hash(buf, sizeof(struct gfs2_rgrp));
1103 str->rg_crc = cpu_to_be32(crc);
1105 memset(&str->rg_reserved, 0, sizeof(str->rg_reserved));
1106 gfs2_rgrp_ondisk2lvb(rgd->rd_rgl, buf);
1109 static int gfs2_rgrp_lvb_valid(struct gfs2_rgrpd *rgd)
1111 struct gfs2_rgrp_lvb *rgl = rgd->rd_rgl;
1112 struct gfs2_rgrp *str = (struct gfs2_rgrp *)rgd->rd_bits[0].bi_bh->b_data;
1113 struct gfs2_sbd *sdp = rgd->rd_sbd;
1116 if (rgl->rl_flags != str->rg_flags) {
1117 fs_warn(sdp, "GFS2: rgd: %llu lvb flag mismatch %u/%u",
1118 (unsigned long long)rgd->rd_addr,
1119 be32_to_cpu(rgl->rl_flags), be32_to_cpu(str->rg_flags));
1122 if (rgl->rl_free != str->rg_free) {
1123 fs_warn(sdp, "GFS2: rgd: %llu lvb free mismatch %u/%u",
1124 (unsigned long long)rgd->rd_addr,
1125 be32_to_cpu(rgl->rl_free), be32_to_cpu(str->rg_free));
1128 if (rgl->rl_dinodes != str->rg_dinodes) {
1129 fs_warn(sdp, "GFS2: rgd: %llu lvb dinode mismatch %u/%u",
1130 (unsigned long long)rgd->rd_addr,
1131 be32_to_cpu(rgl->rl_dinodes),
1132 be32_to_cpu(str->rg_dinodes));
1135 if (rgl->rl_igeneration != str->rg_igeneration) {
1136 fs_warn(sdp, "GFS2: rgd: %llu lvb igen mismatch %llu/%llu",
1137 (unsigned long long)rgd->rd_addr,
1138 (unsigned long long)be64_to_cpu(rgl->rl_igeneration),
1139 (unsigned long long)be64_to_cpu(str->rg_igeneration));
1145 static u32 count_unlinked(struct gfs2_rgrpd *rgd)
1147 struct gfs2_bitmap *bi;
1148 const u32 length = rgd->rd_length;
1149 const u8 *buffer = NULL;
1150 u32 i, goal, count = 0;
1152 for (i = 0, bi = rgd->rd_bits; i < length; i++, bi++) {
1154 buffer = bi->bi_bh->b_data + bi->bi_offset;
1155 WARN_ON(!buffer_uptodate(bi->bi_bh));
1156 while (goal < bi->bi_blocks) {
1157 goal = gfs2_bitfit(buffer, bi->bi_bytes, goal,
1158 GFS2_BLKST_UNLINKED);
1159 if (goal == BFITNOENT)
1169 static void rgrp_set_bitmap_flags(struct gfs2_rgrpd *rgd)
1171 struct gfs2_bitmap *bi;
1175 for (x = 0; x < rgd->rd_length; x++) {
1176 bi = rgd->rd_bits + x;
1177 clear_bit(GBF_FULL, &bi->bi_flags);
1180 for (x = 0; x < rgd->rd_length; x++) {
1181 bi = rgd->rd_bits + x;
1182 set_bit(GBF_FULL, &bi->bi_flags);
1188 * gfs2_rgrp_go_instantiate - Read in a RG's header and bitmaps
1189 * @gh: the glock holder representing the rgrpd to read in
1191 * Read in all of a Resource Group's header and bitmap blocks.
1192 * Caller must eventually call gfs2_rgrp_brelse() to free the bitmaps.
1197 int gfs2_rgrp_go_instantiate(struct gfs2_holder *gh)
1199 struct gfs2_glock *gl = gh->gh_gl;
1200 struct gfs2_rgrpd *rgd = gl->gl_object;
1201 struct gfs2_sbd *sdp = rgd->rd_sbd;
1202 unsigned int length = rgd->rd_length;
1203 struct gfs2_bitmap *bi;
1207 if (rgd->rd_bits[0].bi_bh != NULL)
1210 for (x = 0; x < length; x++) {
1211 bi = rgd->rd_bits + x;
1212 error = gfs2_meta_read(gl, rgd->rd_addr + x, 0, 0, &bi->bi_bh);
1217 for (y = length; y--;) {
1218 bi = rgd->rd_bits + y;
1219 error = gfs2_meta_wait(sdp, bi->bi_bh);
1222 if (gfs2_metatype_check(sdp, bi->bi_bh, y ? GFS2_METATYPE_RB :
1223 GFS2_METATYPE_RG)) {
1229 gfs2_rgrp_in(rgd, (rgd->rd_bits[0].bi_bh)->b_data);
1230 rgrp_set_bitmap_flags(rgd);
1231 rgd->rd_flags |= GFS2_RDF_CHECK;
1232 rgd->rd_free_clone = rgd->rd_free;
1233 GLOCK_BUG_ON(rgd->rd_gl, rgd->rd_reserved);
1234 /* max out the rgrp allocation failure point */
1235 rgd->rd_extfail_pt = rgd->rd_free;
1236 if (cpu_to_be32(GFS2_MAGIC) != rgd->rd_rgl->rl_magic) {
1237 rgd->rd_rgl->rl_unlinked = cpu_to_be32(count_unlinked(rgd));
1238 gfs2_rgrp_ondisk2lvb(rgd->rd_rgl,
1239 rgd->rd_bits[0].bi_bh->b_data);
1240 } else if (sdp->sd_args.ar_rgrplvb) {
1241 if (!gfs2_rgrp_lvb_valid(rgd)){
1242 gfs2_consist_rgrpd(rgd);
1246 if (rgd->rd_rgl->rl_unlinked == 0)
1247 rgd->rd_flags &= ~GFS2_RDF_CHECK;
1253 bi = rgd->rd_bits + x;
1256 gfs2_assert_warn(sdp, !bi->bi_clone);
1261 static int update_rgrp_lvb(struct gfs2_rgrpd *rgd, struct gfs2_holder *gh)
1265 if (!test_bit(GLF_INSTANTIATE_NEEDED, &gh->gh_gl->gl_flags))
1268 if (cpu_to_be32(GFS2_MAGIC) != rgd->rd_rgl->rl_magic)
1269 return gfs2_instantiate(gh);
1271 rl_flags = be32_to_cpu(rgd->rd_rgl->rl_flags);
1272 rl_flags &= ~GFS2_RDF_MASK;
1273 rgd->rd_flags &= GFS2_RDF_MASK;
1274 rgd->rd_flags |= (rl_flags | GFS2_RDF_CHECK);
1275 if (rgd->rd_rgl->rl_unlinked == 0)
1276 rgd->rd_flags &= ~GFS2_RDF_CHECK;
1277 rgd->rd_free = be32_to_cpu(rgd->rd_rgl->rl_free);
1278 rgrp_set_bitmap_flags(rgd);
1279 rgd->rd_free_clone = rgd->rd_free;
1280 GLOCK_BUG_ON(rgd->rd_gl, rgd->rd_reserved);
1281 /* max out the rgrp allocation failure point */
1282 rgd->rd_extfail_pt = rgd->rd_free;
1283 rgd->rd_dinodes = be32_to_cpu(rgd->rd_rgl->rl_dinodes);
1284 rgd->rd_igeneration = be64_to_cpu(rgd->rd_rgl->rl_igeneration);
1289 * gfs2_rgrp_brelse - Release RG bitmaps read in with gfs2_rgrp_bh_get()
1290 * @rgd: The resource group
1294 void gfs2_rgrp_brelse(struct gfs2_rgrpd *rgd)
1296 int x, length = rgd->rd_length;
1298 for (x = 0; x < length; x++) {
1299 struct gfs2_bitmap *bi = rgd->rd_bits + x;
1305 set_bit(GLF_INSTANTIATE_NEEDED, &rgd->rd_gl->gl_flags);
1308 int gfs2_rgrp_send_discards(struct gfs2_sbd *sdp, u64 offset,
1309 struct buffer_head *bh,
1310 const struct gfs2_bitmap *bi, unsigned minlen, u64 *ptrimmed)
1312 struct super_block *sb = sdp->sd_vfs;
1315 sector_t nr_blks = 0;
1321 for (x = 0; x < bi->bi_bytes; x++) {
1322 const u8 *clone = bi->bi_clone ? bi->bi_clone : bi->bi_bh->b_data;
1323 clone += bi->bi_offset;
1326 const u8 *orig = bh->b_data + bi->bi_offset + x;
1327 diff = ~(*orig | (*orig >> 1)) & (*clone | (*clone >> 1));
1329 diff = ~(*clone | (*clone >> 1));
1334 blk = offset + ((bi->bi_start + x) * GFS2_NBBY);
1338 goto start_new_extent;
1339 if ((start + nr_blks) != blk) {
1340 if (nr_blks >= minlen) {
1341 rv = sb_issue_discard(sb,
1358 if (nr_blks >= minlen) {
1359 rv = sb_issue_discard(sb, start, nr_blks, GFP_NOFS, 0);
1365 *ptrimmed = trimmed;
1369 if (sdp->sd_args.ar_discard)
1370 fs_warn(sdp, "error %d on discard request, turning discards off for this filesystem\n", rv);
1371 sdp->sd_args.ar_discard = 0;
1376 * gfs2_fitrim - Generate discard requests for unused bits of the filesystem
1377 * @filp: Any file on the filesystem
1378 * @argp: Pointer to the arguments (also used to pass result)
1380 * Returns: 0 on success, otherwise error code
1383 int gfs2_fitrim(struct file *filp, void __user *argp)
1385 struct inode *inode = file_inode(filp);
1386 struct gfs2_sbd *sdp = GFS2_SB(inode);
1387 struct request_queue *q = bdev_get_queue(sdp->sd_vfs->s_bdev);
1388 struct buffer_head *bh;
1389 struct gfs2_rgrpd *rgd;
1390 struct gfs2_rgrpd *rgd_end;
1391 struct gfs2_holder gh;
1392 struct fstrim_range r;
1396 u64 start, end, minlen;
1398 unsigned bs_shift = sdp->sd_sb.sb_bsize_shift;
1400 if (!capable(CAP_SYS_ADMIN))
1403 if (!test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))
1406 if (!blk_queue_discard(q))
1409 if (copy_from_user(&r, argp, sizeof(r)))
1412 ret = gfs2_rindex_update(sdp);
1416 start = r.start >> bs_shift;
1417 end = start + (r.len >> bs_shift);
1418 minlen = max_t(u64, r.minlen,
1419 q->limits.discard_granularity) >> bs_shift;
1421 if (end <= start || minlen > sdp->sd_max_rg_data)
1424 rgd = gfs2_blk2rgrpd(sdp, start, 0);
1425 rgd_end = gfs2_blk2rgrpd(sdp, end, 0);
1427 if ((gfs2_rgrpd_get_first(sdp) == gfs2_rgrpd_get_next(rgd_end))
1428 && (start > rgd_end->rd_data0 + rgd_end->rd_data))
1429 return -EINVAL; /* start is beyond the end of the fs */
1433 ret = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE,
1434 LM_FLAG_NODE_SCOPE, &gh);
1438 if (!(rgd->rd_flags & GFS2_RGF_TRIMMED)) {
1439 /* Trim each bitmap in the rgrp */
1440 for (x = 0; x < rgd->rd_length; x++) {
1441 struct gfs2_bitmap *bi = rgd->rd_bits + x;
1442 rgrp_lock_local(rgd);
1443 ret = gfs2_rgrp_send_discards(sdp,
1444 rgd->rd_data0, NULL, bi, minlen,
1446 rgrp_unlock_local(rgd);
1448 gfs2_glock_dq_uninit(&gh);
1454 /* Mark rgrp as having been trimmed */
1455 ret = gfs2_trans_begin(sdp, RES_RG_HDR, 0);
1457 bh = rgd->rd_bits[0].bi_bh;
1458 rgrp_lock_local(rgd);
1459 rgd->rd_flags |= GFS2_RGF_TRIMMED;
1460 gfs2_trans_add_meta(rgd->rd_gl, bh);
1461 gfs2_rgrp_out(rgd, bh->b_data);
1462 rgrp_unlock_local(rgd);
1463 gfs2_trans_end(sdp);
1466 gfs2_glock_dq_uninit(&gh);
1471 rgd = gfs2_rgrpd_get_next(rgd);
1475 r.len = trimmed << bs_shift;
1476 if (copy_to_user(argp, &r, sizeof(r)))
1483 * rs_insert - insert a new multi-block reservation into the rgrp's rb_tree
1484 * @ip: the inode structure
1487 static void rs_insert(struct gfs2_inode *ip)
1489 struct rb_node **newn, *parent = NULL;
1491 struct gfs2_blkreserv *rs = &ip->i_res;
1492 struct gfs2_rgrpd *rgd = rs->rs_rgd;
1494 BUG_ON(gfs2_rs_active(rs));
1496 spin_lock(&rgd->rd_rsspin);
1497 newn = &rgd->rd_rstree.rb_node;
1499 struct gfs2_blkreserv *cur =
1500 rb_entry(*newn, struct gfs2_blkreserv, rs_node);
1503 rc = rs_cmp(rs->rs_start, rs->rs_requested, cur);
1505 newn = &((*newn)->rb_right);
1507 newn = &((*newn)->rb_left);
1509 spin_unlock(&rgd->rd_rsspin);
1515 rb_link_node(&rs->rs_node, parent, newn);
1516 rb_insert_color(&rs->rs_node, &rgd->rd_rstree);
1518 /* Do our rgrp accounting for the reservation */
1519 rgd->rd_requested += rs->rs_requested; /* blocks requested */
1520 spin_unlock(&rgd->rd_rsspin);
1521 trace_gfs2_rs(rs, TRACE_RS_INSERT);
1525 * rgd_free - return the number of free blocks we can allocate
1526 * @rgd: the resource group
1527 * @rs: The reservation to free
1529 * This function returns the number of free blocks for an rgrp.
1530 * That's the clone-free blocks (blocks that are free, not including those
1531 * still being used for unlinked files that haven't been deleted.)
1533 * It also subtracts any blocks reserved by someone else, but does not
1534 * include free blocks that are still part of our current reservation,
1535 * because obviously we can (and will) allocate them.
1537 static inline u32 rgd_free(struct gfs2_rgrpd *rgd, struct gfs2_blkreserv *rs)
1539 u32 tot_reserved, tot_free;
1541 if (WARN_ON_ONCE(rgd->rd_requested < rs->rs_requested))
1543 tot_reserved = rgd->rd_requested - rs->rs_requested;
1545 if (rgd->rd_free_clone < tot_reserved)
1548 tot_free = rgd->rd_free_clone - tot_reserved;
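/*
 * Numeric sketch of the accounting above, with made-up values: with
 * rd_free_clone == 500 clone-free blocks, rd_requested == 120 blocks
 * requested in total and rs_requested == 20 of those belonging to the
 * caller's own reservation,
 *
 *	tot_reserved = 120 - 20  = 100
 *	tot_free     = 500 - 100 = 400
 *
 * so 400 blocks are treated as available to this allocator.
 */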
1554 * rg_mblk_search - find a group of multiple free blocks to form a reservation
1555 * @rgd: the resource group descriptor
1556 * @ip: pointer to the inode for which we're reserving blocks
1557 * @ap: the allocation parameters
1561 static void rg_mblk_search(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip,
1562 const struct gfs2_alloc_parms *ap)
1564 struct gfs2_rbm rbm = { .rgd = rgd, };
1566 struct gfs2_blkreserv *rs = &ip->i_res;
1568 u32 free_blocks, blocks_available;
1570 struct inode *inode = &ip->i_inode;
1572 spin_lock(&rgd->rd_rsspin);
1573 free_blocks = rgd_free(rgd, rs);
1574 if (rgd->rd_free_clone < rgd->rd_requested)
1576 blocks_available = rgd->rd_free_clone - rgd->rd_reserved;
1577 if (rgd == rs->rs_rgd)
1578 blocks_available += rs->rs_reserved;
1579 spin_unlock(&rgd->rd_rsspin);
1581 if (S_ISDIR(inode->i_mode))
1584 extlen = max_t(u32, atomic_read(&ip->i_sizehint), ap->target);
1585 extlen = clamp(extlen, (u32)RGRP_RSRV_MINBLKS, free_blocks);
1587 if (free_blocks < extlen || blocks_available < extlen)
1590 /* Find bitmap block that contains bits for goal block */
1591 if (rgrp_contains_block(rgd, ip->i_goal))
1594 goal = rgd->rd_last_alloc + rgd->rd_data0;
1596 if (WARN_ON(gfs2_rbm_from_block(&rbm, goal)))
1599 ret = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, &extlen, &ip->i_res, true);
1601 rs->rs_start = gfs2_rbm_to_block(&rbm);
1602 rs->rs_requested = extlen;
1605 if (goal == rgd->rd_last_alloc + rgd->rd_data0)
1606 rgd->rd_last_alloc = 0;
1611 * gfs2_next_unreserved_block - Return next block that is not reserved
1612 * @rgd: The resource group
1613 * @block: The starting block
1614 * @length: The required length
1615 * @ignore_rs: Reservation to ignore
1617 * If the block does not appear in any reservation, then return the
1618 * block number unchanged. If it does appear in the reservation, then
1619 * keep looking through the tree of reservations in order to find the
1620 * first block number which is not reserved.
1623 static u64 gfs2_next_unreserved_block(struct gfs2_rgrpd *rgd, u64 block,
1625 struct gfs2_blkreserv *ignore_rs)
1627 struct gfs2_blkreserv *rs;
1631 spin_lock(&rgd->rd_rsspin);
1632 n = rgd->rd_rstree.rb_node;
1634 rs = rb_entry(n, struct gfs2_blkreserv, rs_node);
1635 rc = rs_cmp(block, length, rs);
1645 while (rs_cmp(block, length, rs) == 0 && rs != ignore_rs) {
1646 block = rs->rs_start + rs->rs_requested;
1650 rs = rb_entry(n, struct gfs2_blkreserv, rs_node);
1654 spin_unlock(&rgd->rd_rsspin);
1659 * gfs2_reservation_check_and_update - Check for reservations during block alloc
1660 * @rbm: The current position in the resource group
1661 * @rs: Our own reservation
1662 * @minext: The minimum extent length
1663 * @maxext: A pointer to the maximum extent structure
1665 * This checks the current position in the rgrp to see whether there is
1666 * a reservation covering this block. If not then this function is a
1667 * no-op. If there is, then the position is moved to the end of the
1668 * contiguous reservation(s) so that we are pointing at the first
1669 * non-reserved block.
1671 * Returns: 0 if no reservation, 1 if @rbm has changed, otherwise an error
1674 static int gfs2_reservation_check_and_update(struct gfs2_rbm *rbm,
1675 struct gfs2_blkreserv *rs,
1677 struct gfs2_extent *maxext)
1679 u64 block = gfs2_rbm_to_block(rbm);
1684 * If we have a minimum extent length, then skip over any extent
1685 * which is less than the min extent length in size.
1688 extlen = gfs2_free_extlen(rbm, minext);
1689 if (extlen <= maxext->len)
1694 * Check the extent which has been found against the reservations
1695 * and skip if parts of it are already reserved
1697 nblock = gfs2_next_unreserved_block(rbm->rgd, block, extlen, rs);
1698 if (nblock == block) {
1699 if (!minext || extlen >= minext)
1702 if (extlen > maxext->len) {
1703 maxext->len = extlen;
1707 u64 len = nblock - block;
1708 if (len >= (u64)1 << 32)
1713 if (gfs2_rbm_add(rbm, extlen))
1719 * gfs2_rbm_find - Look for blocks of a particular state
1720 * @rbm: Value/result starting position and final position
1721 * @state: The state which we want to find
1722 * @minext: Pointer to the requested extent length
1723 * This is updated to be the actual reservation size.
1724 * @rs: Our own reservation (NULL to skip checking for reservations)
1725 * @nowrap: Stop looking at the end of the rgrp, rather than wrapping
1726 * around until we've reached the starting point.
1729 * - If looking for free blocks, we set GBF_FULL on each bitmap which
1730 * has no free blocks in it.
1731 * - If looking for free blocks, we set rd_extfail_pt on each rgrp which
1732 * has come up short on a free block search.
1734 * Returns: 0 on success, -ENOSPC if there is no block of the requested state
1737 static int gfs2_rbm_find(struct gfs2_rbm *rbm, u8 state, u32 *minext,
1738 struct gfs2_blkreserv *rs, bool nowrap)
1740 bool scan_from_start = rbm->bii == 0 && rbm->offset == 0;
1741 struct buffer_head *bh;
1745 bool wrapped = false;
1747 struct gfs2_bitmap *bi;
1748 struct gfs2_extent maxext = { .rbm.rgd = rbm->rgd, };
1751 * Determine the last bitmap to search. If we're not starting at the
1752 * beginning of a bitmap, we need to search that bitmap twice to scan
1753 * the entire resource group.
1755 last_bii = rbm->bii - (rbm->offset == 0);
1759 if (test_bit(GBF_FULL, &bi->bi_flags) &&
1760 (state == GFS2_BLKST_FREE))
1764 buffer = bh->b_data + bi->bi_offset;
1765 WARN_ON(!buffer_uptodate(bh));
1766 if (state != GFS2_BLKST_UNLINKED && bi->bi_clone)
1767 buffer = bi->bi_clone + bi->bi_offset;
1768 offset = gfs2_bitfit(buffer, bi->bi_bytes, rbm->offset, state);
1769 if (offset == BFITNOENT) {
1770 if (state == GFS2_BLKST_FREE && rbm->offset == 0)
1771 set_bit(GBF_FULL, &bi->bi_flags);
1774 rbm->offset = offset;
1778 ret = gfs2_reservation_check_and_update(rbm, rs, *minext,
1784 if (ret == -E2BIG) {
1787 goto res_covered_end_of_rgrp;
1791 next_bitmap: /* Find next bitmap in the rgrp */
1794 if (rbm->bii == rbm->rgd->rd_length)
1796 res_covered_end_of_rgrp:
1797 if (rbm->bii == 0) {
1805 /* Have we scanned the entire resource group? */
1806 if (wrapped && rbm->bii > last_bii)
1810 if (state != GFS2_BLKST_FREE)
1813 /* If the extent was too small, and it's smaller than the smallest
1814 to have failed before, remember for future reference that it's
1815 useless to search this rgrp again for this amount or more. */
1816 if (wrapped && (scan_from_start || rbm->bii > last_bii) &&
1817 *minext < rbm->rgd->rd_extfail_pt)
1818 rbm->rgd->rd_extfail_pt = *minext - 1;
1820 /* If the maximum extent we found is big enough to fulfill the
1821 minimum requirements, use it anyway. */
1824 *minext = maxext.len;
1832 * try_rgrp_unlink - Look for any unlinked, allocated, but unused inodes
1834 * @last_unlinked: block address of the last dinode we unlinked
1835 * @skip: block address we should explicitly not unlink
1837 * Returns: nothing. Instead of returning inodes, this queues delete work
1838 * for each unlinked dinode that is found.
1841 static void try_rgrp_unlink(struct gfs2_rgrpd *rgd, u64 *last_unlinked, u64 skip)
1844 struct gfs2_sbd *sdp = rgd->rd_sbd;
1845 struct gfs2_glock *gl;
1846 struct gfs2_inode *ip;
1849 struct gfs2_rbm rbm = { .rgd = rgd, .bii = 0, .offset = 0 };
1852 error = gfs2_rbm_find(&rbm, GFS2_BLKST_UNLINKED, NULL, NULL,
1854 if (error == -ENOSPC)
1856 if (WARN_ON_ONCE(error))
1859 block = gfs2_rbm_to_block(&rbm);
1860 if (gfs2_rbm_from_block(&rbm, block + 1))
1862 if (*last_unlinked != NO_BLOCK && block <= *last_unlinked)
1866 *last_unlinked = block;
1868 error = gfs2_glock_get(sdp, block, &gfs2_iopen_glops, CREATE, &gl);
1872 /* If the inode is already in cache, we can ignore it here
1873 * because the existing inode disposal code will deal with
1874 * it when all refs have gone away. Accessing gl_object like
1875 * this is not safe in general. Here it is ok because we do
1876 * not dereference the pointer, and we only need an approx
1877 * answer to whether it is NULL or not.
1881 if (ip || !gfs2_queue_delete_work(gl, 0))
1886 /* Limit reclaim to sensible number of tasks */
1887 if (found > NR_CPUS)
1891 rgd->rd_flags &= ~GFS2_RDF_CHECK;
1896 * gfs2_rgrp_congested - Use stats to figure out whether an rgrp is congested
1897 * @rgd: The rgrp in question
1898 * @loops: An indication of how picky we can be (0=very, 1=less so)
1900 * This function uses the recently added glock statistics in order to
1901 * figure out whether a particular resource group is suffering from
1902 * contention from multiple nodes. This is done purely on the basis
1903 * of timings, since this is the only data we have to work with and
1904 * our aim here is to reject a resource group which is highly contended
1905 * but (very important) not to do this too often in order to ensure that
1906 * we do not land up introducing fragmentation by changing resource
1907 * groups when not actually required.
1909 * The calculation is fairly simple, we want to know whether the SRTTB
1910 * (i.e. smoothed round trip time for blocking operations) to acquire
1911 * the lock for this rgrp's glock is significantly greater than the
1912 * time taken for resource groups on average. We introduce a margin in
1913 * the form of the variable @var which is computed as the sum of the two
1914 * respective variances, and multiplied by a factor depending on @loops
1915 * and whether we have a lot of data to base the decision on. This is
1916 * then tested against the square difference of the means in order to
1917 * decide whether the result is statistically significant or not.
1919 * Returns: A boolean verdict on the congestion status
1922 static bool gfs2_rgrp_congested(const struct gfs2_rgrpd *rgd, int loops)
1924 const struct gfs2_glock *gl = rgd->rd_gl;
1925 const struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
1926 struct gfs2_lkstats *st;
1927 u64 r_dcount, l_dcount;
1928 u64 l_srttb, a_srttb = 0;
1932 int cpu, nonzero = 0;
1935 for_each_present_cpu(cpu) {
1936 st = &per_cpu_ptr(sdp->sd_lkstats, cpu)->lkstats[LM_TYPE_RGRP];
1937 if (st->stats[GFS2_LKS_SRTTB]) {
1938 a_srttb += st->stats[GFS2_LKS_SRTTB];
1942 st = &this_cpu_ptr(sdp->sd_lkstats)->lkstats[LM_TYPE_RGRP];
1944 do_div(a_srttb, nonzero);
1945 r_dcount = st->stats[GFS2_LKS_DCOUNT];
1946 var = st->stats[GFS2_LKS_SRTTVARB] +
1947 gl->gl_stats.stats[GFS2_LKS_SRTTVARB];
1950 l_srttb = gl->gl_stats.stats[GFS2_LKS_SRTTB];
1951 l_dcount = gl->gl_stats.stats[GFS2_LKS_DCOUNT];
1953 if ((l_dcount < 1) || (r_dcount < 1) || (a_srttb == 0))
1956 srttb_diff = a_srttb - l_srttb;
1957 sqr_diff = srttb_diff * srttb_diff;
1960 if (l_dcount < 8 || r_dcount < 8)
1965 return ((srttb_diff < 0) && (sqr_diff > var));
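/*
 * Rough numeric sketch of the test above, with invented figures: if the
 * all-rgrp average SRTTB is 1000 and this rgrp's glock shows 1600, then
 * srttb_diff == -600 and sqr_diff == 360000. With a combined variance
 * margin of 250000 the rgrp is reported as congested; with a margin of
 * 500000 it is not, since the difference is not considered statistically
 * significant.
 */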
1969 * gfs2_rgrp_used_recently
1970 * @rs: The block reservation with the rgrp to test
1971 * @msecs: The time limit in milliseconds
1973 * Returns: True if the rgrp glock has been used within the time limit
1975 static bool gfs2_rgrp_used_recently(const struct gfs2_blkreserv *rs,
1980 tdiff = ktime_to_ns(ktime_sub(ktime_get_real(),
1981 rs->rs_rgd->rd_gl->gl_dstamp));
1983 return tdiff > (msecs * 1000 * 1000);
1986 static u32 gfs2_orlov_skip(const struct gfs2_inode *ip)
1988 const struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1991 get_random_bytes(&skip, sizeof(skip));
1992 return skip % sdp->sd_rgrps;
1995 static bool gfs2_select_rgrp(struct gfs2_rgrpd **pos, const struct gfs2_rgrpd *begin)
1997 struct gfs2_rgrpd *rgd = *pos;
1998 struct gfs2_sbd *sdp = rgd->rd_sbd;
2000 rgd = gfs2_rgrpd_get_next(rgd);
2002 rgd = gfs2_rgrpd_get_first(sdp);
2004 if (rgd != begin) /* If we didn't wrap */
2010 * fast_to_acquire - determine if a resource group will be fast to acquire
2013 * If this is one of our preferred rgrps, it should be quicker to acquire,
2014 * because we tried to set ourselves up as dlm lock master.
2016 static inline int fast_to_acquire(struct gfs2_rgrpd *rgd)
2018 struct gfs2_glock *gl = rgd->rd_gl;
2020 if (gl->gl_state != LM_ST_UNLOCKED && list_empty(&gl->gl_holders) &&
2021 !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags) &&
2022 !test_bit(GLF_DEMOTE, &gl->gl_flags))
2024 if (rgd->rd_flags & GFS2_RDF_PREFERRED)
2030 * gfs2_inplace_reserve - Reserve space in the filesystem
2031 * @ip: the inode to reserve space for
2032 * @ap: the allocation parameters
2034 * We try our best to find an rgrp that has at least ap->target blocks
2035 * available. After a couple of passes (loops == 2), the prospects of finding
2036 * such an rgrp diminish. At this stage, we return the first rgrp that has
2037 * at least ap->min_target blocks available.
2039 * Returns: 0 on success,
2040 * -ENOMEM if a suitable rgrp can't be found
2044 int gfs2_inplace_reserve(struct gfs2_inode *ip, struct gfs2_alloc_parms *ap)
2046 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
2047 struct gfs2_rgrpd *begin = NULL;
2048 struct gfs2_blkreserv *rs = &ip->i_res;
2049 int error = 0, flags = LM_FLAG_NODE_SCOPE;
2051 u64 last_unlinked = NO_BLOCK;
2052 u32 target = ap->target;
2054 u32 free_blocks, blocks_available, skip = 0;
2056 BUG_ON(rs->rs_reserved);
2058 if (sdp->sd_args.ar_rgrplvb)
2060 if (gfs2_assert_warn(sdp, target))
2062 if (gfs2_rs_active(rs)) {
2064 } else if (rs->rs_rgd &&
2065 rgrp_contains_block(rs->rs_rgd, ip->i_goal)) {
2068 check_and_update_goal(ip);
2069 rs->rs_rgd = begin = gfs2_blk2rgrpd(sdp, ip->i_goal, 1);
2071 if (S_ISDIR(ip->i_inode.i_mode) && (ap->aflags & GFS2_AF_ORLOV))
2072 skip = gfs2_orlov_skip(ip);
2073 if (rs->rs_rgd == NULL)
2077 struct gfs2_rgrpd *rgd;
2079 rg_locked = gfs2_glock_is_locked_by_me(rs->rs_rgd->rd_gl);
2081 rgrp_lock_local(rs->rs_rgd);
2085 if (!gfs2_rs_active(rs)) {
2087 !fast_to_acquire(rs->rs_rgd))
2090 gfs2_rgrp_used_recently(rs, 1000) &&
2091 gfs2_rgrp_congested(rs->rs_rgd, loops))
2094 error = gfs2_glock_nq_init(rs->rs_rgd->rd_gl,
2095 LM_ST_EXCLUSIVE, flags,
2097 if (unlikely(error))
2099 rgrp_lock_local(rs->rs_rgd);
2100 if (!gfs2_rs_active(rs) && (loops < 2) &&
2101 gfs2_rgrp_congested(rs->rs_rgd, loops))
2103 if (sdp->sd_args.ar_rgrplvb) {
2104 error = update_rgrp_lvb(rs->rs_rgd,
2106 if (unlikely(error)) {
2107 rgrp_unlock_local(rs->rs_rgd);
2108 gfs2_glock_dq_uninit(&ip->i_rgd_gh);
2114 /* Skip unusable resource groups */
2115 if ((rs->rs_rgd->rd_flags & (GFS2_RGF_NOALLOC |
2117 (loops == 0 && target > rs->rs_rgd->rd_extfail_pt))
2120 if (sdp->sd_args.ar_rgrplvb) {
2121 error = gfs2_instantiate(&ip->i_rgd_gh);
2126 /* Get a reservation if we don't already have one */
2127 if (!gfs2_rs_active(rs))
2128 rg_mblk_search(rs->rs_rgd, ip, ap);
2130 /* Skip rgrps when we can't get a reservation on first pass */
2131 if (!gfs2_rs_active(rs) && (loops < 1))
2134 /* If rgrp has enough free space, use it */
2136 spin_lock(&rgd->rd_rsspin);
2137 free_blocks = rgd_free(rgd, rs);
2138 blocks_available = rgd->rd_free_clone - rgd->rd_reserved;
2139 if (free_blocks < target || blocks_available < target) {
2140 spin_unlock(&rgd->rd_rsspin);
2143 rs->rs_reserved = ap->target;
2144 if (rs->rs_reserved > blocks_available)
2145 rs->rs_reserved = blocks_available;
2146 rgd->rd_reserved += rs->rs_reserved;
2147 spin_unlock(&rgd->rd_rsspin);
2148 rgrp_unlock_local(rs->rs_rgd);
2151 /* Check for unlinked inodes which can be reclaimed */
2152 if (rs->rs_rgd->rd_flags & GFS2_RDF_CHECK)
2153 try_rgrp_unlink(rs->rs_rgd, &last_unlinked,
2156 rgrp_unlock_local(rs->rs_rgd);
2158 /* Drop reservation, if we couldn't use reserved rgrp */
2159 if (gfs2_rs_active(rs))
2160 gfs2_rs_deltree(rs);
2162 /* Unlock rgrp if required */
2164 gfs2_glock_dq_uninit(&ip->i_rgd_gh);
2166 /* Find the next rgrp, and continue looking */
2167 if (gfs2_select_rgrp(&rs->rs_rgd, begin))
2172 /* If we've scanned all the rgrps, but found no free blocks
2173 * then this checks for some less likely conditions before
2177 /* Check that fs hasn't grown if writing to rindex */
2178 if (ip == GFS2_I(sdp->sd_rindex) && !sdp->sd_rindex_uptodate) {
2179 error = gfs2_ri_update(ip);
2183 /* Flushing the log may release space */
2186 target = ap->min_target;
2187 gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
2188 GFS2_LFC_INPLACE_RESERVE);
2196 * gfs2_inplace_release - release an inplace reservation
2197 * @ip: the inode the reservation was taken out on
2199 * Release a reservation made by gfs2_inplace_reserve().
2202 void gfs2_inplace_release(struct gfs2_inode *ip)
2204 struct gfs2_blkreserv *rs = &ip->i_res;
2206 if (rs->rs_reserved) {
2207 struct gfs2_rgrpd *rgd = rs->rs_rgd;
2209 spin_lock(&rgd->rd_rsspin);
2210 GLOCK_BUG_ON(rgd->rd_gl, rgd->rd_reserved < rs->rs_reserved);
2211 rgd->rd_reserved -= rs->rs_reserved;
2212 spin_unlock(&rgd->rd_rsspin);
2213 rs->rs_reserved = 0;
2215 if (gfs2_holder_initialized(&ip->i_rgd_gh))
2216 gfs2_glock_dq_uninit(&ip->i_rgd_gh);
2220 * gfs2_alloc_extent - allocate an extent from a given bitmap
2221 * @rbm: the resource group information
2222 * @dinode: TRUE if the first block we allocate is for a dinode
2223 * @n: The extent length (value/result)
2225 * Add the bitmap buffer to the transaction.
2226 * Set the found bits to @new_state to change block's allocation state.
2228 static void gfs2_alloc_extent(const struct gfs2_rbm *rbm, bool dinode,
2231 struct gfs2_rbm pos = { .rgd = rbm->rgd, };
2232 const unsigned int elen = *n;
2237 block = gfs2_rbm_to_block(rbm);
2238 gfs2_trans_add_meta(rbm->rgd->rd_gl, rbm_bi(rbm)->bi_bh);
2239 gfs2_setbit(rbm, true, dinode ? GFS2_BLKST_DINODE : GFS2_BLKST_USED);
2242 ret = gfs2_rbm_from_block(&pos, block);
2243 if (ret || gfs2_testbit(&pos, true) != GFS2_BLKST_FREE)
2245 gfs2_trans_add_meta(pos.rgd->rd_gl, rbm_bi(&pos)->bi_bh);
2246 gfs2_setbit(&pos, true, GFS2_BLKST_USED);
2253 * rgblk_free - Change alloc state of given block(s)
2254 * @sdp: the filesystem
2255 * @rgd: the resource group the blocks are in
2256 * @bstart: the start of a run of blocks to free
2257 * @blen: the length of the block run (all must lie within ONE RG!)
2258 * @new_state: GFS2_BLKST_XXX the after-allocation block state
2261 static void rgblk_free(struct gfs2_sbd *sdp, struct gfs2_rgrpd *rgd,
2262 u64 bstart, u32 blen, unsigned char new_state)
2264 struct gfs2_rbm rbm;
2265 struct gfs2_bitmap *bi, *bi_prev = NULL;
2268 if (WARN_ON_ONCE(gfs2_rbm_from_block(&rbm, bstart)))
2272 if (bi != bi_prev) {
2273 if (!bi->bi_clone) {
2274 bi->bi_clone = kmalloc(bi->bi_bh->b_size,
2275 GFP_NOFS | __GFP_NOFAIL);
2276 memcpy(bi->bi_clone + bi->bi_offset,
2277 bi->bi_bh->b_data + bi->bi_offset,
2280 gfs2_trans_add_meta(rbm.rgd->rd_gl, bi->bi_bh);
2283 gfs2_setbit(&rbm, false, new_state);
2284 gfs2_rbm_add(&rbm, 1);
2289 * gfs2_rgrp_dump - print out an rgrp
2290 * @seq: The iterator
2291 * @rgd: The rgrp in question
2292 * @fs_id_buf: pointer to file system id (if requested)
2296 void gfs2_rgrp_dump(struct seq_file *seq, struct gfs2_rgrpd *rgd,
2297 const char *fs_id_buf)
2299 struct gfs2_blkreserv *trs;
2300 const struct rb_node *n;
2302 spin_lock(&rgd->rd_rsspin);
2303 gfs2_print_dbg(seq, "%s R: n:%llu f:%02x b:%u/%u i:%u q:%u r:%u e:%u\n",
2305 (unsigned long long)rgd->rd_addr, rgd->rd_flags,
2306 rgd->rd_free, rgd->rd_free_clone, rgd->rd_dinodes,
2307 rgd->rd_requested, rgd->rd_reserved, rgd->rd_extfail_pt);
2308 if (rgd->rd_sbd->sd_args.ar_rgrplvb) {
2309 struct gfs2_rgrp_lvb *rgl = rgd->rd_rgl;
2311 gfs2_print_dbg(seq, "%s L: f:%02x b:%u i:%u\n", fs_id_buf,
2312 be32_to_cpu(rgl->rl_flags),
2313 be32_to_cpu(rgl->rl_free),
2314 be32_to_cpu(rgl->rl_dinodes));
2316 for (n = rb_first(&rgd->rd_rstree); n; n = rb_next(&trs->rs_node)) {
2317 trs = rb_entry(n, struct gfs2_blkreserv, rs_node);
2318 dump_rs(seq, trs, fs_id_buf);
2320 spin_unlock(&rgd->rd_rsspin);
2323 static void gfs2_rgrp_error(struct gfs2_rgrpd *rgd)
2325 struct gfs2_sbd *sdp = rgd->rd_sbd;
2326 char fs_id_buf[sizeof(sdp->sd_fsname) + 7];
2328 fs_warn(sdp, "rgrp %llu has an error, marking it readonly until umount\n",
2329 (unsigned long long)rgd->rd_addr);
2330 fs_warn(sdp, "umount on all nodes and run fsck.gfs2 to fix the error\n");
2331 sprintf(fs_id_buf, "fsid=%s: ", sdp->sd_fsname);
2332 gfs2_rgrp_dump(NULL, rgd, fs_id_buf);
2333 rgd->rd_flags |= GFS2_RDF_ERROR;
2337 * gfs2_adjust_reservation - Adjust (or remove) a reservation after allocation
2338 * @ip: The inode we have just allocated blocks for
2339 * @rbm: The start of the allocated blocks
2340 * @len: The extent length
2342 * Adjusts a reservation after an allocation has taken place. If the
2343 * reservation does not match the allocation, or if it is now empty,
2344 * it is removed.
2347 static void gfs2_adjust_reservation(struct gfs2_inode *ip,
2348 const struct gfs2_rbm *rbm, unsigned len)
2350 struct gfs2_blkreserv *rs = &ip->i_res;
2351 struct gfs2_rgrpd *rgd = rbm->rgd;
2353 BUG_ON(rs->rs_reserved < len);
2354 rs->rs_reserved -= len;
2355 if (gfs2_rs_active(rs)) {
2356 u64 start = gfs2_rbm_to_block(rbm);
2358 if (rs->rs_start == start) {
2361 rs->rs_start += len;
2362 rlen = min(rs->rs_requested, len);
2363 rs->rs_requested -= rlen;
2364 rgd->rd_requested -= rlen;
2365 trace_gfs2_rs(rs, TRACE_RS_CLAIM);
2366 if (rs->rs_start < rgd->rd_data0 + rgd->rd_data &&
2367 rs->rs_requested)
2368 return;
2369 /* We used up our block reservation, so we should
2370 reserve more blocks next time. */
2371 atomic_add(RGRP_RSRV_ADDBLKS, &ip->i_sizehint);
2372 }
2373 __rs_deltree(rs);
2374 }
2375 }
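/*
 * Added note (not part of the original rgrp.c): a worked example of the
 * adjustment above.  Suppose a reservation starts at block S with
 * rs_requested = 10 and the allocator hands out len = 3 blocks at S.  Then
 * rs_start becomes S + 3, and rs_requested and rd_requested both drop by 3
 * to 7.  If the allocation had not started at rs_start, or the reservation
 * is now exhausted (or runs past the end of the rgrp), it is removed from
 * the reservation tree instead.
 */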
2378 * gfs2_set_alloc_start - Set starting point for block allocation
2379 * @rbm: The rbm which will be set to the required location
2380 * @ip: The gfs2 inode
2381 * @dinode: Flag to say if allocation includes a new inode
2383 * This sets the starting point from the reservation if one is active;
2384 * otherwise it falls back to guessing a start point based on the
2385 * inode's goal block or the last allocation point in the rgrp.
2388 static void gfs2_set_alloc_start(struct gfs2_rbm *rbm,
2389 const struct gfs2_inode *ip, bool dinode)
2390 {
2391 u64 goal;
2393 if (gfs2_rs_active(&ip->i_res)) {
2394 goal = ip->i_res.rs_start;
2395 } else {
2396 if (!dinode && rgrp_contains_block(rbm->rgd, ip->i_goal))
2397 goal = ip->i_goal;
2398 else
2399 goal = rbm->rgd->rd_last_alloc + rbm->rgd->rd_data0;
2400 }
2401 if (WARN_ON_ONCE(gfs2_rbm_from_block(rbm, goal))) {
2408 * gfs2_alloc_blocks - Allocate one or more blocks of data and/or a dinode
2409 * @ip: the inode to allocate the block for
2410 * @bn: Used to return the starting block number
2411 * @nblocks: requested number of blocks/extent length (value/result)
2412 * @dinode: 1 if we're allocating a dinode block, else 0
2413 * @generation: the generation number of the inode
2415 * Returns: 0 or error
2418 int gfs2_alloc_blocks(struct gfs2_inode *ip, u64 *bn, unsigned int *nblocks,
2419 bool dinode, u64 *generation)
2421 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
2422 struct buffer_head *dibh;
2423 struct gfs2_rbm rbm = { .rgd = ip->i_res.rs_rgd, };
2424 u64 block; /* block, within the file system scope */
2425 u32 minext = 1;
2426 int error = -ENOSPC;
2428 BUG_ON(ip->i_res.rs_reserved < *nblocks);
2430 rgrp_lock_local(rbm.rgd);
2431 if (gfs2_rs_active(&ip->i_res)) {
2432 gfs2_set_alloc_start(&rbm, ip, dinode);
2433 error = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, &minext, &ip->i_res, false);
2434 }
2435 if (error == -ENOSPC) {
2436 gfs2_set_alloc_start(&rbm, ip, dinode);
2437 error = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, &minext, NULL, false);
2438 }
2440 /* Since all blocks are reserved in advance, this shouldn't happen */
2441 if (error) {
2442 fs_warn(sdp, "inum=%llu error=%d, nblocks=%u, full=%d fail_pt=%d\n",
2443 (unsigned long long)ip->i_no_addr, error, *nblocks,
2444 test_bit(GBF_FULL, &rbm.rgd->rd_bits->bi_flags),
2445 rbm.rgd->rd_extfail_pt);
2446 goto rgrp_error;
2447 }
2449 gfs2_alloc_extent(&rbm, dinode, nblocks);
2450 block = gfs2_rbm_to_block(&rbm);
2451 rbm.rgd->rd_last_alloc = block - rbm.rgd->rd_data0;
2452 if (!dinode) {
2453 ip->i_goal = block + *nblocks - 1;
2454 error = gfs2_meta_inode_buffer(ip, &dibh);
2455 if (error == 0) {
2456 struct gfs2_dinode *di =
2457 (struct gfs2_dinode *)dibh->b_data;
2458 gfs2_trans_add_meta(ip->i_gl, dibh);
2459 di->di_goal_meta = di->di_goal_data =
2460 cpu_to_be64(ip->i_goal);
2461 brelse(dibh);
2462 }
2463 }
2464 spin_lock(&rbm.rgd->rd_rsspin);
2465 gfs2_adjust_reservation(ip, &rbm, *nblocks);
2466 if (rbm.rgd->rd_free < *nblocks || rbm.rgd->rd_reserved < *nblocks) {
2467 fs_warn(sdp, "nblocks=%u\n", *nblocks);
2468 spin_unlock(&rbm.rgd->rd_rsspin);
2469 goto rgrp_error;
2470 }
2471 GLOCK_BUG_ON(rbm.rgd->rd_gl, rbm.rgd->rd_reserved < *nblocks);
2472 GLOCK_BUG_ON(rbm.rgd->rd_gl, rbm.rgd->rd_free_clone < *nblocks);
2473 GLOCK_BUG_ON(rbm.rgd->rd_gl, rbm.rgd->rd_free < *nblocks);
2474 rbm.rgd->rd_reserved -= *nblocks;
2475 rbm.rgd->rd_free_clone -= *nblocks;
2476 rbm.rgd->rd_free -= *nblocks;
2477 spin_unlock(&rbm.rgd->rd_rsspin);
2478 if (dinode) {
2479 rbm.rgd->rd_dinodes++;
2480 *generation = rbm.rgd->rd_igeneration++;
2481 if (*generation == 0)
2482 *generation = rbm.rgd->rd_igeneration++;
2483 }
2485 gfs2_trans_add_meta(rbm.rgd->rd_gl, rbm.rgd->rd_bits[0].bi_bh);
2486 gfs2_rgrp_out(rbm.rgd, rbm.rgd->rd_bits[0].bi_bh->b_data);
2487 rgrp_unlock_local(rbm.rgd);
2489 gfs2_statfs_change(sdp, 0, -(s64)*nblocks, dinode ? 1 : 0);
2490 if (dinode)
2491 gfs2_trans_remove_revoke(sdp, block, *nblocks);
2493 gfs2_quota_change(ip, *nblocks, ip->i_inode.i_uid, ip->i_inode.i_gid);
2495 trace_gfs2_block_alloc(ip, rbm.rgd, block, *nblocks,
2496 dinode ? GFS2_BLKST_DINODE : GFS2_BLKST_USED);
2497 *bn = block;
2498 return 0;
2500 rgrp_error:
2501 rgrp_unlock_local(rbm.rgd);
2502 gfs2_rgrp_error(rbm.rgd);
2503 return -EIO;
2504 }
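/*
 * Illustrative sketch (not part of the original rgrp.c): a minimal caller of
 * gfs2_alloc_blocks() for ordinary data blocks.  It assumes the usual
 * preconditions already hold -- a reservation made with
 * gfs2_inplace_reserve() and an open transaction covering the rgrp bitmaps;
 * the function name is hypothetical.
 */
static int __maybe_unused example_alloc_data_blocks(struct gfs2_inode *ip,
						    u64 *first_block,
						    unsigned int wanted)
{
	unsigned int n = wanted;	/* value/result extent length */
	int error;

	/* Not a dinode, so no generation number is needed (pass NULL). */
	error = gfs2_alloc_blocks(ip, first_block, &n, false, NULL);
	if (error)
		return error;

	/* n now holds how many contiguous blocks were actually allocated. */
	return n;
}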
2507 * __gfs2_free_blocks - free a contiguous run of block(s)
2508 * @ip: the inode these blocks are being freed from
2509 * @rgd: the resource group the blocks are in
2510 * @bstart: first block of a run of contiguous blocks
2511 * @blen: the length of the block run
2512 * @meta: 1 if the blocks represent metadata
2516 void __gfs2_free_blocks(struct gfs2_inode *ip, struct gfs2_rgrpd *rgd,
2517 u64 bstart, u32 blen, int meta)
2519 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
2521 rgrp_lock_local(rgd);
2522 rgblk_free(sdp, rgd, bstart, blen, GFS2_BLKST_FREE);
2523 trace_gfs2_block_alloc(ip, rgd, bstart, blen, GFS2_BLKST_FREE);
2524 rgd->rd_free += blen;
2525 rgd->rd_flags &= ~GFS2_RGF_TRIMMED;
2526 gfs2_trans_add_meta(rgd->rd_gl, rgd->rd_bits[0].bi_bh);
2527 gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
2528 rgrp_unlock_local(rgd);
2530 /* Directories keep their data in the metadata address space */
2531 if (meta || ip->i_depth || gfs2_is_jdata(ip))
2532 gfs2_journal_wipe(ip, bstart, blen);
2536 * gfs2_free_meta - free a contiguous run of metadata block(s)
2537 * @ip: the inode these blocks are being freed from
2538 * @rgd: the resource group the blocks are in
2539 * @bstart: first block of a run of contiguous blocks
2540 * @blen: the length of the block run
2544 void gfs2_free_meta(struct gfs2_inode *ip, struct gfs2_rgrpd *rgd,
2545 u64 bstart, u32 blen)
2547 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
2549 __gfs2_free_blocks(ip, rgd, bstart, blen, 1);
2550 gfs2_statfs_change(sdp, 0, +blen, 0);
2551 gfs2_quota_change(ip, -(s64)blen, ip->i_inode.i_uid, ip->i_inode.i_gid);
2552 }
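/*
 * Illustrative sketch (not part of the original rgrp.c): freeing a run of
 * indirect (metadata) blocks.  Unlike a plain __gfs2_free_blocks(..., 0)
 * call for data, gfs2_free_meta() also adjusts statfs/quota and, via the
 * meta flag, wipes any journaled copies of the old metadata.  The caller is
 * assumed to hold the rgrp glock and to have an open transaction; @rgd must
 * be the resource group that contains the run.  The wrapper name is
 * hypothetical.
 */
static void __maybe_unused example_free_indirect_run(struct gfs2_inode *ip,
						     struct gfs2_rgrpd *rgd,
						     u64 start, u32 len)
{
	gfs2_free_meta(ip, rgd, start, len);
}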
2554 void gfs2_unlink_di(struct inode *inode)
2556 struct gfs2_inode *ip = GFS2_I(inode);
2557 struct gfs2_sbd *sdp = GFS2_SB(inode);
2558 struct gfs2_rgrpd *rgd;
2559 u64 blkno = ip->i_no_addr;
2561 rgd = gfs2_blk2rgrpd(sdp, blkno, true);
2562 if (!rgd)
2563 return;
2564 rgrp_lock_local(rgd);
2565 rgblk_free(sdp, rgd, blkno, 1, GFS2_BLKST_UNLINKED);
2566 trace_gfs2_block_alloc(ip, rgd, blkno, 1, GFS2_BLKST_UNLINKED);
2567 gfs2_trans_add_meta(rgd->rd_gl, rgd->rd_bits[0].bi_bh);
2568 gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
2569 be32_add_cpu(&rgd->rd_rgl->rl_unlinked, 1);
2570 rgrp_unlock_local(rgd);
2573 void gfs2_free_di(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip)
2575 struct gfs2_sbd *sdp = rgd->rd_sbd;
2577 rgrp_lock_local(rgd);
2578 rgblk_free(sdp, rgd, ip->i_no_addr, 1, GFS2_BLKST_FREE);
2579 if (!rgd->rd_dinodes)
2580 gfs2_consist_rgrpd(rgd);
2581 rgd->rd_dinodes--;
2582 rgd->rd_free++;
2584 gfs2_trans_add_meta(rgd->rd_gl, rgd->rd_bits[0].bi_bh);
2585 gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
2586 rgrp_unlock_local(rgd);
2587 be32_add_cpu(&rgd->rd_rgl->rl_unlinked, -1);
2589 gfs2_statfs_change(sdp, 0, +1, -1);
2590 trace_gfs2_block_alloc(ip, rgd, ip->i_no_addr, 1, GFS2_BLKST_FREE);
2591 gfs2_quota_change(ip, -1, ip->i_inode.i_uid, ip->i_inode.i_gid);
2592 gfs2_journal_wipe(ip, ip->i_no_addr, 1);
2593 }
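/*
 * Illustrative sketch (not part of the original rgrp.c): the two-step life
 * cycle of a deleted dinode as seen by the bitmaps.  Both calls are assumed
 * to run inside their own transactions; the helper names are hypothetical.
 */
static void __maybe_unused example_unlink_phase(struct inode *inode)
{
	/* unlink time: the dinode block goes GFS2_BLKST_DINODE ->
	   GFS2_BLKST_UNLINKED, but stays accounted as a dinode. */
	gfs2_unlink_di(inode);
}

static void __maybe_unused example_delete_phase(struct gfs2_rgrpd *rgd,
						struct gfs2_inode *ip)
{
	/* final deallocation: GFS2_BLKST_UNLINKED -> GFS2_BLKST_FREE,
	   rd_dinodes is dropped and statfs/quota are adjusted. */
	gfs2_free_di(rgd, ip);
}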
2596 * gfs2_check_blk_type - Check the type of a block
2597 * @sdp: The superblock
2598 * @no_addr: The block number to check
2599 * @type: The block type we are looking for
2601 * The inode glock of @no_addr must be held. The @type to check for is either
2602 * GFS2_BLKST_DINODE or GFS2_BLKST_UNLINKED; checking for type GFS2_BLKST_FREE
2603 * or GFS2_BLKST_USED would make no sense.
2605 * Returns: 0 if the block type matches the expected type
2606 * -ESTALE if it doesn't match
2607 * or a negative errno if something went wrong while checking
2610 int gfs2_check_blk_type(struct gfs2_sbd *sdp, u64 no_addr, unsigned int type)
2612 struct gfs2_rgrpd *rgd;
2613 struct gfs2_holder rgd_gh;
2614 struct gfs2_rbm rbm;
2615 int error = -EINVAL;
2617 rgd = gfs2_blk2rgrpd(sdp, no_addr, 1);
2618 if (!rgd)
2619 goto fail;
2621 error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_SHARED, 0, &rgd_gh);
2622 if (error)
2623 goto fail;
2625 rbm.rgd = rgd;
2626 error = gfs2_rbm_from_block(&rbm, no_addr);
2627 if (!WARN_ON_ONCE(error)) {
2628 /*
2629 * No need to take the local resource group lock here; the
2630 * inode glock of @no_addr provides the necessary
2631 * synchronization in case the block is an inode. (In case
2632 * the block is not an inode, the block type will not match
2633 * the @type we are looking for.)
2634 */
2635 if (gfs2_testbit(&rbm, false) != type)
2636 error = -ESTALE;
2637 }
2639 gfs2_glock_dq_uninit(&rgd_gh);
2641 fail:
2642 return error;
2643 }
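/*
 * Illustrative sketch (not part of the original rgrp.c): a typical use of
 * gfs2_check_blk_type() is to verify that a block we are about to turn into
 * an in-core inode still looks like a dinode on disk.  The caller must hold
 * the inode glock of @no_addr; the helper name is hypothetical.
 */
static int __maybe_unused example_verify_dinode(struct gfs2_sbd *sdp, u64 no_addr)
{
	/* 0: still a dinode, -ESTALE: state changed, other -ve: failure */
	return gfs2_check_blk_type(sdp, no_addr, GFS2_BLKST_DINODE);
}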
2646 * gfs2_rlist_add - add a RG to a list of RGs
2647 * @ip: the inode
2648 * @rlist: the list of resource groups
2649 * @block: the block number whose RG we want to add
2651 * Figure out what RG a block belongs to and add that RG to the list
2653 * FIXME: Don't use NOFAIL
2657 void gfs2_rlist_add(struct gfs2_inode *ip, struct gfs2_rgrp_list *rlist,
2658 u64 block)
2659 {
2660 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
2661 struct gfs2_rgrpd *rgd;
2662 struct gfs2_rgrpd **tmp;
2663 unsigned int new_space;
2664 unsigned int x;
2666 if (gfs2_assert_warn(sdp, !rlist->rl_ghs))
2667 return;
2670 * The resource group last accessed is kept in the last position.
2673 if (rlist->rl_rgrps) {
2674 rgd = rlist->rl_rgd[rlist->rl_rgrps - 1];
2675 if (rgrp_contains_block(rgd, block))
2676 return;
2677 rgd = gfs2_blk2rgrpd(sdp, block, 1);
2678 } else {
2679 rgd = ip->i_res.rs_rgd;
2680 if (!rgd || !rgrp_contains_block(rgd, block))
2681 rgd = gfs2_blk2rgrpd(sdp, block, 1);
2682 }
2684 if (!rgd) {
2685 fs_err(sdp, "rlist_add: no rgrp for block %llu\n",
2686 (unsigned long long)block);
2687 return;
2688 }
2690 for (x = 0; x < rlist->rl_rgrps; x++) {
2691 if (rlist->rl_rgd[x] == rgd) {
2692 swap(rlist->rl_rgd[x],
2693 rlist->rl_rgd[rlist->rl_rgrps - 1]);
2694 return;
2695 }
2696 }
2698 if (rlist->rl_rgrps == rlist->rl_space) {
2699 new_space = rlist->rl_space + 10;
2701 tmp = kcalloc(new_space, sizeof(struct gfs2_rgrpd *),
2702 GFP_NOFS | __GFP_NOFAIL);
2704 if (rlist->rl_rgd) {
2705 memcpy(tmp, rlist->rl_rgd,
2706 rlist->rl_space * sizeof(struct gfs2_rgrpd *));
2707 kfree(rlist->rl_rgd);
2708 }
2710 rlist->rl_space = new_space;
2711 rlist->rl_rgd = tmp;
2712 }
2714 rlist->rl_rgd[rlist->rl_rgrps++] = rgd;
2718 * gfs2_rlist_alloc - allocate and initialize an array of glock holders
2719 * for all RGs that have been added to the rlist
2720 * @rlist: the list of resource groups
2722 * FIXME: Don't use NOFAIL
2726 void gfs2_rlist_alloc(struct gfs2_rgrp_list *rlist)
2730 rlist->rl_ghs = kmalloc_array(rlist->rl_rgrps,
2731 sizeof(struct gfs2_holder),
2732 GFP_NOFS | __GFP_NOFAIL);
2733 for (x = 0; x < rlist->rl_rgrps; x++)
2734 gfs2_holder_init(rlist->rl_rgd[x]->rd_gl, LM_ST_EXCLUSIVE,
2735 LM_FLAG_NODE_SCOPE, &rlist->rl_ghs[x]);
2739 * gfs2_rlist_free - free a resource group list
2740 * @rlist: the list of resource groups
2744 void gfs2_rlist_free(struct gfs2_rgrp_list *rlist)
2748 kfree(rlist->rl_rgd);
2750 if (rlist->rl_ghs) {
2751 for (x = 0; x < rlist->rl_rgrps; x++)
2752 gfs2_holder_uninit(&rlist->rl_ghs[x]);
2753 kfree(rlist->rl_ghs);
2754 rlist->rl_ghs = NULL;
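/*
 * Illustrative sketch (not part of the original rgrp.c): the usual rlist
 * pattern when deallocating blocks that may span several resource groups.
 * It assumes the gfs2_glock_nq_m()/gfs2_glock_dq_m() helpers for acquiring
 * and dropping the collected holders; the function and variable names here
 * are hypothetical.
 */
static int __maybe_unused example_dealloc_with_rlist(struct gfs2_inode *ip,
						     const u64 *blocks,
						     unsigned int nr)
{
	struct gfs2_rgrp_list rlist;
	unsigned int x;
	int error;

	memset(&rlist, 0, sizeof(struct gfs2_rgrp_list));

	/* Collect the rgrp of every block to be freed (duplicates are ok). */
	for (x = 0; x < nr; x++)
		gfs2_rlist_add(ip, &rlist, blocks[x]);

	/* Create holders and lock all collected rgrps in one go. */
	gfs2_rlist_alloc(&rlist);
	error = gfs2_glock_nq_m(rlist.rl_rgrps, rlist.rl_ghs);
	if (error)
		goto out;

	/* ... free the blocks inside a transaction here ... */

	gfs2_glock_dq_m(rlist.rl_rgrps, rlist.rl_ghs);
out:
	gfs2_rlist_free(&rlist);
	return error;
}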
2758 void rgrp_lock_local(struct gfs2_rgrpd *rgd)
2759 {
2760 mutex_lock(&rgd->rd_mutex);
2761 }
2763 void rgrp_unlock_local(struct gfs2_rgrpd *rgd)
2764 {
2765 mutex_unlock(&rgd->rd_mutex);
2766 }