1 /* -*- mode: c; c-basic-offset: 8; -*-
2 * vim: noexpandtab sw=8 ts=8 sts=0:
6 * metadata alloc and free
7 * Inspired by ext3 block groups.
9 * Copyright (C) 2002, 2004 Oracle. All rights reserved.
11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public
13 * License as published by the Free Software Foundation; either
14 * version 2 of the License, or (at your option) any later version.
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
21 * You should have received a copy of the GNU General Public
22 * License along with this program; if not, write to the
23 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
24 * Boston, MA 02111-1307, USA.
28 #include <linux/types.h>
29 #include <linux/slab.h>
30 #include <linux/highmem.h>
32 #include <cluster/masklog.h>
37 #include "blockcheck.h"
41 #include "localalloc.h"
46 #include "ocfs2_trace.h"
48 #include "buffer_head_io.h"
50 #define NOT_ALLOC_NEW_GROUP 0
51 #define ALLOC_NEW_GROUP 0x1
52 #define ALLOC_GROUPS_FROM_GLOBAL 0x2
54 #define OCFS2_MAX_TO_STEAL 1024
/*
 * Result of one suballocator claim.  NOTE(review): this listing elides
 * parts of the original definition (comment tails and the closing brace).
 */
56 struct ocfs2_suballoc_result {
57 u64 sr_bg_blkno; /* The bg we allocated from. Set
58 to 0 when a block group is
60 u64 sr_bg_stable_blkno; /*
61 * Doesn't change, always
66 u64 sr_blkno; /* The first allocated block */
67 unsigned int sr_bit_offset; /* The bit in the bg */
68 unsigned int sr_bits; /* How many bits we claimed */
/*
 * Return the block number of the group descriptor a suballoc result came
 * from: the cached sr_bg_blkno when set, otherwise computed from the
 * allocated block and bit offset.  (Some lines are elided in this listing,
 * including the sr_blkno == 0 early-return body.)
 */
71 static u64 ocfs2_group_from_res(struct ocfs2_suballoc_result *res)
73 if (res->sr_blkno == 0)
77 return res->sr_bg_blkno;
79 return ocfs2_which_suballoc_group(res->sr_blkno, res->sr_bit_offset);
/*
 * Forward declarations for the static chain-allocator helpers defined
 * later in this file.  (Several parameter lines are elided in this
 * listing.)
 */
82 static inline void ocfs2_debug_bg(struct ocfs2_group_desc *bg);
83 static inline void ocfs2_debug_suballoc_inode(struct ocfs2_dinode *fe);
84 static inline u16 ocfs2_find_victim_chain(struct ocfs2_chain_list *cl);
85 static int ocfs2_block_group_fill(handle_t *handle,
86 struct inode *alloc_inode,
87 struct buffer_head *bg_bh,
89 unsigned int group_clusters,
91 struct ocfs2_chain_list *cl);
92 static int ocfs2_block_group_alloc(struct ocfs2_super *osb,
93 struct inode *alloc_inode,
94 struct buffer_head *bh,
96 u64 *last_alloc_group,
99 static int ocfs2_cluster_group_search(struct inode *inode,
100 struct buffer_head *group_bh,
101 u32 bits_wanted, u32 min_bits,
103 struct ocfs2_suballoc_result *res);
104 static int ocfs2_block_group_search(struct inode *inode,
105 struct buffer_head *group_bh,
106 u32 bits_wanted, u32 min_bits,
108 struct ocfs2_suballoc_result *res);
109 static int ocfs2_claim_suballoc_bits(struct ocfs2_alloc_context *ac,
113 struct ocfs2_suballoc_result *res);
114 static int ocfs2_test_bg_bit_allocatable(struct buffer_head *bg_bh,
116 static int ocfs2_relink_block_group(handle_t *handle,
117 struct inode *alloc_inode,
118 struct buffer_head *fe_bh,
119 struct buffer_head *bg_bh,
120 struct buffer_head *prev_bg_bh,
122 static inline int ocfs2_block_group_reasonably_empty(struct ocfs2_group_desc *bg,
124 static inline u32 ocfs2_desc_bitmap_to_cluster_off(struct inode *inode,
127 static inline void ocfs2_block_to_cluster_group(struct inode *inode,
131 static int ocfs2_reserve_clusters_with_limit(struct ocfs2_super *osb,
132 u32 bits_wanted, u64 max_block,
134 struct ocfs2_alloc_context **ac);
/*
 * Release the resources held by an alloc context: drop the allocator
 * inode's cluster lock (unless the local alloc was used), release its
 * i_mutex, and free the cached find-loc state.  (This listing elides
 * several lines, presumably including iput/brelse of ac members —
 * confirm against the full source.)
 */
136 void ocfs2_free_ac_resource(struct ocfs2_alloc_context *ac)
138 struct inode *inode = ac->ac_inode;
141 if (ac->ac_which != OCFS2_AC_USE_LOCAL)
142 ocfs2_inode_unlock(inode, 1);
144 mutex_unlock(&inode->i_mutex);
152 if (ac->ac_find_loc_priv) {
153 kfree(ac->ac_find_loc_priv);
154 ac->ac_find_loc_priv = NULL;
/*
 * Tear down and free an alloc context.  Releases the held resources
 * first; the context itself is presumably kfree'd in lines elided from
 * this listing — TODO confirm.
 */
158 void ocfs2_free_alloc_context(struct ocfs2_alloc_context *ac)
160 ocfs2_free_ac_resource(ac);
/* Total bitmap bits in one block group: clusters-per-group * bits-per-cluster. */
164 static u32 ocfs2_bits_per_group(struct ocfs2_chain_list *cl)
166 return (u32)le16_to_cpu(cl->cl_cpg) * (u32)le16_to_cpu(cl->cl_bpc);
/*
 * Log a validation failure and return the ocfs2_error() result from the
 * enclosing function.  Relies on a 'sb' variable being in the caller's
 * scope; only safe inside the gd validation helpers below.  (Lines of
 * the macro, likely including its resize-mode branch, are elided here.)
 */
169 #define do_error(fmt, ...) \
172 mlog(ML_ERROR, fmt "\n", ##__VA_ARGS__); \
174 return ocfs2_error(sb, fmt, ##__VA_ARGS__); \
/*
 * Self-consistency checks on a group descriptor that need no other
 * on-disk structure: valid signature, bg_blkno matching the buffer's
 * block number, generation matching the filesystem, free-bit count not
 * exceeding total bits, and total bits fitting in the bitmap (8 *
 * bg_size).  Each failure reports via do_error() (which returns).
 * (Some lines are elided in this listing.)
 */
177 static int ocfs2_validate_gd_self(struct super_block *sb,
178 struct buffer_head *bh,
181 struct ocfs2_group_desc *gd = (struct ocfs2_group_desc *)bh->b_data;
183 if (!OCFS2_IS_VALID_GROUP_DESC(gd)) {
184 do_error("Group descriptor #%llu has bad signature %.*s",
185 (unsigned long long)bh->b_blocknr, 7,
189 if (le64_to_cpu(gd->bg_blkno) != bh->b_blocknr) {
190 do_error("Group descriptor #%llu has an invalid bg_blkno "
192 (unsigned long long)bh->b_blocknr,
193 (unsigned long long)le64_to_cpu(gd->bg_blkno));
196 if (le32_to_cpu(gd->bg_generation) != OCFS2_SB(sb)->fs_generation) {
197 do_error("Group descriptor #%llu has an invalid "
198 "fs_generation of #%u",
199 (unsigned long long)bh->b_blocknr,
200 le32_to_cpu(gd->bg_generation));
203 if (le16_to_cpu(gd->bg_free_bits_count) > le16_to_cpu(gd->bg_bits)) {
204 do_error("Group descriptor #%llu has bit count %u but "
205 "claims that %u are free",
206 (unsigned long long)bh->b_blocknr,
207 le16_to_cpu(gd->bg_bits),
208 le16_to_cpu(gd->bg_free_bits_count));
211 if (le16_to_cpu(gd->bg_bits) > (8 * le16_to_cpu(gd->bg_size))) {
212 do_error("Group descriptor #%llu has bit count %u but "
213 "max bitmap bits of %u",
214 (unsigned long long)bh->b_blocknr,
215 le16_to_cpu(gd->bg_bits),
216 8 * le16_to_cpu(gd->bg_size));
/*
 * Validate a group descriptor against the chain-allocator dinode that
 * owns it: the parent pointer must match the dinode, the bit count must
 * not exceed cl_cpg * cl_bpc, and the chain index must be within
 * cl_next_free_rec (equality is tolerated only while resizing).
 * (Some lines are elided in this listing.)
 */
222 static int ocfs2_validate_gd_parent(struct super_block *sb,
223 struct ocfs2_dinode *di,
224 struct buffer_head *bh,
227 unsigned int max_bits;
228 struct ocfs2_group_desc *gd = (struct ocfs2_group_desc *)bh->b_data;
230 if (di->i_blkno != gd->bg_parent_dinode) {
231 do_error("Group descriptor #%llu has bad parent "
232 "pointer (%llu, expected %llu)",
233 (unsigned long long)bh->b_blocknr,
234 (unsigned long long)le64_to_cpu(gd->bg_parent_dinode),
235 (unsigned long long)le64_to_cpu(di->i_blkno));
238 max_bits = le16_to_cpu(di->id2.i_chain.cl_cpg) * le16_to_cpu(di->id2.i_chain.cl_bpc);
239 if (le16_to_cpu(gd->bg_bits) > max_bits) {
240 do_error("Group descriptor #%llu has bit count of %u",
241 (unsigned long long)bh->b_blocknr,
242 le16_to_cpu(gd->bg_bits));
245 /* In resize, we may meet the case bg_chain == cl_next_free_rec. */
246 if ((le16_to_cpu(gd->bg_chain) >
247 le16_to_cpu(di->id2.i_chain.cl_next_free_rec)) ||
248 ((le16_to_cpu(gd->bg_chain) ==
249 le16_to_cpu(di->id2.i_chain.cl_next_free_rec)) && !resize)) {
250 do_error("Group descriptor #%llu has bad chain %u",
251 (unsigned long long)bh->b_blocknr,
252 le16_to_cpu(gd->bg_chain));
/*
 * Resize-time descriptor check: verify the metadata ecc first (an ecc
 * failure is returned but leaves the filesystem running, since it is
 * local to this block), then run the self and parent validators in
 * resize mode (the '1' arguments).
 */
261 * This version only prints errors. It does not fail the filesystem, and
262 * exists only for resize.
264 int ocfs2_check_group_descriptor(struct super_block *sb,
265 struct ocfs2_dinode *di,
266 struct buffer_head *bh)
269 struct ocfs2_group_desc *gd = (struct ocfs2_group_desc *)bh->b_data;
271 BUG_ON(!buffer_uptodate(bh));
274 * If the ecc fails, we return the error but otherwise
275 * leave the filesystem running. We know any error is
276 * local to this block.
278 rc = ocfs2_validate_meta_ecc(sb, bh->b_data, &gd->bg_check);
281 "Checksum failed for group descriptor %llu\n",
282 (unsigned long long)bh->b_blocknr);
284 rc = ocfs2_validate_gd_self(sb, bh, 1);
286 rc = ocfs2_validate_gd_parent(sb, di, bh, 1);
/*
 * Read-time validation callback passed to ocfs2_read_block(): check the
 * metadata ecc (non-fatal, block-local), then the descriptor's
 * self-consistency in non-resize mode.  The parent check cannot happen
 * here because the owning dinode is not available to this callback.
 */
291 static int ocfs2_validate_group_descriptor(struct super_block *sb,
292 struct buffer_head *bh)
295 struct ocfs2_group_desc *gd = (struct ocfs2_group_desc *)bh->b_data;
297 trace_ocfs2_validate_group_descriptor(
298 (unsigned long long)bh->b_blocknr);
300 BUG_ON(!buffer_uptodate(bh));
303 * If the ecc fails, we return the error but otherwise
304 * leave the filesystem running. We know any error is
305 * local to this block.
307 rc = ocfs2_validate_meta_ecc(sb, bh->b_data, &gd->bg_check);
312 * Errors after here are fatal.
315 return ocfs2_validate_gd_self(sb, bh, 0);
/*
 * Read and fully validate a group descriptor block through the inode's
 * metadata cache.  Self-validation runs inside ocfs2_read_block() via
 * the callback; the parent check against @di runs afterwards.  On
 * success *bh holds the (possibly newly read) buffer.  (Error-path
 * lines are elided in this listing.)
 */
318 int ocfs2_read_group_descriptor(struct inode *inode, struct ocfs2_dinode *di,
319 u64 gd_blkno, struct buffer_head **bh)
322 struct buffer_head *tmp = *bh;
324 rc = ocfs2_read_block(INODE_CACHE(inode), gd_blkno, &tmp,
325 ocfs2_validate_group_descriptor);
329 rc = ocfs2_validate_gd_parent(inode->i_sb, di, tmp, 0);
335 /* If ocfs2_read_block() got us a new bh, pass it up. */
/*
 * Append a physical extent to a discontiguous block group's embedded
 * extent list and grow the group's bit counts accordingly.  On the
 * first record the list's l_count is initialized.  e_cpos is the
 * group-relative cluster offset, i.e. the bits already in the group
 * divided by bits-per-cluster.  Only valid on discontig-bg filesystems.
 */
343 static void ocfs2_bg_discontig_add_extent(struct ocfs2_super *osb,
344 struct ocfs2_group_desc *bg,
345 struct ocfs2_chain_list *cl,
346 u64 p_blkno, unsigned int clusters)
348 struct ocfs2_extent_list *el = &bg->bg_list;
349 struct ocfs2_extent_rec *rec;
351 BUG_ON(!ocfs2_supports_discontig_bg(osb));
352 if (!el->l_next_free_rec)
353 el->l_count = cpu_to_le16(ocfs2_extent_recs_per_gd(osb->sb));
354 rec = &el->l_recs[le16_to_cpu(el->l_next_free_rec)];
355 rec->e_blkno = cpu_to_le64(p_blkno);
356 rec->e_cpos = cpu_to_le32(le16_to_cpu(bg->bg_bits) /
357 le16_to_cpu(cl->cl_bpc));
358 rec->e_leaf_clusters = cpu_to_le16(clusters);
359 le16_add_cpu(&bg->bg_bits, clusters * le16_to_cpu(cl->cl_bpc));
360 le16_add_cpu(&bg->bg_free_bits_count,
361 clusters * le16_to_cpu(cl->cl_bpc));
362 le16_add_cpu(&el->l_next_free_rec, 1);
/*
 * Initialize a freshly allocated group descriptor block inside the
 * running transaction: sanity-check the block number, get CREATE journal
 * access, zero the block, fill in the descriptor fields, link it to the
 * chain's current head via bg_next_group, and set bit 0 to account for
 * the descriptor block itself.  For groups smaller than a full cl_cpg
 * the first extent is added via ocfs2_bg_discontig_add_extent().
 * (Several lines, including the error paths, are elided in this listing.)
 */
365 static int ocfs2_block_group_fill(handle_t *handle,
366 struct inode *alloc_inode,
367 struct buffer_head *bg_bh,
369 unsigned int group_clusters,
371 struct ocfs2_chain_list *cl)
374 struct ocfs2_super *osb = OCFS2_SB(alloc_inode->i_sb);
375 struct ocfs2_group_desc *bg = (struct ocfs2_group_desc *) bg_bh->b_data;
376 struct super_block * sb = alloc_inode->i_sb;
378 if (((unsigned long long) bg_bh->b_blocknr) != group_blkno) {
379 status = ocfs2_error(alloc_inode->i_sb, "group block (%llu) != "
381 (unsigned long long)group_blkno,
382 (unsigned long long) bg_bh->b_blocknr);
386 status = ocfs2_journal_access_gd(handle,
387 INODE_CACHE(alloc_inode),
389 OCFS2_JOURNAL_ACCESS_CREATE);
395 memset(bg, 0, sb->s_blocksize);
396 strcpy(bg->bg_signature, OCFS2_GROUP_DESC_SIGNATURE);
397 bg->bg_generation = cpu_to_le32(OCFS2_SB(sb)->fs_generation);
398 bg->bg_size = cpu_to_le16(ocfs2_group_bitmap_size(sb, 1,
399 osb->s_feature_incompat));
400 bg->bg_chain = cpu_to_le16(my_chain);
401 bg->bg_next_group = cl->cl_recs[my_chain].c_blkno;
402 bg->bg_parent_dinode = cpu_to_le64(OCFS2_I(alloc_inode)->ip_blkno);
403 bg->bg_blkno = cpu_to_le64(group_blkno);
404 if (group_clusters == le16_to_cpu(cl->cl_cpg))
405 bg->bg_bits = cpu_to_le16(ocfs2_bits_per_group(cl));
407 ocfs2_bg_discontig_add_extent(osb, bg, cl, group_blkno,
410 /* set the 1st bit in the bitmap to account for the descriptor block */
411 ocfs2_set_bit(0, (unsigned long *)bg->bg_bitmap);
412 bg->bg_free_bits_count = cpu_to_le16(le16_to_cpu(bg->bg_bits) - 1);
414 ocfs2_journal_dirty(handle, bg_bh);
416 /* There is no need to zero out or otherwise initialize the
417 * other blocks in a group - All valid FS metadata in a block
418 * group stores the superblock fs_generation value at
419 * allocation time. */
/*
 * Return the index of the chain with the smallest total bit count —
 * the best chain to grow with a new block group.  (Loop increments and
 * the return are elided in this listing.)
 */
427 static inline u16 ocfs2_find_smallest_chain(struct ocfs2_chain_list *cl)
432 while (curr < le16_to_cpu(cl->cl_count)) {
433 if (le32_to_cpu(cl->cl_recs[best].c_total) >
434 le32_to_cpu(cl->cl_recs[curr].c_total))
/*
 * Allocate one contiguous block group: claim cl_cpg clusters from the
 * cluster allocator, get a buffer for the first block, mark it
 * new/uptodate in the cache, and initialize the descriptor with
 * ocfs2_block_group_fill().  Returns the descriptor bh on success or
 * ERR_PTR(status) on failure (-ENOSPC is passed through silently).
 * (Error-handling lines are elided in this listing.)
 */
441 static struct buffer_head *
442 ocfs2_block_group_alloc_contig(struct ocfs2_super *osb, handle_t *handle,
443 struct inode *alloc_inode,
444 struct ocfs2_alloc_context *ac,
445 struct ocfs2_chain_list *cl)
448 u32 bit_off, num_bits;
450 struct buffer_head *bg_bh;
451 unsigned int alloc_rec = ocfs2_find_smallest_chain(cl);
453 status = ocfs2_claim_clusters(handle, ac,
454 le16_to_cpu(cl->cl_cpg), &bit_off,
457 if (status != -ENOSPC)
462 /* setup the group */
463 bg_blkno = ocfs2_clusters_to_blocks(osb->sb, bit_off);
464 trace_ocfs2_block_group_alloc_contig(
465 (unsigned long long)bg_blkno, alloc_rec);
467 bg_bh = sb_getblk(osb->sb, bg_blkno);
473 ocfs2_set_new_buffer_uptodate(INODE_CACHE(alloc_inode), bg_bh);
475 status = ocfs2_block_group_fill(handle, alloc_inode, bg_bh,
476 bg_blkno, num_bits, alloc_rec, cl);
483 return status ? ERR_PTR(status) : bg_bh;
/*
 * Thin wrapper around ocfs2_claim_clusters() asking for at least
 * @min_bits; -ENOSPC is passed up without logging.  (Some lines are
 * elided in this listing.)
 */
486 static int ocfs2_block_group_claim_bits(struct ocfs2_super *osb,
488 struct ocfs2_alloc_context *ac,
489 unsigned int min_bits,
490 u32 *bit_off, u32 *num_bits)
495 status = ocfs2_claim_clusters(handle, ac, min_bits,
497 if (status != -ENOSPC)
/*
 * Grow a discontiguous block group toward a full cl_cpg clusters by
 * repeatedly claiming regions and appending them to the group's extent
 * list, until either the group is full or the extent records run out.
 * The claim size is capped at what is still needed.  If the records are
 * exhausted before reaching cl_cpg, the caller must bail out.
 * (Several lines, including error paths, are elided in this listing.)
 */
506 static int ocfs2_block_group_grow_discontig(handle_t *handle,
507 struct inode *alloc_inode,
508 struct buffer_head *bg_bh,
509 struct ocfs2_alloc_context *ac,
510 struct ocfs2_chain_list *cl,
511 unsigned int min_bits)
514 struct ocfs2_super *osb = OCFS2_SB(alloc_inode->i_sb);
515 struct ocfs2_group_desc *bg =
516 (struct ocfs2_group_desc *)bg_bh->b_data;
517 unsigned int needed = le16_to_cpu(cl->cl_cpg) -
518 le16_to_cpu(bg->bg_bits) / le16_to_cpu(cl->cl_bpc);
519 u32 p_cpos, clusters;
521 struct ocfs2_extent_list *el = &bg->bg_list;
523 status = ocfs2_journal_access_gd(handle,
524 INODE_CACHE(alloc_inode),
526 OCFS2_JOURNAL_ACCESS_CREATE);
532 while ((needed > 0) && (le16_to_cpu(el->l_next_free_rec) <
533 le16_to_cpu(el->l_count))) {
534 if (min_bits > needed)
536 status = ocfs2_block_group_claim_bits(osb, handle, ac,
540 if (status != -ENOSPC)
544 p_blkno = ocfs2_clusters_to_blocks(osb->sb, p_cpos);
545 ocfs2_bg_discontig_add_extent(osb, bg, cl, p_blkno,
549 needed = le16_to_cpu(cl->cl_cpg) -
550 le16_to_cpu(bg->bg_bits) / le16_to_cpu(cl->cl_bpc);
555 * We have used up all the extent rec but can't fill up
556 * the cpg. So bail out.
562 ocfs2_journal_dirty(handle, bg_bh);
/*
 * Undo a failed discontiguous group allocation: free every cluster
 * extent recorded in the group's extent list (continuing past
 * individual failures so all clusters get a chance to be freed), then
 * drop the descriptor buffer from the metadata cache.  (Some lines are
 * elided in this listing.)
 */
568 static void ocfs2_bg_alloc_cleanup(handle_t *handle,
569 struct ocfs2_alloc_context *cluster_ac,
570 struct inode *alloc_inode,
571 struct buffer_head *bg_bh)
574 struct ocfs2_group_desc *bg;
575 struct ocfs2_extent_list *el;
576 struct ocfs2_extent_rec *rec;
581 bg = (struct ocfs2_group_desc *)bg_bh->b_data;
583 for (i = 0; i < le16_to_cpu(el->l_next_free_rec); i++) {
584 rec = &el->l_recs[i];
585 ret = ocfs2_free_clusters(handle, cluster_ac->ac_inode,
587 le64_to_cpu(rec->e_blkno),
588 le16_to_cpu(rec->e_leaf_clusters));
591 /* Try all the clusters to free */
594 ocfs2_remove_from_cache(INODE_CACHE(alloc_inode), bg_bh);
/*
 * Allocate a block group from multiple discontiguous cluster regions,
 * used when a contiguous cl_cpg claim failed with -ENOSPC.  Requires
 * the discontig-bg feature; extends the transaction for the extra
 * descriptor credits and disables chain relinking (not enough credits
 * to relink all touched groups).  Claims a first region of at least
 * cl_cpg/2 clusters, fills the descriptor, then grows it with
 * ocfs2_block_group_grow_discontig().  On failure the claimed clusters
 * are released via ocfs2_bg_alloc_cleanup().  Returns the descriptor bh
 * or ERR_PTR(status).  (Error-path lines are elided in this listing.)
 */
598 static struct buffer_head *
599 ocfs2_block_group_alloc_discontig(handle_t *handle,
600 struct inode *alloc_inode,
601 struct ocfs2_alloc_context *ac,
602 struct ocfs2_chain_list *cl)
605 u32 bit_off, num_bits;
607 unsigned int min_bits = le16_to_cpu(cl->cl_cpg) >> 1;
608 struct buffer_head *bg_bh = NULL;
609 unsigned int alloc_rec = ocfs2_find_smallest_chain(cl);
610 struct ocfs2_super *osb = OCFS2_SB(alloc_inode->i_sb);
612 if (!ocfs2_supports_discontig_bg(osb)) {
617 status = ocfs2_extend_trans(handle,
618 ocfs2_calc_bg_discontig_credits(osb->sb));
625 * We're going to be grabbing from multiple cluster groups.
626 * We don't have enough credits to relink them all, and the
627 * cluster groups will be staying in cache for the duration of
630 ac->ac_disable_chain_relink = 1;
632 /* Claim the first region */
633 status = ocfs2_block_group_claim_bits(osb, handle, ac, min_bits,
634 &bit_off, &num_bits);
636 if (status != -ENOSPC)
642 /* setup the group */
643 bg_blkno = ocfs2_clusters_to_blocks(osb->sb, bit_off);
644 trace_ocfs2_block_group_alloc_discontig(
645 (unsigned long long)bg_blkno, alloc_rec);
647 bg_bh = sb_getblk(osb->sb, bg_blkno);
653 ocfs2_set_new_buffer_uptodate(INODE_CACHE(alloc_inode), bg_bh);
655 status = ocfs2_block_group_fill(handle, alloc_inode, bg_bh,
656 bg_blkno, num_bits, alloc_rec, cl);
662 status = ocfs2_block_group_grow_discontig(handle, alloc_inode,
663 bg_bh, ac, cl, min_bits);
669 ocfs2_bg_alloc_cleanup(handle, ac, alloc_inode, bg_bh);
670 return status ? ERR_PTR(status) : bg_bh;
/*
 * Grow a metadata suballocator (never the cluster bitmap — see the
 * BUG_ON) by one block group: reserve cl_cpg clusters from the global
 * bitmap, start a transaction, try a contiguous group first and fall
 * back to a discontiguous one on -ENOSPC, then splice the new group
 * into its chain and update the allocator dinode's free/total/used
 * counters, i_clusters and i_size.  *last_alloc_group caches the group
 * hint across calls for the caller.  (Many lines, including error
 * paths and cleanup labels, are elided in this listing.)
 */
674 * We expect the block group allocator to already be locked.
676 static int ocfs2_block_group_alloc(struct ocfs2_super *osb,
677 struct inode *alloc_inode,
678 struct buffer_head *bh,
680 u64 *last_alloc_group,
684 struct ocfs2_dinode *fe = (struct ocfs2_dinode *) bh->b_data;
685 struct ocfs2_chain_list *cl;
686 struct ocfs2_alloc_context *ac = NULL;
687 handle_t *handle = NULL;
689 struct buffer_head *bg_bh = NULL;
690 struct ocfs2_group_desc *bg;
692 BUG_ON(ocfs2_is_cluster_bitmap(alloc_inode));
694 cl = &fe->id2.i_chain;
695 status = ocfs2_reserve_clusters_with_limit(osb,
696 le16_to_cpu(cl->cl_cpg),
697 max_block, flags, &ac);
699 if (status != -ENOSPC)
704 credits = ocfs2_calc_group_alloc_credits(osb->sb,
705 le16_to_cpu(cl->cl_cpg));
706 handle = ocfs2_start_trans(osb, credits);
707 if (IS_ERR(handle)) {
708 status = PTR_ERR(handle);
714 if (last_alloc_group && *last_alloc_group != 0) {
715 trace_ocfs2_block_group_alloc(
716 (unsigned long long)*last_alloc_group);
717 ac->ac_last_group = *last_alloc_group;
720 bg_bh = ocfs2_block_group_alloc_contig(osb, handle, alloc_inode,
722 if (IS_ERR(bg_bh) && (PTR_ERR(bg_bh) == -ENOSPC))
723 bg_bh = ocfs2_block_group_alloc_discontig(handle,
727 status = PTR_ERR(bg_bh);
729 if (status != -ENOSPC)
733 bg = (struct ocfs2_group_desc *) bg_bh->b_data;
735 status = ocfs2_journal_access_di(handle, INODE_CACHE(alloc_inode),
736 bh, OCFS2_JOURNAL_ACCESS_WRITE);
742 alloc_rec = le16_to_cpu(bg->bg_chain);
743 le32_add_cpu(&cl->cl_recs[alloc_rec].c_free,
744 le16_to_cpu(bg->bg_free_bits_count));
745 le32_add_cpu(&cl->cl_recs[alloc_rec].c_total,
746 le16_to_cpu(bg->bg_bits));
747 cl->cl_recs[alloc_rec].c_blkno = bg->bg_blkno;
748 if (le16_to_cpu(cl->cl_next_free_rec) < le16_to_cpu(cl->cl_count))
749 le16_add_cpu(&cl->cl_next_free_rec, 1);
751 le32_add_cpu(&fe->id1.bitmap1.i_used, le16_to_cpu(bg->bg_bits) -
752 le16_to_cpu(bg->bg_free_bits_count));
753 le32_add_cpu(&fe->id1.bitmap1.i_total, le16_to_cpu(bg->bg_bits));
754 le32_add_cpu(&fe->i_clusters, le16_to_cpu(cl->cl_cpg));
756 ocfs2_journal_dirty(handle, bh);
758 spin_lock(&OCFS2_I(alloc_inode)->ip_lock);
759 OCFS2_I(alloc_inode)->ip_clusters = le32_to_cpu(fe->i_clusters);
760 fe->i_size = cpu_to_le64(ocfs2_clusters_to_bytes(alloc_inode->i_sb,
761 le32_to_cpu(fe->i_clusters)));
762 spin_unlock(&OCFS2_I(alloc_inode)->ip_lock);
763 i_size_write(alloc_inode, le64_to_cpu(fe->i_size));
764 alloc_inode->i_blocks = ocfs2_inode_sector_count(alloc_inode);
765 ocfs2_update_inode_fsync_trans(handle, alloc_inode, 0);
769 /* save the new last alloc group so that the caller can cache it. */
770 if (last_alloc_group)
771 *last_alloc_group = ac->ac_last_group;
775 ocfs2_commit_trans(osb, handle);
778 ocfs2_free_alloc_context(ac);
/*
 * Reserve bits from the suballocator of @type in @slot: take the
 * allocator inode's i_mutex and cluster lock (lock order: i_mutex
 * first), validate the chain-allocator dinode, and if there are not
 * enough free bits grow the allocator with ocfs2_block_group_alloc() —
 * unless it is the cluster bitmap (which never grows) or the caller
 * passed NOT_ALLOC_NEW_GROUP.  On success ac->ac_inode/ac_alloc_slot
 * are set with the locks still held; ocfs2_free_ac_resource() drops
 * them.  (The function's tail and error paths are elided in this
 * listing.)
 */
787 static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
788 struct ocfs2_alloc_context *ac,
791 u64 *last_alloc_group,
795 u32 bits_wanted = ac->ac_bits_wanted;
796 struct inode *alloc_inode;
797 struct buffer_head *bh = NULL;
798 struct ocfs2_dinode *fe;
801 alloc_inode = ocfs2_get_system_file_inode(osb, type, slot);
807 mutex_lock(&alloc_inode->i_mutex);
809 status = ocfs2_inode_lock(alloc_inode, &bh, 1);
811 mutex_unlock(&alloc_inode->i_mutex);
818 ac->ac_inode = alloc_inode;
819 ac->ac_alloc_slot = slot;
821 fe = (struct ocfs2_dinode *) bh->b_data;
823 /* The bh was validated by the inode read inside
824 * ocfs2_inode_lock(). Any corruption is a code bug. */
825 BUG_ON(!OCFS2_IS_VALID_DINODE(fe));
827 if (!(fe->i_flags & cpu_to_le32(OCFS2_CHAIN_FL))) {
828 status = ocfs2_error(alloc_inode->i_sb, "Invalid chain allocator %llu",
829 (unsigned long long)le64_to_cpu(fe->i_blkno));
833 free_bits = le32_to_cpu(fe->id1.bitmap1.i_total) -
834 le32_to_cpu(fe->id1.bitmap1.i_used);
836 if (bits_wanted > free_bits) {
837 /* cluster bitmap never grows */
838 if (ocfs2_is_cluster_bitmap(alloc_inode)) {
839 trace_ocfs2_reserve_suballoc_bits_nospc(bits_wanted,
845 if (!(flags & ALLOC_NEW_GROUP)) {
846 trace_ocfs2_reserve_suballoc_bits_no_new_group(
847 slot, bits_wanted, free_bits);
852 status = ocfs2_block_group_alloc(osb, alloc_inode, bh,
854 last_alloc_group, flags);
856 if (status != -ENOSPC)
860 atomic_inc(&osb->alloc_stats.bg_extends);
862 /* You should never ask for this much metadata */
864 (le32_to_cpu(fe->id1.bitmap1.i_total)
865 - le32_to_cpu(fe->id1.bitmap1.i_used)));
/* Reset the inode steal slot to invalid and zero the stolen-inode counter. */
878 static void ocfs2_init_inode_steal_slot(struct ocfs2_super *osb)
880 spin_lock(&osb->osb_lock);
881 osb->s_inode_steal_slot = OCFS2_INVALID_SLOT;
882 spin_unlock(&osb->osb_lock);
883 atomic_set(&osb->s_num_inodes_stolen, 0);
/* Reset the metadata steal slot to invalid and zero the stolen-meta counter. */
886 static void ocfs2_init_meta_steal_slot(struct ocfs2_super *osb)
888 spin_lock(&osb->osb_lock);
889 osb->s_meta_steal_slot = OCFS2_INVALID_SLOT;
890 spin_unlock(&osb->osb_lock);
891 atomic_set(&osb->s_num_meta_stolen, 0);
/* Reset both the inode and metadata steal-slot state. */
894 void ocfs2_init_steal_slots(struct ocfs2_super *osb)
896 ocfs2_init_inode_steal_slot(osb);
897 ocfs2_init_meta_steal_slot(osb);
/*
 * Record, under osb_lock, the slot we last stole from for the given
 * allocator type (inode or extent/metadata).  Other types are ignored.
 */
900 static void __ocfs2_set_steal_slot(struct ocfs2_super *osb, int slot, int type)
902 spin_lock(&osb->osb_lock);
903 if (type == INODE_ALLOC_SYSTEM_INODE)
904 osb->s_inode_steal_slot = slot;
905 else if (type == EXTENT_ALLOC_SYSTEM_INODE)
906 osb->s_meta_steal_slot = slot;
907 spin_unlock(&osb->osb_lock);
/*
 * Read, under osb_lock, the cached steal slot for the given allocator
 * type; OCFS2_INVALID_SLOT when none is cached or the type is unknown.
 */
910 static int __ocfs2_get_steal_slot(struct ocfs2_super *osb, int type)
912 int slot = OCFS2_INVALID_SLOT;
914 spin_lock(&osb->osb_lock);
915 if (type == INODE_ALLOC_SYSTEM_INODE)
916 slot = osb->s_inode_steal_slot;
917 else if (type == EXTENT_ALLOC_SYSTEM_INODE)
918 slot = osb->s_meta_steal_slot;
919 spin_unlock(&osb->osb_lock);
/* Convenience wrapper: cached steal slot for the inode allocator. */
924 static int ocfs2_get_inode_steal_slot(struct ocfs2_super *osb)
926 return __ocfs2_get_steal_slot(osb, INODE_ALLOC_SYSTEM_INODE);
/* Convenience wrapper: cached steal slot for the extent/metadata allocator. */
929 static int ocfs2_get_meta_steal_slot(struct ocfs2_super *osb)
931 return __ocfs2_get_steal_slot(osb, EXTENT_ALLOC_SYSTEM_INODE);
/*
 * Try to reserve bits from other nodes' allocators ("stealing").  Walk
 * the slots starting just after ours (or from the cached steal slot),
 * skipping our own, and attempt a reservation with NOT_ALLOC_NEW_GROUP
 * so we never grow a foreign allocator.  On success the winning slot is
 * cached for next time; a failed attempt releases the partially set up
 * context before moving on.  Returns -ENOSPC if every slot fails.
 * (Some loop/exit lines are elided in this listing.)
 */
934 static int ocfs2_steal_resource(struct ocfs2_super *osb,
935 struct ocfs2_alloc_context *ac,
938 int i, status = -ENOSPC;
939 int slot = __ocfs2_get_steal_slot(osb, type);
941 /* Start to steal resource from the first slot after ours. */
942 if (slot == OCFS2_INVALID_SLOT)
943 slot = osb->slot_num + 1;
945 for (i = 0; i < osb->max_slots; i++, slot++) {
946 if (slot == osb->max_slots)
949 if (slot == osb->slot_num)
952 status = ocfs2_reserve_suballoc_bits(osb, ac,
955 NOT_ALLOC_NEW_GROUP);
957 __ocfs2_set_steal_slot(osb, slot, type);
961 ocfs2_free_ac_resource(ac);
/* Steal inode-allocator bits from another slot. */
967 static int ocfs2_steal_inode(struct ocfs2_super *osb,
968 struct ocfs2_alloc_context *ac)
970 return ocfs2_steal_resource(osb, ac, INODE_ALLOC_SYSTEM_INODE);
/* Steal extent/metadata-allocator bits from another slot. */
973 static int ocfs2_steal_meta(struct ocfs2_super *osb,
974 struct ocfs2_alloc_context *ac)
976 return ocfs2_steal_resource(osb, ac, EXTENT_ALLOC_SYSTEM_INODE);
/*
 * Reserve @blocks metadata blocks in a new alloc context.  Normally
 * tries our own slot's extent allocator (allowed to grow a new group
 * from the global bitmap); if we are currently in "steal mode" and have
 * stolen fewer than OCFS2_MAX_TO_STEAL times, or our own slot is out of
 * space, fall back to stealing from other slots.  A successful local
 * reservation resets the steal slot.  On any error the context is
 * freed and *ac presumably NULLed in elided lines — TODO confirm.
 */
979 int ocfs2_reserve_new_metadata_blocks(struct ocfs2_super *osb,
981 struct ocfs2_alloc_context **ac)
984 int slot = ocfs2_get_meta_steal_slot(osb);
986 *ac = kzalloc(sizeof(struct ocfs2_alloc_context), GFP_KERNEL);
993 (*ac)->ac_bits_wanted = blocks;
994 (*ac)->ac_which = OCFS2_AC_USE_META;
995 (*ac)->ac_group_search = ocfs2_block_group_search;
997 if (slot != OCFS2_INVALID_SLOT &&
998 atomic_read(&osb->s_num_meta_stolen) < OCFS2_MAX_TO_STEAL)
1001 atomic_set(&osb->s_num_meta_stolen, 0);
1002 status = ocfs2_reserve_suballoc_bits(osb, (*ac),
1003 EXTENT_ALLOC_SYSTEM_INODE,
1004 (u32)osb->slot_num, NULL,
1005 ALLOC_GROUPS_FROM_GLOBAL|ALLOC_NEW_GROUP);
1010 if (slot != OCFS2_INVALID_SLOT)
1011 ocfs2_init_meta_steal_slot(osb);
1013 } else if (status < 0 && status != -ENOSPC) {
1018 ocfs2_free_ac_resource(*ac);
1021 status = ocfs2_steal_meta(osb, *ac);
1022 atomic_inc(&osb->s_num_meta_stolen);
1024 if (status != -ENOSPC)
1031 if ((status < 0) && *ac) {
1032 ocfs2_free_alloc_context(*ac);
/*
 * Reserve enough metadata blocks to extend the tree rooted at
 * @root_el, sizing the request via ocfs2_extend_meta_needed().
 */
1041 int ocfs2_reserve_new_metadata(struct ocfs2_super *osb,
1042 struct ocfs2_extent_list *root_el,
1043 struct ocfs2_alloc_context **ac)
1045 return ocfs2_reserve_new_metadata_blocks(osb,
1046 ocfs2_extend_meta_needed(root_el),
/*
 * Reserve one inode bit in a new alloc context.  Unless 'inode64' is
 * mounted, ac_max_block is capped at 32 bits so stat(2)'s i_ino stays
 * representable.  Mirrors the metadata path: try our own slot's inode
 * allocator first (unless we are in steal mode under the
 * OCFS2_MAX_TO_STEAL limit), caching the per-slot inode alloc group
 * hint in osb->osb_inode_alloc_group on success, and fall back to
 * stealing from other slots on -ENOSPC.  (Several lines, including the
 * flag argument at line 1094-1096 and error exits, are elided in this
 * listing.)
 */
1050 int ocfs2_reserve_new_inode(struct ocfs2_super *osb,
1051 struct ocfs2_alloc_context **ac)
1054 int slot = ocfs2_get_inode_steal_slot(osb);
1057 *ac = kzalloc(sizeof(struct ocfs2_alloc_context), GFP_KERNEL);
1064 (*ac)->ac_bits_wanted = 1;
1065 (*ac)->ac_which = OCFS2_AC_USE_INODE;
1067 (*ac)->ac_group_search = ocfs2_block_group_search;
1070 * stat(2) can't handle i_ino > 32bits, so we tell the
1071 * lower levels not to allocate us a block group past that
1072 * limit. The 'inode64' mount option avoids this behavior.
1074 if (!(osb->s_mount_opt & OCFS2_MOUNT_INODE64))
1075 (*ac)->ac_max_block = (u32)~0U;
1078 * slot is set when we successfully steal inode from other nodes.
1079 * It is reset in 3 places:
1080 * 1. when we flush the truncate log
1081 * 2. when we complete local alloc recovery.
1082 * 3. when we successfully allocate from our own slot.
1083 * After it is set, we will go on stealing inodes until we find the
1084 * need to check our slots to see whether there is some space for us.
1086 if (slot != OCFS2_INVALID_SLOT &&
1087 atomic_read(&osb->s_num_inodes_stolen) < OCFS2_MAX_TO_STEAL)
1090 atomic_set(&osb->s_num_inodes_stolen, 0);
1091 alloc_group = osb->osb_inode_alloc_group;
1092 status = ocfs2_reserve_suballoc_bits(osb, *ac,
1093 INODE_ALLOC_SYSTEM_INODE,
1097 ALLOC_GROUPS_FROM_GLOBAL);
1101 spin_lock(&osb->osb_lock);
1102 osb->osb_inode_alloc_group = alloc_group;
1103 spin_unlock(&osb->osb_lock);
1104 trace_ocfs2_reserve_new_inode_new_group(
1105 (unsigned long long)alloc_group);
1108 * Some inodes must be freed by us, so try to allocate
1109 * from our own next time.
1111 if (slot != OCFS2_INVALID_SLOT)
1112 ocfs2_init_inode_steal_slot(osb);
1114 } else if (status < 0 && status != -ENOSPC) {
1119 ocfs2_free_ac_resource(*ac);
1122 status = ocfs2_steal_inode(osb, *ac);
1123 atomic_inc(&osb->s_num_inodes_stolen);
1125 if (status != -ENOSPC)
1132 if ((status < 0) && *ac) {
1133 ocfs2_free_alloc_context(*ac);
/*
 * Reserve bits directly from the global cluster bitmap (slot-less,
 * hence OCFS2_INVALID_SLOT) using the cluster group search.  -ENOSPC is
 * passed up quietly; other errors are logged in elided lines.
 */
1142 /* local alloc code has to do the same thing, so rather than do this
1144 int ocfs2_reserve_cluster_bitmap_bits(struct ocfs2_super *osb,
1145 struct ocfs2_alloc_context *ac)
1149 ac->ac_which = OCFS2_AC_USE_MAIN;
1150 ac->ac_group_search = ocfs2_cluster_group_search;
1152 status = ocfs2_reserve_suballoc_bits(osb, ac,
1153 GLOBAL_BITMAP_SYSTEM_INODE,
1154 OCFS2_INVALID_SLOT, NULL,
1156 if (status < 0 && status != -ENOSPC) {
/*
 * Reserve @bits_wanted clusters, picking the bitmap for the caller:
 * try the node-local alloc first (when the flags allow it and the
 * request is small enough per ocfs2_alloc_should_use_local()), and fall
 * back to the main global bitmap on -ENOSPC.  @max_block, when nonzero,
 * caps how high the allocation may land.  On failure the context is
 * freed; cleanup details are in lines elided from this listing.
 */
1165 /* Callers don't need to care which bitmap (local alloc or main) to
1166 * use so we figure it out for them, but unfortunately this clutters
1168 static int ocfs2_reserve_clusters_with_limit(struct ocfs2_super *osb,
1169 u32 bits_wanted, u64 max_block,
1171 struct ocfs2_alloc_context **ac)
1175 *ac = kzalloc(sizeof(struct ocfs2_alloc_context), GFP_KERNEL);
1182 (*ac)->ac_bits_wanted = bits_wanted;
1183 (*ac)->ac_max_block = max_block;
1186 if (!(flags & ALLOC_GROUPS_FROM_GLOBAL) &&
1187 ocfs2_alloc_should_use_local(osb, bits_wanted)) {
1188 status = ocfs2_reserve_local_alloc_bits(osb,
1191 if ((status < 0) && (status != -ENOSPC)) {
1197 if (status == -ENOSPC) {
1198 status = ocfs2_reserve_cluster_bitmap_bits(osb, *ac);
1200 if (status != -ENOSPC)
1208 if ((status < 0) && *ac) {
1209 ocfs2_free_alloc_context(*ac);
/* Reserve clusters with no block-number limit, allowing new groups. */
1218 int ocfs2_reserve_clusters(struct ocfs2_super *osb,
1220 struct ocfs2_alloc_context **ac)
1222 return ocfs2_reserve_clusters_with_limit(osb, bits_wanted, 0,
1223 ALLOC_NEW_GROUP, ac);
/*
 * Return whether bit @nr of the group may be handed out: it must be
 * clear in the current bitmap AND, when the buffer is journaled, clear
 * in jbd's last-committed copy as well — otherwise reusing it could
 * expose freed data if we crash before the freeing transaction commits.
 * (Some lines, including the b_committed_data NULL handling, are elided
 * in this listing.)
 */
1227 * More or less lifted from ext3. I'll leave their description below:
1229 * "For ext3 allocations, we must not reuse any blocks which are
1230 * allocated in the bitmap buffer's "last committed data" copy. This
1231 * prevents deletes from freeing up the page for reuse until we have
1232 * committed the delete transaction.
1234 * If we didn't do this, then deleting something and reallocating it as
1235 * data would allow the old block to be overwritten before the
1236 * transaction committed (because we force data to disk before commit).
1237 * This would lead to corruption if we crashed between overwriting the
1238 * data and committing the delete.
1240 * @@@ We may want to make this allocation behaviour conditional on
1241 * data-writes at some point, and disable it for metadata allocations or
1242 * sync-data inodes."
1244 * Note: OCFS2 already does this differently for metadata vs data
1245 * allocations, as those bitmaps are separate and undo access is never
1246 * called on a metadata group descriptor.
1248 static int ocfs2_test_bg_bit_allocatable(struct buffer_head *bg_bh,
1251 struct ocfs2_group_desc *bg = (struct ocfs2_group_desc *) bg_bh->b_data;
1254 if (ocfs2_test_bit(nr, (unsigned long *)bg->bg_bitmap))
1257 if (!buffer_jbd(bg_bh))
1260 jbd_lock_bh_state(bg_bh);
1261 bg = (struct ocfs2_group_desc *) bh2jh(bg_bh)->b_committed_data;
1263 ret = !ocfs2_test_bit(nr, (unsigned long *)bg->bg_bitmap);
1266 jbd_unlock_bh_state(bg_bh);
/*
 * Scan a group's bitmap for the best run of allocatable clear bits.
 * Walks zero bits with ocfs2_find_next_zero_bit(), rejecting any bit
 * still set in the last-committed copy (see
 * ocfs2_test_bg_bit_allocatable), tracking the longest contiguous run
 * found, and stopping early once a run of @bits_wanted is found.  The
 * winning offset/length go into @res; no bits are modified here.
 * (Loop-bookkeeping lines and the -ENOSPC exit are elided in this
 * listing.)
 */
1271 static int ocfs2_block_group_find_clear_bits(struct ocfs2_super *osb,
1272 struct buffer_head *bg_bh,
1273 unsigned int bits_wanted,
1274 unsigned int total_bits,
1275 struct ocfs2_suballoc_result *res)
1278 u16 best_offset, best_size;
1279 int offset, start, found, status = 0;
1280 struct ocfs2_group_desc *bg = (struct ocfs2_group_desc *) bg_bh->b_data;
1282 /* Callers got this descriptor from
1283 * ocfs2_read_group_descriptor(). Any corruption is a code bug. */
1284 BUG_ON(!OCFS2_IS_VALID_GROUP_DESC(bg));
1286 found = start = best_offset = best_size = 0;
1287 bitmap = bg->bg_bitmap;
1289 while((offset = ocfs2_find_next_zero_bit(bitmap, total_bits, start)) != -1) {
1290 if (offset == total_bits)
1293 if (!ocfs2_test_bg_bit_allocatable(bg_bh, offset)) {
1294 /* We found a zero, but we can't use it as it
1295 * hasn't been put to disk yet! */
1298 } else if (offset == start) {
1299 /* we found a zero */
1301 /* move start to the next bit to test */
1304 /* got a zero after some ones */
1308 if (found > best_size) {
1310 best_offset = start - found;
1312 /* we got everything we needed */
1313 if (found == bits_wanted) {
1314 /* mlog(0, "Found it all!\n"); */
1320 res->sr_bit_offset = best_offset;
1321 res->sr_bits = best_size;
1324 /* No error log here -- see the comment above
1325 * ocfs2_test_bg_bit_allocatable */
/*
 * Mark @num_bits bits starting at @bit_off as allocated in the group's
 * bitmap, within the running transaction.  Uses UNDO journal access for
 * the cluster bitmap (so concurrent deletes stay crash-safe) and plain
 * WRITE access for metadata groups.  Decrements bg_free_bits_count
 * (with a corruption check that it never exceeds bg_bits), sets the
 * bits, and dirties the buffer.  (Some lines, including the set-bit
 * loop header, are elided in this listing.)
 */
1331 int ocfs2_block_group_set_bits(handle_t *handle,
1332 struct inode *alloc_inode,
1333 struct ocfs2_group_desc *bg,
1334 struct buffer_head *group_bh,
1335 unsigned int bit_off,
1336 unsigned int num_bits)
1339 void *bitmap = bg->bg_bitmap;
1340 int journal_type = OCFS2_JOURNAL_ACCESS_WRITE;
1342 /* All callers get the descriptor via
1343 * ocfs2_read_group_descriptor(). Any corruption is a code bug. */
1344 BUG_ON(!OCFS2_IS_VALID_GROUP_DESC(bg));
1345 BUG_ON(le16_to_cpu(bg->bg_free_bits_count) < num_bits);
1347 trace_ocfs2_block_group_set_bits(bit_off, num_bits);
1349 if (ocfs2_is_cluster_bitmap(alloc_inode))
1350 journal_type = OCFS2_JOURNAL_ACCESS_UNDO;
1352 status = ocfs2_journal_access_gd(handle,
1353 INODE_CACHE(alloc_inode),
1361 le16_add_cpu(&bg->bg_free_bits_count, -num_bits);
1362 if (le16_to_cpu(bg->bg_free_bits_count) > le16_to_cpu(bg->bg_bits)) {
1363 return ocfs2_error(alloc_inode->i_sb, "Group descriptor # %llu has bit"
1364 " count %u but claims %u are freed. num_bits %d",
1365 (unsigned long long)le64_to_cpu(bg->bg_blkno),
1366 le16_to_cpu(bg->bg_bits),
1367 le16_to_cpu(bg->bg_free_bits_count), num_bits);
1370 ocfs2_set_bit(bit_off++, bitmap);
1372 ocfs2_journal_dirty(handle, group_bh);
/*
 * Return the chain with the most free bits — the best candidate to
 * search first.  Requires at least one chain in use.  (Loop increments
 * and the return are elided in this listing.)
 */
1378 /* find the one with the most empty bits */
1379 static inline u16 ocfs2_find_victim_chain(struct ocfs2_chain_list *cl)
1383 BUG_ON(!cl->cl_next_free_rec);
1386 while (curr < le16_to_cpu(cl->cl_next_free_rec)) {
1387 if (le32_to_cpu(cl->cl_recs[curr].c_free) >
1388 le32_to_cpu(cl->cl_recs[best].c_free))
1393 BUG_ON(best >= le16_to_cpu(cl->cl_next_free_rec));
/*
 * Move group @bg to the head of its chain (just after the chain record
 * in the dinode), unlinking it from after @prev_bg.  Three journaled
 * writes in order: prev_bg skips over bg, bg points at the old chain
 * head, and the dinode's chain record points at bg.  The original
 * next-pointers are saved up front so a journal-access failure can roll
 * the in-memory links back (out_rollback labels).  (Some lines are
 * elided in this listing.)
 */
1397 static int ocfs2_relink_block_group(handle_t *handle,
1398 struct inode *alloc_inode,
1399 struct buffer_head *fe_bh,
1400 struct buffer_head *bg_bh,
1401 struct buffer_head *prev_bg_bh,
1405 /* there is a really tiny chance the journal calls could fail,
1406 * but we wouldn't want inconsistent blocks in *any* case. */
1407 u64 bg_ptr, prev_bg_ptr;
1408 struct ocfs2_dinode *fe = (struct ocfs2_dinode *) fe_bh->b_data;
1409 struct ocfs2_group_desc *bg = (struct ocfs2_group_desc *) bg_bh->b_data;
1410 struct ocfs2_group_desc *prev_bg = (struct ocfs2_group_desc *) prev_bg_bh->b_data;
1412 /* The caller got these descriptors from
1413 * ocfs2_read_group_descriptor(). Any corruption is a code bug. */
1414 BUG_ON(!OCFS2_IS_VALID_GROUP_DESC(bg));
1415 BUG_ON(!OCFS2_IS_VALID_GROUP_DESC(prev_bg));
1417 trace_ocfs2_relink_block_group(
1418 (unsigned long long)le64_to_cpu(fe->i_blkno), chain,
1419 (unsigned long long)le64_to_cpu(bg->bg_blkno),
1420 (unsigned long long)le64_to_cpu(prev_bg->bg_blkno));
1422 bg_ptr = le64_to_cpu(bg->bg_next_group);
1423 prev_bg_ptr = le64_to_cpu(prev_bg->bg_next_group);
1425 status = ocfs2_journal_access_gd(handle, INODE_CACHE(alloc_inode),
1427 OCFS2_JOURNAL_ACCESS_WRITE);
1431 prev_bg->bg_next_group = bg->bg_next_group;
1432 ocfs2_journal_dirty(handle, prev_bg_bh);
1434 status = ocfs2_journal_access_gd(handle, INODE_CACHE(alloc_inode),
1435 bg_bh, OCFS2_JOURNAL_ACCESS_WRITE);
1437 goto out_rollback_prev_bg;
1439 bg->bg_next_group = fe->id2.i_chain.cl_recs[chain].c_blkno;
1440 ocfs2_journal_dirty(handle, bg_bh);
1442 status = ocfs2_journal_access_di(handle, INODE_CACHE(alloc_inode),
1443 fe_bh, OCFS2_JOURNAL_ACCESS_WRITE);
1445 goto out_rollback_bg;
1447 fe->id2.i_chain.cl_recs[chain].c_blkno = bg->bg_blkno;
1448 ocfs2_journal_dirty(handle, fe_bh);
1456 bg->bg_next_group = cpu_to_le64(bg_ptr);
1457 out_rollback_prev_bg:
1458 prev_bg->bg_next_group = cpu_to_le64(prev_bg_ptr);
/*
 * True when group @bg still has strictly more free bits than @wanted —
 * used to decide whether a group is worth relinking to the chain head.
 * NOTE(review): the second parameter declaration and braces are elided
 * in this listing.
 */
1462 static inline int ocfs2_block_group_reasonably_empty(struct ocfs2_group_desc *bg,
1465 return le16_to_cpu(bg->bg_free_bits_count) > wanted;
/*
 * ac_group_search callback for the global cluster bitmap: look for a run
 * of >= min_bits free bits in @group_bh, capping the usable range so a
 * failed fs-resize can't hand out clusters past ip_clusters, and honoring
 * an optional max_block ceiling. Found bits below min_bits are reported
 * to the local alloc so it can see them later.
 * NOTE(review): elided lines include several locals (ret, blkoff,
 * max_block), the find_clear_bits argument tail, and the ENOSPC reset
 * after the max_block check — verify against the full source.
 */
1468 /* return 0 on success, -ENOSPC to keep searching and any other < 0
1469 * value on error. */
1470 static int ocfs2_cluster_group_search(struct inode *inode,
1471 struct buffer_head *group_bh,
1472 u32 bits_wanted, u32 min_bits,
1474 struct ocfs2_suballoc_result *res)
1476 int search = -ENOSPC;
1479 struct ocfs2_group_desc *gd = (struct ocfs2_group_desc *) group_bh->b_data;
1480 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1481 unsigned int max_bits, gd_cluster_off;
/* This callback is only valid on the cluster (global) bitmap inode. */
1483 BUG_ON(!ocfs2_is_cluster_bitmap(inode));
1485 if (gd->bg_free_bits_count) {
1486 max_bits = le16_to_cpu(gd->bg_bits);
1488 /* Tail groups in cluster bitmaps which aren't cpg
1489 * aligned are prone to partial extension by a failed
1490 * fs resize. If the file system resize never got to
1491 * update the dinode cluster count, then we don't want
1492 * to trust any clusters past it, regardless of what
1493 * the group descriptor says. */
1494 gd_cluster_off = ocfs2_blocks_to_clusters(inode->i_sb,
1495 le64_to_cpu(gd->bg_blkno));
1496 if ((gd_cluster_off + max_bits) >
1497 OCFS2_I(inode)->ip_clusters) {
1498 max_bits = OCFS2_I(inode)->ip_clusters - gd_cluster_off;
1499 trace_ocfs2_cluster_group_search_wrong_max_bits(
1500 (unsigned long long)le64_to_cpu(gd->bg_blkno),
1501 le16_to_cpu(gd->bg_bits),
1502 OCFS2_I(inode)->ip_clusters, max_bits);
1505 ret = ocfs2_block_group_find_clear_bits(OCFS2_SB(inode->i_sb),
1506 group_bh, bits_wanted,
/* Translate the found bit back to a block number for the ceiling check. */
1512 blkoff = ocfs2_clusters_to_blocks(inode->i_sb,
1514 res->sr_bit_offset +
1516 trace_ocfs2_cluster_group_search_max_block(
1517 (unsigned long long)blkoff,
1518 (unsigned long long)max_block);
1519 if (blkoff > max_block)
1523 /* ocfs2_block_group_find_clear_bits() might
1524 * return success, but we still want to return
1525 * -ENOSPC unless it found the minimum number
1527 if (min_bits <= res->sr_bits)
1528 search = 0; /* success */
1529 else if (res->sr_bits) {
1531 * Don't show bits which we'll be returning
1532 * for allocation to the local alloc bitmap.
1534 ocfs2_local_alloc_seen_free_bits(osb, res->sr_bits);
/*
 * ac_group_search callback for suballocators (inode/metadata groups,
 * never the cluster bitmap): find bits_wanted clear bits in @group_bh,
 * rejecting results that would land past an optional max_block ceiling.
 * min_bits must be 1 for suballocators.
 * NOTE(review): elided lines include locals (ret, blkoff), the
 * find_clear_bits argument tail, the ENOSPC reset after the max_block
 * check, and the final return — verify against the full source.
 */
1541 static int ocfs2_block_group_search(struct inode *inode,
1542 struct buffer_head *group_bh,
1543 u32 bits_wanted, u32 min_bits,
1545 struct ocfs2_suballoc_result *res)
1549 struct ocfs2_group_desc *bg = (struct ocfs2_group_desc *) group_bh->b_data;
/* Suballocators always allocate one bit at a time at minimum. */
1551 BUG_ON(min_bits != 1);
1552 BUG_ON(ocfs2_is_cluster_bitmap(inode));
1554 if (bg->bg_free_bits_count) {
1555 ret = ocfs2_block_group_find_clear_bits(OCFS2_SB(inode->i_sb),
1556 group_bh, bits_wanted,
1557 le16_to_cpu(bg->bg_bits),
1559 if (!ret && max_block) {
/* For suballocators bits map 1:1 to blocks within the group. */
1560 blkoff = le64_to_cpu(bg->bg_blkno) +
1561 res->sr_bit_offset + res->sr_bits;
1562 trace_ocfs2_block_group_search_max_block(
1563 (unsigned long long)blkoff,
1564 (unsigned long long)max_block);
1565 if (blkoff > max_block)
/*
 * After claiming @num_bits from chain @chain, update the allocator
 * dinode's counters under the journal: bump bitmap1.i_used and decrement
 * the chain record's c_free, then dirty the dinode buffer.
 * NOTE(review): the handle/num_bits/chain parameter lines, error check
 * after journal_access, and final return are elided in this listing.
 */
1573 int ocfs2_alloc_dinode_update_counts(struct inode *inode,
1575 struct buffer_head *di_bh,
1581 struct ocfs2_dinode *di = (struct ocfs2_dinode *) di_bh->b_data;
1582 struct ocfs2_chain_list *cl = (struct ocfs2_chain_list *) &di->id2.i_chain;
1584 ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
1585 OCFS2_JOURNAL_ACCESS_WRITE);
1591 tmp_used = le32_to_cpu(di->id1.bitmap1.i_used);
1592 di->id1.bitmap1.i_used = cpu_to_le32(num_bits + tmp_used);
1593 le32_add_cpu(&cl->cl_recs[chain].c_free, -num_bits);
1594 ocfs2_journal_dirty(handle, di_bh);
/*
 * Exact inverse of ocfs2_alloc_dinode_update_counts(): give @num_bits
 * back to chain @chain and shrink bitmap1.i_used. Used when setting the
 * bitmap bits fails after the dinode counters were already updated.
 * No journal access here — the caller already holds the dirtied buffer.
 * NOTE(review): parameter tail (num_bits, chain) and tmp_used
 * declaration are elided in this listing.
 */
1600 void ocfs2_rollback_alloc_dinode_counts(struct inode *inode,
1601 struct buffer_head *di_bh,
1606 struct ocfs2_dinode *di = (struct ocfs2_dinode *) di_bh->b_data;
1607 struct ocfs2_chain_list *cl;
1609 cl = (struct ocfs2_chain_list *)&di->id2.i_chain;
1610 tmp_used = le32_to_cpu(di->id1.bitmap1.i_used);
1611 di->id1.bitmap1.i_used = cpu_to_le32(tmp_used - num_bits);
1612 le32_add_cpu(&cl->cl_recs[chain].c_free, num_bits);
/*
 * For a discontiguous block group, test whether the claimed bit range in
 * @res falls inside extent record @rec. If so, translate sr_blkno to the
 * extent's physical start and clamp sr_bits so the allocation does not
 * spill past the extent.
 * NOTE(review): the early 'return 0;' bodies and the trailing
 * 'return 1;' appear to be elided from this listing.
 */
1615 static int ocfs2_bg_discontig_fix_by_rec(struct ocfs2_suballoc_result *res,
1616 struct ocfs2_extent_rec *rec,
1617 struct ocfs2_chain_list *cl)
/* bpc = bits per cluster; converts extent cpos/clusters to bit units. */
1619 unsigned int bpc = le16_to_cpu(cl->cl_bpc);
1620 unsigned int bitoff = le32_to_cpu(rec->e_cpos) * bpc;
1621 unsigned int bitcount = le16_to_cpu(rec->e_leaf_clusters) * bpc;
1623 if (res->sr_bit_offset < bitoff)
1625 if (res->sr_bit_offset >= (bitoff + bitcount))
/* Bit lies in this record: compute the real disk block for the bit. */
1627 res->sr_blkno = le64_to_cpu(rec->e_blkno) +
1628 (res->sr_bit_offset - bitoff);
1629 if ((res->sr_bit_offset + res->sr_bits) > (bitoff + bitcount))
1630 res->sr_bits = (bitoff + bitcount) - res->sr_bit_offset;
/*
 * Convert a raw (group-relative) search result into a disk block number.
 * For contiguous groups sr_blkno is simply group + bit offset and
 * sr_bg_blkno is cleared; for discontiguous groups, walk bg_list and let
 * ocfs2_bg_discontig_fix_by_rec() translate/clamp, restoring sr_bg_blkno
 * when a record matches.
 * NOTE(review): the cluster-bitmap early path inside the first 'if' and
 * the loop's break are elided in this listing.
 */
1634 static void ocfs2_bg_discontig_fix_result(struct ocfs2_alloc_context *ac,
1635 struct ocfs2_group_desc *bg,
1636 struct ocfs2_suballoc_result *res)
1639 u64 bg_blkno = res->sr_bg_blkno; /* Save off */
1640 struct ocfs2_extent_rec *rec;
1641 struct ocfs2_dinode *di = (struct ocfs2_dinode *)ac->ac_bh->b_data;
1642 struct ocfs2_chain_list *cl = &di->id2.i_chain;
1644 if (ocfs2_is_cluster_bitmap(ac->ac_inode)) {
1649 res->sr_blkno = res->sr_bg_blkno + res->sr_bit_offset;
1650 res->sr_bg_blkno = 0; /* Clear it for contig block groups */
/* Discontig fixup only applies when the feature is on and records exist. */
1651 if (!ocfs2_supports_discontig_bg(OCFS2_SB(ac->ac_inode->i_sb)) ||
1652 !bg->bg_list.l_next_free_rec)
1655 for (i = 0; i < le16_to_cpu(bg->bg_list.l_next_free_rec); i++) {
1656 rec = &bg->bg_list.l_recs[i];
1657 if (ocfs2_bg_discontig_fix_by_rec(res, rec, cl)) {
1658 res->sr_bg_blkno = bg_blkno; /* Restore */
/*
 * Try to satisfy an allocation entirely from the single group at
 * res->sr_bg_blkno (the "last used group" hint): read the descriptor,
 * run the context's search callback, fix up discontig results, then (if
 * not a find-only pass) update dinode counters and set the bitmap bits,
 * rolling the counters back if setting bits fails. On success,
 * *bits_left reports the group's remaining free bits.
 * NOTE(review): error checks after each call, some parameter lines, the
 * 'out' label and return are elided in this listing.
 */
1664 static int ocfs2_search_one_group(struct ocfs2_alloc_context *ac,
1668 struct ocfs2_suballoc_result *res,
1672 struct buffer_head *group_bh = NULL;
1673 struct ocfs2_group_desc *gd;
1674 struct ocfs2_dinode *di = (struct ocfs2_dinode *)ac->ac_bh->b_data;
1675 struct inode *alloc_inode = ac->ac_inode;
1677 ret = ocfs2_read_group_descriptor(alloc_inode, di,
1678 res->sr_bg_blkno, &group_bh);
1684 gd = (struct ocfs2_group_desc *) group_bh->b_data;
1685 ret = ac->ac_group_search(alloc_inode, group_bh, bits_wanted, min_bits,
1686 ac->ac_max_block, res);
1694 ocfs2_bg_discontig_fix_result(ac, gd, res);
1697 * sr_bg_blkno might have been changed by
1698 * ocfs2_bg_discontig_fix_result
1700 res->sr_bg_stable_blkno = group_bh->b_blocknr;
/* find-loc-only callers want the location, not a committed allocation. */
1702 if (ac->ac_find_loc_only)
1705 ret = ocfs2_alloc_dinode_update_counts(alloc_inode, handle, ac->ac_bh,
1707 le16_to_cpu(gd->bg_chain));
1713 ret = ocfs2_block_group_set_bits(handle, alloc_inode, gd, group_bh,
1714 res->sr_bit_offset, res->sr_bits);
/* Setting bits failed: undo the dinode counter update above. */
1716 ocfs2_rollback_alloc_dinode_counts(alloc_inode, ac->ac_bh,
1718 le16_to_cpu(gd->bg_chain));
1723 *bits_left = le16_to_cpu(gd->bg_free_bits_count);
/*
 * Walk chain ac->ac_chain group by group until the context's search
 * callback finds space. On success, optionally relink the winning group
 * to the chain head (when not disabled and it is still reasonably
 * empty), update the allocator dinode counters, and set the bits —
 * rolling the counters back if bit-setting fails. *bits_left reports the
 * winning group's remaining free bits.
 * NOTE(review): locals (status, next_group), several error checks /
 * gotos, label 'bail', and brelse of group_bh are elided in this
 * listing — verify against the full source.
 */
1731 static int ocfs2_search_chain(struct ocfs2_alloc_context *ac,
1735 struct ocfs2_suballoc_result *res,
1741 struct inode *alloc_inode = ac->ac_inode;
1742 struct buffer_head *group_bh = NULL;
1743 struct buffer_head *prev_group_bh = NULL;
1744 struct ocfs2_dinode *fe = (struct ocfs2_dinode *) ac->ac_bh->b_data;
1745 struct ocfs2_chain_list *cl = (struct ocfs2_chain_list *) &fe->id2.i_chain;
1746 struct ocfs2_group_desc *bg;
1748 chain = ac->ac_chain;
1749 trace_ocfs2_search_chain_begin(
1750 (unsigned long long)OCFS2_I(alloc_inode)->ip_blkno,
1751 bits_wanted, chain);
/* Start from the chain head recorded on the allocator dinode. */
1753 status = ocfs2_read_group_descriptor(alloc_inode, fe,
1754 le64_to_cpu(cl->cl_recs[chain].c_blkno),
1760 bg = (struct ocfs2_group_desc *) group_bh->b_data;
1763 /* for now, the chain search is a bit simplistic. We just use
1764 * the 1st group with any empty bits. */
1765 while ((status = ac->ac_group_search(alloc_inode, group_bh,
1766 bits_wanted, min_bits,
/* End of chain: nothing found in any group. */
1769 if (!bg->bg_next_group)
/* Keep the previous descriptor around for a possible relink below. */
1772 brelse(prev_group_bh);
1773 prev_group_bh = NULL;
1775 next_group = le64_to_cpu(bg->bg_next_group);
1776 prev_group_bh = group_bh;
1778 status = ocfs2_read_group_descriptor(alloc_inode, fe,
1779 next_group, &group_bh);
1784 bg = (struct ocfs2_group_desc *) group_bh->b_data;
1787 if (status != -ENOSPC)
1792 trace_ocfs2_search_chain_succ(
1793 (unsigned long long)le64_to_cpu(bg->bg_blkno), res->sr_bits);
1795 res->sr_bg_blkno = le64_to_cpu(bg->bg_blkno);
1797 BUG_ON(res->sr_bits == 0);
1799 ocfs2_bg_discontig_fix_result(ac, bg, res);
1802 * sr_bg_blkno might have been changed by
1803 * ocfs2_bg_discontig_fix_result
1805 res->sr_bg_stable_blkno = group_bh->b_blocknr;
1808 * Keep track of previous block descriptor read. When
1809 * we find a target, if we have read more than X
1810 * number of descriptors, and the target is reasonably
1811 * empty, relink him to top of his chain.
1813 * We've read 0 extra blocks and only send one more to
1814 * the transaction, yet the next guy to search has a
1817 * Do this *after* figuring out how many bits we're taking out
1818 * of our target group.
1820 if (!ac->ac_disable_chain_relink &&
1822 (ocfs2_block_group_reasonably_empty(bg, res->sr_bits))) {
1823 status = ocfs2_relink_block_group(handle, alloc_inode,
1824 ac->ac_bh, group_bh,
1825 prev_group_bh, chain);
/* find-loc-only callers stop before committing the allocation. */
1832 if (ac->ac_find_loc_only)
1835 status = ocfs2_alloc_dinode_update_counts(alloc_inode, handle,
1836 ac->ac_bh, res->sr_bits,
1843 status = ocfs2_block_group_set_bits(handle,
/* Bit-setting failed: undo the counter update made just above. */
1850 ocfs2_rollback_alloc_dinode_counts(alloc_inode,
1851 ac->ac_bh, res->sr_bits, chain);
1856 trace_ocfs2_search_chain_end(
1857 (unsigned long long)le64_to_cpu(fe->i_blkno),
1861 *bits_left = le16_to_cpu(bg->bg_free_bits_count);
1864 brelse(prev_group_bh);
/*
 * Top-level claim routine: hand out up to bits_wanted contiguous bits
 * from the allocator behind @ac. Strategy: (1) retry the last-used group
 * hint via ocfs2_search_one_group(); (2) search the "victim" chain with
 * the most free bits; (3) fall back to scanning every chain in order
 * with relinking disabled (only one relink's journal credits were
 * reserved). On exit, ac_last_group is updated or cleared depending on
 * whether the winning group still looks useful.
 * NOTE(review): locals (status, i, victim, bits_left), several
 * status/goto checks, and the 'bail' label/return are elided in this
 * listing — verify against the full source.
 */
1871 /* will give out up to bits_wanted contiguous bits. */
1872 static int ocfs2_claim_suballoc_bits(struct ocfs2_alloc_context *ac,
1876 struct ocfs2_suballoc_result *res)
1881 u64 hint = ac->ac_last_group;
1882 struct ocfs2_chain_list *cl;
1883 struct ocfs2_dinode *fe;
1885 BUG_ON(ac->ac_bits_given >= ac->ac_bits_wanted);
1886 BUG_ON(bits_wanted > (ac->ac_bits_wanted - ac->ac_bits_given));
1889 fe = (struct ocfs2_dinode *) ac->ac_bh->b_data;
1891 /* The bh was validated by the inode read during
1892 * ocfs2_reserve_suballoc_bits(). Any corruption is a code bug. */
1893 BUG_ON(!OCFS2_IS_VALID_DINODE(fe));
/* On-disk consistency check: used bits can never exceed total bits. */
1895 if (le32_to_cpu(fe->id1.bitmap1.i_used) >=
1896 le32_to_cpu(fe->id1.bitmap1.i_total)) {
1897 status = ocfs2_error(ac->ac_inode->i_sb,
1898 "Chain allocator dinode %llu has %u used "
1899 "bits but only %u total.",
1900 (unsigned long long)le64_to_cpu(fe->i_blkno),
1901 le32_to_cpu(fe->id1.bitmap1.i_used),
1902 le32_to_cpu(fe->id1.bitmap1.i_total));
1906 res->sr_bg_blkno = hint;
1907 if (res->sr_bg_blkno) {
1908 /* Attempt to short-circuit the usual search mechanism
1909 * by jumping straight to the most recently used
1910 * allocation group. This helps us maintain some
1911 * contiguousness across allocations. */
1912 status = ocfs2_search_one_group(ac, handle, bits_wanted,
1913 min_bits, res, &bits_left);
1916 if (status < 0 && status != -ENOSPC) {
1922 cl = (struct ocfs2_chain_list *) &fe->id2.i_chain;
/* Second attempt: search the chain with the most free bits. */
1924 victim = ocfs2_find_victim_chain(cl);
1925 ac->ac_chain = victim;
1927 status = ocfs2_search_chain(ac, handle, bits_wanted, min_bits,
1930 hint = ocfs2_group_from_res(res);
1933 if (status < 0 && status != -ENOSPC) {
1938 trace_ocfs2_claim_suballoc_bits(victim);
1940 /* If we didn't pick a good victim, then just default to
1941 * searching each chain in order. Don't allow chain relinking
1942 * because we only calculate enough journal credits for one
1943 * relink per alloc. */
1944 ac->ac_disable_chain_relink = 1;
1945 for (i = 0; i < le16_to_cpu(cl->cl_next_free_rec); i ++) {
/* Skip the victim (already searched) and empty chains. */
1948 if (!cl->cl_recs[i].c_free)
1952 status = ocfs2_search_chain(ac, handle, bits_wanted, min_bits,
1955 hint = ocfs2_group_from_res(res);
1958 if (status < 0 && status != -ENOSPC) {
1965 if (status != -ENOSPC) {
1966 /* If the next search of this group is not likely to
1967 * yield a suitable extent, then we reset the last
1968 * group hint so as to not waste a disk read */
1969 if (bits_left < min_bits)
1970 ac->ac_last_group = 0;
1972 ac->ac_last_group = hint;
/*
 * Claim @bits_wanted metadata blocks from a metadata suballocator and
 * report the group (suballoc_loc), starting bit, first block number and
 * actual count back to the caller. Also advances ac_bits_given and bumps
 * the bg_allocs statistic.
 * NOTE(review): parameter lines (bits_wanted, suballoc_loc, blkno_start),
 * the claim call's argument tail, its status check, and the final return
 * are elided in this listing.
 */
1981 int ocfs2_claim_metadata(handle_t *handle,
1982 struct ocfs2_alloc_context *ac,
1985 u16 *suballoc_bit_start,
1986 unsigned int *num_bits,
1990 struct ocfs2_suballoc_result res = { .sr_blkno = 0, };
1993 BUG_ON(ac->ac_bits_wanted < (ac->ac_bits_given + bits_wanted));
1994 BUG_ON(ac->ac_which != OCFS2_AC_USE_META);
1996 status = ocfs2_claim_suballoc_bits(ac,
2005 atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
2007 *suballoc_loc = res.sr_bg_blkno;
2008 *suballoc_bit_start = res.sr_bit_offset;
2009 *blkno_start = res.sr_blkno;
2010 ac->ac_bits_given += res.sr_bits;
2011 *num_bits = res.sr_bits;
/*
 * Seed ac_last_group for a new-inode allocation so children land near
 * their parent directory: prefer the directory's cached last-used group
 * (only if it was recorded for the same slot), else fall back to the
 * group the parent dinode itself was allocated from.
 */
2019 static void ocfs2_init_inode_ac_group(struct inode *dir,
2020 struct buffer_head *parent_di_bh,
2021 struct ocfs2_alloc_context *ac)
2023 struct ocfs2_dinode *di = (struct ocfs2_dinode *)parent_di_bh->b_data;
2025 * Try to allocate inodes from some specific group.
2027 * If the parent dir has recorded the last group used in allocation,
2028 * cool, use it. Otherwise if we try to allocate new inode from the
2029 * same slot the parent dir belongs to, use the same chunk.
2031 * We are very careful here to avoid the mistake of setting
2032 * ac_last_group to a group descriptor from a different (unlocked) slot.
2034 if (OCFS2_I(dir)->ip_last_used_group &&
2035 OCFS2_I(dir)->ip_last_used_slot == ac->ac_alloc_slot)
2036 ac->ac_last_group = OCFS2_I(dir)->ip_last_used_group;
2037 else if (le16_to_cpu(di->i_suballoc_slot) == ac->ac_alloc_slot) {
/* i_suballoc_loc is set for discontig groups; otherwise derive it. */
2038 if (di->i_suballoc_loc)
2039 ac->ac_last_group = le64_to_cpu(di->i_suballoc_loc);
2041 ac->ac_last_group = ocfs2_which_suballoc_group(
2042 le64_to_cpu(di->i_blkno),
2043 le16_to_cpu(di->i_suballoc_bit));
/*
 * Remember, on the parent directory's in-memory inode, which group and
 * slot this allocation used so the next sibling allocation can start
 * from the same place (see ocfs2_init_inode_ac_group()).
 */
2047 static inline void ocfs2_save_inode_ac_group(struct inode *dir,
2048 struct ocfs2_alloc_context *ac)
2050 OCFS2_I(dir)->ip_last_used_group = ac->ac_last_group;
2051 OCFS2_I(dir)->ip_last_used_slot = ac->ac_alloc_slot;
/*
 * Two-phase inode allocation, phase 1: locate (but do not commit) a free
 * inode slot. Sets ac_find_loc_only so the claim path stops before
 * touching counters/bitmaps, stashes the result in ac_find_loc_priv for
 * ocfs2_claim_new_inode_at_loc(), and returns the chosen block in
 * *fe_blkno. A short transaction is opened only because the search may
 * relink a chain.
 * NOTE(review): locals (ret), kzalloc failure check, claim status check,
 * 'out' label, kfree on error, and final return are elided in this
 * listing.
 */
2054 int ocfs2_find_new_inode_loc(struct inode *dir,
2055 struct buffer_head *parent_fe_bh,
2056 struct ocfs2_alloc_context *ac,
2060 handle_t *handle = NULL;
2061 struct ocfs2_suballoc_result *res;
2064 BUG_ON(ac->ac_bits_given != 0);
2065 BUG_ON(ac->ac_bits_wanted != 1);
2066 BUG_ON(ac->ac_which != OCFS2_AC_USE_INODE);
2068 res = kzalloc(sizeof(*res), GFP_NOFS);
2075 ocfs2_init_inode_ac_group(dir, parent_fe_bh, ac);
2078 * The handle started here is for chain relink. Alternatively,
2079 * we could just disable relink for these calls.
2081 handle = ocfs2_start_trans(OCFS2_SB(dir->i_sb), OCFS2_SUBALLOC_ALLOC);
2082 if (IS_ERR(handle)) {
2083 ret = PTR_ERR(handle);
2090 * This will instruct ocfs2_claim_suballoc_bits and
2091 * ocfs2_search_one_group to search but save actual allocation
2094 ac->ac_find_loc_only = 1;
2096 ret = ocfs2_claim_suballoc_bits(ac, handle, 1, 1, res);
/* Hand the result to phase 2 (ocfs2_claim_new_inode_at_loc). */
2102 ac->ac_find_loc_priv = res;
2103 *fe_blkno = res->sr_blkno;
2104 ocfs2_update_inode_fsync_trans(handle, dir, 0);
2107 ocfs2_commit_trans(OCFS2_SB(dir->i_sb), handle);
/*
 * Two-phase inode allocation, phase 2: commit the location found by
 * ocfs2_find_new_inode_loc(). Re-reads the (stable) group descriptor,
 * updates the allocator dinode counters, sets the bitmap bit (rolling
 * counters back on failure), then reports suballoc_loc/bit and updates
 * the parent's allocation hint.
 * NOTE(review): parameter lines (dir, di_blkno, suballoc_loc,
 * suballoc_bit), error checks, set_bits argument tail, 'out' label,
 * brelse and final return are elided in this listing.
 */
2115 int ocfs2_claim_new_inode_at_loc(handle_t *handle,
2117 struct ocfs2_alloc_context *ac,
2124 struct ocfs2_suballoc_result *res = ac->ac_find_loc_priv;
2125 struct buffer_head *bg_bh = NULL;
2126 struct ocfs2_group_desc *bg;
2127 struct ocfs2_dinode *di = (struct ocfs2_dinode *) ac->ac_bh->b_data;
2130 * Since di_blkno is being passed back in, we check for any
2131 * inconsistencies which may have happened between
2132 * calls. These are code bugs as di_blkno is not expected to
2133 * change once returned from ocfs2_find_new_inode_loc()
2135 BUG_ON(res->sr_blkno != di_blkno);
2137 ret = ocfs2_read_group_descriptor(ac->ac_inode, di,
2138 res->sr_bg_stable_blkno, &bg_bh);
2144 bg = (struct ocfs2_group_desc *) bg_bh->b_data;
2145 chain = le16_to_cpu(bg->bg_chain);
2147 ret = ocfs2_alloc_dinode_update_counts(ac->ac_inode, handle,
2148 ac->ac_bh, res->sr_bits,
2155 ret = ocfs2_block_group_set_bits(handle,
/* Bit-setting failed: undo the counter update made just above. */
2162 ocfs2_rollback_alloc_dinode_counts(ac->ac_inode,
2163 ac->ac_bh, res->sr_bits, chain);
2168 trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
2171 atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
/* Inodes are always allocated one bit at a time. */
2173 BUG_ON(res->sr_bits != 1);
2175 *suballoc_loc = res->sr_bg_blkno;
2176 *suballoc_bit = res->sr_bit_offset;
2177 ac->ac_bits_given++;
2178 ocfs2_save_inode_ac_group(dir, ac);
/*
 * Single-phase inode allocation: seed the group hint from the parent
 * directory, claim exactly one bit from the inode suballocator, and
 * report group (suballoc_loc), bit and block number. Also records the
 * winning group on the parent for future allocations.
 * NOTE(review): parameter lines (dir, suballoc_loc, suballoc_bit,
 * fe_blkno), the claim call's argument tail, status check, 'bail' label
 * and final return are elided in this listing.
 */
2186 int ocfs2_claim_new_inode(handle_t *handle,
2188 struct buffer_head *parent_fe_bh,
2189 struct ocfs2_alloc_context *ac,
2195 struct ocfs2_suballoc_result res;
2198 BUG_ON(ac->ac_bits_given != 0);
2199 BUG_ON(ac->ac_bits_wanted != 1);
2200 BUG_ON(ac->ac_which != OCFS2_AC_USE_INODE);
2202 ocfs2_init_inode_ac_group(dir, parent_fe_bh, ac);
2204 status = ocfs2_claim_suballoc_bits(ac,
2213 atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
2215 BUG_ON(res.sr_bits != 1);
2217 *suballoc_loc = res.sr_bg_blkno;
2218 *suballoc_bit = res.sr_bit_offset;
2219 *fe_blkno = res.sr_blkno;
2220 ac->ac_bits_given++;
2221 ocfs2_save_inode_ac_group(dir, ac);
/*
 * Map (cluster group block, bit offset) to an absolute disk cluster
 * offset. The very first cluster group starts at cluster 0; any other
 * group's base cluster is its block number converted to clusters.
 * NOTE(review): parameter lines (bg_blkno, bg_bit_off), the 'cluster'
 * declaration/initialization, and the final return are elided in this
 * listing.
 */
2229 /* translate a group desc. blkno and it's bitmap offset into
2230 * disk cluster offset. */
2231 static inline u32 ocfs2_desc_bitmap_to_cluster_off(struct inode *inode,
2235 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
2238 BUG_ON(!ocfs2_is_cluster_bitmap(inode));
2240 if (bg_blkno != osb->first_cluster_group_blkno)
2241 cluster = ocfs2_blocks_to_clusters(inode->i_sb, bg_blkno);
2242 cluster += (u32) bg_bit_off;
/*
 * Map a cluster offset to the disk block of the cluster group that
 * covers it: group index = cluster / bitmap_cpg; group 0 lives at the
 * superblock-recorded first_cluster_group_blkno.
 * NOTE(review): the 'group_no' declaration and the 'if (!group_no)'
 * guard before the first return appear to be elided here — as shown, the
 * second return would be unreachable; verify against the full source.
 */
2246 /* given a cluster offset, calculate which block group it belongs to
2247 * and return that block offset. */
2248 u64 ocfs2_which_cluster_group(struct inode *inode, u32 cluster)
2250 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
2253 BUG_ON(!ocfs2_is_cluster_bitmap(inode));
2255 group_no = cluster / osb->bitmap_cpg;
2257 return osb->first_cluster_group_blkno;
2258 return ocfs2_clusters_to_blocks(inode->i_sb,
2259 group_no * osb->bitmap_cpg);
/*
 * Inverse of the mapping above: given the first block of a cluster,
 * compute which cluster group (*bg_blkno) owns it and the bit offset
 * (*bg_bit_off) of that cluster inside the group's bitmap.
 * NOTE(review): output-parameter declarations and the argument passed to
 * ocfs2_which_cluster_group() are elided in this listing.
 */
2262 /* given the block number of a cluster start, calculate which cluster
2263 * group and descriptor bitmap offset that corresponds to. */
2264 static inline void ocfs2_block_to_cluster_group(struct inode *inode,
2269 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
2270 u32 data_cluster = ocfs2_blocks_to_clusters(osb->sb, data_blkno);
2272 BUG_ON(!ocfs2_is_cluster_bitmap(inode));
2274 *bg_blkno = ocfs2_which_cluster_group(inode,
/* Group 0's bits start at cluster 0; other groups are group-relative. */
2277 if (*bg_blkno == osb->first_cluster_group_blkno)
2278 *bg_bit_off = (u16) data_cluster;
2280 *bg_bit_off = (u16) ocfs2_blocks_to_clusters(osb->sb,
2281 data_blkno - *bg_blkno);
/*
 * Claim up to @max_clusters clusters (at least @min_clusters contiguous)
 * either from the node's local alloc (OCFS2_AC_USE_LOCAL) or the global
 * bitmap (OCFS2_AC_USE_MAIN). Requests are clamped to one group's worth
 * minus one bit, and the bit result is translated back to a disk cluster
 * offset for the caller. ac_bits_given is advanced by what was granted.
 * NOTE(review): parameter lines (min/max_clusters, cluster_start,
 * num_clusters), status/goto checks, and the 'bail' label/return are
 * elided in this listing.
 */
2285 * min_bits - minimum contiguous chunk from this total allocation we
2286 * can handle. set to what we asked for originally for a full
2287 * contig. allocation, set to '1' to indicate we can deal with extents
2290 int __ocfs2_claim_clusters(handle_t *handle,
2291 struct ocfs2_alloc_context *ac,
2298 unsigned int bits_wanted = max_clusters;
2299 struct ocfs2_suballoc_result res = { .sr_blkno = 0, };
2300 struct ocfs2_super *osb = OCFS2_SB(ac->ac_inode->i_sb);
2302 BUG_ON(ac->ac_bits_given >= ac->ac_bits_wanted);
2304 BUG_ON(ac->ac_which != OCFS2_AC_USE_LOCAL
2305 && ac->ac_which != OCFS2_AC_USE_MAIN);
2307 if (ac->ac_which == OCFS2_AC_USE_LOCAL) {
/* Local alloc cannot promise multi-cluster contiguity. */
2308 WARN_ON(min_clusters > 1);
2310 status = ocfs2_claim_local_alloc_bits(osb,
2317 atomic_inc(&osb->alloc_stats.local_data);
2319 if (min_clusters > (osb->bitmap_cpg - 1)) {
2320 /* The only paths asking for contiguousness
2321 * should know about this already. */
2322 mlog(ML_ERROR, "minimum allocation requested %u exceeds "
2323 "group bitmap size %u!\n", min_clusters,
2328 /* clamp the current request down to a realistic size. */
2329 if (bits_wanted > (osb->bitmap_cpg - 1))
2330 bits_wanted = osb->bitmap_cpg - 1;
2332 status = ocfs2_claim_suballoc_bits(ac,
/* The global bitmap path reports results via bg_blkno + bit, not blkno. */
2338 BUG_ON(res.sr_blkno); /* cluster alloc can't set */
2340 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
2343 atomic_inc(&osb->alloc_stats.bitmap_data);
2344 *num_clusters = res.sr_bits;
2348 if (status != -ENOSPC)
2353 ac->ac_bits_given += *num_clusters;
/*
 * Convenience wrapper: claim all the clusters still owed to @ac
 * (bits_wanted - bits_given), with @min_clusters contiguity, via
 * __ocfs2_claim_clusters().
 * NOTE(review): parameter lines (min_clusters, cluster_start,
 * num_clusters) are elided in this listing.
 */
2361 int ocfs2_claim_clusters(handle_t *handle,
2362 struct ocfs2_alloc_context *ac,
2367 unsigned int bits_wanted = ac->ac_bits_wanted - ac->ac_bits_given;
2369 return __ocfs2_claim_clusters(handle, ac, min_clusters,
2370 bits_wanted, cluster_start, num_clusters);
/*
 * Clear @num_bits starting at @bit_off in group @bg under the journal.
 * When @undo_fn is supplied (cluster bitmap frees), journal access is
 * taken in UNDO mode and the bits are also flipped in the jbd2
 * b_committed_data copy under the bh state lock, so a crash mid-commit
 * can't re-expose in-flight frees. Verifies free_bits_count stays within
 * bg_bits afterwards.
 * NOTE(review): status checks, the clear loop header ('while (num_bits--)'
 * or similar), the undo_bg NULL path, and 'bail'/return are elided in
 * this listing.
 */
2373 static int ocfs2_block_group_clear_bits(handle_t *handle,
2374 struct inode *alloc_inode,
2375 struct ocfs2_group_desc *bg,
2376 struct buffer_head *group_bh,
2377 unsigned int bit_off,
2378 unsigned int num_bits,
2379 void (*undo_fn)(unsigned int bit,
2380 unsigned long *bmap))
2384 struct ocfs2_group_desc *undo_bg = NULL;
2386 /* The caller got this descriptor from
2387 * ocfs2_read_group_descriptor(). Any corruption is a code bug. */
2388 BUG_ON(!OCFS2_IS_VALID_GROUP_DESC(bg));
2390 trace_ocfs2_block_group_clear_bits(bit_off, num_bits);
/* Undo-protected frees only make sense on the cluster bitmap. */
2392 BUG_ON(undo_fn && !ocfs2_is_cluster_bitmap(alloc_inode));
2393 status = ocfs2_journal_access_gd(handle, INODE_CACHE(alloc_inode),
2396 OCFS2_JOURNAL_ACCESS_UNDO :
2397 OCFS2_JOURNAL_ACCESS_WRITE);
/* Take jbd2's committed-data copy under the bh state lock. */
2404 jbd_lock_bh_state(group_bh);
2405 undo_bg = (struct ocfs2_group_desc *)
2406 bh2jh(group_bh)->b_committed_data;
2412 ocfs2_clear_bit((bit_off + tmp),
2413 (unsigned long *) bg->bg_bitmap);
2415 undo_fn(bit_off + tmp,
2416 (unsigned long *) undo_bg->bg_bitmap);
2418 le16_add_cpu(&bg->bg_free_bits_count, num_bits);
/* Catch double-frees / corruption: free count may never exceed bg_bits. */
2419 if (le16_to_cpu(bg->bg_free_bits_count) > le16_to_cpu(bg->bg_bits)) {
2420 return ocfs2_error(alloc_inode->i_sb, "Group descriptor # %llu has bit"
2421 " count %u but claims %u are freed. num_bits %d",
2422 (unsigned long long)le64_to_cpu(bg->bg_blkno),
2423 le16_to_cpu(bg->bg_bits),
2424 le16_to_cpu(bg->bg_free_bits_count), num_bits);
2428 jbd_unlock_bh_state(group_bh);
2430 ocfs2_journal_dirty(handle, group_bh);
/*
 * Core free path shared by dinode and cluster frees: read the group at
 * @bg_blkno, clear @count bits starting at @start_bit (optionally with
 * jbd2 undo protection via @undo_fn), then credit the bits back to the
 * chain record and shrink bitmap1.i_used on the allocator dinode under
 * the journal. The allocator inode must already be cluster-locked by the
 * caller.
 * NOTE(review): locals (status, tmp_used, bg_blkno/count parameters),
 * status checks, the error path that re-sets the bits after a failed
 * journal_access_di (visible at 2489), and 'bail'/brelse/return are
 * elided in this listing.
 */
2436 * expects the suballoc inode to already be locked.
2438 static int _ocfs2_free_suballoc_bits(handle_t *handle,
2439 struct inode *alloc_inode,
2440 struct buffer_head *alloc_bh,
2441 unsigned int start_bit,
2444 void (*undo_fn)(unsigned int bit,
2445 unsigned long *bitmap))
2449 struct ocfs2_dinode *fe = (struct ocfs2_dinode *) alloc_bh->b_data;
2450 struct ocfs2_chain_list *cl = &fe->id2.i_chain;
2451 struct buffer_head *group_bh = NULL;
2452 struct ocfs2_group_desc *group;
2454 /* The alloc_bh comes from ocfs2_free_dinode() or
2455 * ocfs2_free_clusters(). The callers have all locked the
2456 * allocator and gotten alloc_bh from the lock call. This
2457 * validates the dinode buffer. Any corruption that has happened
2459 BUG_ON(!OCFS2_IS_VALID_DINODE(fe));
2460 BUG_ON((count + start_bit) > ocfs2_bits_per_group(cl));
2462 trace_ocfs2_free_suballoc_bits(
2463 (unsigned long long)OCFS2_I(alloc_inode)->ip_blkno,
2464 (unsigned long long)bg_blkno,
2467 status = ocfs2_read_group_descriptor(alloc_inode, fe, bg_blkno,
2473 group = (struct ocfs2_group_desc *) group_bh->b_data;
2475 BUG_ON((count + start_bit) > le16_to_cpu(group->bg_bits));
2477 status = ocfs2_block_group_clear_bits(handle, alloc_inode,
2479 start_bit, count, undo_fn);
2485 status = ocfs2_journal_access_di(handle, INODE_CACHE(alloc_inode),
2486 alloc_bh, OCFS2_JOURNAL_ACCESS_WRITE);
/* Failure path: re-set the just-cleared bits to keep bitmap consistent. */
2489 ocfs2_block_group_set_bits(handle, alloc_inode, group, group_bh,
2494 le32_add_cpu(&cl->cl_recs[le16_to_cpu(group->bg_chain)].c_free,
2496 tmp_used = le32_to_cpu(fe->id1.bitmap1.i_used);
2497 fe->id1.bitmap1.i_used = cpu_to_le32(tmp_used - count);
2498 ocfs2_journal_dirty(handle, alloc_bh);
/*
 * Public free entry point without undo protection: delegates to
 * _ocfs2_free_suballoc_bits() with undo_fn == NULL.
 * NOTE(review): the bg_blkno/count parameter lines are elided in this
 * listing.
 */
2508 int ocfs2_free_suballoc_bits(handle_t *handle,
2509 struct inode *alloc_inode,
2510 struct buffer_head *alloc_bh,
2511 unsigned int start_bit,
2515 return _ocfs2_free_suballoc_bits(handle, alloc_inode, alloc_bh,
2516 start_bit, bg_blkno, count, NULL);
/*
 * Free the single suballocator bit backing dinode @di: derive the owning
 * group from i_blkno/i_suballoc_bit, preferring the explicitly recorded
 * i_suballoc_loc when present (discontig block groups).
 */
2519 int ocfs2_free_dinode(handle_t *handle,
2520 struct inode *inode_alloc_inode,
2521 struct buffer_head *inode_alloc_bh,
2522 struct ocfs2_dinode *di)
2524 u64 blk = le64_to_cpu(di->i_blkno);
2525 u16 bit = le16_to_cpu(di->i_suballoc_bit);
2526 u64 bg_blkno = ocfs2_which_suballoc_group(blk, bit);
/* Discontig groups record their location explicitly on the inode. */
2528 if (di->i_suballoc_loc)
2529 bg_blkno = le64_to_cpu(di->i_suballoc_loc);
2530 return ocfs2_free_suballoc_bits(handle, inode_alloc_inode,
2531 inode_alloc_bh, bit, bg_blkno, 1);
/*
 * Free @num_clusters starting at block @start_blk in the global bitmap:
 * translate the block to (group, bit) with
 * ocfs2_block_to_cluster_group(), free via _ocfs2_free_suballoc_bits()
 * (passing @undo_fn through), and on success notify the local alloc that
 * free bits appeared. start_blk must be cluster-aligned.
 * NOTE(review): locals (status, bg_blkno, bg_start_bit, start_blk
 * parameter line), the status check, and 'out'/return are elided in this
 * listing.
 */
2534 static int _ocfs2_free_clusters(handle_t *handle,
2535 struct inode *bitmap_inode,
2536 struct buffer_head *bitmap_bh,
2538 unsigned int num_clusters,
2539 void (*undo_fn)(unsigned int bit,
2540 unsigned long *bitmap))
2545 struct ocfs2_dinode *fe;
2547 /* You can't ever have a contiguous set of clusters
2548 * bigger than a block group bitmap so we never have to worry
2549 * about looping on them.
2550 * This is expensive. We can safely remove once this stuff has
2551 * gotten tested really well. */
2552 BUG_ON(start_blk != ocfs2_clusters_to_blocks(bitmap_inode->i_sb, ocfs2_blocks_to_clusters(bitmap_inode->i_sb, start_blk)));
2554 fe = (struct ocfs2_dinode *) bitmap_bh->b_data;
2556 ocfs2_block_to_cluster_group(bitmap_inode, start_blk, &bg_blkno,
2559 trace_ocfs2_free_clusters((unsigned long long)bg_blkno,
2560 (unsigned long long)start_blk,
2561 bg_start_bit, num_clusters);
2563 status = _ocfs2_free_suballoc_bits(handle, bitmap_inode, bitmap_bh,
2564 bg_start_bit, bg_blkno,
2565 num_clusters, undo_fn);
/* Let the local alloc window know space came back. */
2571 ocfs2_local_alloc_seen_free_bits(OCFS2_SB(bitmap_inode->i_sb),
/*
 * Free previously-used clusters with jbd2 undo protection (the undo_fn
 * argument on the elided final line) so a crash during commit can't
 * re-expose them.
 * NOTE(review): the start_blk parameter line and the final undo_fn
 * argument are elided in this listing.
 */
2580 int ocfs2_free_clusters(handle_t *handle,
2581 struct inode *bitmap_inode,
2582 struct buffer_head *bitmap_bh,
2584 unsigned int num_clusters)
2586 return _ocfs2_free_clusters(handle, bitmap_inode, bitmap_bh,
2587 start_blk, num_clusters,
/*
 * Same as ocfs2_free_clusters() but for never-used clusters, where undo
 * protection is unnecessary (the elided final argument passes no
 * undo_fn).
 * NOTE(review): the start_blk parameter line and the final argument are
 * elided in this listing.
 */
2592 * Give never-used clusters back to the global bitmap. We don't need
2593 * to protect these bits in the undo buffer.
2595 int ocfs2_release_clusters(handle_t *handle,
2596 struct inode *bitmap_inode,
2597 struct buffer_head *bitmap_bh,
2599 unsigned int num_clusters)
2601 return _ocfs2_free_clusters(handle, bitmap_inode, bitmap_bh,
2602 start_blk, num_clusters,
/*
 * Debug helper: dump every field of a group descriptor to the kernel
 * log. Note several multi-byte fields here are printed without
 * le*_to_cpu conversion (bg_size, bg_bits, bg_free_bits_count, bg_chain,
 * bg_next_group, bg_parent_dinode, bg_blkno) — debug-only output, but
 * byte-swapped on big-endian hosts.
 */
2606 static inline void ocfs2_debug_bg(struct ocfs2_group_desc *bg)
2608 printk("Block Group:\n");
2609 printk("bg_signature: %s\n", bg->bg_signature);
2610 printk("bg_size: %u\n", bg->bg_size);
2611 printk("bg_bits: %u\n", bg->bg_bits);
2612 printk("bg_free_bits_count: %u\n", bg->bg_free_bits_count);
2613 printk("bg_chain: %u\n", bg->bg_chain);
2614 printk("bg_generation: %u\n", le32_to_cpu(bg->bg_generation));
2615 printk("bg_next_group: %llu\n",
2616 (unsigned long long)bg->bg_next_group);
2617 printk("bg_parent_dinode: %llu\n",
2618 (unsigned long long)bg->bg_parent_dinode);
2619 printk("bg_blkno: %llu\n",
2620 (unsigned long long)bg->bg_blkno);
/*
 * Debug helper: dump the allocator dinode's bitmap counters and every
 * chain record. As with ocfs2_debug_bg(), several fields are printed
 * raw (i_blkno, i_size, i_clusters, the cl_* fields and chain record
 * members) without le*_to_cpu conversion — debug-only output.
 */
2623 static inline void ocfs2_debug_suballoc_inode(struct ocfs2_dinode *fe)
2627 printk("Suballoc Inode %llu:\n", (unsigned long long)fe->i_blkno);
2628 printk("i_signature: %s\n", fe->i_signature);
2629 printk("i_size: %llu\n",
2630 (unsigned long long)fe->i_size);
2631 printk("i_clusters: %u\n", fe->i_clusters);
2632 printk("i_generation: %u\n",
2633 le32_to_cpu(fe->i_generation));
2634 printk("id1.bitmap1.i_used: %u\n",
2635 le32_to_cpu(fe->id1.bitmap1.i_used));
2636 printk("id1.bitmap1.i_total: %u\n",
2637 le32_to_cpu(fe->id1.bitmap1.i_total));
2638 printk("id2.i_chain.cl_cpg: %u\n", fe->id2.i_chain.cl_cpg);
2639 printk("id2.i_chain.cl_bpc: %u\n", fe->id2.i_chain.cl_bpc);
2640 printk("id2.i_chain.cl_count: %u\n", fe->id2.i_chain.cl_count);
2641 printk("id2.i_chain.cl_next_free_rec: %u\n",
2642 fe->id2.i_chain.cl_next_free_rec);
2643 for(i = 0; i < fe->id2.i_chain.cl_next_free_rec; i++) {
2644 printk("fe->id2.i_chain.cl_recs[%d].c_free: %u\n", i,
2645 fe->id2.i_chain.cl_recs[i].c_free);
2646 printk("fe->id2.i_chain.cl_recs[%d].c_total: %u\n", i,
2647 fe->id2.i_chain.cl_recs[i].c_total);
2648 printk("fe->id2.i_chain.cl_recs[%d].c_blkno: %llu\n", i,
2649 (unsigned long long)fe->id2.i_chain.cl_recs[i].c_blkno);
/*
 * Reserve everything an extent-adding operation will need before the
 * transaction starts: metadata blocks (when the extent tree may need new
 * leaves — always for sparse filesystems when free extent slots are
 * scarce) and data clusters. On failure after reserving metadata, the
 * meta context is freed so *meta_ac is not leaked.
 * NOTE(review): the initial *meta_ac/*data_ac NULL-ing, several status
 * checks, and the 'out' label/return are elided in this listing.
 */
2654 * For a given allocation, determine which allocators will need to be
2655 * accessed, and lock them, reserving the appropriate number of bits.
2657 * Sparse file systems call this from ocfs2_write_begin_nolock()
2658 * and ocfs2_allocate_unwritten_extents().
2660 * File systems which don't support holes call this from
2661 * ocfs2_extend_allocation().
2663 int ocfs2_lock_allocators(struct inode *inode,
2664 struct ocfs2_extent_tree *et,
2665 u32 clusters_to_add, u32 extents_to_split,
2666 struct ocfs2_alloc_context **data_ac,
2667 struct ocfs2_alloc_context **meta_ac)
2669 int ret = 0, num_free_extents;
/* Each split can consume up to two extra extent records. */
2670 unsigned int max_recs_needed = clusters_to_add + 2 * extents_to_split;
2671 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
2677 BUG_ON(clusters_to_add != 0 && data_ac == NULL);
2679 num_free_extents = ocfs2_num_free_extents(osb, et);
2680 if (num_free_extents < 0) {
2681 ret = num_free_extents;
2687 * Sparse allocation file systems need to be more conservative
2688 * with reserving room for expansion - the actual allocation
2689 * happens while we've got a journal handle open so re-taking
2690 * a cluster lock (because we ran out of room for another
2691 * extent) will violate ordering rules.
2693 * Most of the time we'll only be seeing this 1 cluster at a time
2696 * Always lock for any unwritten extents - we might want to
2697 * add blocks during a split.
2699 if (!num_free_extents ||
2700 (ocfs2_sparse_alloc(osb) && num_free_extents < max_recs_needed)) {
2701 ret = ocfs2_reserve_new_metadata(osb, et->et_root_el, meta_ac);
2709 if (clusters_to_add == 0)
2712 ret = ocfs2_reserve_clusters(osb, clusters_to_add, data_ac);
/* Data reservation failed after metadata succeeded: release meta_ac. */
2722 ocfs2_free_alloc_context(*meta_ac);
2727 * We cannot have an error and a non null *data_ac.
/*
 * Read dinode @blkno with an uncached, lock-free ("dirty") block read
 * and extract its suballocator coordinates: owning slot, suballoc bit,
 * and (when recorded) the explicit group block. Validates the dinode
 * signature and that the slot is either INVALID or within max_slots.
 * Output pointers are only written when non-NULL.
 * NOTE(review): parameter line (suballoc_bit), status/goto checks, the
 * 'bail' label, brelse and return are elided in this listing.
 */
2735 * Read the inode specified by blkno to get suballoc_slot and
2738 static int ocfs2_get_suballoc_slot_bit(struct ocfs2_super *osb, u64 blkno,
2739 u16 *suballoc_slot, u64 *group_blkno,
2743 struct buffer_head *inode_bh = NULL;
2744 struct ocfs2_dinode *inode_fe;
2746 trace_ocfs2_get_suballoc_slot_bit((unsigned long long)blkno);
2748 /* dirty read disk */
2749 status = ocfs2_read_blocks_sync(osb, blkno, 1, &inode_bh);
2751 mlog(ML_ERROR, "read block %llu failed %d\n",
2752 (unsigned long long)blkno, status);
2756 inode_fe = (struct ocfs2_dinode *) inode_bh->b_data;
2757 if (!OCFS2_IS_VALID_DINODE(inode_fe)) {
2758 mlog(ML_ERROR, "invalid inode %llu requested\n",
2759 (unsigned long long)blkno);
2764 if (le16_to_cpu(inode_fe->i_suballoc_slot) != (u16)OCFS2_INVALID_SLOT &&
2765 (u32)le16_to_cpu(inode_fe->i_suballoc_slot) > osb->max_slots - 1) {
2766 mlog(ML_ERROR, "inode %llu has invalid suballoc slot %u\n",
2767 (unsigned long long)blkno,
2768 (u32)le16_to_cpu(inode_fe->i_suballoc_slot));
2774 *suballoc_slot = le16_to_cpu(inode_fe->i_suballoc_slot);
2776 *suballoc_bit = le16_to_cpu(inode_fe->i_suballoc_bit);
2778 *group_blkno = le64_to_cpu(inode_fe->i_suballoc_loc);
/*
 * Check whether @bit is set in the suballocator group owning @blkno:
 * range-check the bit against the allocator's bits-per-group, resolve
 * the group (preferring an explicitly recorded @group_blkno), read its
 * descriptor, and test the bitmap into *res. Caller must hold the
 * suballocator's cluster lock for a trustworthy answer.
 * NOTE(review): parameter lines (bit, res), locals (status, bg_blkno),
 * status/goto handling, 'bail', brelse and return are elided in this
 * listing.
 */
2789 * test whether bit is SET in allocator bitmap or not. on success, 0
2790 * is returned and *res is 1 for SET; 0 otherwise. when fails, errno
2791 * is returned and *res is meaningless. Call this after you have
2792 * cluster locked against suballoc, or you may get a result based on
2793 * non-up2date contents
2795 static int ocfs2_test_suballoc_bit(struct ocfs2_super *osb,
2796 struct inode *suballoc,
2797 struct buffer_head *alloc_bh,
2798 u64 group_blkno, u64 blkno,
2801 struct ocfs2_dinode *alloc_di;
2802 struct ocfs2_group_desc *group;
2803 struct buffer_head *group_bh = NULL;
2807 trace_ocfs2_test_suballoc_bit((unsigned long long)blkno,
2810 alloc_di = (struct ocfs2_dinode *)alloc_bh->b_data;
/* Reject bits past the end of a group for this allocator's geometry. */
2811 if ((bit + 1) > ocfs2_bits_per_group(&alloc_di->id2.i_chain)) {
2812 mlog(ML_ERROR, "suballoc bit %u out of range of %u\n",
2814 ocfs2_bits_per_group(&alloc_di->id2.i_chain));
2819 bg_blkno = group_blkno ? group_blkno :
2820 ocfs2_which_suballoc_group(blkno, bit);
2821 status = ocfs2_read_group_descriptor(suballoc, alloc_di, bg_blkno,
2824 mlog(ML_ERROR, "read group %llu failed %d\n",
2825 (unsigned long long)bg_blkno, status);
2829 group = (struct ocfs2_group_desc *) group_bh->b_data;
2830 *res = ocfs2_test_bit(bit, (unsigned long *)group->bg_bitmap);
2841 * Test if the bit representing this inode (blkno) is set in the
2844 * On success, 0 is returned and *res is 1 for SET; 0 otherwise.
2846 * In the event of failure, a negative value is returned and *res is
2849 * Callers must make sure to hold nfs_sync_lock to prevent
2850 * ocfs2_delete_inode() on another node from accessing the same
2851 * suballocator concurrently.
2853 int ocfs2_test_inode_bit(struct ocfs2_super *osb, u64 blkno, int *res)
2856 u64 group_blkno = 0;
2857 u16 suballoc_bit = 0, suballoc_slot = 0;
2858 struct inode *inode_alloc_inode;
2859 struct buffer_head *alloc_bh = NULL;
2861 trace_ocfs2_test_inode_bit((unsigned long long)blkno);
2863 status = ocfs2_get_suballoc_slot_bit(osb, blkno, &suballoc_slot,
2864 &group_blkno, &suballoc_bit);
2866 mlog(ML_ERROR, "get alloc slot and bit failed %d\n", status);
2871 ocfs2_get_system_file_inode(osb, INODE_ALLOC_SYSTEM_INODE,
2873 if (!inode_alloc_inode) {
2874 /* the error code could be inaccurate, but we are not able to
2875 * get the correct one. */
2877 mlog(ML_ERROR, "unable to get alloc inode in slot %u\n",
2878 (u32)suballoc_slot);
2882 mutex_lock(&inode_alloc_inode->i_mutex);
2883 status = ocfs2_inode_lock(inode_alloc_inode, &alloc_bh, 0);
2885 mutex_unlock(&inode_alloc_inode->i_mutex);
2886 iput(inode_alloc_inode);
2887 mlog(ML_ERROR, "lock on alloc inode on slot %u failed %d\n",
2888 (u32)suballoc_slot, status);
2892 status = ocfs2_test_suballoc_bit(osb, inode_alloc_inode, alloc_bh,
2893 group_blkno, blkno, suballoc_bit, res);
2895 mlog(ML_ERROR, "test suballoc bit failed %d\n", status);
2897 ocfs2_inode_unlock(inode_alloc_inode, 0);
2898 mutex_unlock(&inode_alloc_inode->i_mutex);
2900 iput(inode_alloc_inode);