1 /* -*- mode: c; c-basic-offset: 8; -*-
2 * vim: noexpandtab sw=8 ts=8 sts=0:
6 * Node local data allocation
8 * Copyright (C) 2002, 2004 Oracle. All rights reserved.
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public
12 * License as published by the Free Software Foundation; either
13 * version 2 of the License, or (at your option) any later version.
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
20 * You should have received a copy of the GNU General Public
21 * License along with this program; if not, write to the
22 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 23  * Boston, MA 02111-1307, USA.
27 #include <linux/types.h>
28 #include <linux/slab.h>
29 #include <linux/highmem.h>
30 #include <linux/bitops.h>
32 #include <cluster/masklog.h>
37 #include "blockcheck.h"
41 #include "localalloc.h"
45 #include "ocfs2_trace.h"
47 #include "buffer_head_io.h"
/* Accessor for the local alloc payload embedded in a dinode's id2 union. */
49 #define OCFS2_LOCAL_ALLOC(dinode) (&((dinode)->id2.i_lab))

/* Forward declarations for the static helpers defined later in this file. */

/* Count the bits set in the local alloc bitmap (used for sanity checks). */
51 static u32 ocfs2_local_alloc_count_bits(struct ocfs2_dinode *alloc);
/* Find a run of clear bits in the local alloc bitmap, preferring reservations. */
53 static int ocfs2_local_alloc_find_clear_bits(struct ocfs2_super *osb,
54 struct ocfs2_dinode *alloc,
56 struct ocfs2_alloc_reservation *resv);
/* Zero out the local alloc bitmap and its total/used counters. */
58 static void ocfs2_clear_local_alloc(struct ocfs2_dinode *alloc);
/* Return the local alloc's unused clusters to the main (global) bitmap. */
60 static int ocfs2_sync_local_to_main(struct ocfs2_super *osb,
62 struct ocfs2_dinode *alloc,
63 struct inode *main_bm_inode,
64 struct buffer_head *main_bm_bh);
/* Reserve main-bitmap bits for the next local alloc window. */
66 static int ocfs2_local_alloc_reserve_for_window(struct ocfs2_super *osb,
67 struct ocfs2_alloc_context **ac,
68 struct inode **bitmap_inode,
69 struct buffer_head **bitmap_bh);
/* Claim the reserved clusters and point the local alloc at the new window. */
71 static int ocfs2_local_alloc_new_window(struct ocfs2_super *osb,
73 struct ocfs2_alloc_context *ac);
/* Flush the current window back to the main bitmap and open a new one. */
75 static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
76 struct inode *local_alloc_inode);
79 * ocfs2_la_default_mb() - determine a default size, in megabytes of
82 * Generally, we'd like to pick as large a local alloc as
83 * possible. Performance on large workloads tends to scale
84 * proportionally to la size. In addition to that, the reservations
85 * code functions more efficiently as it can reserve more windows for
88 * Some things work against us when trying to choose a large local alloc:
90 * - We need to ensure our sizing is picked to leave enough space in
91 * group descriptors for other allocations (such as block groups,
92 * etc). Picking default sizes which are a multiple of 4 could help
93 * - block groups are allocated in 2mb and 4mb chunks.
95 * - Likewise, we don't want to starve other nodes of bits on small
96 * file systems. This can easily be taken care of by limiting our
97 * default to a reasonable size (256M) on larger cluster sizes.
99 * - Some file systems can't support very large sizes - 4k and 8k in
100 * particular are limited to less than 128 and 256 megabytes respectively.
102 * The following reference table shows group descriptor and local
103 * alloc maximums at various cluster sizes (4k blocksize)
105 * csize: 4K group: 126M la: 121M
106 * csize: 8K group: 252M la: 243M
107 * csize: 16K group: 504M la: 486M
108 * csize: 32K group: 1008M la: 972M
109 * csize: 64K group: 2016M la: 1944M
110 * csize: 128K group: 4032M la: 3888M
111 * csize: 256K group: 8064M la: 7776M
112 * csize: 512K group: 16128M la: 15552M
113 * csize: 1024K group: 32256M la: 31104M
/* Upper bound on the default window size; see the sizing discussion above. */
115 #define OCFS2_LA_MAX_DEFAULT_MB 256
/* Historical default, kept for very small blocksize/clustersize combos. */
116 #define OCFS2_LA_OLD_DEFAULT 8
/*
 * Compute the default local alloc window size, in megabytes, for this
 * superblock.  Bounded by group descriptor capacity, per-slot disk space
 * and the number of bits one local alloc block can hold.
 *
 * NOTE(review): this listing is elided — the declarations of gd_mb/la_mb
 * and the final return statement are not visible here.
 */
117 unsigned int ocfs2_la_default_mb(struct ocfs2_super *osb)
121 unsigned int la_max_mb;
122 unsigned int megs_per_slot;
123 struct super_block *sb = osb->sb;
/* Megabytes addressable by one group descriptor (8 bits per byte of bitmap). */
125 gd_mb = ocfs2_clusters_to_megabytes(osb->sb,
126 8 * ocfs2_group_bitmap_size(sb, 0, osb->s_feature_incompat));
129 * This takes care of file systems with very small group
130 * descriptors - 512 byte blocksize at cluster sizes lower
131 * than 16K and also 1k blocksize with 4k cluster size.
133 if ((sb->s_blocksize == 512 && osb->s_clustersize <= 8192)
134 || (sb->s_blocksize == 1024 && osb->s_clustersize == 4096))
135 return OCFS2_LA_OLD_DEFAULT;
138 * Leave enough room for some block groups and make the final
139 * value we work from a multiple of 4.
147 * Keep window sizes down to a reasonable default
149 if (la_mb > OCFS2_LA_MAX_DEFAULT_MB) {
151 * Some clustersize / blocksize combinations will have
152 * given us a larger than OCFS2_LA_MAX_DEFAULT_MB
153 * default size, but get poor distribution when
154 * limited to exactly 256 megabytes.
156 * As an example, 16K clustersize at 4K blocksize
157 * gives us a cluster group size of 504M. Paring the
158 * local alloc size down to 256 however, would give us
159 * only one window and around 200MB left in the
160 * cluster group. Instead, find the first size below
161 * 256 which would give us an even distribution.
163 * Larger cluster group sizes actually work out pretty
164 * well when pared to 256, so we don't have to do this
165 * for any group that fits more than two
166 * OCFS2_LA_MAX_DEFAULT_MB windows.
168 if (gd_mb > (2 * OCFS2_LA_MAX_DEFAULT_MB))
/* Halve the group size until it fits under 256MB — an even divisor. */
171 unsigned int gd_mult = gd_mb;
173 while (gd_mult > 256)
174 gd_mult = gd_mult >> 1;
/* Don't let one slot's window starve the other slots of clusters. */
180 megs_per_slot = osb->osb_clusters_at_boot / osb->max_slots;
181 megs_per_slot = ocfs2_clusters_to_megabytes(osb->sb, megs_per_slot);
182 /* Too many nodes, too few disk clusters. */
183 if (megs_per_slot < la_mb)
184 la_mb = megs_per_slot;
186 /* We can't store more bits than we can in a block. */
187 la_max_mb = ocfs2_clusters_to_megabytes(osb->sb,
188 ocfs2_local_alloc_size(sb) * 8);
189 if (la_mb > la_max_mb)
/*
 * Set both the default and current local alloc window size (in clusters)
 * from a user-requested size in megabytes.  requested_mb == -1 means "no
 * user request"; requests above the per-block bitmap capacity are clamped.
 */
195 void ocfs2_la_set_sizes(struct ocfs2_super *osb, int requested_mb)
197 struct super_block *sb = osb->sb;
198 unsigned int la_default_mb = ocfs2_la_default_mb(osb);
199 unsigned int la_max_mb;
/* Hard ceiling: one local alloc block holds la_size * 8 bitmap bits. */
201 la_max_mb = ocfs2_clusters_to_megabytes(sb,
202 ocfs2_local_alloc_size(sb) * 8);
204 trace_ocfs2_la_set_sizes(requested_mb, la_max_mb, la_default_mb);
206 if (requested_mb == -1) {
207 /* No user request - use defaults */
208 osb->local_alloc_default_bits =
209 ocfs2_megabytes_to_clusters(sb, la_default_mb);
210 } else if (requested_mb > la_max_mb) {
211 /* Request is too big, we give the maximum available */
212 osb->local_alloc_default_bits =
213 ocfs2_megabytes_to_clusters(sb, la_max_mb);
/* Otherwise honor the request exactly (elided else branch). */
215 osb->local_alloc_default_bits =
216 ocfs2_megabytes_to_clusters(sb, requested_mb);
/* Current window starts at the default size. */
219 osb->local_alloc_bits = osb->local_alloc_default_bits;
/* True if the local alloc is usable — either fully enabled or throttled. */
222 static inline int ocfs2_la_state_enabled(struct ocfs2_super *osb)
224 return (osb->local_alloc_state == OCFS2_LA_THROTTLED ||
225 osb->local_alloc_state == OCFS2_LA_ENABLED);
/*
 * Called when num_clusters contiguous free clusters have been observed in
 * the global bitmap.  If that is enough for a full default window,
 * re-enable a disabled/throttled local alloc immediately and cancel the
 * delayed re-enable work.
 */
228 void ocfs2_local_alloc_seen_free_bits(struct ocfs2_super *osb,
229 unsigned int num_clusters)
/* osb_lock protects local_alloc_state and the delayed work. */
231 spin_lock(&osb->osb_lock);
232 if (osb->local_alloc_state == OCFS2_LA_DISABLED ||
233 osb->local_alloc_state == OCFS2_LA_THROTTLED)
234 if (num_clusters >= osb->local_alloc_default_bits) {
235 cancel_delayed_work(&osb->la_enable_wq);
236 osb->local_alloc_state = OCFS2_LA_ENABLED;
238 spin_unlock(&osb->osb_lock);
/*
 * Delayed-work handler: unconditionally re-enable the local alloc after the
 * OCFS2_LA_ENABLE_INTERVAL back-off queued by ocfs2_recalc_la_window().
 */
241 void ocfs2_la_enable_worker(struct work_struct *work)
243 struct ocfs2_super *osb =
244 container_of(work, struct ocfs2_super,
246 spin_lock(&osb->osb_lock);
247 osb->local_alloc_state = OCFS2_LA_ENABLED;
248 spin_unlock(&osb->osb_lock);
252 * Tell us whether a given allocation should use the local alloc
253 * file. Otherwise, it has to go to the main bitmap.
255 * This function does semi-dirty reads of local alloc size and state!
256 * This is ok however, as the values are re-checked once under mutex.
/*
 * Decide whether an allocation of `bits` clusters should be served from the
 * local alloc window (nonzero) or from the main bitmap.  See the comment
 * above: state/size are re-verified under i_mutex by the caller's path.
 */
258 int ocfs2_alloc_should_use_local(struct ocfs2_super *osb, u64 bits)
263 spin_lock(&osb->osb_lock);
264 la_bits = osb->local_alloc_bits;
/* Disabled local alloc never serves requests. */
266 if (!ocfs2_la_state_enabled(osb))
269 /* la_bits should be at least twice the size (in clusters) of
270 * a new block group. We want to be sure block group
271 * allocations go through the local alloc, so allow an
272 * allocation to take up to half the bitmap. */
273 if (bits > (la_bits / 2))
278 trace_ocfs2_alloc_should_use_local(
279 (unsigned long long)bits, osb->local_alloc_state, la_bits, ret);
280 spin_unlock(&osb->osb_lock);
/*
 * Read and validate this node's local alloc inode at mount time.  On
 * success the buffer_head is cached in osb->local_alloc_bh and the state
 * moves to OCFS2_LA_ENABLED.  Validation covers the inode flags, la_size,
 * and that the window was cleanly recovered (all counters zero).
 */
284 int ocfs2_load_local_alloc(struct ocfs2_super *osb)
287 struct ocfs2_dinode *alloc = NULL;
288 struct buffer_head *alloc_bh = NULL;
290 struct inode *inode = NULL;
291 struct ocfs2_local_alloc *la;
/* local_alloc_bits == 0 means local alloc is not in use (elided early-out). */
293 if (osb->local_alloc_bits == 0)
/* A window can never exceed one cluster group; fall back to defaults. */
296 if (osb->local_alloc_bits >= osb->bitmap_cpg) {
297 mlog(ML_NOTICE, "Requested local alloc window %d is larger "
298 "than max possible %u. Using defaults.\n",
299 osb->local_alloc_bits, (osb->bitmap_cpg - 1));
300 osb->local_alloc_bits =
301 ocfs2_megabytes_to_clusters(osb->sb,
302 ocfs2_la_default_mb(osb));
305 /* read the alloc off disk */
306 inode = ocfs2_get_system_file_inode(osb, LOCAL_ALLOC_SYSTEM_INODE,
/* Bypass the cache: we want the on-disk contents from before this mount. */
314 status = ocfs2_read_inode_block_full(inode, &alloc_bh,
315 OCFS2_BH_IGNORE_CACHE);
321 alloc = (struct ocfs2_dinode *) alloc_bh->b_data;
322 la = OCFS2_LOCAL_ALLOC(alloc);
/* Must carry both the local alloc and bitmap inode flags. */
324 if (!(le32_to_cpu(alloc->i_flags) &
325 (OCFS2_LOCAL_ALLOC_FL|OCFS2_BITMAP_FL))) {
326 mlog(ML_ERROR, "Invalid local alloc inode, %llu\n",
327 (unsigned long long)OCFS2_I(inode)->ip_blkno);
/* la_size must be nonzero and fit within one local alloc block. */
332 if ((la->la_size == 0) ||
333 (le16_to_cpu(la->la_size) > ocfs2_local_alloc_size(inode->i_sb))) {
334 mlog(ML_ERROR, "Local alloc size is invalid (la_size = %u)\n",
335 le16_to_cpu(la->la_size));
340 /* do a little verification. */
341 num_used = ocfs2_local_alloc_count_bits(alloc);
343 /* hopefully the local alloc has always been recovered before
/* Any nonzero used/total/off means a previous session left a dirty window. */
346 || alloc->id1.bitmap1.i_used
347 || alloc->id1.bitmap1.i_total
349 mlog(ML_ERROR, "Local alloc hasn't been recovered!\n"
350 "found = %u, set = %u, taken = %u, off = %u\n",
351 num_used, le32_to_cpu(alloc->id1.bitmap1.i_used),
352 le32_to_cpu(alloc->id1.bitmap1.i_total),
353 OCFS2_LOCAL_ALLOC(alloc)->la_bm_off);
/* Cache the bh for the lifetime of the mount and mark the LA usable. */
355 osb->local_alloc_bh = alloc_bh;
356 osb->local_alloc_state = OCFS2_LA_ENABLED;
363 trace_ocfs2_load_local_alloc(osb->local_alloc_bits);
371 * return any unused bits to the bitmap and write out a clean
374 * local_alloc_bh is optional. If not passed, we will simply use the
375 * one off osb. If you do pass it however, be warned that it *will* be
376 * returned brelse'd and NULL'd out.*/
/*
 * Unmount-time teardown: flush pending enable work, clear the on-disk
 * local alloc under a journal transaction, and give the unused clusters
 * back to the global bitmap via ocfs2_sync_local_to_main().  A kmalloc'd
 * copy of the dinode is used for the sync because the live one is cleared
 * first (so a crash mid-way can't double-free main bitmap bits).
 */
377 void ocfs2_shutdown_local_alloc(struct ocfs2_super *osb)
381 struct inode *local_alloc_inode = NULL;
382 struct buffer_head *bh = NULL;
383 struct buffer_head *main_bm_bh = NULL;
384 struct inode *main_bm_inode = NULL;
385 struct ocfs2_dinode *alloc_copy = NULL;
386 struct ocfs2_dinode *alloc = NULL;
/* Stop any pending re-enable and drain the workqueue before teardown. */
388 cancel_delayed_work(&osb->la_enable_wq);
389 flush_workqueue(osb->ocfs2_wq);
391 if (osb->local_alloc_state == OCFS2_LA_UNUSED)
395 ocfs2_get_system_file_inode(osb,
396 LOCAL_ALLOC_SYSTEM_INODE,
398 if (!local_alloc_inode) {
/* No further window use from this point on. */
404 osb->local_alloc_state = OCFS2_LA_DISABLED;
406 ocfs2_resmap_uninit(&osb->osb_la_resmap);
408 main_bm_inode = ocfs2_get_system_file_inode(osb,
409 GLOBAL_BITMAP_SYSTEM_INODE,
411 if (!main_bm_inode) {
417 inode_lock(main_bm_inode);
419 status = ocfs2_inode_lock(main_bm_inode, &main_bm_bh, 1);
425 /* WINDOW_MOVE_CREDITS is a bit heavy... */
426 handle = ocfs2_start_trans(osb, OCFS2_WINDOW_MOVE_CREDITS);
427 if (IS_ERR(handle)) {
428 mlog_errno(PTR_ERR(handle));
433 bh = osb->local_alloc_bh;
434 alloc = (struct ocfs2_dinode *) bh->b_data;
/* Snapshot before clearing — the sync below works from this copy. */
436 alloc_copy = kmalloc(bh->b_size, GFP_NOFS);
441 memcpy(alloc_copy, alloc, bh->b_size);
443 status = ocfs2_journal_access_di(handle, INODE_CACHE(local_alloc_inode),
444 bh, OCFS2_JOURNAL_ACCESS_WRITE);
450 ocfs2_clear_local_alloc(alloc);
451 ocfs2_journal_dirty(handle, bh);
454 osb->local_alloc_bh = NULL;
455 osb->local_alloc_state = OCFS2_LA_UNUSED;
/* Return the snapshot's unused bits to the main bitmap. */
457 status = ocfs2_sync_local_to_main(osb, handle, alloc_copy,
458 main_bm_inode, main_bm_bh);
463 ocfs2_commit_trans(osb, handle);
468 ocfs2_inode_unlock(main_bm_inode, 1);
471 inode_unlock(main_bm_inode);
475 iput(local_alloc_inode);
481 * We want to free the bitmap bits outside of any recovery context as
482 * we'll need a cluster lock to do so, but we must clear the local
483 * alloc before giving up the recovered nodes journal. To solve this,
484 * we kmalloc a copy of the local alloc before it's change for the
485 * caller to process with ocfs2_complete_local_alloc_recovery
/*
 * Recovery step 1 (see comment above): read the dead slot's local alloc,
 * hand a kmalloc'd copy back via *alloc_copy (caller owns/frees it), then
 * stamp a cleared local alloc to disk so the recovered journal can be
 * released.  The copy is later processed by
 * ocfs2_complete_local_alloc_recovery().
 */
487 int ocfs2_begin_local_alloc_recovery(struct ocfs2_super *osb,
489 struct ocfs2_dinode **alloc_copy)
492 struct buffer_head *alloc_bh = NULL;
493 struct inode *inode = NULL;
494 struct ocfs2_dinode *alloc;
496 trace_ocfs2_begin_local_alloc_recovery(slot_num);
500 inode = ocfs2_get_system_file_inode(osb,
501 LOCAL_ALLOC_SYSTEM_INODE,
/* Uncached read: we need the dead node's on-disk state. */
511 status = ocfs2_read_inode_block_full(inode, &alloc_bh,
512 OCFS2_BH_IGNORE_CACHE);
518 *alloc_copy = kmalloc(alloc_bh->b_size, GFP_KERNEL);
519 if (!(*alloc_copy)) {
523 memcpy((*alloc_copy), alloc_bh->b_data, alloc_bh->b_size);
/* Clear the live copy and write it straight back (bypassing the journal). */
525 alloc = (struct ocfs2_dinode *) alloc_bh->b_data;
526 ocfs2_clear_local_alloc(alloc);
528 ocfs2_compute_meta_ecc(osb->sb, alloc_bh->b_data, &alloc->i_check);
529 status = ocfs2_write_block(osb, alloc_bh, INODE_CACHE(inode));
552 * Step 2: By now, we've completed the journal recovery, we've stamped
553 * a clean local alloc on disk and dropped the node out of the
554 * recovery map. Dlm locks will no longer stall, so lets clear out the
/*
 * Recovery step 2 (see comment above): under the global bitmap cluster
 * lock and a journal transaction, release the recovered slot's unused
 * local alloc bits back to the main bitmap.  `alloc` is the copy made by
 * ocfs2_begin_local_alloc_recovery(); the caller still owns it.
 */
557 int ocfs2_complete_local_alloc_recovery(struct ocfs2_super *osb,
558 struct ocfs2_dinode *alloc)
562 struct buffer_head *main_bm_bh = NULL;
563 struct inode *main_bm_inode;
565 main_bm_inode = ocfs2_get_system_file_inode(osb,
566 GLOBAL_BITMAP_SYSTEM_INODE,
568 if (!main_bm_inode) {
574 inode_lock(main_bm_inode);
576 status = ocfs2_inode_lock(main_bm_inode, &main_bm_bh, 1);
582 handle = ocfs2_start_trans(osb, OCFS2_WINDOW_MOVE_CREDITS);
583 if (IS_ERR(handle)) {
584 status = PTR_ERR(handle);
590 /* we want the bitmap change to be recorded on disk asap */
593 status = ocfs2_sync_local_to_main(osb, handle, alloc,
594 main_bm_inode, main_bm_bh);
598 ocfs2_commit_trans(osb, handle);
601 ocfs2_inode_unlock(main_bm_inode, 1);
604 inode_unlock(main_bm_inode);
/* Recovery freed bits — reset slot-stealing heuristics. */
612 ocfs2_init_steal_slots(osb);
619 * make sure we've got at least bits_wanted contiguous bits in the
620 * local alloc. You lose them when you drop i_mutex.
622 * We will add ourselves to the transaction passed in, but may start
623 * our own in order to shift windows.
/*
 * Reserve bits_wanted contiguous clusters in the local alloc window for a
 * subsequent claim (see comment above).  Takes the local alloc inode's
 * i_mutex (released on error, otherwise held for the caller via ac).
 * Slides the window if there aren't enough free bits.  On success, ac is
 * set up to allocate from the local alloc (OCFS2_AC_USE_LOCAL).
 */
625 int ocfs2_reserve_local_alloc_bits(struct ocfs2_super *osb,
627 struct ocfs2_alloc_context *ac)
630 struct ocfs2_dinode *alloc;
631 struct inode *local_alloc_inode;
632 unsigned int free_bits;
637 ocfs2_get_system_file_inode(osb,
638 LOCAL_ALLOC_SYSTEM_INODE,
640 if (!local_alloc_inode) {
646 inode_lock(local_alloc_inode);
649 * We must double check state and allocator bits because
650 * another process may have changed them while holding i_mutex.
652 spin_lock(&osb->osb_lock);
653 if (!ocfs2_la_state_enabled(osb) ||
654 (bits_wanted > osb->local_alloc_bits)) {
655 spin_unlock(&osb->osb_lock);
659 spin_unlock(&osb->osb_lock);
661 alloc = (struct ocfs2_dinode *) osb->local_alloc_bh->b_data;
/* Debug builds cross-check the used counter against an actual bit count. */
663 #ifdef CONFIG_OCFS2_DEBUG_FS
664 if (le32_to_cpu(alloc->id1.bitmap1.i_used) !=
665 ocfs2_local_alloc_count_bits(alloc)) {
666 ocfs2_error(osb->sb, "local alloc inode %llu says it has %u used bits, but a count shows %u\n",
667 (unsigned long long)le64_to_cpu(alloc->i_blkno),
668 le32_to_cpu(alloc->id1.bitmap1.i_used),
669 ocfs2_local_alloc_count_bits(alloc));
675 free_bits = le32_to_cpu(alloc->id1.bitmap1.i_total) -
676 le32_to_cpu(alloc->id1.bitmap1.i_used);
677 if (bits_wanted > free_bits) {
678 /* uhoh, window change time. */
680 ocfs2_local_alloc_slide_window(osb, local_alloc_inode);
682 if (status != -ENOSPC)
688 * Under certain conditions, the window slide code
689 * might have reduced the number of bits available or
690 * disabled the local alloc entirely. Re-check
691 * here and return -ENOSPC if necessary.
694 if (!ocfs2_la_state_enabled(osb))
697 free_bits = le32_to_cpu(alloc->id1.bitmap1.i_total) -
698 le32_to_cpu(alloc->id1.bitmap1.i_used);
699 if (bits_wanted > free_bits)
/* Success: wire the context to the local alloc for the claim phase. */
703 ac->ac_inode = local_alloc_inode;
704 /* We should never use localalloc from another slot */
705 ac->ac_alloc_slot = osb->slot_num;
706 ac->ac_which = OCFS2_AC_USE_LOCAL;
707 get_bh(osb->local_alloc_bh);
708 ac->ac_bh = osb->local_alloc_bh;
/* Error path: drop i_mutex and the inode ref we took above. */
711 if (status < 0 && local_alloc_inode) {
712 inode_unlock(local_alloc_inode);
713 iput(local_alloc_inode);
716 trace_ocfs2_reserve_local_alloc_bits(
717 (unsigned long long)ac->ac_max_block,
718 bits_wanted, osb->slot_num, status);
/*
 * Claim previously-reserved bits from the local alloc window.  Finds a
 * clear run, returns the absolute cluster offset via *bit_off and count
 * via *num_bits, sets the bits in the journaled local alloc bitmap and
 * bumps i_used.  Caller must have gone through
 * ocfs2_reserve_local_alloc_bits() (enforced by the BUG_ON).
 */
725 int ocfs2_claim_local_alloc_bits(struct ocfs2_super *osb,
727 struct ocfs2_alloc_context *ac,
733 struct inode *local_alloc_inode;
735 struct ocfs2_dinode *alloc;
736 struct ocfs2_local_alloc *la;
738 BUG_ON(ac->ac_which != OCFS2_AC_USE_LOCAL);
740 local_alloc_inode = ac->ac_inode;
741 alloc = (struct ocfs2_dinode *) osb->local_alloc_bh->b_data;
742 la = OCFS2_LOCAL_ALLOC(alloc);
744 start = ocfs2_local_alloc_find_clear_bits(osb, alloc, &bits_wanted,
747 /* TODO: Shouldn't we just BUG here? */
753 bitmap = la->la_bitmap;
/* la_bm_off is the window's base cluster; start is relative to it. */
754 *bit_off = le32_to_cpu(la->la_bm_off) + start;
755 *num_bits = bits_wanted;
757 status = ocfs2_journal_access_di(handle,
758 INODE_CACHE(local_alloc_inode),
760 OCFS2_JOURNAL_ACCESS_WRITE);
/* Tell the reservation code which bits we actually took. */
766 ocfs2_resmap_claimed_bits(&osb->osb_la_resmap, ac->ac_resv, start,
770 ocfs2_set_bit(start++, bitmap);
772 le32_add_cpu(&alloc->id1.bitmap1.i_used, *num_bits);
773 ocfs2_journal_dirty(handle, osb->local_alloc_bh);
/*
 * Give num_bits clusters starting at absolute offset bit_off back to the
 * local alloc window: clear the corresponding bitmap bits (journaled) and
 * decrement i_used.  Mirror of ocfs2_claim_local_alloc_bits().
 */
781 int ocfs2_free_local_alloc_bits(struct ocfs2_super *osb,
783 struct ocfs2_alloc_context *ac,
789 struct inode *local_alloc_inode;
791 struct ocfs2_dinode *alloc;
792 struct ocfs2_local_alloc *la;
794 BUG_ON(ac->ac_which != OCFS2_AC_USE_LOCAL);
796 local_alloc_inode = ac->ac_inode;
797 alloc = (struct ocfs2_dinode *) osb->local_alloc_bh->b_data;
798 la = OCFS2_LOCAL_ALLOC(alloc);
800 bitmap = la->la_bitmap;
/* Convert the absolute cluster offset to a window-relative bit index. */
801 start = bit_off - le32_to_cpu(la->la_bm_off);
802 clear_bits = num_bits;
804 status = ocfs2_journal_access_di(handle,
805 INODE_CACHE(local_alloc_inode),
807 OCFS2_JOURNAL_ACCESS_WRITE);
814 ocfs2_clear_bit(start++, bitmap);
/* Negative delta: these bits are free again. */
816 le32_add_cpu(&alloc->id1.bitmap1.i_used, -num_bits);
817 ocfs2_journal_dirty(handle, osb->local_alloc_bh);
/*
 * Count the bits set in the local alloc bitmap via memweight() — used to
 * cross-check i_used in debug/verification paths.
 */
823 static u32 ocfs2_local_alloc_count_bits(struct ocfs2_dinode *alloc)
826 struct ocfs2_local_alloc *la = OCFS2_LOCAL_ALLOC(alloc);
828 count = memweight(la->la_bitmap, le16_to_cpu(la->la_size));
830 trace_ocfs2_local_alloc_count_bits(count);
/*
 * Find a run of *numbits clear bits in the local alloc bitmap.  With
 * reservations enabled the search is delegated to the reservation map;
 * otherwise a linear scan over the bitmap is done.  *numbits may be
 * shrunk to what was actually found; returns the window-relative start
 * bit (error/negative paths elided from this listing).
 */
834 static int ocfs2_local_alloc_find_clear_bits(struct ocfs2_super *osb,
835 struct ocfs2_dinode *alloc,
837 struct ocfs2_alloc_reservation *resv)
839 int numfound = 0, bitoff, left, startoff, lastzero;
841 struct ocfs2_alloc_reservation r;
843 struct ocfs2_reservation_map *resmap = &osb->osb_la_resmap;
/* Empty window — nothing to find. */
845 if (!alloc->id1.bitmap1.i_total) {
/* No caller-supplied reservation: use a temporary one (elided setup). */
852 ocfs2_resv_init_once(&r);
853 ocfs2_resv_set_type(&r, OCFS2_RESV_FLAG_TMP);
858 if (ocfs2_resmap_resv_bits(resmap, resv, &bitoff, &numfound) == 0) {
/* Reservation may have found fewer bits than asked; shrink the request. */
859 if (numfound < *numbits)
865 * Code error. While reservations are enabled, local
866 * allocation should _always_ go through them.
868 BUG_ON(osb->osb_resv_level != 0);
871 * Reservations are disabled. Handle this the old way.
874 bitmap = OCFS2_LOCAL_ALLOC(alloc)->la_bitmap;
876 numfound = bitoff = startoff = 0;
878 left = le32_to_cpu(alloc->id1.bitmap1.i_total);
879 while ((bitoff = ocfs2_find_next_zero_bit(bitmap, left, startoff)) != -1) {
880 if (bitoff == left) {
881 /* mlog(0, "bitoff (%d) == left", bitoff); */
884 /* mlog(0, "Found a zero: bitoff = %d, startoff = %d, "
885 "numfound = %d\n", bitoff, startoff, numfound);*/
887 /* Ok, we found a zero bit... is it contig. or do we
889 if (bitoff == startoff) {
890 /* we found a zero */
894 /* got a zero after some ones */
898 /* we got everything we needed */
899 if (numfound == *numbits) {
900 /* mlog(0, "Found it all!\n"); */
905 trace_ocfs2_local_alloc_find_clear_bits_search_bitmap(bitoff, numfound);
/* Convert the end of the found run back to its start bit. */
907 if (numfound == *numbits)
908 bitoff = startoff - numfound;
/* Failure path: drop any reservation we were holding. */
914 ocfs2_resv_discard(resmap, resv);
916 trace_ocfs2_local_alloc_find_clear_bits(*numbits,
917 le32_to_cpu(alloc->id1.bitmap1.i_total),
/*
 * Reset the local alloc to "no window": zero the total/used counters and
 * every byte of the bitmap.  Callers journal or write the dinode out.
 */
923 static void ocfs2_clear_local_alloc(struct ocfs2_dinode *alloc)
925 struct ocfs2_local_alloc *la = OCFS2_LOCAL_ALLOC(alloc);
928 alloc->id1.bitmap1.i_total = 0;
929 alloc->id1.bitmap1.i_used = 0;
931 for(i = 0; i < le16_to_cpu(la->la_size); i++)
932 la->la_bitmap[i] = 0;
936 /* turn this on and uncomment below to aid debugging window shifts. */
/*
 * Debug-only helper: assert that `count` bits starting at `start` are all
 * clear, printk-ing any set bit.  NOTE(review): per the comment above this
 * is normally compiled out — confirm it is guarded (e.g. #if 0) in the
 * full file before relying on it.
 */
937 static void ocfs2_verify_zero_bits(unsigned long *bitmap,
941 unsigned int tmp = count;
943 if (ocfs2_test_bit(start + tmp, bitmap)) {
944 printk("ocfs2_verify_zero_bits: start = %u, count = "
945 "%u\n", start, count);
946 printk("ocfs2_verify_zero_bits: bit %u is set!",
955 * sync the local alloc to main bitmap.
957 * assumes you've already locked the main bitmap -- the bitmap inode
958 * passed is used for caching.
/*
 * Release every still-clear (unused) cluster in the local alloc window
 * back to the main bitmap (see comment above: caller holds the main
 * bitmap cluster lock; main_bm_inode is for caching only).  Scans for
 * runs of zero bits and frees each run via ocfs2_release_clusters().
 */
960 static int ocfs2_sync_local_to_main(struct ocfs2_super *osb,
962 struct ocfs2_dinode *alloc,
963 struct inode *main_bm_inode,
964 struct buffer_head *main_bm_bh)
967 int bit_off, left, count, start;
971 struct ocfs2_local_alloc *la = OCFS2_LOCAL_ALLOC(alloc);
973 trace_ocfs2_sync_local_to_main(
974 le32_to_cpu(alloc->id1.bitmap1.i_total),
975 le32_to_cpu(alloc->id1.bitmap1.i_used));
/* No window at all — nothing to give back. */
977 if (!alloc->id1.bitmap1.i_total) {
/* Fully used window — nothing free to return either. */
981 if (le32_to_cpu(alloc->id1.bitmap1.i_used) ==
982 le32_to_cpu(alloc->id1.bitmap1.i_total)) {
/* Block address of the window's first cluster. */
986 la_start_blk = ocfs2_clusters_to_blocks(osb->sb,
987 le32_to_cpu(la->la_bm_off));
988 bitmap = la->la_bitmap;
989 start = count = bit_off = 0;
990 left = le32_to_cpu(alloc->id1.bitmap1.i_total);
992 while ((bit_off = ocfs2_find_next_zero_bit(bitmap, left, start))
/* Contiguous zero run continues — keep accumulating (elided body). */
994 if ((bit_off < left) && (bit_off == start)) {
/* Run ended: free the accumulated `count` clusters starting before it. */
1000 blkno = la_start_blk +
1001 ocfs2_clusters_to_blocks(osb->sb,
1004 trace_ocfs2_sync_local_to_main_free(
1005 count, start - count,
1006 (unsigned long long)la_start_blk,
1007 (unsigned long long)blkno);
1009 status = ocfs2_release_clusters(handle,
1018 if (bit_off >= left)
1021 start = bit_off + 1;
/* Events fed to ocfs2_recalc_la_window() to resize/throttle the window. */
1030 enum ocfs2_la_event {
1031 OCFS2_LA_EVENT_SLIDE, /* Normal window slide. */
1032 OCFS2_LA_EVENT_FRAGMENTED, /* The global bitmap has
1033 * enough bits theoretically
1034 * free, but a contiguous
1035 * allocation could not be
1037 OCFS2_LA_EVENT_ENOSPC, /* Global bitmap doesn't have
1038 * enough bits free to satisfy
/* Back-off before the delayed worker re-enables a disabled local alloc. */
1041 #define OCFS2_LA_ENABLE_INTERVAL (30 * HZ)
1043 * Given an event, calculate the size of our next local alloc window.
1045 * This should always be called under i_mutex of the local alloc inode
1046 * so that local alloc disabling doesn't race with processes trying to
1047 * use the allocator.
1049 * Returns the state which the local alloc was left in. This value can
1050 * be ignored by some paths.
/*
 * Resize the local alloc window in response to `event` (see the comment
 * above: call under the local alloc inode's i_mutex).  ENOSPC/fragmented
 * events halve the window, throttling or fully disabling it (with a
 * delayed re-enable); otherwise the window may grow back to the default.
 * Returns the resulting local_alloc_state.
 */
1052 static int ocfs2_recalc_la_window(struct ocfs2_super *osb,
1053 enum ocfs2_la_event event)
1058 spin_lock(&osb->osb_lock);
/* Callers shouldn't get here while disabled — warn once if they do. */
1059 if (osb->local_alloc_state == OCFS2_LA_DISABLED) {
1060 WARN_ON_ONCE(osb->local_alloc_state == OCFS2_LA_DISABLED);
1065 * ENOSPC and fragmentation are treated similarly for now.
1067 if (event == OCFS2_LA_EVENT_ENOSPC ||
1068 event == OCFS2_LA_EVENT_FRAGMENTED) {
1070 * We ran out of contiguous space in the primary
1071 * bitmap. Drastically reduce the number of bits used
1072 * by local alloc until we have to disable it.
1074 bits = osb->local_alloc_bits >> 1;
/* Stay throttled while the window is still above 1MB worth of clusters. */
1075 if (bits > ocfs2_megabytes_to_clusters(osb->sb, 1)) {
1077 * By setting state to THROTTLED, we'll keep
1078 * the number of local alloc bits used down
1079 * until an event occurs which would give us
1080 * reason to assume the bitmap situation might
1083 osb->local_alloc_state = OCFS2_LA_THROTTLED;
1084 osb->local_alloc_bits = bits;
/* Too small to be useful — disable and schedule a delayed re-enable. */
1086 osb->local_alloc_state = OCFS2_LA_DISABLED;
1088 queue_delayed_work(osb->ocfs2_wq, &osb->la_enable_wq,
1089 OCFS2_LA_ENABLE_INTERVAL);
1094 * Don't increase the size of the local alloc window until we
1095 * know we might be able to fulfill the request. Otherwise, we
1096 * risk bouncing around the global bitmap during periods of
1099 if (osb->local_alloc_state != OCFS2_LA_THROTTLED)
1100 osb->local_alloc_bits = osb->local_alloc_default_bits;
1103 state = osb->local_alloc_state;
1104 spin_unlock(&osb->osb_lock);
/*
 * Allocate an ocfs2_alloc_context and reserve local_alloc_bits clusters
 * in the main bitmap for the next window.  On ENOSPC the window is shrunk
 * via ocfs2_recalc_la_window() and the reservation retried (retry loop
 * partially elided).  On success hands back the bitmap inode (with a ref
 * via igrab) and its buffer_head; frees *ac on failure.
 */
1109 static int ocfs2_local_alloc_reserve_for_window(struct ocfs2_super *osb,
1110 struct ocfs2_alloc_context **ac,
1111 struct inode **bitmap_inode,
1112 struct buffer_head **bitmap_bh)
1116 *ac = kzalloc(sizeof(struct ocfs2_alloc_context), GFP_KERNEL);
1124 (*ac)->ac_bits_wanted = osb->local_alloc_bits;
1125 status = ocfs2_reserve_cluster_bitmap_bits(osb, *ac);
1126 if (status == -ENOSPC) {
/* Shrink the window; only retry while the local alloc stays enabled. */
1127 if (ocfs2_recalc_la_window(osb, OCFS2_LA_EVENT_ENOSPC) ==
/* Reset the context before retrying with the smaller window. */
1131 ocfs2_free_ac_resource(*ac);
1132 memset(*ac, 0, sizeof(struct ocfs2_alloc_context));
1140 *bitmap_inode = (*ac)->ac_inode;
1141 igrab(*bitmap_inode);
1142 *bitmap_bh = (*ac)->ac_bh;
1146 if ((status < 0) && *ac) {
1147 ocfs2_free_alloc_context(*ac);
1157 * pass it the bitmap lock in lock_bh if you have it.
/*
 * Claim the clusters reserved by ocfs2_local_alloc_reserve_for_window()
 * and point the (already cleared) local alloc at the new window: set
 * la_bm_off/i_total, zero i_used and the bitmap, and restart the
 * reservation map.  Retries with a smaller request on ENOSPC due to
 * fragmentation.
 */
1159 static int ocfs2_local_alloc_new_window(struct ocfs2_super *osb,
1161 struct ocfs2_alloc_context *ac)
1164 u32 cluster_off, cluster_count;
1165 struct ocfs2_dinode *alloc = NULL;
1166 struct ocfs2_local_alloc *la;
1168 alloc = (struct ocfs2_dinode *) osb->local_alloc_bh->b_data;
1169 la = OCFS2_LOCAL_ALLOC(alloc);
1171 trace_ocfs2_local_alloc_new_window(
1172 le32_to_cpu(alloc->id1.bitmap1.i_total),
1173 osb->local_alloc_bits);
1175 /* Instruct the allocation code to try the most recently used
1176 * cluster group. We'll re-record the group used this pass
1178 ac->ac_last_group = osb->la_last_gd;
1180 /* we used the generic suballoc reserve function, but we set
1181 * everything up nicely, so there's no reason why we can't use
1182 * the more specific cluster api to claim bits. */
1183 status = ocfs2_claim_clusters(handle, ac, osb->local_alloc_bits,
1184 &cluster_off, &cluster_count);
1185 if (status == -ENOSPC) {
1188 * Note: We could also try syncing the journal here to
1189 * allow use of any free bits which the current
1190 * transaction can't give us access to. --Mark
/* Shrink and retry only if the local alloc wasn't disabled by the recalc. */
1192 if (ocfs2_recalc_la_window(osb, OCFS2_LA_EVENT_FRAGMENTED) ==
1196 ac->ac_bits_wanted = osb->local_alloc_bits;
1197 status = ocfs2_claim_clusters(handle, ac,
1198 osb->local_alloc_bits,
1201 if (status == -ENOSPC)
1204 * We only shrunk the *minimum* number in our
1205 * request - it's entirely possible that the allocator
1206 * might give us more than we asked for.
1209 spin_lock(&osb->osb_lock);
1210 osb->local_alloc_bits = cluster_count;
1211 spin_unlock(&osb->osb_lock);
/* Remember the group that worked for the next window slide. */
1220 osb->la_last_gd = ac->ac_last_group;
1222 la->la_bm_off = cpu_to_le32(cluster_off);
1223 alloc->id1.bitmap1.i_total = cpu_to_le32(cluster_count);
1224 /* just in case... In the future when we find space ourselves,
1225 * we don't have to get all contiguous -- but we'll have to
1226 * set all previously used bits in bitmap and update
1227 * la_bits_set before setting the bits in the main bitmap. */
1228 alloc->id1.bitmap1.i_used = 0;
1229 memset(OCFS2_LOCAL_ALLOC(alloc)->la_bitmap, 0,
1230 le16_to_cpu(la->la_size));
/* Reservations track the fresh window from scratch. */
1232 ocfs2_resmap_restart(&osb->osb_la_resmap, cluster_count,
1233 OCFS2_LOCAL_ALLOC(alloc)->la_bitmap);
1235 trace_ocfs2_local_alloc_new_window_result(
1236 OCFS2_LOCAL_ALLOC(alloc)->la_bm_off,
1237 le32_to_cpu(alloc->id1.bitmap1.i_total));
1245 /* Note that we do *NOT* lock the local alloc inode here as
1246 * it's been locked already for us. */
1247 static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
1248 struct inode *local_alloc_inode)
1251 struct buffer_head *main_bm_bh = NULL;
1252 struct inode *main_bm_inode = NULL;
1253 handle_t *handle = NULL;
1254 struct ocfs2_dinode *alloc;
1255 struct ocfs2_dinode *alloc_copy = NULL;
1256 struct ocfs2_alloc_context *ac = NULL;
1258 ocfs2_recalc_la_window(osb, OCFS2_LA_EVENT_SLIDE);
1260 /* This will lock the main bitmap for us. */
1261 status = ocfs2_local_alloc_reserve_for_window(osb,
1266 if (status != -ENOSPC)
1271 handle = ocfs2_start_trans(osb, OCFS2_WINDOW_MOVE_CREDITS);
1272 if (IS_ERR(handle)) {
1273 status = PTR_ERR(handle);
1279 alloc = (struct ocfs2_dinode *) osb->local_alloc_bh->b_data;
1281 /* We want to clear the local alloc before doing anything
1282 * else, so that if we error later during this operation,
1283 * local alloc shutdown won't try to double free main bitmap
1284 * bits. Make a copy so the sync function knows which bits to
1286 alloc_copy = kmalloc(osb->local_alloc_bh->b_size, GFP_NOFS);
1292 memcpy(alloc_copy, alloc, osb->local_alloc_bh->b_size);
1294 status = ocfs2_journal_access_di(handle,
1295 INODE_CACHE(local_alloc_inode),
1296 osb->local_alloc_bh,
1297 OCFS2_JOURNAL_ACCESS_WRITE);
1303 ocfs2_clear_local_alloc(alloc);
1304 ocfs2_journal_dirty(handle, osb->local_alloc_bh);
1306 status = ocfs2_sync_local_to_main(osb, handle, alloc_copy,
1307 main_bm_inode, main_bm_bh);
1313 status = ocfs2_local_alloc_new_window(osb, handle, ac);
1315 if (status != -ENOSPC)
1320 atomic_inc(&osb->alloc_stats.moves);
1324 ocfs2_commit_trans(osb, handle);
1328 iput(main_bm_inode);
1332 ocfs2_free_alloc_context(ac);