ext4: set goal start correctly in ext4_mb_normalize_request
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
 * Written by Alex Tomas <alex@clusterfs.com>
 */


/*
 * mballoc.c contains the multiblocks allocation routines
 */

#include "ext4_jbd2.h"
#include "mballoc.h"
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/nospec.h>
#include <linux/backing-dev.h>
#include <trace/events/ext4.h>

/*
 * MUSTDO:
 *   - test ext4_ext_search_left() and ext4_ext_search_right()
 *   - search for metadata in few groups
 *
 * TODO v4:
 *   - normalization should take into account whether file is still open
 *   - discard preallocations if no free space left (policy?)
 *   - don't normalize tails
 *   - quota
 *   - reservation for superuser
 *
 * TODO v3:
 *   - bitmap read-ahead (proposed by Oleg Drokin aka green)
 *   - track min/max extents in each group for better group selection
 *   - mb_mark_used() may allocate chunk right after splitting buddy
 *   - tree of groups sorted by number of free blocks
 *   - error handling
 */

/*
 * An allocation request involves a request for multiple blocks near to
 * the specified goal block.
 *
 * During the initialization phase of the allocator we decide to use
 * group preallocation or inode preallocation depending on the size of
 * the file. The size of the file could be the resulting file size we
 * would have after allocation, or the current file size, whichever is
 * larger. If the size is less than sbi->s_mb_stream_request we select
 * group preallocation. The default value of s_mb_stream_request is 16
 * blocks. This can also be tuned via
 * /sys/fs/ext4/<partition>/mb_stream_req. The value is represented in
 * terms of number of blocks.
 *
 * The main motivation for having small files use group preallocation is to
 * ensure that we have small files close together on the disk.
 *
 * In the first stage the allocator looks at the inode prealloc list,
 * ext4_inode_info->i_prealloc_list, which contains a list of prealloc
 * spaces for this particular inode. The inode prealloc space is
 * represented as:
 *
 * pa_lstart -> the logical start block for this prealloc space
 * pa_pstart -> the physical start block for this prealloc space
 * pa_len    -> length for this prealloc space (in clusters)
 * pa_free   -> free space available in this prealloc space (in clusters)
 *
 * The inode preallocation space is used looking at the _logical_ start
 * block. Only if the logical file block falls within the range of a
 * prealloc space do we consume that particular prealloc space. This makes
 * sure that we have contiguous physical blocks representing the file blocks.
 *
 * The important thing to be noted in the case of inode prealloc space is
 * that we don't modify the values associated with the inode prealloc space
 * except pa_free.
 *
 * If we are not able to find blocks in the inode prealloc space and if we
 * have the group allocation flag set then we look at the locality group
 * prealloc space. These are per-CPU prealloc lists represented as
 *
 * ext4_sb_info.s_locality_groups[smp_processor_id()]
 *
 * The reason for having a per-cpu locality group is to reduce the contention
 * between CPUs. It is possible to get scheduled at this point.
 *
 * The locality group prealloc space is used looking at whether we have
 * enough free space (pa_free) within the prealloc space.
 *
 * If we can't allocate blocks via inode prealloc and/or locality group
 * prealloc then we look at the buddy cache. The buddy cache is represented
 * by ext4_sb_info.s_buddy_cache (struct inode) whose file offset gets
 * mapped to the buddy and bitmap information regarding different
 * groups. The buddy information is attached to the buddy cache inode so
 * that we can access it through the page cache. The information regarding
 * each group is loaded via ext4_mb_load_buddy.  The information comprises
 * the block bitmap and buddy information, stored in the inode as:
 *
 *  {                        page                        }
 *  [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]...
 *
 *
 * one block each for bitmap and buddy information.  So for each group we
 * take up 2 blocks. A page can contain blocks_per_page (PAGE_SIZE /
 * blocksize) blocks.  So it can have information regarding
 * groups_per_page, which is blocks_per_page/2.
 *
 * The buddy cache inode is not stored on disk. The inode is thrown
 * away when the filesystem is unmounted.
 *
 * We look for count number of blocks in the buddy cache. If we were able
 * to locate that many free blocks we return with additional information
 * regarding the rest of the contiguous physical blocks available.
 *
 * Before allocating blocks via the buddy cache we normalize the request
 * blocks. This ensures we ask for more blocks than we need. The extra
 * blocks that we get after allocation are added to the respective prealloc
 * list. In the case of inode preallocation we follow a list of heuristics
 * based on file size. This can be found in ext4_mb_normalize_request. If
 * we are doing a group prealloc we try to normalize the request to
 * sbi->s_mb_group_prealloc.  The default value of s_mb_group_prealloc is
 * dependent on the cluster size; for non-bigalloc file systems, it is
 * 512 blocks. This can be tuned via
 * /sys/fs/ext4/<partition>/mb_group_prealloc. The value is represented in
 * terms of number of blocks. If we have mounted the file system with the
 * -O stripe=<value> option the group prealloc request is normalized to the
 * smallest multiple of the stripe value (sbi->s_stripe) which is
 * greater than the default mb_group_prealloc.
 *
 * If the "mb_optimize_scan" mount option is set, we maintain in-memory
 * group info structures in two data structures:
 *
 * 1) Array of largest free order lists (sbi->s_mb_largest_free_orders)
 *
 *    Locking: sbi->s_mb_largest_free_orders_locks (array of rw locks)
 *
 *    This is an array of lists where the index in the array represents the
 *    largest free order in the buddy bitmap of the participating group infos
 *    of that list. So, there are exactly MB_NUM_ORDERS(sb) (which means the
 *    total number of buddy bitmap orders possible) lists. Group-infos are
 *    placed in the appropriate lists.
 *
 * 2) Average fragment size lists (sbi->s_mb_avg_fragment_size)
 *
 *    Locking: sbi->s_mb_avg_fragment_size_locks (array of rw locks)
 *
 *    This is an array of lists where in the i-th list there are groups with
 *    average fragment size >= 2^i and < 2^(i+1). The average fragment size
 *    is computed as ext4_group_info->bb_free / ext4_group_info->bb_fragments.
 *    Note that we don't bother with a special list for completely empty
 *    groups, so we only have MB_NUM_ORDERS(sb) lists.
 *
 * When the "mb_optimize_scan" mount option is set, mballoc consults the above
 * data structures to decide the order in which groups are to be traversed
 * for fulfilling an allocation request.
 *
 * At CR = 0, we look for groups which have largest_free_order >= the order
 * of the request. We directly look at the largest free order list in the
 * data structure (1) above where largest_free_order = order of the request.
 * If that list is empty, we look at the remaining lists in increasing order
 * of largest_free_order. This allows us to perform the CR = 0 lookup in
 * O(1) time.
 *
 * At CR = 1, we only consider groups where average fragment size > request
 * size. So, we look up a group which has average fragment size just above
 * or equal to the request size using our average fragment size group lists
 * (data structure 2) in O(1) time.
 *
 * If the "mb_optimize_scan" mount option is not set, mballoc traverses
 * groups in linear order, which requires O(N) search time for each CR 0
 * and CR 1 phase.
 *
 * The regular allocator (using the buddy cache) supports a few tunables.
 *
 * /sys/fs/ext4/<partition>/mb_min_to_scan
 * /sys/fs/ext4/<partition>/mb_max_to_scan
 * /sys/fs/ext4/<partition>/mb_order2_req
 * /sys/fs/ext4/<partition>/mb_linear_limit
 *
 * The regular allocator uses the buddy scan only if the request len is a
 * power of 2 blocks and the order of allocation is >= sbi->s_mb_order2_reqs.
 * The value of s_mb_order2_reqs can be tuned via
 * /sys/fs/ext4/<partition>/mb_order2_req.  If the request len is equal to
 * the stripe size (sbi->s_stripe), we try to search for contiguous blocks
 * in stripe size units. This should result in better allocation on RAID
 * setups. If not, we search in the specific group using the bitmap for
 * best extents. The tunables min_to_scan and max_to_scan control the
 * behaviour here.  min_to_scan indicates how long mballoc __must__ look
 * for a best extent and max_to_scan indicates how long mballoc __can__
 * look for a best extent in the found extents. Searching for the blocks
 * starts with the group specified as the goal value in the allocation
 * context via ac_g_ex. Each group is first checked based on the criteria
 * of whether it can be used for allocation. ext4_mb_good_group explains
 * how the groups are checked.
 *
 * When "mb_optimize_scan" is turned on, as mentioned above, the groups may
 * not get traversed linearly. That may result in subsequent allocations not
 * being close to each other. And so, the underlying device may get filled
 * up in a non-linear fashion. While that may not matter on non-rotational
 * devices, for rotational devices that may result in higher seek times.
 * "mb_linear_limit" tells mballoc how many groups it should search
 * linearly before consulting the above data structures for more efficient
 * lookups. For non-rotational devices, this value defaults to 0 and for
 * rotational devices this is set to MB_DEFAULT_LINEAR_LIMIT.
 *
 * Both preallocation spaces are populated as described above. So for the
 * first request we will hit the buddy cache, which will result in this
 * prealloc space getting filled. The prealloc space is then later used
 * for subsequent requests.
 */
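
/*
 * For illustration, a minimal sketch of the size-based policy described
 * above.  This condenses what ext4_mb_group_or_file() decides; the flag
 * names come from ext4.h, everything else is simplified pseudo-C, not the
 * actual code path:
 *
 *	size = max(size after this allocation, current i_size) in blocks;
 *	if (size > sbi->s_mb_stream_request)
 *		ac->ac_flags |= EXT4_MB_STREAM_ALLOC;	  // large file:
 *							  // inode preallocation
 *	else
 *		ac->ac_flags |= EXT4_MB_HINT_GROUP_ALLOC; // small file: per-CPU
 *							  // group preallocation
 */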

/*
 * mballoc operates on the following data:
 *  - on-disk bitmap
 *  - in-core buddy (actually includes buddy and bitmap)
 *  - preallocation descriptors (PAs)
 *
 * there are two types of preallocations:
 *  - inode
 *    assigned to a specific inode and can be used for this inode only.
 *    it describes part of the inode's space preallocated to specific
 *    physical blocks. any block from that preallocation can be used
 *    independently. the descriptor just tracks the number of blocks left
 *    unused. so, before taking some block from the descriptor, one must
 *    make sure the corresponding logical block isn't allocated yet. this
 *    also means that freeing any block within the descriptor's range
 *    must discard all preallocated blocks.
 *  - locality group
 *    assigned to a specific locality group which does not translate to a
 *    permanent set of inodes: an inode can join and leave a group. space
 *    from this type of preallocation can be used for any inode. thus
 *    it's consumed from the beginning to the end.
 *
 * the relation between them can be expressed as:
 *    in-core buddy = on-disk bitmap + preallocation descriptors
 *
 * this means the blocks mballoc considers used are:
 *  - allocated blocks (persistent)
 *  - preallocated blocks (non-persistent)
 *
 * consistency in the mballoc world means that at any time a block is either
 * free or used in ALL structures. notice: "any time" should not be read
 * literally -- time is discrete and delimited by locks.
 *
 *  to keep it simple, we don't use block numbers, instead we count the number
 *  of blocks: how many blocks are marked used/free in the on-disk bitmap,
 *  buddy and PA.
 *
 * all operations can be expressed as:
 *  - init buddy:                       buddy = on-disk + PAs
 *  - new PA:                           buddy += N; PA = N
 *  - use inode PA:                     on-disk += N; PA -= N
 *  - discard inode PA                  buddy -= on-disk - PA; PA = 0
 *  - use locality group PA             on-disk += N; PA -= N
 *  - discard locality group PA         buddy -= PA; PA = 0
 *  note: 'buddy -= on-disk - PA' is used to show that the on-disk bitmap
 *        is used in the real operation because we can't know the actual
 *        used bits from the PA, only from the on-disk bitmap
 *
 * if we follow this strict logic, then all operations above should be atomic.
 * given some of them can block, we'd have to use something like semaphores,
 * killing performance on high-end SMP hardware. let's try to relax it using
 * the following knowledge:
 *  1) if the buddy is referenced, it's already initialized
 *  2) while a block is used in the buddy and the buddy is referenced,
 *     nobody can re-allocate that block
 *  3) we work on bitmaps and '+' actually means 'set bits'. if on-disk has
 *     a bit set and a PA claims the same block, it's OK. IOW, one can set a
 *     bit in the on-disk bitmap if the buddy has the same bit set and/or a
 *     PA covers the corresponding block
 *
 * so, now we're building a concurrency table:
 *  - init buddy vs.
 *    - new PA
 *      blocks for the PA are allocated in the buddy, the buddy must be
 *      referenced until the PA is linked to the allocation group to avoid
 *      concurrent buddy init
 *    - use inode PA
 *      we need to make sure that either the on-disk bitmap or the PA has
 *      uptodate data. given (3) we care that the PA-=N operation doesn't
 *      interfere with init
 *    - discard inode PA
 *      the simplest way would be to have the buddy initialized by the discard
 *    - use locality group PA
 *      again PA-=N must be serialized with init
 *    - discard locality group PA
 *      the simplest way would be to have the buddy initialized by the discard
 *  - new PA vs.
 *    - use inode PA
 *      i_data_sem serializes them
 *    - discard inode PA
 *      the discard process must wait until the PA isn't used by another
 *      process
 *    - use locality group PA
 *      some mutex should serialize them
 *    - discard locality group PA
 *      the discard process must wait until the PA isn't used by another
 *      process
 *  - use inode PA
 *    - use inode PA
 *      i_data_sem or another mutex should serialize them
 *    - discard inode PA
 *      the discard process must wait until the PA isn't used by another
 *      process
 *    - use locality group PA
 *      nothing wrong here -- they're different PAs covering different blocks
 *    - discard locality group PA
 *      the discard process must wait until the PA isn't used by another
 *      process
 *
 * now we're ready to draw a few consequences:
 *  - while a PA is referenced, no discard of it is possible
 *  - a PA is referenced until its blocks are marked in the on-disk bitmap
 *  - a PA changes only after the on-disk bitmap does
 *  - discard must not compete with init. either init is done before
 *    any discard or they're serialized somehow
 *  - buddy init as the sum of the on-disk bitmap and PAs is done atomically
 *
 * a special case is when we've used a PA down to emptiness. no need to
 * modify the buddy in this case, but we should care about concurrent init
 *
 */
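
/*
 * A worked example of the identities above (illustrative numbers only):
 * suppose a group has 10 clusters allocated on disk and a new inode PA of
 * 4 clusters is created.  Then:
 *
 *	init/new PA:	on-disk = 10, PA = 4, buddy = 10 + 4 = 14 used
 *	use PA (N=3):	on-disk = 13, PA = 1, buddy still 14 -- consistent
 *	discard PA:	buddy -= 1 (the unused remainder), PA = 0, buddy = 13
 *
 * at every step a cluster is either free or used in ALL structures.
 */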

/*
 * Logic in a few words:
 *
 *  - allocation:
 *    load group
 *    find blocks
 *    mark bits in on-disk bitmap
 *    release group
 *
 *  - use preallocation:
 *    find proper PA (per-inode or group)
 *    load group
 *    mark bits in on-disk bitmap
 *    release group
 *    release PA
 *
 *  - free:
 *    load group
 *    mark bits in on-disk bitmap
 *    release group
 *
 *  - discard preallocations in group:
 *    mark PAs deleted
 *    move them onto local list
 *    load on-disk bitmap
 *    load group
 *    remove PA from object (inode or locality group)
 *    mark free blocks in-core
 *
 *  - discard inode's preallocations:
 */

/*
 * Locking rules
 *
 * Locks:
 *  - bitlock on a group        (group)
 *  - object (inode/locality)   (object)
 *  - per-pa lock               (pa)
 *  - cr0 lists lock            (cr0)
 *  - cr1 tree lock             (cr1)
 *
 * Paths:
 *  - new pa
 *    object
 *    group
 *
 *  - find and use pa:
 *    pa
 *
 *  - release consumed pa:
 *    pa
 *    group
 *    object
 *
 *  - generate in-core bitmap:
 *    group
 *        pa
 *
 *  - discard all for given object (inode, locality group):
 *    object
 *        pa
 *    group
 *
 *  - discard all for given group:
 *    group
 *        pa
 *    group
 *        object
 *
 *  - allocation path (ext4_mb_regular_allocator)
 *    group
 *    cr0/cr1
 */
static struct kmem_cache *ext4_pspace_cachep;
static struct kmem_cache *ext4_ac_cachep;
static struct kmem_cache *ext4_free_data_cachep;

/* We create slab caches for groupinfo data structures based on the
 * superblock block size.  There will be one per mounted filesystem for
 * each unique s_blocksize_bits */
#define NR_GRPINFO_CACHES 8
static struct kmem_cache *ext4_groupinfo_caches[NR_GRPINFO_CACHES];

static const char * const ext4_groupinfo_slab_names[NR_GRPINFO_CACHES] = {
        "ext4_groupinfo_1k", "ext4_groupinfo_2k", "ext4_groupinfo_4k",
        "ext4_groupinfo_8k", "ext4_groupinfo_16k", "ext4_groupinfo_32k",
        "ext4_groupinfo_64k", "ext4_groupinfo_128k"
};

static void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
                                        ext4_group_t group);
static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
                                                ext4_group_t group);
static void ext4_mb_new_preallocation(struct ext4_allocation_context *ac);

static bool ext4_mb_good_group(struct ext4_allocation_context *ac,
                               ext4_group_t group, int cr);

static int ext4_try_to_trim_range(struct super_block *sb,
                struct ext4_buddy *e4b, ext4_grpblk_t start,
                ext4_grpblk_t max, ext4_grpblk_t minblocks);

/*
 * The algorithm using this percpu seq counter goes as follows:
 * 1. We sample the percpu discard_pa_seq counter before trying for block
 *    allocation in ext4_mb_new_blocks().
 * 2. We increment this percpu discard_pa_seq counter when we either allocate
 *    or free these blocks i.e. while marking those blocks as used/free in
 *    mb_mark_used()/mb_free_blocks().
 * 3. We also increment this percpu seq counter when we successfully identify
 *    that the bb_prealloc_list is not empty and hence proceed for discarding
 *    of those PAs inside ext4_mb_discard_group_preallocations().
 *
 * Now to make sure that the regular fast path of block allocation is not
 * affected, as a small optimization we only sample the percpu seq counter
 * on that cpu. Only when the block allocation fails and no freed blocks
 * were found do we sample the percpu seq counter for all cpus, using the
 * function ext4_get_discard_pa_seq_sum() below. This happens after making
 * sure that all the PAs on grp->bb_prealloc_list got freed or if it's empty.
 */
static DEFINE_PER_CPU(u64, discard_pa_seq);
static inline u64 ext4_get_discard_pa_seq_sum(void)
{
        int __cpu;
        u64 __seq = 0;

        for_each_possible_cpu(__cpu)
                __seq += per_cpu(discard_pa_seq, __cpu);
        return __seq;
}
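
/*
 * Usage pattern, as a condensed sketch of what ext4_mb_new_blocks() does
 * with this counter (simplified; the real retry decision lives in
 * ext4_mb_discard_preallocations_should_retry()):
 *
 *	seq = raw_cpu_read(discard_pa_seq);	// step 1: this cpu only
 *	... attempt allocation ...
 *	if (failed && !freed &&
 *	    seq == ext4_get_discard_pa_seq_sum())
 *		return -ENOSPC;			// nothing changed, give up
 *	// otherwise a PA was used/freed/discarded meanwhile: retry
 */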

static inline void *mb_correct_addr_and_bit(int *bit, void *addr)
{
#if BITS_PER_LONG == 64
        *bit += ((unsigned long) addr & 7UL) << 3;
        addr = (void *) ((unsigned long) addr & ~7UL);
#elif BITS_PER_LONG == 32
        *bit += ((unsigned long) addr & 3UL) << 3;
        addr = (void *) ((unsigned long) addr & ~3UL);
#else
#error "how many bits you are?!"
#endif
        return addr;
}
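
/*
 * Example (64-bit): for addr == base + 5 and *bit == 3, the address is
 * rounded down to base while the bit index absorbs the 5 dropped bytes:
 *
 *	*bit += (5UL & 7UL) << 3;	// 3 + 40 = 43
 *	addr  = base;			// now unsigned long aligned
 *
 * i.e. bit 3 of the byte at base + 5 becomes bit 43 of the long at base.
 */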

static inline int mb_test_bit(int bit, void *addr)
{
        /*
         * ext4_test_bit on architectures like powerpc
         * needs an unsigned long aligned address
         */
        addr = mb_correct_addr_and_bit(&bit, addr);
        return ext4_test_bit(bit, addr);
}

static inline void mb_set_bit(int bit, void *addr)
{
        addr = mb_correct_addr_and_bit(&bit, addr);
        ext4_set_bit(bit, addr);
}

static inline void mb_clear_bit(int bit, void *addr)
{
        addr = mb_correct_addr_and_bit(&bit, addr);
        ext4_clear_bit(bit, addr);
}

static inline int mb_test_and_clear_bit(int bit, void *addr)
{
        addr = mb_correct_addr_and_bit(&bit, addr);
        return ext4_test_and_clear_bit(bit, addr);
}

static inline int mb_find_next_zero_bit(void *addr, int max, int start)
{
        int fix = 0, ret, tmpmax;
        addr = mb_correct_addr_and_bit(&fix, addr);
        tmpmax = max + fix;
        start += fix;

        ret = ext4_find_next_zero_bit(addr, tmpmax, start) - fix;
        if (ret > max)
                return max;
        return ret;
}

static inline int mb_find_next_bit(void *addr, int max, int start)
{
        int fix = 0, ret, tmpmax;
        addr = mb_correct_addr_and_bit(&fix, addr);
        tmpmax = max + fix;
        start += fix;

        ret = ext4_find_next_bit(addr, tmpmax, start) - fix;
        if (ret > max)
                return max;
        return ret;
}

static void *mb_find_buddy(struct ext4_buddy *e4b, int order, int *max)
{
        char *bb;

        BUG_ON(e4b->bd_bitmap == e4b->bd_buddy);
        BUG_ON(max == NULL);

        if (order > e4b->bd_blkbits + 1) {
                *max = 0;
                return NULL;
        }

        /* at order 0 we see each particular block */
        if (order == 0) {
                *max = 1 << (e4b->bd_blkbits + 3);
                return e4b->bd_bitmap;
        }

        bb = e4b->bd_buddy + EXT4_SB(e4b->bd_sb)->s_mb_offsets[order];
        *max = EXT4_SB(e4b->bd_sb)->s_mb_maxs[order];

        return bb;
}
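
/*
 * Example: with 4k blocks (bd_blkbits == 12), order 0 is the cluster
 * bitmap itself, 1 << (12 + 3) == 32768 bits.  Higher orders live inside
 * the buddy block at precomputed offsets, each half the size of the
 * previous one (values per ext4_mb_init()):
 *
 *	order 1: s_mb_maxs[1] == 16384 bits at buddy + s_mb_offsets[1]
 *	order 2: s_mb_maxs[2] ==  8192 bits at buddy + s_mb_offsets[2]
 *	...
 */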

#ifdef DOUBLE_CHECK
static void mb_free_blocks_double(struct inode *inode, struct ext4_buddy *e4b,
                           int first, int count)
{
        int i;
        struct super_block *sb = e4b->bd_sb;

        if (unlikely(e4b->bd_info->bb_bitmap == NULL))
                return;
        assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group));
        for (i = 0; i < count; i++) {
                if (!mb_test_bit(first + i, e4b->bd_info->bb_bitmap)) {
                        ext4_fsblk_t blocknr;

                        blocknr = ext4_group_first_block_no(sb, e4b->bd_group);
                        blocknr += EXT4_C2B(EXT4_SB(sb), first + i);
                        ext4_grp_locked_error(sb, e4b->bd_group,
                                              inode ? inode->i_ino : 0,
                                              blocknr,
                                              "freeing block already freed "
                                              "(bit %u)",
                                              first + i);
                        ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group,
                                        EXT4_GROUP_INFO_BBITMAP_CORRUPT);
                }
                mb_clear_bit(first + i, e4b->bd_info->bb_bitmap);
        }
}

static void mb_mark_used_double(struct ext4_buddy *e4b, int first, int count)
{
        int i;

        if (unlikely(e4b->bd_info->bb_bitmap == NULL))
                return;
        assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group));
        for (i = 0; i < count; i++) {
                BUG_ON(mb_test_bit(first + i, e4b->bd_info->bb_bitmap));
                mb_set_bit(first + i, e4b->bd_info->bb_bitmap);
        }
}

static void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap)
{
        if (unlikely(e4b->bd_info->bb_bitmap == NULL))
                return;
        if (memcmp(e4b->bd_info->bb_bitmap, bitmap, e4b->bd_sb->s_blocksize)) {
                unsigned char *b1, *b2;
                int i;
                b1 = (unsigned char *) e4b->bd_info->bb_bitmap;
                b2 = (unsigned char *) bitmap;
                for (i = 0; i < e4b->bd_sb->s_blocksize; i++) {
                        if (b1[i] != b2[i]) {
                                ext4_msg(e4b->bd_sb, KERN_ERR,
                                         "corruption in group %u "
                                         "at byte %u(%u): %x in copy != %x "
                                         "on disk/prealloc",
                                         e4b->bd_group, i, i * 8, b1[i], b2[i]);
                                BUG();
                        }
                }
        }
}

static void mb_group_bb_bitmap_alloc(struct super_block *sb,
                        struct ext4_group_info *grp, ext4_group_t group)
{
        struct buffer_head *bh;

        grp->bb_bitmap = kmalloc(sb->s_blocksize, GFP_NOFS);
        if (!grp->bb_bitmap)
                return;

        bh = ext4_read_block_bitmap(sb, group);
        if (IS_ERR_OR_NULL(bh)) {
                kfree(grp->bb_bitmap);
                grp->bb_bitmap = NULL;
                return;
        }

        memcpy(grp->bb_bitmap, bh->b_data, sb->s_blocksize);
        put_bh(bh);
}

static void mb_group_bb_bitmap_free(struct ext4_group_info *grp)
{
        kfree(grp->bb_bitmap);
}

#else
static inline void mb_free_blocks_double(struct inode *inode,
                                struct ext4_buddy *e4b, int first, int count)
{
        return;
}
static inline void mb_mark_used_double(struct ext4_buddy *e4b,
                                                int first, int count)
{
        return;
}
static inline void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap)
{
        return;
}

static inline void mb_group_bb_bitmap_alloc(struct super_block *sb,
                        struct ext4_group_info *grp, ext4_group_t group)
{
        return;
}

static inline void mb_group_bb_bitmap_free(struct ext4_group_info *grp)
{
        return;
}
#endif

#ifdef AGGRESSIVE_CHECK

#define MB_CHECK_ASSERT(assert)                                         \
do {                                                                    \
        if (!(assert)) {                                                \
                printk(KERN_EMERG                                       \
                        "Assertion failure in %s() at %s:%d: \"%s\"\n", \
                        function, file, line, # assert);                \
                BUG();                                                  \
        }                                                               \
} while (0)

static int __mb_check_buddy(struct ext4_buddy *e4b, char *file,
                                const char *function, int line)
{
        struct super_block *sb = e4b->bd_sb;
        int order = e4b->bd_blkbits + 1;
        int max;
        int max2;
        int i;
        int j;
        int k;
        int count;
        struct ext4_group_info *grp;
        int fragments = 0;
        int fstart;
        struct list_head *cur;
        void *buddy;
        void *buddy2;

        if (e4b->bd_info->bb_check_counter++ % 10)
                return 0;

        while (order > 1) {
                buddy = mb_find_buddy(e4b, order, &max);
                MB_CHECK_ASSERT(buddy);
                buddy2 = mb_find_buddy(e4b, order - 1, &max2);
                MB_CHECK_ASSERT(buddy2);
                MB_CHECK_ASSERT(buddy != buddy2);
                MB_CHECK_ASSERT(max * 2 == max2);

                count = 0;
                for (i = 0; i < max; i++) {

                        if (mb_test_bit(i, buddy)) {
                                /* only single bit in buddy2 may be 0 */
                                if (!mb_test_bit(i << 1, buddy2)) {
                                        MB_CHECK_ASSERT(
                                                mb_test_bit((i<<1)+1, buddy2));
                                }
                                continue;
                        }

                        /* both bits in buddy2 must be 1 */
                        MB_CHECK_ASSERT(mb_test_bit(i << 1, buddy2));
                        MB_CHECK_ASSERT(mb_test_bit((i << 1) + 1, buddy2));

                        for (j = 0; j < (1 << order); j++) {
                                k = (i * (1 << order)) + j;
                                MB_CHECK_ASSERT(
                                        !mb_test_bit(k, e4b->bd_bitmap));
                        }
                        count++;
                }
                MB_CHECK_ASSERT(e4b->bd_info->bb_counters[order] == count);
                order--;
        }

        fstart = -1;
        buddy = mb_find_buddy(e4b, 0, &max);
        for (i = 0; i < max; i++) {
                if (!mb_test_bit(i, buddy)) {
                        MB_CHECK_ASSERT(i >= e4b->bd_info->bb_first_free);
                        if (fstart == -1) {
                                fragments++;
                                fstart = i;
                        }
                        continue;
                }
                fstart = -1;
                /* check used bits only */
                for (j = 0; j < e4b->bd_blkbits + 1; j++) {
                        buddy2 = mb_find_buddy(e4b, j, &max2);
                        k = i >> j;
                        MB_CHECK_ASSERT(k < max2);
                        MB_CHECK_ASSERT(mb_test_bit(k, buddy2));
                }
        }
        MB_CHECK_ASSERT(!EXT4_MB_GRP_NEED_INIT(e4b->bd_info));
        MB_CHECK_ASSERT(e4b->bd_info->bb_fragments == fragments);

        grp = ext4_get_group_info(sb, e4b->bd_group);
        list_for_each(cur, &grp->bb_prealloc_list) {
                ext4_group_t groupnr;
                struct ext4_prealloc_space *pa;
                pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
                ext4_get_group_no_and_offset(sb, pa->pa_pstart, &groupnr, &k);
                MB_CHECK_ASSERT(groupnr == e4b->bd_group);
                for (i = 0; i < pa->pa_len; i++)
                        MB_CHECK_ASSERT(mb_test_bit(k + i, buddy));
        }
        return 0;
}
#undef MB_CHECK_ASSERT
#define mb_check_buddy(e4b) __mb_check_buddy(e4b,       \
                                        __FILE__, __func__, __LINE__)
#else
#define mb_check_buddy(e4b)
#endif

/*
 * Divide the blocks starting at @first with length @len into
 * smaller chunks with power-of-2 block counts.
 * Clear the bits in the bitmap which the blocks of the chunk(s) cover,
 * then increase bb_counters[] for the corresponding chunk size.
 */
static void ext4_mb_mark_free_simple(struct super_block *sb,
                                void *buddy, ext4_grpblk_t first, ext4_grpblk_t len,
                                        struct ext4_group_info *grp)
{
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        ext4_grpblk_t min;
        ext4_grpblk_t max;
        ext4_grpblk_t chunk;
        unsigned int border;

        BUG_ON(len > EXT4_CLUSTERS_PER_GROUP(sb));

        border = 2 << sb->s_blocksize_bits;

        while (len > 0) {
                /* find how many blocks can be covered since this position */
                max = ffs(first | border) - 1;

                /* find how many blocks of power 2 we need to mark */
                min = fls(len) - 1;

                if (max < min)
                        min = max;
                chunk = 1 << min;

                /* mark multiblock chunks only */
                grp->bb_counters[min]++;
                if (min > 0)
                        mb_clear_bit(first >> min,
                                     buddy + sbi->s_mb_offsets[min]);

                len -= chunk;
                first += chunk;
        }
}
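
/*
 * Example: first == 5, len == 11 (clusters 5..15) is split into naturally
 * aligned power-of-2 chunks:
 *
 *	{5}	order 0: bb_counters[0]++ (no buddy bit to clear)
 *	{6,7}	order 1: bb_counters[1]++, clear bit 6 >> 1 == 3 at order 1
 *	{8..15}	order 3: bb_counters[3]++, clear bit 8 >> 3 == 1 at order 3
 */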

static int mb_avg_fragment_size_order(struct super_block *sb, ext4_grpblk_t len)
{
        int order;

        /*
         * We don't bother with special lists for groups whose free extents
         * are only 1 block long or for completely empty groups.
         */
        order = fls(len) - 2;
        if (order < 0)
                return 0;
        if (order == MB_NUM_ORDERS(sb))
                order--;
        return order;
}
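
/*
 * Example: len == 1 gives fls(1) - 2 == -1, clamped to list 0; len 2..3
 * maps to list 0, len 4..7 to list 1, len 8..15 to list 2, and so on,
 * with the topmost order folded into list MB_NUM_ORDERS(sb) - 1.
 */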

/* Move group to appropriate avg_fragment_size list */
static void
mb_update_avg_fragment_size(struct super_block *sb, struct ext4_group_info *grp)
{
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        int new_order;

        if (!test_opt2(sb, MB_OPTIMIZE_SCAN) || grp->bb_free == 0)
                return;

        new_order = mb_avg_fragment_size_order(sb,
                                        grp->bb_free / grp->bb_fragments);
        if (new_order == grp->bb_avg_fragment_size_order)
                return;

        if (grp->bb_avg_fragment_size_order != -1) {
                write_lock(&sbi->s_mb_avg_fragment_size_locks[
                                        grp->bb_avg_fragment_size_order]);
                list_del(&grp->bb_avg_fragment_size_node);
                write_unlock(&sbi->s_mb_avg_fragment_size_locks[
                                        grp->bb_avg_fragment_size_order]);
        }
        grp->bb_avg_fragment_size_order = new_order;
        write_lock(&sbi->s_mb_avg_fragment_size_locks[
                                        grp->bb_avg_fragment_size_order]);
        list_add_tail(&grp->bb_avg_fragment_size_node,
                &sbi->s_mb_avg_fragment_size[grp->bb_avg_fragment_size_order]);
        write_unlock(&sbi->s_mb_avg_fragment_size_locks[
                                        grp->bb_avg_fragment_size_order]);
}

/*
 * Choose next group by traversing largest_free_order lists. Updates *new_cr if
 * cr level needs an update.
 */
static void ext4_mb_choose_next_group_cr0(struct ext4_allocation_context *ac,
                        int *new_cr, ext4_group_t *group, ext4_group_t ngroups)
{
        struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
        struct ext4_group_info *iter, *grp;
        int i;

        if (ac->ac_status == AC_STATUS_FOUND)
                return;

        if (unlikely(sbi->s_mb_stats && ac->ac_flags & EXT4_MB_CR0_OPTIMIZED))
                atomic_inc(&sbi->s_bal_cr0_bad_suggestions);

        grp = NULL;
        for (i = ac->ac_2order; i < MB_NUM_ORDERS(ac->ac_sb); i++) {
                if (list_empty(&sbi->s_mb_largest_free_orders[i]))
                        continue;
                read_lock(&sbi->s_mb_largest_free_orders_locks[i]);
                if (list_empty(&sbi->s_mb_largest_free_orders[i])) {
                        read_unlock(&sbi->s_mb_largest_free_orders_locks[i]);
                        continue;
                }
                grp = NULL;
                list_for_each_entry(iter, &sbi->s_mb_largest_free_orders[i],
                                    bb_largest_free_order_node) {
                        if (sbi->s_mb_stats)
                                atomic64_inc(&sbi->s_bal_cX_groups_considered[0]);
                        if (likely(ext4_mb_good_group(ac, iter->bb_group, 0))) {
                                grp = iter;
                                break;
                        }
                }
                read_unlock(&sbi->s_mb_largest_free_orders_locks[i]);
                if (grp)
                        break;
        }

        if (!grp) {
                /* Increment cr and search again */
                *new_cr = 1;
        } else {
                *group = grp->bb_group;
                ac->ac_flags |= EXT4_MB_CR0_OPTIMIZED;
        }
}

/*
 * Choose next group by traversing average fragment size list of suitable
 * order. Updates *new_cr if cr level needs an update.
 */
static void ext4_mb_choose_next_group_cr1(struct ext4_allocation_context *ac,
                int *new_cr, ext4_group_t *group, ext4_group_t ngroups)
{
        struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
        struct ext4_group_info *grp = NULL, *iter;
        int i;

        if (unlikely(ac->ac_flags & EXT4_MB_CR1_OPTIMIZED)) {
                if (sbi->s_mb_stats)
                        atomic_inc(&sbi->s_bal_cr1_bad_suggestions);
        }

        for (i = mb_avg_fragment_size_order(ac->ac_sb, ac->ac_g_ex.fe_len);
             i < MB_NUM_ORDERS(ac->ac_sb); i++) {
                if (list_empty(&sbi->s_mb_avg_fragment_size[i]))
                        continue;
                read_lock(&sbi->s_mb_avg_fragment_size_locks[i]);
                if (list_empty(&sbi->s_mb_avg_fragment_size[i])) {
                        read_unlock(&sbi->s_mb_avg_fragment_size_locks[i]);
                        continue;
                }
                list_for_each_entry(iter, &sbi->s_mb_avg_fragment_size[i],
                                    bb_avg_fragment_size_node) {
                        if (sbi->s_mb_stats)
                                atomic64_inc(&sbi->s_bal_cX_groups_considered[1]);
                        if (likely(ext4_mb_good_group(ac, iter->bb_group, 1))) {
                                grp = iter;
                                break;
                        }
                }
                read_unlock(&sbi->s_mb_avg_fragment_size_locks[i]);
                if (grp)
                        break;
        }

        if (grp) {
                *group = grp->bb_group;
                ac->ac_flags |= EXT4_MB_CR1_OPTIMIZED;
        } else {
                *new_cr = 2;
        }
}

static inline int should_optimize_scan(struct ext4_allocation_context *ac)
{
        if (unlikely(!test_opt2(ac->ac_sb, MB_OPTIMIZE_SCAN)))
                return 0;
        if (ac->ac_criteria >= 2)
                return 0;
        if (!ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS))
                return 0;
        return 1;
}

/*
 * Return next linear group for allocation. If linear traversal should not be
 * performed, this function just returns the same group
 */
static int
next_linear_group(struct ext4_allocation_context *ac, int group, int ngroups)
{
        if (!should_optimize_scan(ac))
                goto inc_and_return;

        if (ac->ac_groups_linear_remaining) {
                ac->ac_groups_linear_remaining--;
                goto inc_and_return;
        }

        return group;
inc_and_return:
        /*
         * Artificially restricted ngroups for non-extent
         * files makes group > ngroups possible on first loop.
         */
        return group + 1 >= ngroups ? 0 : group + 1;
}

/*
 * ext4_mb_choose_next_group: choose next group for allocation.
 *
 * @ac        Allocation Context
 * @new_cr    This is an output parameter. If there is no good group
 *            available at the current CR level, this field is updated to
 *            indicate the new cr level that should be used.
 * @group     This is an input / output parameter. As an input it indicates the
 *            next group that the allocator intends to use for allocation. As
 *            output, this field indicates the next group that should be used as
 *            determined by the optimization functions.
 * @ngroups   Total number of groups
 */
static void ext4_mb_choose_next_group(struct ext4_allocation_context *ac,
                int *new_cr, ext4_group_t *group, ext4_group_t ngroups)
{
        *new_cr = ac->ac_criteria;

        if (!should_optimize_scan(ac) || ac->ac_groups_linear_remaining) {
                *group = next_linear_group(ac, *group, ngroups);
                return;
        }

        if (*new_cr == 0) {
                ext4_mb_choose_next_group_cr0(ac, new_cr, group, ngroups);
        } else if (*new_cr == 1) {
                ext4_mb_choose_next_group_cr1(ac, new_cr, group, ngroups);
        } else {
                /*
                 * TODO: For CR=2, we can arrange groups in an rb tree sorted by
                 * bb_free. But until that happens, we should never come here.
                 */
                WARN_ON(1);
        }
}
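
/*
 * For illustration, a condensed sketch of how ext4_mb_regular_allocator()
 * drives this (error handling, locking and prefetch logic omitted):
 *
 *	for (cr = 0; cr < 4 && ac->ac_status == AC_STATUS_CONTINUE; cr++) {
 *		ac->ac_criteria = cr;
 *		group = ac->ac_g_ex.fe_group;
 *		for (i = 0; i < ngroups; i++) {
 *			ext4_mb_choose_next_group(ac, &new_cr, &group, ngroups);
 *			if (new_cr != cr)
 *				break;		// no good group at this cr
 *			... load buddy and scan 'group' ...
 *		}
 *	}
 */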

/*
 * Cache the order of the largest free extent we have available in this block
 * group.
 */
static void
mb_set_largest_free_order(struct super_block *sb, struct ext4_group_info *grp)
{
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        int i;

        for (i = MB_NUM_ORDERS(sb) - 1; i >= 0; i--)
                if (grp->bb_counters[i] > 0)
                        break;
        /* No need to move between order lists? */
        if (!test_opt2(sb, MB_OPTIMIZE_SCAN) ||
            i == grp->bb_largest_free_order) {
                grp->bb_largest_free_order = i;
                return;
        }

        if (grp->bb_largest_free_order >= 0) {
                write_lock(&sbi->s_mb_largest_free_orders_locks[
                                              grp->bb_largest_free_order]);
                list_del_init(&grp->bb_largest_free_order_node);
                write_unlock(&sbi->s_mb_largest_free_orders_locks[
                                              grp->bb_largest_free_order]);
        }
        grp->bb_largest_free_order = i;
        if (grp->bb_largest_free_order >= 0 && grp->bb_free) {
                write_lock(&sbi->s_mb_largest_free_orders_locks[
                                              grp->bb_largest_free_order]);
                list_add_tail(&grp->bb_largest_free_order_node,
                      &sbi->s_mb_largest_free_orders[grp->bb_largest_free_order]);
                write_unlock(&sbi->s_mb_largest_free_orders_locks[
                                              grp->bb_largest_free_order]);
        }
}

static noinline_for_stack
void ext4_mb_generate_buddy(struct super_block *sb,
                                void *buddy, void *bitmap, ext4_group_t group)
{
        struct ext4_group_info *grp = ext4_get_group_info(sb, group);
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb);
        ext4_grpblk_t i = 0;
        ext4_grpblk_t first;
        ext4_grpblk_t len;
        unsigned free = 0;
        unsigned fragments = 0;
        unsigned long long period = get_cycles();

        /* initialize buddy from the bitmap which is the aggregation
         * of the on-disk bitmap and preallocations */
        i = mb_find_next_zero_bit(bitmap, max, 0);
        grp->bb_first_free = i;
        while (i < max) {
                fragments++;
                first = i;
                i = mb_find_next_bit(bitmap, max, i);
                len = i - first;
                free += len;
                if (len > 1)
                        ext4_mb_mark_free_simple(sb, buddy, first, len, grp);
                else
                        grp->bb_counters[0]++;
                if (i < max)
                        i = mb_find_next_zero_bit(bitmap, max, i);
        }
        grp->bb_fragments = fragments;

        if (free != grp->bb_free) {
                ext4_grp_locked_error(sb, group, 0, 0,
                                      "block bitmap and bg descriptor "
                                      "inconsistent: %u vs %u free clusters",
                                      free, grp->bb_free);
                /*
                 * If we intend to continue, we consider the group descriptor
                 * corrupt and update bb_free using the bitmap value
                 */
                grp->bb_free = free;
                ext4_mark_group_bitmap_corrupted(sb, group,
                                        EXT4_GROUP_INFO_BBITMAP_CORRUPT);
        }
        mb_set_largest_free_order(sb, grp);
        mb_update_avg_fragment_size(sb, grp);

        clear_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &(grp->bb_state));

        period = get_cycles() - period;
        atomic_inc(&sbi->s_mb_buddies_generated);
        atomic64_add(period, &sbi->s_mb_generation_time);
}
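
/*
 * Example: for a 16-cluster group whose combined bitmap is
 *
 *	1110 0001 0000 0000		(1 == in use)
 *
 * the scan above finds bb_first_free == 3, bb_fragments == 2 and
 * bb_free == 12, and ext4_mb_mark_free_simple() accounts the two runs as
 *
 *	run {3..6}  = {3} + {4,5} + {6}: bb_counters[0] += 2, bb_counters[1]++
 *	run {8..15} = one order-3 chunk: bb_counters[3]++
 */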

/* The buddy information is attached to the buddy cache inode
 * for convenience. The information regarding each group
 * is loaded via ext4_mb_load_buddy. The information comprises
 * the block bitmap and buddy information, stored in the inode as
 *
 * {                        page                        }
 * [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]...
 *
 *
 * one block each for bitmap and buddy information.
 * So for each group we take up 2 blocks. A page can
 * contain blocks_per_page (PAGE_SIZE / blocksize) blocks.
 * So it can have information regarding groups_per_page, which
 * is blocks_per_page/2
 *
 * Locking note:  This routine takes the block group lock of all groups
 * for this page; do not hold this lock when calling this routine!
 */

static int ext4_mb_init_cache(struct page *page, char *incore, gfp_t gfp)
{
        ext4_group_t ngroups;
        int blocksize;
        int blocks_per_page;
        int groups_per_page;
        int err = 0;
        int i;
        ext4_group_t first_group, group;
        int first_block;
        struct super_block *sb;
        struct buffer_head *bhs;
        struct buffer_head **bh = NULL;
        struct inode *inode;
        char *data;
        char *bitmap;
        struct ext4_group_info *grinfo;

        inode = page->mapping->host;
        sb = inode->i_sb;
        ngroups = ext4_get_groups_count(sb);
        blocksize = i_blocksize(inode);
        blocks_per_page = PAGE_SIZE / blocksize;

        mb_debug(sb, "init page %lu\n", page->index);

        groups_per_page = blocks_per_page >> 1;
        if (groups_per_page == 0)
                groups_per_page = 1;

        /* allocate buffer_heads to read bitmaps */
        if (groups_per_page > 1) {
                i = sizeof(struct buffer_head *) * groups_per_page;
                bh = kzalloc(i, gfp);
                if (bh == NULL) {
                        err = -ENOMEM;
                        goto out;
                }
        } else
                bh = &bhs;

        first_group = page->index * blocks_per_page / 2;

        /* read all groups the page covers into the cache */
        for (i = 0, group = first_group; i < groups_per_page; i++, group++) {
                if (group >= ngroups)
                        break;

                grinfo = ext4_get_group_info(sb, group);
                /*
                 * If page is uptodate then we came here after online resize
                 * which added some new uninitialized group info structs, so
                 * we must skip all initialized uptodate buddies on the page,
                 * which may be currently in use by an allocating task.
                 */
                if (PageUptodate(page) && !EXT4_MB_GRP_NEED_INIT(grinfo)) {
                        bh[i] = NULL;
                        continue;
                }
                bh[i] = ext4_read_block_bitmap_nowait(sb, group, false);
                if (IS_ERR(bh[i])) {
                        err = PTR_ERR(bh[i]);
                        bh[i] = NULL;
                        goto out;
                }
                mb_debug(sb, "read bitmap for group %u\n", group);
        }

        /* wait for I/O completion */
        for (i = 0, group = first_group; i < groups_per_page; i++, group++) {
                int err2;

                if (!bh[i])
                        continue;
                err2 = ext4_wait_block_bitmap(sb, group, bh[i]);
                if (!err)
                        err = err2;
        }

        first_block = page->index * blocks_per_page;
        for (i = 0; i < blocks_per_page; i++) {
                group = (first_block + i) >> 1;
                if (group >= ngroups)
                        break;

                if (!bh[group - first_group])
                        /* skip initialized uptodate buddy */
                        continue;

                if (!buffer_verified(bh[group - first_group]))
                        /* Skip faulty bitmaps */
                        continue;
                err = 0;

                /*
                 * data carries the information regarding this
                 * particular group in the format specified
                 * above
                 *
                 */
                data = page_address(page) + (i * blocksize);
                bitmap = bh[group - first_group]->b_data;

                /*
                 * We place the buddy block and bitmap block
                 * close together
                 */
                if ((first_block + i) & 1) {
                        /* this is block of buddy */
                        BUG_ON(incore == NULL);
                        mb_debug(sb, "put buddy for group %u in page %lu/%x\n",
                                group, page->index, i * blocksize);
                        trace_ext4_mb_buddy_bitmap_load(sb, group);
                        grinfo = ext4_get_group_info(sb, group);
                        grinfo->bb_fragments = 0;
                        memset(grinfo->bb_counters, 0,
                               sizeof(*grinfo->bb_counters) *
                               (MB_NUM_ORDERS(sb)));
                        /*
                         * incore got set to the group block bitmap below
                         */
                        ext4_lock_group(sb, group);
                        /* init the buddy */
                        memset(data, 0xff, blocksize);
                        ext4_mb_generate_buddy(sb, data, incore, group);
                        ext4_unlock_group(sb, group);
                        incore = NULL;
                } else {
                        /* this is block of bitmap */
                        BUG_ON(incore != NULL);
                        mb_debug(sb, "put bitmap for group %u in page %lu/%x\n",
                                group, page->index, i * blocksize);
                        trace_ext4_mb_bitmap_load(sb, group);

                        /* see comments in ext4_mb_put_pa() */
                        ext4_lock_group(sb, group);
                        memcpy(data, bitmap, blocksize);

                        /* mark all preallocated blks used in in-core bitmap */
                        ext4_mb_generate_from_pa(sb, data, group);
                        ext4_mb_generate_from_freelist(sb, data, group);
                        ext4_unlock_group(sb, group);

                        /* set incore so that the buddy information can be
                         * generated using this
                         */
                        incore = data;
                }
        }
        SetPageUptodate(page);

out:
        if (bh) {
                for (i = 0; i < groups_per_page; i++)
                        brelse(bh[i]);
                if (bh != &bhs)
                        kfree(bh);
        }
        return err;
}

/*
 * Lock the buddy and bitmap pages. This makes sure that other parallel
 * init_group on the same buddy page doesn't happen while holding the buddy
 * page lock.
 * Return the locked buddy and bitmap pages on the e4b struct. If the buddy
 * and bitmap are on the same page, e4b->bd_buddy_page is NULL and the return
 * value is 0.
 */
1304 static int ext4_mb_get_buddy_page_lock(struct super_block *sb,
1305                 ext4_group_t group, struct ext4_buddy *e4b, gfp_t gfp)
1306 {
1307         struct inode *inode = EXT4_SB(sb)->s_buddy_cache;
1308         int block, pnum, poff;
1309         int blocks_per_page;
1310         struct page *page;
1311
1312         e4b->bd_buddy_page = NULL;
1313         e4b->bd_bitmap_page = NULL;
1314
1315         blocks_per_page = PAGE_SIZE / sb->s_blocksize;
1316         /*
1317          * the buddy cache inode stores the block bitmap
1318          * and buddy information in consecutive blocks.
1319          * So for each group we need two blocks.
1320          */
1321         block = group * 2;
1322         pnum = block / blocks_per_page;
1323         poff = block % blocks_per_page;
1324         page = find_or_create_page(inode->i_mapping, pnum, gfp);
1325         if (!page)
1326                 return -ENOMEM;
1327         BUG_ON(page->mapping != inode->i_mapping);
1328         e4b->bd_bitmap_page = page;
1329         e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize);
1330
1331         if (blocks_per_page >= 2) {
1332                 /* buddy and bitmap are on the same page */
1333                 return 0;
1334         }
1335
1336         block++;
1337         pnum = block / blocks_per_page;
1338         page = find_or_create_page(inode->i_mapping, pnum, gfp);
1339         if (!page)
1340                 return -ENOMEM;
1341         BUG_ON(page->mapping != inode->i_mapping);
1342         e4b->bd_buddy_page = page;
1343         return 0;
1344 }
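
/*
 * A worked example of the mapping above, as a sketch assuming 4K pages
 * and a 1K block size (blocks_per_page == 4): group 5 uses buddy-cache
 * blocks 10 (bitmap) and 11 (buddy), so
 *
 *	block = 5 * 2;		-> 10
 *	pnum  = 10 / 4;		-> page 2 holds both blocks
 *	poff  = 10 % 4;		-> bitmap at offset 2, buddy at offset 3
 *
 * Only when the block size equals PAGE_SIZE (blocks_per_page == 1) do
 * the bitmap and the buddy land on different pages, which is why the
 * early return above covers every other geometry.
 */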
1345
1346 static void ext4_mb_put_buddy_page_lock(struct ext4_buddy *e4b)
1347 {
1348         if (e4b->bd_bitmap_page) {
1349                 unlock_page(e4b->bd_bitmap_page);
1350                 put_page(e4b->bd_bitmap_page);
1351         }
1352         if (e4b->bd_buddy_page) {
1353                 unlock_page(e4b->bd_buddy_page);
1354                 put_page(e4b->bd_buddy_page);
1355         }
1356 }
1357
1358 /*
1359  * Locking note:  This routine calls ext4_mb_init_cache(), which takes the
1360  * block group lock of all groups for this page; do not hold the BG lock when
1361  * calling this routine!
1362  */
1363 static noinline_for_stack
1364 int ext4_mb_init_group(struct super_block *sb, ext4_group_t group, gfp_t gfp)
1365 {
1366
1367         struct ext4_group_info *this_grp;
1368         struct ext4_buddy e4b;
1369         struct page *page;
1370         int ret = 0;
1371
1372         might_sleep();
1373         mb_debug(sb, "init group %u\n", group);
1374         this_grp = ext4_get_group_info(sb, group);
1375         /*
1376          * This ensures that we don't reinit the buddy cache
1377          * page which maps to the group from which we are already
1378          * allocating. If we are looking at the buddy cache we would
1379          * have taken a reference using ext4_mb_load_buddy and that
1380          * would have pinned the buddy page in the page cache.
1381          * The call to ext4_mb_get_buddy_page_lock will mark the
1382          * page accessed.
1383          */
1384         ret = ext4_mb_get_buddy_page_lock(sb, group, &e4b, gfp);
1385         if (ret || !EXT4_MB_GRP_NEED_INIT(this_grp)) {
1386                 /*
1387                  * somebody initialized the group
1388                  * return without doing anything
1389                  */
1390                 goto err;
1391         }
1392
1393         page = e4b.bd_bitmap_page;
1394         ret = ext4_mb_init_cache(page, NULL, gfp);
1395         if (ret)
1396                 goto err;
1397         if (!PageUptodate(page)) {
1398                 ret = -EIO;
1399                 goto err;
1400         }
1401
1402         if (e4b.bd_buddy_page == NULL) {
1403                 /*
1404                  * If both the bitmap and the buddy are on
1405                  * the same page we don't need to force-init
1406                  * the buddy
1407                  */
1408                 ret = 0;
1409                 goto err;
1410         }
1411         /* init buddy cache */
1412         page = e4b.bd_buddy_page;
1413         ret = ext4_mb_init_cache(page, e4b.bd_bitmap, gfp);
1414         if (ret)
1415                 goto err;
1416         if (!PageUptodate(page)) {
1417                 ret = -EIO;
1418                 goto err;
1419         }
1420 err:
1421         ext4_mb_put_buddy_page_lock(&e4b);
1422         return ret;
1423 }
1424
1425 /*
1426  * Locking note:  This routine calls ext4_mb_init_cache(), which takes the
1427  * block group lock of all groups for this page; do not hold the BG lock when
1428  * calling this routine!
1429  */
1430 static noinline_for_stack int
1431 ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group,
1432                        struct ext4_buddy *e4b, gfp_t gfp)
1433 {
1434         int blocks_per_page;
1435         int block;
1436         int pnum;
1437         int poff;
1438         struct page *page;
1439         int ret;
1440         struct ext4_group_info *grp;
1441         struct ext4_sb_info *sbi = EXT4_SB(sb);
1442         struct inode *inode = sbi->s_buddy_cache;
1443
1444         might_sleep();
1445         mb_debug(sb, "load group %u\n", group);
1446
1447         blocks_per_page = PAGE_SIZE / sb->s_blocksize;
1448         grp = ext4_get_group_info(sb, group);
1449
1450         e4b->bd_blkbits = sb->s_blocksize_bits;
1451         e4b->bd_info = grp;
1452         e4b->bd_sb = sb;
1453         e4b->bd_group = group;
1454         e4b->bd_buddy_page = NULL;
1455         e4b->bd_bitmap_page = NULL;
1456
1457         if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
1458                 /*
1459                  * we need full data about the group
1460                  * to make a good selection
1461                  */
1462                 ret = ext4_mb_init_group(sb, group, gfp);
1463                 if (ret)
1464                         return ret;
1465         }
1466
1467         /*
1468          * the buddy cache inode stores the block bitmap
1469          * and buddy information in consecutive blocks.
1470          * So for each group we need two blocks.
1471          */
1472         block = group * 2;
1473         pnum = block / blocks_per_page;
1474         poff = block % blocks_per_page;
1475
1476         /* we could use find_or_create_page(), but it locks the page,
1477          * which we'd like to avoid in the fast path ... */
1478         page = find_get_page_flags(inode->i_mapping, pnum, FGP_ACCESSED);
1479         if (page == NULL || !PageUptodate(page)) {
1480                 if (page)
1481                         /*
1482                          * drop the page reference and try to
1483                          * get the page with the lock held. If
1484                          * the page is not uptodate, somebody
1485                          * just created it but has yet to
1486                          * initialize it; wait for that
1487                          * initialization to finish.
1488                          */
1489                         put_page(page);
1490                 page = find_or_create_page(inode->i_mapping, pnum, gfp);
1491                 if (page) {
1492                         BUG_ON(page->mapping != inode->i_mapping);
1493                         if (!PageUptodate(page)) {
1494                                 ret = ext4_mb_init_cache(page, NULL, gfp);
1495                                 if (ret) {
1496                                         unlock_page(page);
1497                                         goto err;
1498                                 }
1499                                 mb_cmp_bitmaps(e4b, page_address(page) +
1500                                                (poff * sb->s_blocksize));
1501                         }
1502                         unlock_page(page);
1503                 }
1504         }
1505         if (page == NULL) {
1506                 ret = -ENOMEM;
1507                 goto err;
1508         }
1509         if (!PageUptodate(page)) {
1510                 ret = -EIO;
1511                 goto err;
1512         }
1513
1514         /* Pages marked accessed already */
1515         e4b->bd_bitmap_page = page;
1516         e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize);
1517
1518         block++;
1519         pnum = block / blocks_per_page;
1520         poff = block % blocks_per_page;
1521
1522         page = find_get_page_flags(inode->i_mapping, pnum, FGP_ACCESSED);
1523         if (page == NULL || !PageUptodate(page)) {
1524                 if (page)
1525                         put_page(page);
1526                 page = find_or_create_page(inode->i_mapping, pnum, gfp);
1527                 if (page) {
1528                         BUG_ON(page->mapping != inode->i_mapping);
1529                         if (!PageUptodate(page)) {
1530                                 ret = ext4_mb_init_cache(page, e4b->bd_bitmap,
1531                                                          gfp);
1532                                 if (ret) {
1533                                         unlock_page(page);
1534                                         goto err;
1535                                 }
1536                         }
1537                         unlock_page(page);
1538                 }
1539         }
1540         if (page == NULL) {
1541                 ret = -ENOMEM;
1542                 goto err;
1543         }
1544         if (!PageUptodate(page)) {
1545                 ret = -EIO;
1546                 goto err;
1547         }
1548
1549         /* Pages marked accessed already */
1550         e4b->bd_buddy_page = page;
1551         e4b->bd_buddy = page_address(page) + (poff * sb->s_blocksize);
1552
1553         return 0;
1554
1555 err:
1556         if (page)
1557                 put_page(page);
1558         if (e4b->bd_bitmap_page)
1559                 put_page(e4b->bd_bitmap_page);
1560         if (e4b->bd_buddy_page)
1561                 put_page(e4b->bd_buddy_page);
1562         e4b->bd_buddy = NULL;
1563         e4b->bd_bitmap = NULL;
1564         return ret;
1565 }
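
/*
 * The lookup pattern above, in brief: try the lockless fast path
 * (find_get_page_flags()) first; only if the page is absent or not yet
 * uptodate fall back to find_or_create_page(), which takes the page
 * lock so that concurrent loaders either initialize the page or wait
 * for whoever won the lock.  The same two-step dance runs once for the
 * bitmap block and once for the buddy block, and the error path drops
 * any references taken so far and clears the e4b pointers.
 */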
1566
1567 static int ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group,
1568                               struct ext4_buddy *e4b)
1569 {
1570         return ext4_mb_load_buddy_gfp(sb, group, e4b, GFP_NOFS);
1571 }
1572
1573 static void ext4_mb_unload_buddy(struct ext4_buddy *e4b)
1574 {
1575         if (e4b->bd_bitmap_page)
1576                 put_page(e4b->bd_bitmap_page);
1577         if (e4b->bd_buddy_page)
1578                 put_page(e4b->bd_buddy_page);
1579 }
1580
1581
1582 static int mb_find_order_for_block(struct ext4_buddy *e4b, int block)
1583 {
1584         int order = 1, max;
1585         void *bb;
1586
1587         BUG_ON(e4b->bd_bitmap == e4b->bd_buddy);
1588         BUG_ON(block >= (1 << (e4b->bd_blkbits + 3)));
1589
1590         while (order <= e4b->bd_blkbits + 1) {
1591                 bb = mb_find_buddy(e4b, order, &max);
1592                 if (!mb_test_bit(block >> order, bb)) {
1593                         /* this block is part of buddy of order 'order' */
1594                         return order;
1595                 }
1596                 order++;
1597         }
1598         return 0;
1599 }
1600
1601 static void mb_clear_bits(void *bm, int cur, int len)
1602 {
1603         __u32 *addr;
1604
1605         len = cur + len;
1606         while (cur < len) {
1607                 if ((cur & 31) == 0 && (len - cur) >= 32) {
1608                         /* fast path: clear whole word at once */
1609                         addr = bm + (cur >> 3);
1610                         *addr = 0;
1611                         cur += 32;
1612                         continue;
1613                 }
1614                 mb_clear_bit(cur, bm);
1615                 cur++;
1616         }
1617 }
1618
1619 /* clear bits in the given range;
1620  * return the first bit that was already zero if any, -1 otherwise
1621  */
1622 static int mb_test_and_clear_bits(void *bm, int cur, int len)
1623 {
1624         __u32 *addr;
1625         int zero_bit = -1;
1626
1627         len = cur + len;
1628         while (cur < len) {
1629                 if ((cur & 31) == 0 && (len - cur) >= 32) {
1630                         /* fast path: clear whole word at once */
1631                         addr = bm + (cur >> 3);
1632                         if (*addr != (__u32)(-1) && zero_bit == -1)
1633                                 zero_bit = cur + mb_find_next_zero_bit(addr, 32, 0);
1634                         *addr = 0;
1635                         cur += 32;
1636                         continue;
1637                 }
1638                 if (!mb_test_and_clear_bit(cur, bm) && zero_bit == -1)
1639                         zero_bit = cur;
1640                 cur++;
1641         }
1642
1643         return zero_bit;
1644 }
1645
1646 void mb_set_bits(void *bm, int cur, int len)
1647 {
1648         __u32 *addr;
1649
1650         len = cur + len;
1651         while (cur < len) {
1652                 if ((cur & 31) == 0 && (len - cur) >= 32) {
1653                         /* fast path: set whole word at once */
1654                         addr = bm + (cur >> 3);
1655                         *addr = 0xffffffff;
1656                         cur += 32;
1657                         continue;
1658                 }
1659                 mb_set_bit(cur, bm);
1660                 cur++;
1661         }
1662 }
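
/*
 * The three helpers above share one trick: when the cursor is 32-bit
 * aligned and at least 32 bits remain, a whole __u32 is written at
 * once instead of looping bit by bit.  A minimal stand-alone sketch of
 * the same fast path (plain user-space C, for illustration only; the
 * real helpers use the mb_*_bit() wrappers and byte-based addressing,
 * bm + (cur >> 3), which is equivalent for an aligned cur):
 *
 *	void set_bits(unsigned int *bm, int cur, int len)
 *	{
 *		for (len += cur; cur < len; ) {
 *			if ((cur & 31) == 0 && len - cur >= 32) {
 *				bm[cur >> 5] = 0xffffffff;	// whole word
 *				cur += 32;
 *			} else {
 *				bm[cur >> 5] |= 1U << (cur & 31);
 *				cur++;
 *			}
 *		}
 *	}
 */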
1663
1664 static inline int mb_buddy_adjust_border(int* bit, void* bitmap, int side)
1665 {
1666         if (mb_test_bit(*bit + side, bitmap)) {
1667                 mb_clear_bit(*bit, bitmap);
1668                 (*bit) -= side;
1669                 return 1;
1670         }
1671         else {
1672                 (*bit) += side;
1673                 mb_set_bit(*bit, bitmap);
1674                 return -1;
1675         }
1676 }
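
/*
 * In other words (tied to the example in mb_buddy_mark_free() below):
 * if the neighbouring bit at this order is set (busy), the border
 * buddy cannot merge, so its own bit is cleared, the range shrinks
 * away from the border, and one more free buddy is counted at this
 * order (+1).  If the neighbour is clear (free), the two merge towards
 * the next order: the neighbour's bit is set so it is no longer
 * counted at this order (-1), and the range extends over it.
 */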
1677
1678 static void mb_buddy_mark_free(struct ext4_buddy *e4b, int first, int last)
1679 {
1680         int max;
1681         int order = 1;
1682         void *buddy = mb_find_buddy(e4b, order, &max);
1683
1684         while (buddy) {
1685                 void *buddy2;
1686
1687                 /* Bits in the range [first; last] are known to be set since
1688                  * the corresponding blocks were allocated. Bits in the range
1689                  * (first; last) will stay set because they form buddies on
1690                  * the upper layer. We just deal with the borders if they
1691                  * don't align with the upper layer and then go up.
1692                  * Releasing an entire group comes down to clearing a
1693                  * single bit of the highest-order buddy.
1694                  */
1695
1696                 /* Example:
1697                  * ---------------------------------
1698                  * |   1   |   1   |   1   |   1   |
1699                  * ---------------------------------
1700                  * | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 1 |
1701                  * ---------------------------------
1702                  *   0   1   2   3   4   5   6   7
1703                  *      \_____________________/
1704                  *
1705                  * Neither [1] nor [6] is aligned to the layer above.
1706                  * The left neighbour [0] is free, so mark it busy,
1707                  * decrease bb_counters and extend the range to
1708                  * [0; 6].
1709                  * The right neighbour [7] is busy. It can't be coalesced
1710                  * with [6], so mark [6] free, increase bb_counters and
1711                  * shrink the range to [0; 5].
1712                  * Then shift the range to [0; 2], go up and do the same.
1713                  */
1714
1715
1716                 if (first & 1)
1717                         e4b->bd_info->bb_counters[order] += mb_buddy_adjust_border(&first, buddy, -1);
1718                 if (!(last & 1))
1719                         e4b->bd_info->bb_counters[order] += mb_buddy_adjust_border(&last, buddy, 1);
1720                 if (first > last)
1721                         break;
1722                 order++;
1723
1724                 if (first == last || !(buddy2 = mb_find_buddy(e4b, order, &max))) {
1725                         mb_clear_bits(buddy, first, last - first + 1);
1726                         e4b->bd_info->bb_counters[order - 1] += last - first + 1;
1727                         break;
1728                 }
1729                 first >>= 1;
1730                 last >>= 1;
1731                 buddy = buddy2;
1732         }
1733 }
1734
1735 static void mb_free_blocks(struct inode *inode, struct ext4_buddy *e4b,
1736                            int first, int count)
1737 {
1738         int left_is_free = 0;
1739         int right_is_free = 0;
1740         int block;
1741         int last = first + count - 1;
1742         struct super_block *sb = e4b->bd_sb;
1743
1744         if (WARN_ON(count == 0))
1745                 return;
1746         BUG_ON(last >= (sb->s_blocksize << 3));
1747         assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group));
1748         /* Don't bother if the block group is corrupt. */
1749         if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info)))
1750                 return;
1751
1752         mb_check_buddy(e4b);
1753         mb_free_blocks_double(inode, e4b, first, count);
1754
1755         this_cpu_inc(discard_pa_seq);
1756         e4b->bd_info->bb_free += count;
1757         if (first < e4b->bd_info->bb_first_free)
1758                 e4b->bd_info->bb_first_free = first;
1759
1760         /* access memory sequentially: check left neighbour,
1761          * clear range and then check right neighbour
1762          */
1763         if (first != 0)
1764                 left_is_free = !mb_test_bit(first - 1, e4b->bd_bitmap);
1765         block = mb_test_and_clear_bits(e4b->bd_bitmap, first, count);
1766         if (last + 1 < EXT4_SB(sb)->s_mb_maxs[0])
1767                 right_is_free = !mb_test_bit(last + 1, e4b->bd_bitmap);
1768
1769         if (unlikely(block != -1)) {
1770                 struct ext4_sb_info *sbi = EXT4_SB(sb);
1771                 ext4_fsblk_t blocknr;
1772
1773                 blocknr = ext4_group_first_block_no(sb, e4b->bd_group);
1774                 blocknr += EXT4_C2B(sbi, block);
1775                 if (!(sbi->s_mount_state & EXT4_FC_REPLAY)) {
1776                         ext4_grp_locked_error(sb, e4b->bd_group,
1777                                               inode ? inode->i_ino : 0,
1778                                               blocknr,
1779                                               "freeing already freed block (bit %u); block bitmap corrupt.",
1780                                               block);
1781                         ext4_mark_group_bitmap_corrupted(
1782                                 sb, e4b->bd_group,
1783                                 EXT4_GROUP_INFO_BBITMAP_CORRUPT);
1784                 }
1785                 goto done;
1786         }
1787
1788         /* let's maintain fragments counter */
1789         if (left_is_free && right_is_free)
1790                 e4b->bd_info->bb_fragments--;
1791         else if (!left_is_free && !right_is_free)
1792                 e4b->bd_info->bb_fragments++;
1793
1794         /* buddy[0] == bd_bitmap is a special case, so handle
1795          * it right away and let mb_buddy_mark_free stay free of
1796          * zero-order checks.
1797          * Check whether the neighbours are to be coalesced, and
1798          * adjust the bitmap, bb_counters and borders appropriately.
1799          */
1800         if (first & 1) {
1801                 first += !left_is_free;
1802                 e4b->bd_info->bb_counters[0] += left_is_free ? -1 : 1;
1803         }
1804         if (!(last & 1)) {
1805                 last -= !right_is_free;
1806                 e4b->bd_info->bb_counters[0] += right_is_free ? -1 : 1;
1807         }
1808
1809         if (first <= last)
1810                 mb_buddy_mark_free(e4b, first >> 1, last >> 1);
1811
1812 done:
1813         mb_set_largest_free_order(sb, e4b->bd_info);
1814         mb_update_avg_fragment_size(sb, e4b->bd_info);
1815         mb_check_buddy(e4b);
1816 }
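
/*
 * Fragment accounting above, by example: freeing a run whose left and
 * right neighbours are both free joins three free fragments into one,
 * so bb_fragments goes down by one; freeing between two busy
 * neighbours creates a brand new fragment, so it goes up by one; with
 * exactly one free neighbour the run merely extends an existing
 * fragment and the count is unchanged.
 */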
1817
1818 static int mb_find_extent(struct ext4_buddy *e4b, int block,
1819                                 int needed, struct ext4_free_extent *ex)
1820 {
1821         int next = block;
1822         int max, order;
1823         void *buddy;
1824
1825         assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group));
1826         BUG_ON(ex == NULL);
1827
1828         buddy = mb_find_buddy(e4b, 0, &max);
1829         BUG_ON(buddy == NULL);
1830         BUG_ON(block >= max);
1831         if (mb_test_bit(block, buddy)) {
1832                 ex->fe_len = 0;
1833                 ex->fe_start = 0;
1834                 ex->fe_group = 0;
1835                 return 0;
1836         }
1837
1838         /* find actual order */
1839         order = mb_find_order_for_block(e4b, block);
1840         block = block >> order;
1841
1842         ex->fe_len = 1 << order;
1843         ex->fe_start = block << order;
1844         ex->fe_group = e4b->bd_group;
1845
1846         /* calc difference from given start */
1847         next = next - ex->fe_start;
1848         ex->fe_len -= next;
1849         ex->fe_start += next;
1850
1851         while (needed > ex->fe_len &&
1852                mb_find_buddy(e4b, order, &max)) {
1853
1854                 if (block + 1 >= max)
1855                         break;
1856
1857                 next = (block + 1) * (1 << order);
1858                 if (mb_test_bit(next, e4b->bd_bitmap))
1859                         break;
1860
1861                 order = mb_find_order_for_block(e4b, next);
1862
1863                 block = next >> order;
1864                 ex->fe_len += 1 << order;
1865         }
1866
1867         if (ex->fe_start + ex->fe_len > EXT4_CLUSTERS_PER_GROUP(e4b->bd_sb)) {
1868                 /* Should never happen! (but apparently sometimes does?!?) */
1869                 WARN_ON(1);
1870                 ext4_grp_locked_error(e4b->bd_sb, e4b->bd_group, 0, 0,
1871                         "corruption or bug in mb_find_extent "
1872                         "block=%d, order=%d needed=%d ex=%u/%d/%d@%u",
1873                         block, order, needed, ex->fe_group, ex->fe_start,
1874                         ex->fe_len, ex->fe_logical);
1875                 ex->fe_len = 0;
1876                 ex->fe_start = 0;
1877                 ex->fe_group = 0;
1878         }
1879         return ex->fe_len;
1880 }
1881
1882 static int mb_mark_used(struct ext4_buddy *e4b, struct ext4_free_extent *ex)
1883 {
1884         int ord;
1885         int mlen = 0;
1886         int max = 0;
1887         int cur;
1888         int start = ex->fe_start;
1889         int len = ex->fe_len;
1890         unsigned ret = 0;
1891         int len0 = len;
1892         void *buddy;
1893         bool split = false;
1894
1895         BUG_ON(start + len > (e4b->bd_sb->s_blocksize << 3));
1896         BUG_ON(e4b->bd_group != ex->fe_group);
1897         assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group));
1898         mb_check_buddy(e4b);
1899         mb_mark_used_double(e4b, start, len);
1900
1901         this_cpu_inc(discard_pa_seq);
1902         e4b->bd_info->bb_free -= len;
1903         if (e4b->bd_info->bb_first_free == start)
1904                 e4b->bd_info->bb_first_free += len;
1905
1906         /* let's maintain fragments counter */
1907         if (start != 0)
1908                 mlen = !mb_test_bit(start - 1, e4b->bd_bitmap);
1909         if (start + len < EXT4_SB(e4b->bd_sb)->s_mb_maxs[0])
1910                 max = !mb_test_bit(start + len, e4b->bd_bitmap);
1911         if (mlen && max)
1912                 e4b->bd_info->bb_fragments++;
1913         else if (!mlen && !max)
1914                 e4b->bd_info->bb_fragments--;
1915
1916         /* let's maintain buddy itself */
1917         while (len) {
1918                 if (!split)
1919                         ord = mb_find_order_for_block(e4b, start);
1920
1921                 if (((start >> ord) << ord) == start && len >= (1 << ord)) {
1922                         /* the whole chunk may be allocated at once! */
1923                         mlen = 1 << ord;
1924                         if (!split)
1925                                 buddy = mb_find_buddy(e4b, ord, &max);
1926                         else
1927                                 split = false;
1928                         BUG_ON((start >> ord) >= max);
1929                         mb_set_bit(start >> ord, buddy);
1930                         e4b->bd_info->bb_counters[ord]--;
1931                         start += mlen;
1932                         len -= mlen;
1933                         BUG_ON(len < 0);
1934                         continue;
1935                 }
1936
1937                 /* store for history */
1938                 if (ret == 0)
1939                         ret = len | (ord << 16);
1940
1941                 /* we have to split large buddy */
1942                 BUG_ON(ord <= 0);
1943                 buddy = mb_find_buddy(e4b, ord, &max);
1944                 mb_set_bit(start >> ord, buddy);
1945                 e4b->bd_info->bb_counters[ord]--;
1946
1947                 ord--;
1948                 cur = (start >> ord) & ~1U;
1949                 buddy = mb_find_buddy(e4b, ord, &max);
1950                 mb_clear_bit(cur, buddy);
1951                 mb_clear_bit(cur + 1, buddy);
1952                 e4b->bd_info->bb_counters[ord]++;
1953                 e4b->bd_info->bb_counters[ord]++;
1954                 split = true;
1955         }
1956         mb_set_largest_free_order(e4b->bd_sb, e4b->bd_info);
1957
1958         mb_update_avg_fragment_size(e4b->bd_sb, e4b->bd_info);
1959         mb_set_bits(e4b->bd_bitmap, ex->fe_start, len0);
1960         mb_check_buddy(e4b);
1961
1962         return ret;
1963 }
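
/*
 * Split walk-through, as a sketch: take blocks [0..1] out of a free
 * order-2 buddy covering [0..3].  mb_find_order_for_block() returns 2;
 * start (0) is aligned but len (2) is less than 4, so the buddy is
 * split: the order-2 bit is set (bb_counters[2]--) and its two order-1
 * halves [0..1] and [2..3] are cleared (bb_counters[1] += 2).  On the
 * next pass [0..1] is taken whole at order 1 (bb_counters[1]--),
 * leaving [2..3] behind as a free order-1 buddy, while ret records
 * len 2 at order 2 for the allocation history (ac_tail/ac_buddy).
 */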
1964
1965 /*
1966  * Must be called under group lock!
1967  */
1968 static void ext4_mb_use_best_found(struct ext4_allocation_context *ac,
1969                                         struct ext4_buddy *e4b)
1970 {
1971         struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
1972         int ret;
1973
1974         BUG_ON(ac->ac_b_ex.fe_group != e4b->bd_group);
1975         BUG_ON(ac->ac_status == AC_STATUS_FOUND);
1976
1977         ac->ac_b_ex.fe_len = min(ac->ac_b_ex.fe_len, ac->ac_g_ex.fe_len);
1978         ac->ac_b_ex.fe_logical = ac->ac_g_ex.fe_logical;
1979         ret = mb_mark_used(e4b, &ac->ac_b_ex);
1980
1981         /* preallocation can change ac_b_ex, thus we store the actually
1982          * allocated blocks for history */
1983         ac->ac_f_ex = ac->ac_b_ex;
1984
1985         ac->ac_status = AC_STATUS_FOUND;
1986         ac->ac_tail = ret & 0xffff;
1987         ac->ac_buddy = ret >> 16;
1988
1989         /*
1990          * take the page reference. We want the page to be pinned
1991          * so that we don't get an ext4_mb_init_cache() call for this
1992          * group until we update the bitmap; that could mean we
1993          * double-allocate blocks. The reference is dropped
1994          * in ext4_mb_release_context()
1995          */
1996         ac->ac_bitmap_page = e4b->bd_bitmap_page;
1997         get_page(ac->ac_bitmap_page);
1998         ac->ac_buddy_page = e4b->bd_buddy_page;
1999         get_page(ac->ac_buddy_page);
2000         /* store last allocated for subsequent stream allocation */
2001         if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) {
2002                 spin_lock(&sbi->s_md_lock);
2003                 sbi->s_mb_last_group = ac->ac_f_ex.fe_group;
2004                 sbi->s_mb_last_start = ac->ac_f_ex.fe_start;
2005                 spin_unlock(&sbi->s_md_lock);
2006         }
2007         /*
2008          * As we've just preallocated more space than
2009          * user requested originally, we store allocated
2010          * space in a special descriptor.
2011          */
2012         if (ac->ac_o_ex.fe_len < ac->ac_b_ex.fe_len)
2013                 ext4_mb_new_preallocation(ac);
2014
2015 }
2016
2017 static void ext4_mb_check_limits(struct ext4_allocation_context *ac,
2018                                         struct ext4_buddy *e4b,
2019                                         int finish_group)
2020 {
2021         struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
2022         struct ext4_free_extent *bex = &ac->ac_b_ex;
2023         struct ext4_free_extent *gex = &ac->ac_g_ex;
2024         struct ext4_free_extent ex;
2025         int max;
2026
2027         if (ac->ac_status == AC_STATUS_FOUND)
2028                 return;
2029         /*
2030          * We don't want to scan for a whole year
2031          */
2032         if (ac->ac_found > sbi->s_mb_max_to_scan &&
2033                         !(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
2034                 ac->ac_status = AC_STATUS_BREAK;
2035                 return;
2036         }
2037
2038         /*
2039          * Haven't found good chunk so far, let's continue
2040          */
2041         if (bex->fe_len < gex->fe_len)
2042                 return;
2043
2044         if ((finish_group || ac->ac_found > sbi->s_mb_min_to_scan)
2045                         && bex->fe_group == e4b->bd_group) {
2046                 /* recheck the chunk's availability - we don't know
2047                  * whether it was found within this lock-unlock
2048                  * period or before it */
2049                 max = mb_find_extent(e4b, bex->fe_start, gex->fe_len, &ex);
2050                 if (max >= gex->fe_len) {
2051                         ext4_mb_use_best_found(ac, e4b);
2052                         return;
2053                 }
2054         }
2055 }
2056
2057 /*
2058  * The routine checks whether the found extent is good enough. If it is,
2059  * the extent is marked used and a flag is set in the context
2060  * to stop scanning. Otherwise, the extent is compared with the
2061  * previously found extent and, if the new one is better, it is stored
2062  * in the context. Later, the best found extent will be used if
2063  * mballoc can't find a good enough extent.
2064  *
2065  * FIXME: the real allocation policy is yet to be designed!
2066  */
2067 static void ext4_mb_measure_extent(struct ext4_allocation_context *ac,
2068                                         struct ext4_free_extent *ex,
2069                                         struct ext4_buddy *e4b)
2070 {
2071         struct ext4_free_extent *bex = &ac->ac_b_ex;
2072         struct ext4_free_extent *gex = &ac->ac_g_ex;
2073
2074         BUG_ON(ex->fe_len <= 0);
2075         BUG_ON(ex->fe_len > EXT4_CLUSTERS_PER_GROUP(ac->ac_sb));
2076         BUG_ON(ex->fe_start >= EXT4_CLUSTERS_PER_GROUP(ac->ac_sb));
2077         BUG_ON(ac->ac_status != AC_STATUS_CONTINUE);
2078
2079         ac->ac_found++;
2080
2081         /*
2082          * The special case - take what you catch first
2083          */
2084         if (unlikely(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
2085                 *bex = *ex;
2086                 ext4_mb_use_best_found(ac, e4b);
2087                 return;
2088         }
2089
2090         /*
2091          * Let's check whether the chunk is good enough
2092          */
2093         if (ex->fe_len == gex->fe_len) {
2094                 *bex = *ex;
2095                 ext4_mb_use_best_found(ac, e4b);
2096                 return;
2097         }
2098
2099         /*
2100          * If this is the first found extent, just store it in the context
2101          */
2102         if (bex->fe_len == 0) {
2103                 *bex = *ex;
2104                 return;
2105         }
2106
2107         /*
2108          * If the newly found extent is better, store it in the context
2109          */
2110         if (bex->fe_len < gex->fe_len) {
2111                 /* if the request isn't satisfied, any found extent
2112                  * larger than the previous best one is better */
2113                 if (ex->fe_len > bex->fe_len)
2114                         *bex = *ex;
2115         } else if (ex->fe_len > gex->fe_len) {
2116                 /* if the request is satisfied, then we try to find
2117                  * an extent that still satisfies the request, but is
2118                  * smaller than the previous one */
2119                 if (ex->fe_len < bex->fe_len)
2120                         *bex = *ex;
2121         }
2122
2123         ext4_mb_check_limits(ac, e4b, 0);
2124 }
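
/*
 * The comparison policy above, by example with a goal of
 * gex->fe_len == 8: while the best-so-far is still shorter than 8,
 * any longer extent wins (a 6-cluster extent replaces a 4-cluster
 * one).  Once the best-so-far already covers the goal, a smaller
 * extent that still covers it wins instead (9 replaces 10), since the
 * smaller surplus wastes less space.
 */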
2125
2126 static noinline_for_stack
2127 int ext4_mb_try_best_found(struct ext4_allocation_context *ac,
2128                                         struct ext4_buddy *e4b)
2129 {
2130         struct ext4_free_extent ex = ac->ac_b_ex;
2131         ext4_group_t group = ex.fe_group;
2132         int max;
2133         int err;
2134
2135         BUG_ON(ex.fe_len <= 0);
2136         err = ext4_mb_load_buddy(ac->ac_sb, group, e4b);
2137         if (err)
2138                 return err;
2139
2140         ext4_lock_group(ac->ac_sb, group);
2141         max = mb_find_extent(e4b, ex.fe_start, ex.fe_len, &ex);
2142
2143         if (max > 0) {
2144                 ac->ac_b_ex = ex;
2145                 ext4_mb_use_best_found(ac, e4b);
2146         }
2147
2148         ext4_unlock_group(ac->ac_sb, group);
2149         ext4_mb_unload_buddy(e4b);
2150
2151         return 0;
2152 }
2153
2154 static noinline_for_stack
2155 int ext4_mb_find_by_goal(struct ext4_allocation_context *ac,
2156                                 struct ext4_buddy *e4b)
2157 {
2158         ext4_group_t group = ac->ac_g_ex.fe_group;
2159         int max;
2160         int err;
2161         struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
2162         struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group);
2163         struct ext4_free_extent ex;
2164
2165         if (!(ac->ac_flags & EXT4_MB_HINT_TRY_GOAL))
2166                 return 0;
2167         if (grp->bb_free == 0)
2168                 return 0;
2169
2170         err = ext4_mb_load_buddy(ac->ac_sb, group, e4b);
2171         if (err)
2172                 return err;
2173
2174         if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info))) {
2175                 ext4_mb_unload_buddy(e4b);
2176                 return 0;
2177         }
2178
2179         ext4_lock_group(ac->ac_sb, group);
2180         max = mb_find_extent(e4b, ac->ac_g_ex.fe_start,
2181                              ac->ac_g_ex.fe_len, &ex);
2182         ex.fe_logical = 0xDEADFA11; /* debug value */
2183
2184         if (max >= ac->ac_g_ex.fe_len && ac->ac_g_ex.fe_len == sbi->s_stripe) {
2185                 ext4_fsblk_t start;
2186
2187                 start = ext4_group_first_block_no(ac->ac_sb, e4b->bd_group) +
2188                         ex.fe_start;
2189                 /* use do_div to get remainder (would be 64-bit modulo) */
2190                 if (do_div(start, sbi->s_stripe) == 0) {
2191                         ac->ac_found++;
2192                         ac->ac_b_ex = ex;
2193                         ext4_mb_use_best_found(ac, e4b);
2194                 }
2195         } else if (max >= ac->ac_g_ex.fe_len) {
2196                 BUG_ON(ex.fe_len <= 0);
2197                 BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group);
2198                 BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start);
2199                 ac->ac_found++;
2200                 ac->ac_b_ex = ex;
2201                 ext4_mb_use_best_found(ac, e4b);
2202         } else if (max > 0 && (ac->ac_flags & EXT4_MB_HINT_MERGE)) {
2203                 /* Sometimes, the caller may want to merge even a small
2204                  * number of blocks into an existing extent */
2205                 BUG_ON(ex.fe_len <= 0);
2206                 BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group);
2207                 BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start);
2208                 ac->ac_found++;
2209                 ac->ac_b_ex = ex;
2210                 ext4_mb_use_best_found(ac, e4b);
2211         }
2212         ext4_unlock_group(ac->ac_sb, group);
2213         ext4_mb_unload_buddy(e4b);
2214
2215         return 0;
2216 }
2217
2218 /*
2219  * The routine scans buddy structures (not the bitmap!) from the given order
2220  * to the max order and tries to find a big enough chunk to satisfy the request
2221  */
2222 static noinline_for_stack
2223 void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
2224                                         struct ext4_buddy *e4b)
2225 {
2226         struct super_block *sb = ac->ac_sb;
2227         struct ext4_group_info *grp = e4b->bd_info;
2228         void *buddy;
2229         int i;
2230         int k;
2231         int max;
2232
2233         BUG_ON(ac->ac_2order <= 0);
2234         for (i = ac->ac_2order; i < MB_NUM_ORDERS(sb); i++) {
2235                 if (grp->bb_counters[i] == 0)
2236                         continue;
2237
2238                 buddy = mb_find_buddy(e4b, i, &max);
2239                 BUG_ON(buddy == NULL);
2240
2241                 k = mb_find_next_zero_bit(buddy, max, 0);
2242                 if (k >= max) {
2243                         ext4_grp_locked_error(ac->ac_sb, e4b->bd_group, 0, 0,
2244                                 "%d free clusters of order %d. But found 0",
2245                                 grp->bb_counters[i], i);
2246                         ext4_mark_group_bitmap_corrupted(ac->ac_sb,
2247                                          e4b->bd_group,
2248                                         EXT4_GROUP_INFO_BBITMAP_CORRUPT);
2249                         break;
2250                 }
2251                 ac->ac_found++;
2252
2253                 ac->ac_b_ex.fe_len = 1 << i;
2254                 ac->ac_b_ex.fe_start = k << i;
2255                 ac->ac_b_ex.fe_group = e4b->bd_group;
2256
2257                 ext4_mb_use_best_found(ac, e4b);
2258
2259                 BUG_ON(ac->ac_f_ex.fe_len != ac->ac_g_ex.fe_len);
2260
2261                 if (EXT4_SB(sb)->s_mb_stats)
2262                         atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
2263
2264                 break;
2265         }
2266 }
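
/*
 * For example, with ac_2order == 3 the scan starts at the order-3
 * buddy map; the first zero bit k found there denotes a free
 * 8-cluster buddy, so the allocator takes fe_start = k << 3 and
 * fe_len = 1 << 3 in a single step, without walking the block bitmap
 * at all.
 */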
2267
2268 /*
2269  * The routine scans the group and measures all found extents.
2270  * In order to optimize scanning, the caller must pass the number of
2271  * free blocks in the group, so the routine knows the upper limit.
2272  */
2273 static noinline_for_stack
2274 void ext4_mb_complex_scan_group(struct ext4_allocation_context *ac,
2275                                         struct ext4_buddy *e4b)
2276 {
2277         struct super_block *sb = ac->ac_sb;
2278         void *bitmap = e4b->bd_bitmap;
2279         struct ext4_free_extent ex;
2280         int i;
2281         int free;
2282
2283         free = e4b->bd_info->bb_free;
2284         if (WARN_ON(free <= 0))
2285                 return;
2286
2287         i = e4b->bd_info->bb_first_free;
2288
2289         while (free && ac->ac_status == AC_STATUS_CONTINUE) {
2290                 i = mb_find_next_zero_bit(bitmap,
2291                                                 EXT4_CLUSTERS_PER_GROUP(sb), i);
2292                 if (i >= EXT4_CLUSTERS_PER_GROUP(sb)) {
2293                         /*
2294                          * If we have a corrupt bitmap, we won't find any
2295                          * free blocks even though the group info says we
2296                          * have free blocks
2297                          */
2298                         ext4_grp_locked_error(sb, e4b->bd_group, 0, 0,
2299                                         "%d free clusters as per "
2300                                         "group info. But bitmap says 0",
2301                                         free);
2302                         ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group,
2303                                         EXT4_GROUP_INFO_BBITMAP_CORRUPT);
2304                         break;
2305                 }
2306
2307                 mb_find_extent(e4b, i, ac->ac_g_ex.fe_len, &ex);
2308                 if (WARN_ON(ex.fe_len <= 0))
2309                         break;
2310                 if (free < ex.fe_len) {
2311                         ext4_grp_locked_error(sb, e4b->bd_group, 0, 0,
2312                                         "%d free clusters as per "
2313                                         "group info. But got %d blocks",
2314                                         free, ex.fe_len);
2315                         ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group,
2316                                         EXT4_GROUP_INFO_BBITMAP_CORRUPT);
2317                         /*
2318                          * The number of free blocks differs. This mostly
2319                          * indicates that the bitmap is corrupt. So exit
2320                          * without claiming the space.
2321                          */
2322                         break;
2323                 }
2324                 ex.fe_logical = 0xDEADC0DE; /* debug value */
2325                 ext4_mb_measure_extent(ac, &ex, e4b);
2326
2327                 i += ex.fe_len;
2328                 free -= ex.fe_len;
2329         }
2330
2331         ext4_mb_check_limits(ac, e4b, 1);
2332 }
2333
2334 /*
2335  * This is a special case for storage like RAID5:
2336  * we try to find stripe-aligned chunks for stripe-size-multiple requests
2337  */
2338 static noinline_for_stack
2339 void ext4_mb_scan_aligned(struct ext4_allocation_context *ac,
2340                                  struct ext4_buddy *e4b)
2341 {
2342         struct super_block *sb = ac->ac_sb;
2343         struct ext4_sb_info *sbi = EXT4_SB(sb);
2344         void *bitmap = e4b->bd_bitmap;
2345         struct ext4_free_extent ex;
2346         ext4_fsblk_t first_group_block;
2347         ext4_fsblk_t a;
2348         ext4_grpblk_t i;
2349         int max;
2350
2351         BUG_ON(sbi->s_stripe == 0);
2352
2353         /* find first stripe-aligned block in group */
2354         first_group_block = ext4_group_first_block_no(sb, e4b->bd_group);
2355
2356         a = first_group_block + sbi->s_stripe - 1;
2357         do_div(a, sbi->s_stripe);
2358         i = (a * sbi->s_stripe) - first_group_block;
2359
2360         while (i < EXT4_CLUSTERS_PER_GROUP(sb)) {
2361                 if (!mb_test_bit(i, bitmap)) {
2362                         max = mb_find_extent(e4b, i, sbi->s_stripe, &ex);
2363                         if (max >= sbi->s_stripe) {
2364                                 ac->ac_found++;
2365                                 ex.fe_logical = 0xDEADF00D; /* debug value */
2366                                 ac->ac_b_ex = ex;
2367                                 ext4_mb_use_best_found(ac, e4b);
2368                                 break;
2369                         }
2370                 }
2371                 i += sbi->s_stripe;
2372         }
2373 }
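
/*
 * The alignment arithmetic above, by example (a sketch assuming
 * sbi->s_stripe == 16 and first_group_block == 32770): a starts as
 * 32785, do_div() rounds it down to 2049 whole stripes, and
 * i = 2049 * 16 - 32770 = 14 is the offset of the first
 * stripe-aligned cluster within the group; the scan then advances one
 * whole stripe at a time.
 */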
2374
2375 /*
2376  * This is also called BEFORE we load the buddy bitmap.
2377  * Returns true if the group is suitable for the allocation,
2378  * false otherwise.
2379  */
2380 static bool ext4_mb_good_group(struct ext4_allocation_context *ac,
2381                                 ext4_group_t group, int cr)
2382 {
2383         ext4_grpblk_t free, fragments;
2384         int flex_size = ext4_flex_bg_size(EXT4_SB(ac->ac_sb));
2385         struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group);
2386
2387         BUG_ON(cr < 0 || cr >= 4);
2388
2389         if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(grp)))
2390                 return false;
2391
2392         free = grp->bb_free;
2393         if (free == 0)
2394                 return false;
2395
2396         fragments = grp->bb_fragments;
2397         if (fragments == 0)
2398                 return false;
2399
2400         switch (cr) {
2401         case 0:
2402                 BUG_ON(ac->ac_2order == 0);
2403
2404                 /* Avoid using the first bg of a flexgroup for data files */
2405                 if ((ac->ac_flags & EXT4_MB_HINT_DATA) &&
2406                     (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) &&
2407                     ((group % flex_size) == 0))
2408                         return false;
2409
2410                 if (free < ac->ac_g_ex.fe_len)
2411                         return false;
2412
2413                 if (ac->ac_2order >= MB_NUM_ORDERS(ac->ac_sb))
2414                         return true;
2415
2416                 if (grp->bb_largest_free_order < ac->ac_2order)
2417                         return false;
2418
2419                 return true;
2420         case 1:
2421                 if ((free / fragments) >= ac->ac_g_ex.fe_len)
2422                         return true;
2423                 break;
2424         case 2:
2425                 if (free >= ac->ac_g_ex.fe_len)
2426                         return true;
2427                 break;
2428         case 3:
2429                 return true;
2430         default:
2431                 BUG();
2432         }
2433
2434         return false;
2435 }
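
/*
 * The criteria above, summarized (cr is the allocator pass; 0 is the
 * strictest):
 *
 *	cr 0: the group has a free buddy of order >= ac_2order
 *	      (power-of-two request; also skips the first group of a
 *	      flex_bg for data files)
 *	cr 1: the average fragment size, free / fragments, covers the
 *	      request
 *	cr 2: the total free clusters cover the request
 *	cr 3: any group with free space will do
 */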
2436
2437 /*
2438  * This could return a negative error code if something goes wrong
2439  * during ext4_mb_init_group(). This should not be called with
2440  * ext4_lock_group() held.
2441  *
2442  * Note: because we are conditionally operating with the group lock in
2443  * the EXT4_MB_STRICT_CHECK case, we need to fake out sparse in this
2444  * function using __acquire and __release.  This means we need to be
2445  * super careful before messing with the error path handling via "goto
2446  * out"!
2447  */
2448 static int ext4_mb_good_group_nolock(struct ext4_allocation_context *ac,
2449                                      ext4_group_t group, int cr)
2450 {
2451         struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group);
2452         struct super_block *sb = ac->ac_sb;
2453         struct ext4_sb_info *sbi = EXT4_SB(sb);
2454         bool should_lock = ac->ac_flags & EXT4_MB_STRICT_CHECK;
2455         ext4_grpblk_t free;
2456         int ret = 0;
2457
2458         if (sbi->s_mb_stats)
2459                 atomic64_inc(&sbi->s_bal_cX_groups_considered[ac->ac_criteria]);
2460         if (should_lock) {
2461                 ext4_lock_group(sb, group);
2462                 __release(ext4_group_lock_ptr(sb, group));
2463         }
2464         free = grp->bb_free;
2465         if (free == 0)
2466                 goto out;
2467         if (cr <= 2 && free < ac->ac_g_ex.fe_len)
2468                 goto out;
2469         if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(grp)))
2470                 goto out;
2471         if (should_lock) {
2472                 __acquire(ext4_group_lock_ptr(sb, group));
2473                 ext4_unlock_group(sb, group);
2474         }
2475
2476         /* We only do this if the grp has never been initialized */
2477         if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
2478                 struct ext4_group_desc *gdp =
2479                         ext4_get_group_desc(sb, group, NULL);
2480                 int ret;
2481
2482                 /* cr=0/1 is a very optimistic search to find large
2483                  * good chunks almost for free.  If buddy data is not
2484                  * ready, then this optimization makes no sense.  But
2485                  * we never skip the first block group in a flex_bg,
2486                  * since this gets used for metadata block allocation,
2487                  * and we want to make sure we locate metadata blocks
2488                  * in the first block group in the flex_bg if possible.
2489                  */
2490                 if (cr < 2 &&
2491                     (!sbi->s_log_groups_per_flex ||
2492                      ((group & ((1 << sbi->s_log_groups_per_flex) - 1)) != 0)) &&
2493                     !(ext4_has_group_desc_csum(sb) &&
2494                       (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))))
2495                         return 0;
2496                 ret = ext4_mb_init_group(sb, group, GFP_NOFS);
2497                 if (ret)
2498                         return ret;
2499         }
2500
2501         if (should_lock) {
2502                 ext4_lock_group(sb, group);
2503                 __release(ext4_group_lock_ptr(sb, group));
2504         }
2505         ret = ext4_mb_good_group(ac, group, cr);
2506 out:
2507         if (should_lock) {
2508                 __acquire(ext4_group_lock_ptr(sb, group));
2509                 ext4_unlock_group(sb, group);
2510         }
2511         return ret;
2512 }
2513
2514 /*
2515  * Start prefetching @nr block bitmaps starting at @group.
2516  * Return the next group which needs to be prefetched.
2517  */
2518 ext4_group_t ext4_mb_prefetch(struct super_block *sb, ext4_group_t group,
2519                               unsigned int nr, int *cnt)
2520 {
2521         ext4_group_t ngroups = ext4_get_groups_count(sb);
2522         struct buffer_head *bh;
2523         struct blk_plug plug;
2524
2525         blk_start_plug(&plug);
2526         while (nr-- > 0) {
2527                 struct ext4_group_desc *gdp = ext4_get_group_desc(sb, group,
2528                                                                   NULL);
2529                 struct ext4_group_info *grp = ext4_get_group_info(sb, group);
2530
2531                 /*
2532                  * Prefetch block groups with free blocks; but don't
2533                  * bother if the group is marked uninitialized on disk, since
2534                  * it won't require I/O to read.  Also only try to
2535                  * prefetch once, so we avoid the getblk() call, which can
2536                  * be expensive.
2537                  */
2538                 if (!EXT4_MB_GRP_TEST_AND_SET_READ(grp) &&
2539                     EXT4_MB_GRP_NEED_INIT(grp) &&
2540                     ext4_free_group_clusters(sb, gdp) > 0 &&
2541                     !(ext4_has_group_desc_csum(sb) &&
2542                       (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)))) {
2543                         bh = ext4_read_block_bitmap_nowait(sb, group, true);
2544                         if (bh && !IS_ERR(bh)) {
2545                                 if (!buffer_uptodate(bh) && cnt)
2546                                         (*cnt)++;
2547                                 brelse(bh);
2548                         }
2549                 }
2550                 if (++group >= ngroups)
2551                         group = 0;
2552         }
2553         blk_finish_plug(&plug);
2554         return group;
2555 }
2556
2557 /*
2558  * Prefetching reads the block bitmap into the buffer cache; but we
2559  * need to make sure that the buddy bitmap in the page cache has been
2560  * initialized.  Note that ext4_mb_init_group() will block if the I/O
2561  * is not yet completed, or indeed if ext4_mb_prefetch() never
2562  * started the I/O in the first place.
2563  *
2564  * TODO: We should actually kick off the buddy bitmap setup in a work
2565  * queue when the buffer I/O is completed, so that we don't block
2566  * waiting for the block allocation bitmap read to finish when
2567  * ext4_mb_prefetch_fini is called from ext4_mb_regular_allocator().
2568  */
2569 void ext4_mb_prefetch_fini(struct super_block *sb, ext4_group_t group,
2570                            unsigned int nr)
2571 {
2572         while (nr-- > 0) {
2573                 struct ext4_group_desc *gdp;
2574                 struct ext4_group_info *grp;
2575
2576                 if (!group)
2577                         group = ext4_get_groups_count(sb);
2578                 group--;
2579                 gdp = ext4_get_group_desc(sb, group, NULL);
2580                 grp = ext4_get_group_info(sb, group);
2581
2582                 if (EXT4_MB_GRP_NEED_INIT(grp) &&
2583                     ext4_free_group_clusters(sb, gdp) > 0 &&
2584                     !(ext4_has_group_desc_csum(sb) &&
2585                       (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)))) {
2586                         if (ext4_mb_init_group(sb, group, GFP_NOFS))
2587                                 break;
2588                 }
2589         }
2590 }
2591
2592 static noinline_for_stack int
2593 ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
2594 {
2595         ext4_group_t prefetch_grp = 0, ngroups, group, i;
2596         int cr = -1, new_cr;
2597         int err = 0, first_err = 0;
2598         unsigned int nr = 0, prefetch_ios = 0;
2599         struct ext4_sb_info *sbi;
2600         struct super_block *sb;
2601         struct ext4_buddy e4b;
2602         int lost;
2603
2604         sb = ac->ac_sb;
2605         sbi = EXT4_SB(sb);
2606         ngroups = ext4_get_groups_count(sb);
2607         /* non-extent files are limited to low blocks/groups */
2608         if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)))
2609                 ngroups = sbi->s_blockfile_groups;
2610
2611         BUG_ON(ac->ac_status == AC_STATUS_FOUND);
2612
2613         /* first, try the goal */
2614         err = ext4_mb_find_by_goal(ac, &e4b);
2615         if (err || ac->ac_status == AC_STATUS_FOUND)
2616                 goto out;
2617
2618         if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
2619                 goto out;
2620
2621         /*
2622          * ac->ac_2order is set only if the fe_len is a power of 2;
2623          * if ac->ac_2order is set we also set the criteria to 0 so that
2624          * we try an exact allocation using the buddy.
2625          */
2626         i = fls(ac->ac_g_ex.fe_len);
2627         ac->ac_2order = 0;
2628         /*
2629          * We search using buddy data only if the order of the request
2630          * is greater than or equal to sbi->s_mb_order2_reqs.
2631          * You can tune it via /sys/fs/ext4/<partition>/mb_order2_req.
2632          * We also support searching for power-of-two requests only for
2633          * requests up to the maximum buddy size we have constructed.
2634          */
2635         if (i >= sbi->s_mb_order2_reqs && i <= MB_NUM_ORDERS(sb)) {
2636                 /*
2637                  * This should tell if fe_len is exactly a power of 2
2638                  */
2639                 if ((ac->ac_g_ex.fe_len & (~(1 << (i - 1)))) == 0)
2640                         ac->ac_2order = array_index_nospec(i - 1,
2641                                                            MB_NUM_ORDERS(sb));
2642         }
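	/*
	 * For example, with the default mb_order2_req of 2:
	 * fe_len == 8 gives i = fls(8) = 4 and (8 & ~(1 << 3)) == 0, so
	 * ac_2order becomes 3 (2^3 == 8, eligible for the cr 0 buddy
	 * scan below); fe_len == 6 gives i = 3 but (6 & ~(1 << 2)) == 2,
	 * so ac_2order stays 0 and the scan starts at cr 1.
	 */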
2643
2644         /* if stream allocation is enabled, use global goal */
2645         if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) {
2646                 /* TBD: may be hot point */
2647                 spin_lock(&sbi->s_md_lock);
2648                 ac->ac_g_ex.fe_group = sbi->s_mb_last_group;
2649                 ac->ac_g_ex.fe_start = sbi->s_mb_last_start;
2650                 spin_unlock(&sbi->s_md_lock);
2651         }
2652
2653         /* Let's just scan groups to find more or less suitable blocks */
2654         cr = ac->ac_2order ? 0 : 1;
2655         /*
2656          * cr == 0: try to get an exact allocation,
2657          * cr == 3: try to get anything
2658          */
2659 repeat:
2660         for (; cr < 4 && ac->ac_status == AC_STATUS_CONTINUE; cr++) {
2661                 ac->ac_criteria = cr;
2662                 /*
2663                  * searching for the right group start
2664                  * from the goal value specified
2665                  */
2666                 group = ac->ac_g_ex.fe_group;
2667                 ac->ac_groups_linear_remaining = sbi->s_mb_max_linear_groups;
2668                 prefetch_grp = group;
2669
2670                 for (i = 0, new_cr = cr; i < ngroups; i++,
2671                      ext4_mb_choose_next_group(ac, &new_cr, &group, ngroups)) {
2672                         int ret = 0;
2673
2674                         cond_resched();
2675                         if (new_cr != cr) {
2676                                 cr = new_cr;
2677                                 goto repeat;
2678                         }
2679
2680                         /*
2681                          * Batch reads of the block allocation bitmaps
2682                          * to get multiple READs in flight; limit
2683                          * prefetching at cr=0/1, otherwise mballoc can
2684                          * spend a lot of time loading imperfect groups
2685                          */
2686                         if ((prefetch_grp == group) &&
2687                             (cr > 1 ||
2688                              prefetch_ios < sbi->s_mb_prefetch_limit)) {
2689                                 unsigned int curr_ios = prefetch_ios;
2690
2691                                 nr = sbi->s_mb_prefetch;
2692                                 if (ext4_has_feature_flex_bg(sb)) {
2693                                         nr = 1 << sbi->s_log_groups_per_flex;
2694                                         nr -= group & (nr - 1);
2695                                         nr = min(nr, sbi->s_mb_prefetch);
2696                                 }
2697                                 prefetch_grp = ext4_mb_prefetch(sb, group,
2698                                                         nr, &prefetch_ios);
2699                                 if (prefetch_ios == curr_ios)
2700                                         nr = 0;
2701                         }
2702
2703                         /* This now checks without needing the buddy page */
2704                         ret = ext4_mb_good_group_nolock(ac, group, cr);
2705                         if (ret <= 0) {
2706                                 if (!first_err)
2707                                         first_err = ret;
2708                                 continue;
2709                         }
2710
2711                         err = ext4_mb_load_buddy(sb, group, &e4b);
2712                         if (err)
2713                                 goto out;
2714
2715                         ext4_lock_group(sb, group);
2716
2717                         /*
2718                          * We need to check again after locking the
2719                          * block group
2720                          */
2721                         ret = ext4_mb_good_group(ac, group, cr);
2722                         if (ret == 0) {
2723                                 ext4_unlock_group(sb, group);
2724                                 ext4_mb_unload_buddy(&e4b);
2725                                 continue;
2726                         }
2727
2728                         ac->ac_groups_scanned++;
2729                         if (cr == 0)
2730                                 ext4_mb_simple_scan_group(ac, &e4b);
2731                         else if (cr == 1 && sbi->s_stripe &&
2732                                         !(ac->ac_g_ex.fe_len % sbi->s_stripe))
2733                                 ext4_mb_scan_aligned(ac, &e4b);
2734                         else
2735                                 ext4_mb_complex_scan_group(ac, &e4b);
2736
2737                         ext4_unlock_group(sb, group);
2738                         ext4_mb_unload_buddy(&e4b);
2739
2740                         if (ac->ac_status != AC_STATUS_CONTINUE)
2741                                 break;
2742                 }
2743                 /* Processed all groups and haven't found blocks */
2744                 if (sbi->s_mb_stats && i == ngroups)
2745                         atomic64_inc(&sbi->s_bal_cX_failed[cr]);
2746         }
2747
2748         if (ac->ac_b_ex.fe_len > 0 && ac->ac_status != AC_STATUS_FOUND &&
2749             !(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
2750                 /*
2751                  * We've been searching too long. Let's try to allocate
2752                  * the best chunk we've found so far
2753                  */
2754                 ext4_mb_try_best_found(ac, &e4b);
2755                 if (ac->ac_status != AC_STATUS_FOUND) {
2756                         /*
2757                          * Someone luckier has already allocated it.
2758                          * The only thing we can do is just take the
2759                          * first found block(s)
2760                          */
2761                         lost = atomic_inc_return(&sbi->s_mb_lost_chunks);
2762                         mb_debug(sb, "lost chunk, group: %u, start: %d, len: %d, lost: %d\n",
2763                                  ac->ac_b_ex.fe_group, ac->ac_b_ex.fe_start,
2764                                  ac->ac_b_ex.fe_len, lost);
2765
2766                         ac->ac_b_ex.fe_group = 0;
2767                         ac->ac_b_ex.fe_start = 0;
2768                         ac->ac_b_ex.fe_len = 0;
2769                         ac->ac_status = AC_STATUS_CONTINUE;
2770                         ac->ac_flags |= EXT4_MB_HINT_FIRST;
2771                         cr = 3;
2772                         goto repeat;
2773                 }
2774         }
2775
2776         if (sbi->s_mb_stats && ac->ac_status == AC_STATUS_FOUND)
2777                 atomic64_inc(&sbi->s_bal_cX_hits[ac->ac_criteria]);
2778 out:
2779         if (!err && ac->ac_status != AC_STATUS_FOUND && first_err)
2780                 err = first_err;
2781
2782         mb_debug(sb, "Best len %d, origin len %d, ac_status %u, ac_flags 0x%x, cr %d ret %d\n",
2783                  ac->ac_b_ex.fe_len, ac->ac_o_ex.fe_len, ac->ac_status,
2784                  ac->ac_flags, cr, err);
2785
2786         if (nr)
2787                 ext4_mb_prefetch_fini(sb, prefetch_grp, nr);
2788
2789         return err;
2790 }
2791
2792 static void *ext4_mb_seq_groups_start(struct seq_file *seq, loff_t *pos)
2793 {
2794         struct super_block *sb = pde_data(file_inode(seq->file));
2795         ext4_group_t group;
2796
2797         if (*pos < 0 || *pos >= ext4_get_groups_count(sb))
2798                 return NULL;
2799         group = *pos + 1;
2800         return (void *) ((unsigned long) group);
2801 }
2802
2803 static void *ext4_mb_seq_groups_next(struct seq_file *seq, void *v, loff_t *pos)
2804 {
2805         struct super_block *sb = pde_data(file_inode(seq->file));
2806         ext4_group_t group;
2807
2808         ++*pos;
2809         if (*pos < 0 || *pos >= ext4_get_groups_count(sb))
2810                 return NULL;
2811         group = *pos + 1;
2812         return (void *) ((unsigned long) group);
2813 }
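/*
 * Editorial note: both iterators hand out *pos + 1 rather than *pos
 * because seq_file treats a NULL return from ->start()/->next() as
 * end-of-iteration, and group 0 cast to a pointer would be NULL.
 * ->show() undoes the bias, roughly:
 *
 *   void *v = (void *)((unsigned long)(*pos + 1));  // never NULL
 *   group = (ext4_group_t)((unsigned long)v) - 1;   // recover group
 */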
2814
2815 static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
2816 {
2817         struct super_block *sb = pde_data(file_inode(seq->file));
2818         ext4_group_t group = (ext4_group_t) ((unsigned long) v);
2819         int i;
2820         int err, buddy_loaded = 0;
2821         struct ext4_buddy e4b;
2822         struct ext4_group_info *grinfo;
2823         unsigned char blocksize_bits = min_t(unsigned char,
2824                                              sb->s_blocksize_bits,
2825                                              EXT4_MAX_BLOCK_LOG_SIZE);
2826         struct sg {
2827                 struct ext4_group_info info;
2828                 ext4_grpblk_t counters[EXT4_MAX_BLOCK_LOG_SIZE + 2];
2829         } sg;
2830
2831         group--;
2832         if (group == 0)
2833                 seq_puts(seq, "#group: free  frags first ["
2834                               " 2^0   2^1   2^2   2^3   2^4   2^5   2^6  "
2835                               " 2^7   2^8   2^9   2^10  2^11  2^12  2^13  ]\n");
2836
2837         i = (blocksize_bits + 2) * sizeof(sg.info.bb_counters[0]) +
2838                 sizeof(struct ext4_group_info);
2839
2840         grinfo = ext4_get_group_info(sb, group);
2841         /* Load the group info in memory only if not already loaded. */
2842         if (unlikely(EXT4_MB_GRP_NEED_INIT(grinfo))) {
2843                 err = ext4_mb_load_buddy(sb, group, &e4b);
2844                 if (err) {
2845                         seq_printf(seq, "#%-5u: I/O error\n", group);
2846                         return 0;
2847                 }
2848                 buddy_loaded = 1;
2849         }
2850
2851         memcpy(&sg, ext4_get_group_info(sb, group), i);
2852
2853         if (buddy_loaded)
2854                 ext4_mb_unload_buddy(&e4b);
2855
2856         seq_printf(seq, "#%-5u: %-5u %-5u %-5u [", group, sg.info.bb_free,
2857                         sg.info.bb_fragments, sg.info.bb_first_free);
2858         for (i = 0; i <= 13; i++)
2859                 seq_printf(seq, " %-5u", i <= blocksize_bits + 1 ?
2860                                 sg.info.bb_counters[i] : 0);
2861         seq_puts(seq, " ]\n");
2862
2863         return 0;
2864 }
2865
2866 static void ext4_mb_seq_groups_stop(struct seq_file *seq, void *v)
2867 {
2868 }
2869
2870 const struct seq_operations ext4_mb_seq_groups_ops = {
2871         .start  = ext4_mb_seq_groups_start,
2872         .next   = ext4_mb_seq_groups_next,
2873         .stop   = ext4_mb_seq_groups_stop,
2874         .show   = ext4_mb_seq_groups_show,
2875 };
2876
2877 int ext4_seq_mb_stats_show(struct seq_file *seq, void *offset)
2878 {
2879         struct super_block *sb = seq->private;
2880         struct ext4_sb_info *sbi = EXT4_SB(sb);
2881
2882         seq_puts(seq, "mballoc:\n");
2883         if (!sbi->s_mb_stats) {
2884                 seq_puts(seq, "\tmb stats collection turned off.\n");
2885                 seq_puts(seq, "\tTo enable, please write \"1\" to sysfs file mb_stats.\n");
2886                 return 0;
2887         }
2888         seq_printf(seq, "\treqs: %u\n", atomic_read(&sbi->s_bal_reqs));
2889         seq_printf(seq, "\tsuccess: %u\n", atomic_read(&sbi->s_bal_success));
2890
2891         seq_printf(seq, "\tgroups_scanned: %u\n",  atomic_read(&sbi->s_bal_groups_scanned));
2892
2893         seq_puts(seq, "\tcr0_stats:\n");
2894         seq_printf(seq, "\t\thits: %llu\n", atomic64_read(&sbi->s_bal_cX_hits[0]));
2895         seq_printf(seq, "\t\tgroups_considered: %llu\n",
2896                    atomic64_read(&sbi->s_bal_cX_groups_considered[0]));
2897         seq_printf(seq, "\t\tuseless_loops: %llu\n",
2898                    atomic64_read(&sbi->s_bal_cX_failed[0]));
2899         seq_printf(seq, "\t\tbad_suggestions: %u\n",
2900                    atomic_read(&sbi->s_bal_cr0_bad_suggestions));
2901
2902         seq_puts(seq, "\tcr1_stats:\n");
2903         seq_printf(seq, "\t\thits: %llu\n", atomic64_read(&sbi->s_bal_cX_hits[1]));
2904         seq_printf(seq, "\t\tgroups_considered: %llu\n",
2905                    atomic64_read(&sbi->s_bal_cX_groups_considered[1]));
2906         seq_printf(seq, "\t\tuseless_loops: %llu\n",
2907                    atomic64_read(&sbi->s_bal_cX_failed[1]));
2908         seq_printf(seq, "\t\tbad_suggestions: %u\n",
2909                    atomic_read(&sbi->s_bal_cr1_bad_suggestions));
2910
2911         seq_puts(seq, "\tcr2_stats:\n");
2912         seq_printf(seq, "\t\thits: %llu\n", atomic64_read(&sbi->s_bal_cX_hits[2]));
2913         seq_printf(seq, "\t\tgroups_considered: %llu\n",
2914                    atomic64_read(&sbi->s_bal_cX_groups_considered[2]));
2915         seq_printf(seq, "\t\tuseless_loops: %llu\n",
2916                    atomic64_read(&sbi->s_bal_cX_failed[2]));
2917
2918         seq_puts(seq, "\tcr3_stats:\n");
2919         seq_printf(seq, "\t\thits: %llu\n", atomic64_read(&sbi->s_bal_cX_hits[3]));
2920         seq_printf(seq, "\t\tgroups_considered: %llu\n",
2921                    atomic64_read(&sbi->s_bal_cX_groups_considered[3]));
2922         seq_printf(seq, "\t\tuseless_loops: %llu\n",
2923                    atomic64_read(&sbi->s_bal_cX_failed[3]));
2924         seq_printf(seq, "\textents_scanned: %u\n", atomic_read(&sbi->s_bal_ex_scanned));
2925         seq_printf(seq, "\t\tgoal_hits: %u\n", atomic_read(&sbi->s_bal_goals));
2926         seq_printf(seq, "\t\t2^n_hits: %u\n", atomic_read(&sbi->s_bal_2orders));
2927         seq_printf(seq, "\t\tbreaks: %u\n", atomic_read(&sbi->s_bal_breaks));
2928         seq_printf(seq, "\t\tlost: %u\n", atomic_read(&sbi->s_mb_lost_chunks));
2929
2930         seq_printf(seq, "\tbuddies_generated: %u/%u\n",
2931                    atomic_read(&sbi->s_mb_buddies_generated),
2932                    ext4_get_groups_count(sb));
2933         seq_printf(seq, "\tbuddies_time_used: %llu\n",
2934                    atomic64_read(&sbi->s_mb_generation_time));
2935         seq_printf(seq, "\tpreallocated: %u\n",
2936                    atomic_read(&sbi->s_mb_preallocated));
2937         seq_printf(seq, "\tdiscarded: %u\n",
2938                    atomic_read(&sbi->s_mb_discarded));
2939         return 0;
2940 }
2941
2942 static void *ext4_mb_seq_structs_summary_start(struct seq_file *seq, loff_t *pos)
2944 {
2945         struct super_block *sb = pde_data(file_inode(seq->file));
2946         unsigned long position;
2947
2948         if (*pos < 0 || *pos >= 2*MB_NUM_ORDERS(sb))
2949                 return NULL;
2950         position = *pos + 1;
2951         return (void *) ((unsigned long) position);
2952 }
2953
2954 static void *ext4_mb_seq_structs_summary_next(struct seq_file *seq, void *v, loff_t *pos)
2955 {
2956         struct super_block *sb = pde_data(file_inode(seq->file));
2957         unsigned long position;
2958
2959         ++*pos;
2960         if (*pos < 0 || *pos >= 2*MB_NUM_ORDERS(sb))
2961                 return NULL;
2962         position = *pos + 1;
2963         return (void *) ((unsigned long) position);
2964 }
2965
2966 static int ext4_mb_seq_structs_summary_show(struct seq_file *seq, void *v)
2967 {
2968         struct super_block *sb = pde_data(file_inode(seq->file));
2969         struct ext4_sb_info *sbi = EXT4_SB(sb);
2970         unsigned long position = ((unsigned long) v);
2971         struct ext4_group_info *grp;
2972         unsigned int count;
2973
2974         position--;
2975         if (position >= MB_NUM_ORDERS(sb)) {
2976                 position -= MB_NUM_ORDERS(sb);
2977                 if (position == 0)
2978                         seq_puts(seq, "avg_fragment_size_lists:\n");
2979
2980                 count = 0;
2981                 read_lock(&sbi->s_mb_avg_fragment_size_locks[position]);
2982                 list_for_each_entry(grp, &sbi->s_mb_avg_fragment_size[position],
2983                                     bb_avg_fragment_size_node)
2984                         count++;
2985                 read_unlock(&sbi->s_mb_avg_fragment_size_locks[position]);
2986                 seq_printf(seq, "\tlist_order_%u_groups: %u\n",
2987                                         (unsigned int)position, count);
2988                 return 0;
2989         }
2990
2991         if (position == 0) {
2992                 seq_printf(seq, "optimize_scan: %d\n",
2993                            test_opt2(sb, MB_OPTIMIZE_SCAN) ? 1 : 0);
2994                 seq_puts(seq, "max_free_order_lists:\n");
2995         }
2996         count = 0;
2997         read_lock(&sbi->s_mb_largest_free_orders_locks[position]);
2998         list_for_each_entry(grp, &sbi->s_mb_largest_free_orders[position],
2999                             bb_largest_free_order_node)
3000                 count++;
3001         read_unlock(&sbi->s_mb_largest_free_orders_locks[position]);
3002         seq_printf(seq, "\tlist_order_%u_groups: %u\n",
3003                    (unsigned int)position, count);
3004
3005         return 0;
3006 }
3007
3008 static void ext4_mb_seq_structs_summary_stop(struct seq_file *seq, void *v)
3009 {
3010 }
3011
3012 const struct seq_operations ext4_mb_seq_structs_summary_ops = {
3013         .start  = ext4_mb_seq_structs_summary_start,
3014         .next   = ext4_mb_seq_structs_summary_next,
3015         .stop   = ext4_mb_seq_structs_summary_stop,
3016         .show   = ext4_mb_seq_structs_summary_show,
3017 };
3018
3019 static struct kmem_cache *get_groupinfo_cache(int blocksize_bits)
3020 {
3021         int cache_index = blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE;
3022         struct kmem_cache *cachep = ext4_groupinfo_caches[cache_index];
3023
3024         BUG_ON(!cachep);
3025         return cachep;
3026 }
3027
3028 /*
3029  * Allocate the top-level s_group_info array for the specified number
3030  * of groups
3031  */
3032 int ext4_mb_alloc_groupinfo(struct super_block *sb, ext4_group_t ngroups)
3033 {
3034         struct ext4_sb_info *sbi = EXT4_SB(sb);
3035         unsigned size;
3036         struct ext4_group_info ***old_groupinfo, ***new_groupinfo;
3037
3038         size = (ngroups + EXT4_DESC_PER_BLOCK(sb) - 1) >>
3039                 EXT4_DESC_PER_BLOCK_BITS(sb);
3040         if (size <= sbi->s_group_info_size)
3041                 return 0;
3042
3043         size = roundup_pow_of_two(sizeof(*sbi->s_group_info) * size);
3044         new_groupinfo = kvzalloc(size, GFP_KERNEL);
3045         if (!new_groupinfo) {
3046                 ext4_msg(sb, KERN_ERR, "can't allocate buddy meta group");
3047                 return -ENOMEM;
3048         }
3049         rcu_read_lock();
3050         old_groupinfo = rcu_dereference(sbi->s_group_info);
3051         if (old_groupinfo)
3052                 memcpy(new_groupinfo, old_groupinfo,
3053                        sbi->s_group_info_size * sizeof(*sbi->s_group_info));
3054         rcu_read_unlock();
3055         rcu_assign_pointer(sbi->s_group_info, new_groupinfo);
3056         sbi->s_group_info_size = size / sizeof(*sbi->s_group_info);
3057         if (old_groupinfo)
3058                 ext4_kvfree_array_rcu(old_groupinfo);
3059         ext4_debug("allocated s_groupinfo array for %d meta_bg's\n",
3060                    sbi->s_group_info_size);
3061         return 0;
3062 }
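/*
 * Editorial sketch: s_group_info is a two-level array with one pointer
 * block per EXT4_DESC_PER_BLOCK(sb) groups, so a group's info is
 * conceptually looked up as:
 *
 *   struct ext4_group_info *gi =
 *           sbi_array_rcu_deref(sbi, s_group_info,
 *                   group >> EXT4_DESC_PER_BLOCK_BITS(sb))
 *                   [group & (EXT4_DESC_PER_BLOCK(sb) - 1)];
 *
 * which is what ext4_get_group_info() does, modulo RCU details.
 */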
3063
3064 /* Create and initialize ext4_group_info data for the given group. */
3065 int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
3066                           struct ext4_group_desc *desc)
3067 {
3068         int i;
3069         int metalen = 0;
3070         int idx = group >> EXT4_DESC_PER_BLOCK_BITS(sb);
3071         struct ext4_sb_info *sbi = EXT4_SB(sb);
3072         struct ext4_group_info **meta_group_info;
3073         struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits);
3074
3075         /*
3076          * First check if this group is the first of a descriptor block.
3077          * If so, we have to allocate a new table of pointers
3078          * to ext4_group_info structures
3079          */
3080         if (group % EXT4_DESC_PER_BLOCK(sb) == 0) {
3081                 metalen = sizeof(*meta_group_info) <<
3082                         EXT4_DESC_PER_BLOCK_BITS(sb);
3083                 meta_group_info = kmalloc(metalen, GFP_NOFS);
3084                 if (meta_group_info == NULL) {
3085                         ext4_msg(sb, KERN_ERR, "can't allocate mem "
3086                                  "for a buddy group");
3087                         goto exit_meta_group_info;
3088                 }
3089                 rcu_read_lock();
3090                 rcu_dereference(sbi->s_group_info)[idx] = meta_group_info;
3091                 rcu_read_unlock();
3092         }
3093
3094         meta_group_info = sbi_array_rcu_deref(sbi, s_group_info, idx);
3095         i = group & (EXT4_DESC_PER_BLOCK(sb) - 1);
3096
3097         meta_group_info[i] = kmem_cache_zalloc(cachep, GFP_NOFS);
3098         if (meta_group_info[i] == NULL) {
3099                 ext4_msg(sb, KERN_ERR, "can't allocate buddy mem");
3100                 goto exit_group_info;
3101         }
3102         set_bit(EXT4_GROUP_INFO_NEED_INIT_BIT,
3103                 &(meta_group_info[i]->bb_state));
3104
3105         /*
3106          * initialize bb_free to be able to skip
3107          * empty groups without initialization
3108          */
3109         if (ext4_has_group_desc_csum(sb) &&
3110             (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
3111                 meta_group_info[i]->bb_free =
3112                         ext4_free_clusters_after_init(sb, group, desc);
3113         } else {
3114                 meta_group_info[i]->bb_free =
3115                         ext4_free_group_clusters(sb, desc);
3116         }
3117
3118         INIT_LIST_HEAD(&meta_group_info[i]->bb_prealloc_list);
3119         init_rwsem(&meta_group_info[i]->alloc_sem);
3120         meta_group_info[i]->bb_free_root = RB_ROOT;
3121         INIT_LIST_HEAD(&meta_group_info[i]->bb_largest_free_order_node);
3122         INIT_LIST_HEAD(&meta_group_info[i]->bb_avg_fragment_size_node);
3123         meta_group_info[i]->bb_largest_free_order = -1;  /* uninit */
3124         meta_group_info[i]->bb_avg_fragment_size_order = -1;  /* uninit */
3125         meta_group_info[i]->bb_group = group;
3126
3127         mb_group_bb_bitmap_alloc(sb, meta_group_info[i], group);
3128         return 0;
3129
3130 exit_group_info:
3131         /* If a meta_group_info table has been allocated, release it now */
3132         if (group % EXT4_DESC_PER_BLOCK(sb) == 0) {
3133                 struct ext4_group_info ***group_info;
3134
3135                 rcu_read_lock();
3136                 group_info = rcu_dereference(sbi->s_group_info);
3137                 kfree(group_info[idx]);
3138                 group_info[idx] = NULL;
3139                 rcu_read_unlock();
3140         }
3141 exit_meta_group_info:
3142         return -ENOMEM;
3143 } /* ext4_mb_add_groupinfo */
3144
3145 static int ext4_mb_init_backend(struct super_block *sb)
3146 {
3147         ext4_group_t ngroups = ext4_get_groups_count(sb);
3148         ext4_group_t i;
3149         struct ext4_sb_info *sbi = EXT4_SB(sb);
3150         int err;
3151         struct ext4_group_desc *desc;
3152         struct ext4_group_info ***group_info;
3153         struct kmem_cache *cachep;
3154
3155         err = ext4_mb_alloc_groupinfo(sb, ngroups);
3156         if (err)
3157                 return err;
3158
3159         sbi->s_buddy_cache = new_inode(sb);
3160         if (sbi->s_buddy_cache == NULL) {
3161                 ext4_msg(sb, KERN_ERR, "can't get new inode");
3162                 goto err_freesgi;
3163         }
3164         /* To avoid potentially colliding with a valid on-disk inode number,
3165          * use EXT4_BAD_INO for the buddy cache inode number.  This inode is
3166          * not in the inode hash, so it should never be found by iget(), but
3167          * this will avoid confusion if it ever shows up during debugging. */
3168         sbi->s_buddy_cache->i_ino = EXT4_BAD_INO;
3169         EXT4_I(sbi->s_buddy_cache)->i_disksize = 0;
3170         for (i = 0; i < ngroups; i++) {
3171                 cond_resched();
3172                 desc = ext4_get_group_desc(sb, i, NULL);
3173                 if (desc == NULL) {
3174                         ext4_msg(sb, KERN_ERR, "can't read descriptor %u", i);
3175                         goto err_freebuddy;
3176                 }
3177                 if (ext4_mb_add_groupinfo(sb, i, desc) != 0)
3178                         goto err_freebuddy;
3179         }
3180
3181         if (ext4_has_feature_flex_bg(sb)) {
3182                 /* a single flex group is supposed to be read by a single IO.
3183                  * 2 ^ s_log_groups_per_flex != UINT_MAX as s_mb_prefetch is
3184          * an unsigned integer, so the maximum shift is 32.
3185                  */
3186                 if (sbi->s_es->s_log_groups_per_flex >= 32) {
3187                         ext4_msg(sb, KERN_ERR, "too many log groups per flexible block group");
3188                         goto err_freebuddy;
3189                 }
3190                 sbi->s_mb_prefetch = min_t(uint, 1 << sbi->s_es->s_log_groups_per_flex,
3191                         BLK_MAX_SEGMENT_SIZE >> (sb->s_blocksize_bits - 9));
3192                 sbi->s_mb_prefetch *= 8; /* 8 prefetch IOs in flight at most */
3193         } else {
3194                 sbi->s_mb_prefetch = 32;
3195         }
3196         if (sbi->s_mb_prefetch > ext4_get_groups_count(sb))
3197                 sbi->s_mb_prefetch = ext4_get_groups_count(sb);
3198         /* how many real IOs to prefetch within a single allocation at cr=0.
3199          * Given that cr=0 is a CPU-bound optimization we shouldn't try to
3200          * load too many groups; at some point we should start to use what
3201          * we've got in memory.
3202          * With an average random access time of 5 ms it'd take a second to
3203          * get 200 groups (* N with flex_bg), so let's make this limit 4.
3204          */
3205         sbi->s_mb_prefetch_limit = sbi->s_mb_prefetch * 4;
3206         if (sbi->s_mb_prefetch_limit > ext4_get_groups_count(sb))
3207                 sbi->s_mb_prefetch_limit = ext4_get_groups_count(sb);
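        /*
         * Worked example (editorial, assuming 4k blocks and
         * s_log_groups_per_flex == 4): a flex group is 16 block groups,
         * so s_mb_prefetch = min(16, BLK_MAX_SEGMENT_SIZE >> 3) * 8 = 128
         * bitmaps per batch and s_mb_prefetch_limit = 512 bitmap reads
         * per cr=0 allocation attempt (before the group-count clamps).
         */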
3208
3209         return 0;
3210
3211 err_freebuddy:
3212         cachep = get_groupinfo_cache(sb->s_blocksize_bits);
3213         while (i-- > 0)
3214                 kmem_cache_free(cachep, ext4_get_group_info(sb, i));
3215         i = sbi->s_group_info_size;
3216         rcu_read_lock();
3217         group_info = rcu_dereference(sbi->s_group_info);
3218         while (i-- > 0)
3219                 kfree(group_info[i]);
3220         rcu_read_unlock();
3221         iput(sbi->s_buddy_cache);
3222 err_freesgi:
3223         rcu_read_lock();
3224         kvfree(rcu_dereference(sbi->s_group_info));
3225         rcu_read_unlock();
3226         return -ENOMEM;
3227 }
3228
3229 static void ext4_groupinfo_destroy_slabs(void)
3230 {
3231         int i;
3232
3233         for (i = 0; i < NR_GRPINFO_CACHES; i++) {
3234                 kmem_cache_destroy(ext4_groupinfo_caches[i]);
3235                 ext4_groupinfo_caches[i] = NULL;
3236         }
3237 }
3238
3239 static int ext4_groupinfo_create_slab(size_t size)
3240 {
3241         static DEFINE_MUTEX(ext4_grpinfo_slab_create_mutex);
3242         int slab_size;
3243         int blocksize_bits = order_base_2(size);
3244         int cache_index = blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE;
3245         struct kmem_cache *cachep;
3246
3247         if (cache_index >= NR_GRPINFO_CACHES)
3248                 return -EINVAL;
3249
3250         if (unlikely(cache_index < 0))
3251                 cache_index = 0;
3252
3253         mutex_lock(&ext4_grpinfo_slab_create_mutex);
3254         if (ext4_groupinfo_caches[cache_index]) {
3255                 mutex_unlock(&ext4_grpinfo_slab_create_mutex);
3256                 return 0;       /* Already created */
3257         }
3258
3259         slab_size = offsetof(struct ext4_group_info,
3260                                 bb_counters[blocksize_bits + 2]);
3261
3262         cachep = kmem_cache_create(ext4_groupinfo_slab_names[cache_index],
3263                                         slab_size, 0, SLAB_RECLAIM_ACCOUNT,
3264                                         NULL);
3265
3266         ext4_groupinfo_caches[cache_index] = cachep;
3267
3268         mutex_unlock(&ext4_grpinfo_slab_create_mutex);
3269         if (!cachep) {
3270                 printk(KERN_EMERG
3271                        "EXT4-fs: no memory for groupinfo slab cache\n");
3272                 return -ENOMEM;
3273         }
3274
3275         return 0;
3276 }
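/*
 * Editorial note: the slab object size covers the flexible bb_counters[]
 * tail of struct ext4_group_info. E.g. for a 4k block size
 * (blocksize_bits == 12) each object reserves buddy counters for orders
 * 0..13:
 *
 *   slab_size = offsetof(struct ext4_group_info, bb_counters[12 + 2]);
 */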
3277
3278 static void ext4_discard_work(struct work_struct *work)
3279 {
3280         struct ext4_sb_info *sbi = container_of(work,
3281                         struct ext4_sb_info, s_discard_work);
3282         struct super_block *sb = sbi->s_sb;
3283         struct ext4_free_data *fd, *nfd;
3284         struct ext4_buddy e4b;
3285         struct list_head discard_list;
3286         ext4_group_t grp, load_grp;
3287         int err = 0;
3288
3289         INIT_LIST_HEAD(&discard_list);
3290         spin_lock(&sbi->s_md_lock);
3291         list_splice_init(&sbi->s_discard_list, &discard_list);
3292         spin_unlock(&sbi->s_md_lock);
3293
3294         load_grp = UINT_MAX;
3295         list_for_each_entry_safe(fd, nfd, &discard_list, efd_list) {
3296                 /*
3297                  * If filesystem is umounting or no memory or suffering
3298                  * from no space, give up the discard
3299                  */
3300                 if ((sb->s_flags & SB_ACTIVE) && !err &&
3301                     !atomic_read(&sbi->s_retry_alloc_pending)) {
3302                         grp = fd->efd_group;
3303                         if (grp != load_grp) {
3304                                 if (load_grp != UINT_MAX)
3305                                         ext4_mb_unload_buddy(&e4b);
3306
3307                                 err = ext4_mb_load_buddy(sb, grp, &e4b);
3308                                 if (err) {
3309                                         kmem_cache_free(ext4_free_data_cachep, fd);
3310                                         load_grp = UINT_MAX;
3311                                         continue;
3312                                 } else {
3313                                         load_grp = grp;
3314                                 }
3315                         }
3316
3317                         ext4_lock_group(sb, grp);
3318                         ext4_try_to_trim_range(sb, &e4b, fd->efd_start_cluster,
3319                                                 fd->efd_start_cluster + fd->efd_count - 1, 1);
3320                         ext4_unlock_group(sb, grp);
3321                 }
3322                 kmem_cache_free(ext4_free_data_cachep, fd);
3323         }
3324
3325         if (load_grp != UINT_MAX)
3326                 ext4_mb_unload_buddy(&e4b);
3327 }
3328
3329 int ext4_mb_init(struct super_block *sb)
3330 {
3331         struct ext4_sb_info *sbi = EXT4_SB(sb);
3332         unsigned i, j;
3333         unsigned offset, offset_incr;
3334         unsigned max;
3335         int ret;
3336
3337         i = MB_NUM_ORDERS(sb) * sizeof(*sbi->s_mb_offsets);
3338
3339         sbi->s_mb_offsets = kmalloc(i, GFP_KERNEL);
3340         if (sbi->s_mb_offsets == NULL) {
3341                 ret = -ENOMEM;
3342                 goto out;
3343         }
3344
3345         i = MB_NUM_ORDERS(sb) * sizeof(*sbi->s_mb_maxs);
3346         sbi->s_mb_maxs = kmalloc(i, GFP_KERNEL);
3347         if (sbi->s_mb_maxs == NULL) {
3348                 ret = -ENOMEM;
3349                 goto out;
3350         }
3351
3352         ret = ext4_groupinfo_create_slab(sb->s_blocksize);
3353         if (ret < 0)
3354                 goto out;
3355
3356         /* order 0 is regular bitmap */
3357         sbi->s_mb_maxs[0] = sb->s_blocksize << 3;
3358         sbi->s_mb_offsets[0] = 0;
3359
3360         i = 1;
3361         offset = 0;
3362         offset_incr = 1 << (sb->s_blocksize_bits - 1);
3363         max = sb->s_blocksize << 2;
3364         do {
3365                 sbi->s_mb_offsets[i] = offset;
3366                 sbi->s_mb_maxs[i] = max;
3367                 offset += offset_incr;
3368                 offset_incr = offset_incr >> 1;
3369                 max = max >> 1;
3370                 i++;
3371         } while (i < MB_NUM_ORDERS(sb));
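        /*
         * Editorial worked example for a 4k block size (orders 0..13):
         * the loop above lays the buddy bitmaps out as
         *
         *   order:   1      2     3     4    ...
         *   offset:  0      2048  3072  3584 ...  (bytes into buddy page)
         *   max:     16384  8192  4096  2048 ...  (bits at that order)
         *
         * while order 0 is the regular on-disk bitmap (32768 bits).
         */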
3372
3373         sbi->s_mb_avg_fragment_size =
3374                 kmalloc_array(MB_NUM_ORDERS(sb), sizeof(struct list_head),
3375                         GFP_KERNEL);
3376         if (!sbi->s_mb_avg_fragment_size) {
3377                 ret = -ENOMEM;
3378                 goto out;
3379         }
3380         sbi->s_mb_avg_fragment_size_locks =
3381                 kmalloc_array(MB_NUM_ORDERS(sb), sizeof(rwlock_t),
3382                         GFP_KERNEL);
3383         if (!sbi->s_mb_avg_fragment_size_locks) {
3384                 ret = -ENOMEM;
3385                 goto out;
3386         }
3387         for (i = 0; i < MB_NUM_ORDERS(sb); i++) {
3388                 INIT_LIST_HEAD(&sbi->s_mb_avg_fragment_size[i]);
3389                 rwlock_init(&sbi->s_mb_avg_fragment_size_locks[i]);
3390         }
3391         sbi->s_mb_largest_free_orders =
3392                 kmalloc_array(MB_NUM_ORDERS(sb), sizeof(struct list_head),
3393                         GFP_KERNEL);
3394         if (!sbi->s_mb_largest_free_orders) {
3395                 ret = -ENOMEM;
3396                 goto out;
3397         }
3398         sbi->s_mb_largest_free_orders_locks =
3399                 kmalloc_array(MB_NUM_ORDERS(sb), sizeof(rwlock_t),
3400                         GFP_KERNEL);
3401         if (!sbi->s_mb_largest_free_orders_locks) {
3402                 ret = -ENOMEM;
3403                 goto out;
3404         }
3405         for (i = 0; i < MB_NUM_ORDERS(sb); i++) {
3406                 INIT_LIST_HEAD(&sbi->s_mb_largest_free_orders[i]);
3407                 rwlock_init(&sbi->s_mb_largest_free_orders_locks[i]);
3408         }
3409
3410         spin_lock_init(&sbi->s_md_lock);
3411         sbi->s_mb_free_pending = 0;
3412         INIT_LIST_HEAD(&sbi->s_freed_data_list);
3413         INIT_LIST_HEAD(&sbi->s_discard_list);
3414         INIT_WORK(&sbi->s_discard_work, ext4_discard_work);
3415         atomic_set(&sbi->s_retry_alloc_pending, 0);
3416
3417         sbi->s_mb_max_to_scan = MB_DEFAULT_MAX_TO_SCAN;
3418         sbi->s_mb_min_to_scan = MB_DEFAULT_MIN_TO_SCAN;
3419         sbi->s_mb_stats = MB_DEFAULT_STATS;
3420         sbi->s_mb_stream_request = MB_DEFAULT_STREAM_THRESHOLD;
3421         sbi->s_mb_order2_reqs = MB_DEFAULT_ORDER2_REQS;
3422         sbi->s_mb_max_inode_prealloc = MB_DEFAULT_MAX_INODE_PREALLOC;
3423         /*
3424          * The default group preallocation is 512, which for 4k block
3425          * sizes translates to 2 megabytes.  However for bigalloc file
3426          * systems, this is probably too big (i.e., if the cluster size
3427          * is 1 megabyte, then group preallocation size becomes half a
3428          * gigabyte!).  As a default, we will keep a two megabyte
3429          * group prealloc size for cluster sizes up to 64k, and after
3430          * that, we will force a minimum group preallocation size of
3431          * 32 clusters.  This translates to 8 megs when the cluster
3432          * size is 256k, and 32 megs when the cluster size is 1 meg,
3433          * which seems reasonable as a default.
3434          */
3435         sbi->s_mb_group_prealloc = max(MB_DEFAULT_GROUP_PREALLOC >>
3436                                        sbi->s_cluster_bits, 32);
3437         /*
3438          * If there is a s_stripe > 1, then we set the s_mb_group_prealloc
3439          * to the lowest multiple of s_stripe which is bigger than
3440          * the s_mb_group_prealloc as determined above. We want
3441          * the preallocation size to be an exact multiple of the
3442          * RAID stripe size so that preallocations don't fragment
3443          * the stripes.
3444          */
3445         if (sbi->s_stripe > 1) {
3446                 sbi->s_mb_group_prealloc = roundup(
3447                         sbi->s_mb_group_prealloc, sbi->s_stripe);
3448         }
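        /*
         * E.g. (editorial): with 4k blocks the default is 512 clusters
         * (2 MiB); with s_stripe == 24 this becomes roundup(512, 24) ==
         * 528 clusters so group preallocations stay stripe-aligned.
         */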
3449
3450         sbi->s_locality_groups = alloc_percpu(struct ext4_locality_group);
3451         if (sbi->s_locality_groups == NULL) {
3452                 ret = -ENOMEM;
3453                 goto out;
3454         }
3455         for_each_possible_cpu(i) {
3456                 struct ext4_locality_group *lg;
3457                 lg = per_cpu_ptr(sbi->s_locality_groups, i);
3458                 mutex_init(&lg->lg_mutex);
3459                 for (j = 0; j < PREALLOC_TB_SIZE; j++)
3460                         INIT_LIST_HEAD(&lg->lg_prealloc_list[j]);
3461                 spin_lock_init(&lg->lg_prealloc_lock);
3462         }
3463
3464         if (bdev_nonrot(sb->s_bdev))
3465                 sbi->s_mb_max_linear_groups = 0;
3466         else
3467                 sbi->s_mb_max_linear_groups = MB_DEFAULT_LINEAR_LIMIT;
3468         /* init file for buddy data */
3469         ret = ext4_mb_init_backend(sb);
3470         if (ret != 0)
3471                 goto out_free_locality_groups;
3472
3473         return 0;
3474
3475 out_free_locality_groups:
3476         free_percpu(sbi->s_locality_groups);
3477         sbi->s_locality_groups = NULL;
3478 out:
3479         kfree(sbi->s_mb_avg_fragment_size);
3480         kfree(sbi->s_mb_avg_fragment_size_locks);
3481         kfree(sbi->s_mb_largest_free_orders);
3482         kfree(sbi->s_mb_largest_free_orders_locks);
3483         kfree(sbi->s_mb_offsets);
3484         sbi->s_mb_offsets = NULL;
3485         kfree(sbi->s_mb_maxs);
3486         sbi->s_mb_maxs = NULL;
3487         return ret;
3488 }
3489
3490 /* must be called with the ext4 group lock held */
3491 static int ext4_mb_cleanup_pa(struct ext4_group_info *grp)
3492 {
3493         struct ext4_prealloc_space *pa;
3494         struct list_head *cur, *tmp;
3495         int count = 0;
3496
3497         list_for_each_safe(cur, tmp, &grp->bb_prealloc_list) {
3498                 pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
3499                 list_del(&pa->pa_group_list);
3500                 count++;
3501                 kmem_cache_free(ext4_pspace_cachep, pa);
3502         }
3503         return count;
3504 }
3505
3506 int ext4_mb_release(struct super_block *sb)
3507 {
3508         ext4_group_t ngroups = ext4_get_groups_count(sb);
3509         ext4_group_t i;
3510         int num_meta_group_infos;
3511         struct ext4_group_info *grinfo, ***group_info;
3512         struct ext4_sb_info *sbi = EXT4_SB(sb);
3513         struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits);
3514         int count;
3515
3516         if (test_opt(sb, DISCARD)) {
3517                 /*
3518                  * wait for the discard work to drain all of the ext4_free_data entries
3519                  */
3520                 flush_work(&sbi->s_discard_work);
3521                 WARN_ON_ONCE(!list_empty(&sbi->s_discard_list));
3522         }
3523
3524         if (sbi->s_group_info) {
3525                 for (i = 0; i < ngroups; i++) {
3526                         cond_resched();
3527                         grinfo = ext4_get_group_info(sb, i);
3528                         mb_group_bb_bitmap_free(grinfo);
3529                         ext4_lock_group(sb, i);
3530                         count = ext4_mb_cleanup_pa(grinfo);
3531                         if (count)
3532                                 mb_debug(sb, "mballoc: %d PAs left\n",
3533                                          count);
3534                         ext4_unlock_group(sb, i);
3535                         kmem_cache_free(cachep, grinfo);
3536                 }
3537                 num_meta_group_infos = (ngroups +
3538                                 EXT4_DESC_PER_BLOCK(sb) - 1) >>
3539                         EXT4_DESC_PER_BLOCK_BITS(sb);
3540                 rcu_read_lock();
3541                 group_info = rcu_dereference(sbi->s_group_info);
3542                 for (i = 0; i < num_meta_group_infos; i++)
3543                         kfree(group_info[i]);
3544                 kvfree(group_info);
3545                 rcu_read_unlock();
3546         }
3547         kfree(sbi->s_mb_avg_fragment_size);
3548         kfree(sbi->s_mb_avg_fragment_size_locks);
3549         kfree(sbi->s_mb_largest_free_orders);
3550         kfree(sbi->s_mb_largest_free_orders_locks);
3551         kfree(sbi->s_mb_offsets);
3552         kfree(sbi->s_mb_maxs);
3553         iput(sbi->s_buddy_cache);
3554         if (sbi->s_mb_stats) {
3555                 ext4_msg(sb, KERN_INFO,
3556                        "mballoc: %u blocks %u reqs (%u success)",
3557                                 atomic_read(&sbi->s_bal_allocated),
3558                                 atomic_read(&sbi->s_bal_reqs),
3559                                 atomic_read(&sbi->s_bal_success));
3560                 ext4_msg(sb, KERN_INFO,
3561                       "mballoc: %u extents scanned, %u groups scanned, %u goal hits, "
3562                                 "%u 2^N hits, %u breaks, %u lost",
3563                                 atomic_read(&sbi->s_bal_ex_scanned),
3564                                 atomic_read(&sbi->s_bal_groups_scanned),
3565                                 atomic_read(&sbi->s_bal_goals),
3566                                 atomic_read(&sbi->s_bal_2orders),
3567                                 atomic_read(&sbi->s_bal_breaks),
3568                                 atomic_read(&sbi->s_mb_lost_chunks));
3569                 ext4_msg(sb, KERN_INFO,
3570                        "mballoc: %u generated and it took %llu",
3571                                 atomic_read(&sbi->s_mb_buddies_generated),
3572                                 atomic64_read(&sbi->s_mb_generation_time));
3573                 ext4_msg(sb, KERN_INFO,
3574                        "mballoc: %u preallocated, %u discarded",
3575                                 atomic_read(&sbi->s_mb_preallocated),
3576                                 atomic_read(&sbi->s_mb_discarded));
3577         }
3578
3579         free_percpu(sbi->s_locality_groups);
3580
3581         return 0;
3582 }
3583
3584 static inline int ext4_issue_discard(struct super_block *sb,
3585                 ext4_group_t block_group, ext4_grpblk_t cluster, int count,
3586                 struct bio **biop)
3587 {
3588         ext4_fsblk_t discard_block;
3589
3590         discard_block = (EXT4_C2B(EXT4_SB(sb), cluster) +
3591                          ext4_group_first_block_no(sb, block_group));
3592         count = EXT4_C2B(EXT4_SB(sb), count);
3593         trace_ext4_discard_blocks(sb,
3594                         (unsigned long long) discard_block, count);
3595         if (biop) {
3596                 return __blkdev_issue_discard(sb->s_bdev,
3597                         (sector_t)discard_block << (sb->s_blocksize_bits - 9),
3598                         (sector_t)count << (sb->s_blocksize_bits - 9),
3599                         GFP_NOFS, biop);
3600         } else
3601                 return sb_issue_discard(sb, discard_block, count, GFP_NOFS, 0);
3602 }
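/*
 * Editorial note on the unit conversion above: the block layer works in
 * 512-byte sectors, hence the (sb->s_blocksize_bits - 9) shift. For 4k
 * blocks that is a shift by 3:
 *
 *   sector_t sector = (sector_t)discard_block << 3;  // 8 sectors/block
 *   sector_t nsects = (sector_t)count << 3;
 */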
3603
3604 static void ext4_free_data_in_buddy(struct super_block *sb,
3605                                     struct ext4_free_data *entry)
3606 {
3607         struct ext4_buddy e4b;
3608         struct ext4_group_info *db;
3609         int err, count = 0, count2 = 0;
3610
3611         mb_debug(sb, "gonna free %u blocks in group %u (0x%p):",
3612                  entry->efd_count, entry->efd_group, entry);
3613
3614         err = ext4_mb_load_buddy(sb, entry->efd_group, &e4b);
3615         /* we expect to find an existing buddy because it's pinned */
3616         BUG_ON(err != 0);
3617
3618         spin_lock(&EXT4_SB(sb)->s_md_lock);
3619         EXT4_SB(sb)->s_mb_free_pending -= entry->efd_count;
3620         spin_unlock(&EXT4_SB(sb)->s_md_lock);
3621
3622         db = e4b.bd_info;
3623         /* there are blocks to put back into the buddy to make them really free */
3624         count += entry->efd_count;
3625         count2++;
3626         ext4_lock_group(sb, entry->efd_group);
3627         /* Take it out of per group rb tree */
3628         rb_erase(&entry->efd_node, &(db->bb_free_root));
3629         mb_free_blocks(NULL, &e4b, entry->efd_start_cluster, entry->efd_count);
3630
3631         /*
3632          * Clear the trimmed flag for the group so that the next
3633          * ext4_trim_fs can trim it.
3634          * If the volume is mounted with -o discard, online discard
3635          * is supported and the free blocks will be trimmed online.
3636          */
3637         if (!test_opt(sb, DISCARD))
3638                 EXT4_MB_GRP_CLEAR_TRIMMED(db);
3639
3640         if (!db->bb_free_root.rb_node) {
3641                 /* No more items in the per-group rb tree;
3642                  * balance the refcounts taken in ext4_mb_free_metadata()
3643                  */
3644                 put_page(e4b.bd_buddy_page);
3645                 put_page(e4b.bd_bitmap_page);
3646         }
3647         ext4_unlock_group(sb, entry->efd_group);
3648         ext4_mb_unload_buddy(&e4b);
3649
3650         mb_debug(sb, "freed %d blocks in %d structures\n", count,
3651                  count2);
3652 }
3653
3654 /*
3655  * This function is called by the jbd2 layer once the commit has finished,
3656  * so we know we can free the blocks that were released with that commit.
3657  */
3658 void ext4_process_freed_data(struct super_block *sb, tid_t commit_tid)
3659 {
3660         struct ext4_sb_info *sbi = EXT4_SB(sb);
3661         struct ext4_free_data *entry, *tmp;
3662         struct list_head freed_data_list;
3663         struct list_head *cut_pos = NULL;
3664         bool wake;
3665
3666         INIT_LIST_HEAD(&freed_data_list);
3667
3668         spin_lock(&sbi->s_md_lock);
3669         list_for_each_entry(entry, &sbi->s_freed_data_list, efd_list) {
3670                 if (entry->efd_tid != commit_tid)
3671                         break;
3672                 cut_pos = &entry->efd_list;
3673         }
3674         if (cut_pos)
3675                 list_cut_position(&freed_data_list, &sbi->s_freed_data_list,
3676                                   cut_pos);
3677         spin_unlock(&sbi->s_md_lock);
3678
3679         list_for_each_entry(entry, &freed_data_list, efd_list)
3680                 ext4_free_data_in_buddy(sb, entry);
3681
3682         if (test_opt(sb, DISCARD)) {
3683                 spin_lock(&sbi->s_md_lock);
3684                 wake = list_empty(&sbi->s_discard_list);
3685                 list_splice_tail(&freed_data_list, &sbi->s_discard_list);
3686                 spin_unlock(&sbi->s_md_lock);
3687                 if (wake)
3688                         queue_work(system_unbound_wq, &sbi->s_discard_work);
3689         } else {
3690                 list_for_each_entry_safe(entry, tmp, &freed_data_list, efd_list)
3691                         kmem_cache_free(ext4_free_data_cachep, entry);
3692         }
3693 }
3694
3695 int __init ext4_init_mballoc(void)
3696 {
3697         ext4_pspace_cachep = KMEM_CACHE(ext4_prealloc_space,
3698                                         SLAB_RECLAIM_ACCOUNT);
3699         if (ext4_pspace_cachep == NULL)
3700                 goto out;
3701
3702         ext4_ac_cachep = KMEM_CACHE(ext4_allocation_context,
3703                                     SLAB_RECLAIM_ACCOUNT);
3704         if (ext4_ac_cachep == NULL)
3705                 goto out_pa_free;
3706
3707         ext4_free_data_cachep = KMEM_CACHE(ext4_free_data,
3708                                            SLAB_RECLAIM_ACCOUNT);
3709         if (ext4_free_data_cachep == NULL)
3710                 goto out_ac_free;
3711
3712         return 0;
3713
3714 out_ac_free:
3715         kmem_cache_destroy(ext4_ac_cachep);
3716 out_pa_free:
3717         kmem_cache_destroy(ext4_pspace_cachep);
3718 out:
3719         return -ENOMEM;
3720 }
3721
3722 void ext4_exit_mballoc(void)
3723 {
3724         /*
3725          * Wait for completion of call_rcu()'s on ext4_pspace_cachep
3726          * before destroying the slab cache.
3727          */
3728         rcu_barrier();
3729         kmem_cache_destroy(ext4_pspace_cachep);
3730         kmem_cache_destroy(ext4_ac_cachep);
3731         kmem_cache_destroy(ext4_free_data_cachep);
3732         ext4_groupinfo_destroy_slabs();
3733 }
3734
3735
3736 /*
3737  * Check quota and mark chosen space (ac->ac_b_ex) non-free in bitmaps
3738  * Returns 0 on success, or an error code on failure
3739  */
3740 static noinline_for_stack int
3741 ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
3742                                 handle_t *handle, unsigned int reserv_clstrs)
3743 {
3744         struct buffer_head *bitmap_bh = NULL;
3745         struct ext4_group_desc *gdp;
3746         struct buffer_head *gdp_bh;
3747         struct ext4_sb_info *sbi;
3748         struct super_block *sb;
3749         ext4_fsblk_t block;
3750         int err, len;
3751
3752         BUG_ON(ac->ac_status != AC_STATUS_FOUND);
3753         BUG_ON(ac->ac_b_ex.fe_len <= 0);
3754
3755         sb = ac->ac_sb;
3756         sbi = EXT4_SB(sb);
3757
3758         bitmap_bh = ext4_read_block_bitmap(sb, ac->ac_b_ex.fe_group);
3759         if (IS_ERR(bitmap_bh)) {
3760                 err = PTR_ERR(bitmap_bh);
3761                 bitmap_bh = NULL;
3762                 goto out_err;
3763         }
3764
3765         BUFFER_TRACE(bitmap_bh, "getting write access");
3766         err = ext4_journal_get_write_access(handle, sb, bitmap_bh,
3767                                             EXT4_JTR_NONE);
3768         if (err)
3769                 goto out_err;
3770
3771         err = -EIO;
3772         gdp = ext4_get_group_desc(sb, ac->ac_b_ex.fe_group, &gdp_bh);
3773         if (!gdp)
3774                 goto out_err;
3775
3776         ext4_debug("using block group %u(%d)\n", ac->ac_b_ex.fe_group,
3777                         ext4_free_group_clusters(sb, gdp));
3778
3779         BUFFER_TRACE(gdp_bh, "get_write_access");
3780         err = ext4_journal_get_write_access(handle, sb, gdp_bh, EXT4_JTR_NONE);
3781         if (err)
3782                 goto out_err;
3783
3784         block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
3785
3786         len = EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
3787         if (!ext4_inode_block_valid(ac->ac_inode, block, len)) {
3788                 ext4_error(sb, "Allocating blocks %llu-%llu which overlap "
3789                            "fs metadata", block, block+len);
3790                 /* The file system is mounted not to panic on error,
3791                  * so fix the bitmap and return EFSCORRUPTED.
3792                  * We leak some of the blocks here.
3793                  */
3794                 ext4_lock_group(sb, ac->ac_b_ex.fe_group);
3795                 mb_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start,
3796                               ac->ac_b_ex.fe_len);
3797                 ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
3798                 err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
3799                 if (!err)
3800                         err = -EFSCORRUPTED;
3801                 goto out_err;
3802         }
3803
3804         ext4_lock_group(sb, ac->ac_b_ex.fe_group);
3805 #ifdef AGGRESSIVE_CHECK
3806         {
3807                 int i;
3808                 for (i = 0; i < ac->ac_b_ex.fe_len; i++) {
3809                         BUG_ON(mb_test_bit(ac->ac_b_ex.fe_start + i,
3810                                                 bitmap_bh->b_data));
3811                 }
3812         }
3813 #endif
3814         mb_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start,
3815                       ac->ac_b_ex.fe_len);
3816         if (ext4_has_group_desc_csum(sb) &&
3817             (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
3818                 gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
3819                 ext4_free_group_clusters_set(sb, gdp,
3820                                              ext4_free_clusters_after_init(sb,
3821                                                 ac->ac_b_ex.fe_group, gdp));
3822         }
3823         len = ext4_free_group_clusters(sb, gdp) - ac->ac_b_ex.fe_len;
3824         ext4_free_group_clusters_set(sb, gdp, len);
3825         ext4_block_bitmap_csum_set(sb, gdp, bitmap_bh);
3826         ext4_group_desc_csum_set(sb, ac->ac_b_ex.fe_group, gdp);
3827
3828         ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
3829         percpu_counter_sub(&sbi->s_freeclusters_counter, ac->ac_b_ex.fe_len);
3830         /*
3831          * Now reduce the dirty block count also. Should not go negative
3832          */
3833         if (!(ac->ac_flags & EXT4_MB_DELALLOC_RESERVED))
3834                 /* release all the reserved blocks if non delalloc */
3835                 percpu_counter_sub(&sbi->s_dirtyclusters_counter,
3836                                    reserv_clstrs);
3837
3838         if (sbi->s_log_groups_per_flex) {
3839                 ext4_group_t flex_group = ext4_flex_group(sbi,
3840                                                           ac->ac_b_ex.fe_group);
3841                 atomic64_sub(ac->ac_b_ex.fe_len,
3842                              &sbi_array_rcu_deref(sbi, s_flex_groups,
3843                                                   flex_group)->free_clusters);
3844         }
3845
3846         err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
3847         if (err)
3848                 goto out_err;
3849         err = ext4_handle_dirty_metadata(handle, NULL, gdp_bh);
3850
3851 out_err:
3852         brelse(bitmap_bh);
3853         return err;
3854 }
3855
3856 /*
3857  * Idempotent helper for Ext4 fast commit replay path to set the state of
3858  * blocks in bitmaps and update counters.
3859  */
3860 void ext4_mb_mark_bb(struct super_block *sb, ext4_fsblk_t block,
3861                         int len, int state)
3862 {
3863         struct buffer_head *bitmap_bh = NULL;
3864         struct ext4_group_desc *gdp;
3865         struct buffer_head *gdp_bh;
3866         struct ext4_sb_info *sbi = EXT4_SB(sb);
3867         ext4_group_t group;
3868         ext4_grpblk_t blkoff;
3869         int i, err = 0;
3870         int already;
3871         unsigned int clen, clen_changed, thisgrp_len;
3872
3873         while (len > 0) {
3874                 ext4_get_group_no_and_offset(sb, block, &group, &blkoff);
3875
3876                 /*
3877                  * Check to see if we are freeing blocks across a group
3878                  * boundary.
3879                  * In case of flex_bg, (block, len) may span more than
3880                  * one group. In that case we need to get the
3881                  * corresponding group metadata to work with, which is
3882                  * why this runs as a loop, one group per iteration.
3883                  */
3884                 thisgrp_len = min_t(unsigned int, (unsigned int)len,
3885                         EXT4_BLOCKS_PER_GROUP(sb) - EXT4_C2B(sbi, blkoff));
3886                 clen = EXT4_NUM_B2C(sbi, thisgrp_len);
3887
3888                 if (!ext4_sb_block_valid(sb, NULL, block, thisgrp_len)) {
3889                         ext4_error(sb, "Marking blocks in system zone - "
3890                                    "Block = %llu, len = %u",
3891                                    block, thisgrp_len);
3892                         bitmap_bh = NULL;
3893                         break;
3894                 }
3895
3896                 bitmap_bh = ext4_read_block_bitmap(sb, group);
3897                 if (IS_ERR(bitmap_bh)) {
3898                         err = PTR_ERR(bitmap_bh);
3899                         bitmap_bh = NULL;
3900                         break;
3901                 }
3902
3903                 err = -EIO;
3904                 gdp = ext4_get_group_desc(sb, group, &gdp_bh);
3905                 if (!gdp)
3906                         break;
3907
3908                 ext4_lock_group(sb, group);
3909                 already = 0;
3910                 for (i = 0; i < clen; i++)
3911                         if (!mb_test_bit(blkoff + i, bitmap_bh->b_data) ==
3912                                          !state)
3913                                 already++;
3914
3915                 clen_changed = clen - already;
3916                 if (state)
3917                         mb_set_bits(bitmap_bh->b_data, blkoff, clen);
3918                 else
3919                         mb_clear_bits(bitmap_bh->b_data, blkoff, clen);
3920                 if (ext4_has_group_desc_csum(sb) &&
3921                     (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
3922                         gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
3923                         ext4_free_group_clusters_set(sb, gdp,
3924                              ext4_free_clusters_after_init(sb, group, gdp));
3925                 }
3926                 if (state)
3927                         clen = ext4_free_group_clusters(sb, gdp) - clen_changed;
3928                 else
3929                         clen = ext4_free_group_clusters(sb, gdp) + clen_changed;
3930
3931                 ext4_free_group_clusters_set(sb, gdp, clen);
3932                 ext4_block_bitmap_csum_set(sb, gdp, bitmap_bh);
3933                 ext4_group_desc_csum_set(sb, group, gdp);
3934
3935                 ext4_unlock_group(sb, group);
3936
3937                 if (sbi->s_log_groups_per_flex) {
3938                         ext4_group_t flex_group = ext4_flex_group(sbi, group);
3939                         struct flex_groups *fg = sbi_array_rcu_deref(sbi,
3940                                                    s_flex_groups, flex_group);
3941
3942                         if (state)
3943                                 atomic64_sub(clen_changed, &fg->free_clusters);
3944                         else
3945                                 atomic64_add(clen_changed, &fg->free_clusters);
3946
3947                 }
3948
3949                 err = ext4_handle_dirty_metadata(NULL, NULL, bitmap_bh);
3950                 if (err)
3951                         break;
3952                 sync_dirty_buffer(bitmap_bh);
3953                 err = ext4_handle_dirty_metadata(NULL, NULL, gdp_bh);
3954                 sync_dirty_buffer(gdp_bh);
3955                 if (err)
3956                         break;
3957
3958                 block += thisgrp_len;
3959                 len -= thisgrp_len;
3960                 brelse(bitmap_bh);
3961                 BUG_ON(len < 0);
3962         }
3963
3964         if (err)
3965                 brelse(bitmap_bh);
3966 }
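/*
 * Editorial sketch of the loop above (assuming 4k blocks, no bigalloc,
 * 32768 blocks per group): marking (block = 32760, len = 16) is split
 * into two iterations:
 *
 *   pass 1: group 0, blkoff 32760, thisgrp_len 8
 *   pass 2: group 1, blkoff 0,     thisgrp_len 8
 */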
3967
3968 /*
3969  * Here we normalize the request for a locality group.
3970  * Group requests are normalized to s_mb_group_prealloc, which is rounded
3971  * up to a multiple of s_stripe if a stripe size is set via mount option.
3972  * s_mb_group_prealloc can be configured via
3973  * /sys/fs/ext4/<partition>/mb_group_prealloc
3974  *
3975  * XXX: should we try to preallocate more than the group has now?
3976  */
3977 static void ext4_mb_normalize_group_request(struct ext4_allocation_context *ac)
3978 {
3979         struct super_block *sb = ac->ac_sb;
3980         struct ext4_locality_group *lg = ac->ac_lg;
3981
3982         BUG_ON(lg == NULL);
3983         ac->ac_g_ex.fe_len = EXT4_SB(sb)->s_mb_group_prealloc;
3984         mb_debug(sb, "goal %u blocks for locality group\n", ac->ac_g_ex.fe_len);
3985 }
3986
3987 /*
3988  * Normalization means making the request better in terms of
3989  * size and alignment
3990  */
3991 static noinline_for_stack void
3992 ext4_mb_normalize_request(struct ext4_allocation_context *ac,
3993                                 struct ext4_allocation_request *ar)
3994 {
3995         struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
3996         struct ext4_super_block *es = sbi->s_es;
3997         int bsbits, max;
3998         ext4_lblk_t end;
3999         loff_t size, start_off;
4000         loff_t orig_size __maybe_unused;
4001         ext4_lblk_t start;
4002         struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
4003         struct ext4_prealloc_space *pa;
4004
4005         /* only normalize data requests; metadata requests
4006            do not need preallocation */
4007         if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
4008                 return;
4009
4010         /* sometimes the caller may want exactly the requested blocks */
4011         if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
4012                 return;
4013
4014         /* caller may indicate that preallocation isn't
4015          * required (it's a tail, for example) */
4016         if (ac->ac_flags & EXT4_MB_HINT_NOPREALLOC)
4017                 return;
4018
4019         if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC) {
4020                 ext4_mb_normalize_group_request(ac);
4021                 return;
4022         }
4023
4024         bsbits = ac->ac_sb->s_blocksize_bits;
4025
4026         /* first, let's learn the actual file size
4027          * assuming the current request is allocated */
4028         size = ac->ac_o_ex.fe_logical + EXT4_C2B(sbi, ac->ac_o_ex.fe_len);
4029         size = size << bsbits;
4030         if (size < i_size_read(ac->ac_inode))
4031                 size = i_size_read(ac->ac_inode);
4032         orig_size = size;
4033
4034         /* max size of free chunks */
4035         max = 2 << bsbits;
4036
4037 #define NRL_CHECK_SIZE(req, size, max, chunk_size)      \
4038                 (req <= (size) || max <= (chunk_size))
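/*
 * Reading the macro: a bucket below is taken either because the
 * predicted size fits into it (req <= size), or because free chunks
 * cannot be larger than chunk_size anyway (max <= chunk_size), so a
 * bigger bucket would not help.
 */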
4039
4040         /* first, try to predict filesize */
4041         /* XXX: should this table be tunable? */
4042         start_off = 0;
4043         if (size <= 16 * 1024) {
4044                 size = 16 * 1024;
4045         } else if (size <= 32 * 1024) {
4046                 size = 32 * 1024;
4047         } else if (size <= 64 * 1024) {
4048                 size = 64 * 1024;
4049         } else if (size <= 128 * 1024) {
4050                 size = 128 * 1024;
4051         } else if (size <= 256 * 1024) {
4052                 size = 256 * 1024;
4053         } else if (size <= 512 * 1024) {
4054                 size = 512 * 1024;
4055         } else if (size <= 1024 * 1024) {
4056                 size = 1024 * 1024;
4057         } else if (NRL_CHECK_SIZE(size, 4 * 1024 * 1024, max, 2 * 1024)) {
4058                 start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
4059                                                 (21 - bsbits)) << 21;
4060                 size = 2 * 1024 * 1024;
4061         } else if (NRL_CHECK_SIZE(size, 8 * 1024 * 1024, max, 4 * 1024)) {
4062                 start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
4063                                                         (22 - bsbits)) << 22;
4064                 size = 4 * 1024 * 1024;
4065         } else if (NRL_CHECK_SIZE(ac->ac_o_ex.fe_len,
4066                                         (8<<20)>>bsbits, max, 8 * 1024)) {
4067                 start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
4068                                                         (23 - bsbits)) << 23;
4069                 size = 8 * 1024 * 1024;
4070         } else {
4071                 start_off = (loff_t) ac->ac_o_ex.fe_logical << bsbits;
4072                 size      = (loff_t) EXT4_C2B(EXT4_SB(ac->ac_sb),
4073                                               ac->ac_o_ex.fe_len) << bsbits;
4074         }
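        /*
         * Worked example (illustrative, 4KiB blocks, bsbits == 12): a
         * request whose predicted file size is 300KiB lands in the
         * "<= 512 * 1024" bucket, so size becomes 512KiB -- 128 blocks
         * after the shift below -- while start_off (and thus start)
         * stays 0, as it does for all sub-2MiB buckets.
         */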
4075         size = size >> bsbits;
4076         start = start_off >> bsbits;
4077
4078         /*
4079          * For tiny groups (smaller than 8MB) the chosen allocation
4080          * alignment may be larger than group size. Make sure the
4081          * alignment does not move allocation to a different group which
4082          * makes mballoc fail assertions later.
4083          */
4084         start = max(start, rounddown(ac->ac_o_ex.fe_logical,
4085                         (ext4_lblk_t)EXT4_BLOCKS_PER_GROUP(ac->ac_sb)));
4086
4087         /* don't cover already allocated blocks in selected range */
4088         if (ar->pleft && start <= ar->lleft) {
4089                 size -= ar->lleft + 1 - start;
4090                 start = ar->lleft + 1;
4091         }
4092         if (ar->pright && start + size - 1 >= ar->lright)
4093                 size -= start + size - ar->lright;
4094
4095         /*
4096          * Trim allocation request for filesystems with artificially small
4097          * groups.
4098          */
4099         if (size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb))
4100                 size = EXT4_BLOCKS_PER_GROUP(ac->ac_sb);
4101
4102         end = start + size;
4103
4104         /* check we don't cross already preallocated blocks */
4105         rcu_read_lock();
4106         list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
4107                 ext4_lblk_t pa_end;
4108
4109                 if (pa->pa_deleted)
4110                         continue;
4111                 spin_lock(&pa->pa_lock);
4112                 if (pa->pa_deleted) {
4113                         spin_unlock(&pa->pa_lock);
4114                         continue;
4115                 }
4116
4117                 pa_end = pa->pa_lstart + EXT4_C2B(EXT4_SB(ac->ac_sb),
4118                                                   pa->pa_len);
4119
4120                 /* PA must not overlap original request */
4121                 BUG_ON(!(ac->ac_o_ex.fe_logical >= pa_end ||
4122                         ac->ac_o_ex.fe_logical < pa->pa_lstart));
4123
4124                 /* skip PAs this normalized request doesn't overlap with */
4125                 if (pa->pa_lstart >= end || pa_end <= start) {
4126                         spin_unlock(&pa->pa_lock);
4127                         continue;
4128                 }
4129                 BUG_ON(pa->pa_lstart <= start && pa_end >= end);
4130
4131                 /* adjust start or end to be adjacent to this pa */
4132                 if (pa_end <= ac->ac_o_ex.fe_logical) {
4133                         BUG_ON(pa_end < start);
4134                         start = pa_end;
4135                 } else if (pa->pa_lstart > ac->ac_o_ex.fe_logical) {
4136                         BUG_ON(pa->pa_lstart > end);
4137                         end = pa->pa_lstart;
4138                 }
4139                 spin_unlock(&pa->pa_lock);
4140         }
4141         rcu_read_unlock();
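        /*
         * For instance (illustrative): if the window was [0, 128) and an
         * existing pa covers logical [0, 32) entirely below fe_logical,
         * start was moved up to 32 above and the window shrinks to 96
         * blocks here.
         */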
4142         size = end - start;
4143
4144         /* XXX: extra loop to check we really don't overlap preallocations */
4145         rcu_read_lock();
4146         list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
4147                 ext4_lblk_t pa_end;
4148
4149                 spin_lock(&pa->pa_lock);
4150                 if (pa->pa_deleted == 0) {
4151                         pa_end = pa->pa_lstart + EXT4_C2B(EXT4_SB(ac->ac_sb),
4152                                                           pa->pa_len);
4153                         BUG_ON(!(start >= pa_end || end <= pa->pa_lstart));
4154                 }
4155                 spin_unlock(&pa->pa_lock);
4156         }
4157         rcu_read_unlock();
4158
4159         /*
4160          * In this function "start" and "size" are normalized for better
4161          * alignment and length such that we could preallocate more blocks.
4162          * This normalization is done such that original request of
4163          * ac->ac_o_ex.fe_logical & fe_len should always lie within "start" and
4164          * "size" boundaries.
4165          * (Note fe_len can be relaxed since the FS block allocation API does not
4166          * provide any guarantee on the number of contiguous blocks allocated,
4167          * since that depends upon the free space left, etc.)
4168          * In case of inode pa, later we use the allocated blocks
4169          * [pa_start + fe_logical - pa_lstart, fe_len/size] from the preallocated
4170          * range of goal/best blocks [start, size] to put it at the
4171          * ac_o_ex.fe_logical extent of this inode.
4172          * (See ext4_mb_use_inode_pa() for more details)
4173          */
4174         if (start + size <= ac->ac_o_ex.fe_logical ||
4175                         start > ac->ac_o_ex.fe_logical) {
4176                 ext4_msg(ac->ac_sb, KERN_ERR,
4177                          "start %lu, size %lu, fe_logical %lu",
4178                          (unsigned long) start, (unsigned long) size,
4179                          (unsigned long) ac->ac_o_ex.fe_logical);
4180                 BUG();
4181         }
4182         BUG_ON(size <= 0 || size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb));
4183
4184         /* now prepare goal request */
4185
4186         /* XXX: is it better to align blocks with respect to logical
4187          * placement, or to satisfy a big request as is? */
4188         ac->ac_g_ex.fe_logical = start;
4189         ac->ac_g_ex.fe_len = EXT4_NUM_B2C(sbi, size);
4190
4191         /* define goal start in order to merge */
4192         if (ar->pright && (ar->lright == (start + size)) &&
4193             ar->pright >= size &&
4194             ar->pright - size >= le32_to_cpu(es->s_first_data_block)) {
4195                 /* merge to the right */
4196                 ext4_get_group_no_and_offset(ac->ac_sb, ar->pright - size,
4197                                                 &ac->ac_g_ex.fe_group,
4198                                                 &ac->ac_g_ex.fe_start);
4199                 ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL;
4200         }
4201         if (ar->pleft && (ar->lleft + 1 == start) &&
4202             ar->pleft + 1 < ext4_blocks_count(es)) {
4203                 /* merge to the left */
4204                 ext4_get_group_no_and_offset(ac->ac_sb, ar->pleft + 1,
4205                                                 &ac->ac_g_ex.fe_group,
4206                                                 &ac->ac_g_ex.fe_start);
4207                 ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL;
4208         }
4209
4210         mb_debug(ac->ac_sb, "goal: %lld(was %lld) blocks at %u\n", size,
4211                  orig_size, start);
4212 }
4213
4214 static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
4215 {
4216         struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
4217
4218         if (sbi->s_mb_stats && ac->ac_g_ex.fe_len >= 1) {
4219                 atomic_inc(&sbi->s_bal_reqs);
4220                 atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
4221                 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
4222                         atomic_inc(&sbi->s_bal_success);
4223                 atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
4224                 atomic_add(ac->ac_groups_scanned, &sbi->s_bal_groups_scanned);
4225                 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
4226                                 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
4227                         atomic_inc(&sbi->s_bal_goals);
4228                 if (ac->ac_found > sbi->s_mb_max_to_scan)
4229                         atomic_inc(&sbi->s_bal_breaks);
4230         }
4231
4232         if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
4233                 trace_ext4_mballoc_alloc(ac);
4234         else
4235                 trace_ext4_mballoc_prealloc(ac);
4236 }
4237
4238 /*
4239  * Called on failure; free up any blocks from the inode PA for this
4240  * context.  We don't need this for MB_GROUP_PA because we only change
4241  * pa_free in ext4_mb_release_context(), but on failure, we've already
4242  * zeroed out ac->ac_b_ex.fe_len, so group_pa->pa_free is not changed.
4243  */
4244 static void ext4_discard_allocated_blocks(struct ext4_allocation_context *ac)
4245 {
4246         struct ext4_prealloc_space *pa = ac->ac_pa;
4247         struct ext4_buddy e4b;
4248         int err;
4249
4250         if (pa == NULL) {
4251                 if (ac->ac_f_ex.fe_len == 0)
4252                         return;
4253                 err = ext4_mb_load_buddy(ac->ac_sb, ac->ac_f_ex.fe_group, &e4b);
4254                 if (err) {
4255                         /*
4256                          * This should never happen since we pin the
4257                          * pages in the ext4_allocation_context so
4258                          * ext4_mb_load_buddy() should never fail.
4259                          */
4260                         WARN(1, "mb_load_buddy failed (%d)", err);
4261                         return;
4262                 }
4263                 ext4_lock_group(ac->ac_sb, ac->ac_f_ex.fe_group);
4264                 mb_free_blocks(ac->ac_inode, &e4b, ac->ac_f_ex.fe_start,
4265                                ac->ac_f_ex.fe_len);
4266                 ext4_unlock_group(ac->ac_sb, ac->ac_f_ex.fe_group);
4267                 ext4_mb_unload_buddy(&e4b);
4268                 return;
4269         }
4270         if (pa->pa_type == MB_INODE_PA)
4271                 pa->pa_free += ac->ac_b_ex.fe_len;
4272 }
4273
4274 /*
4275  * use blocks preallocated to inode
4276  */
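/*
 * A minimal illustration (assuming cluster size == block size): for a pa
 * with pa_lstart == 100, pa_pstart == 1000, pa_len == 64 and an original
 * request of 8 blocks at logical 110, start below is 1010, end is 1018
 * and fe_len is 8, i.e. physical blocks 1010..1017 back the logical
 * blocks 110..117.
 */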
4277 static void ext4_mb_use_inode_pa(struct ext4_allocation_context *ac,
4278                                 struct ext4_prealloc_space *pa)
4279 {
4280         struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
4281         ext4_fsblk_t start;
4282         ext4_fsblk_t end;
4283         int len;
4284
4285         /* found preallocated blocks, use them */
4286         start = pa->pa_pstart + (ac->ac_o_ex.fe_logical - pa->pa_lstart);
4287         end = min(pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len),
4288                   start + EXT4_C2B(sbi, ac->ac_o_ex.fe_len));
4289         len = EXT4_NUM_B2C(sbi, end - start);
4290         ext4_get_group_no_and_offset(ac->ac_sb, start, &ac->ac_b_ex.fe_group,
4291                                         &ac->ac_b_ex.fe_start);
4292         ac->ac_b_ex.fe_len = len;
4293         ac->ac_status = AC_STATUS_FOUND;
4294         ac->ac_pa = pa;
4295
4296         BUG_ON(start < pa->pa_pstart);
4297         BUG_ON(end > pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len));
4298         BUG_ON(pa->pa_free < len);
4299         pa->pa_free -= len;
4300
4301         mb_debug(ac->ac_sb, "use %llu/%d from inode pa %p\n", start, len, pa);
4302 }
4303
4304 /*
4305  * use blocks preallocated to locality group
4306  */
4307 static void ext4_mb_use_group_pa(struct ext4_allocation_context *ac,
4308                                 struct ext4_prealloc_space *pa)
4309 {
4310         unsigned int len = ac->ac_o_ex.fe_len;
4311
4312         ext4_get_group_no_and_offset(ac->ac_sb, pa->pa_pstart,
4313                                         &ac->ac_b_ex.fe_group,
4314                                         &ac->ac_b_ex.fe_start);
4315         ac->ac_b_ex.fe_len = len;
4316         ac->ac_status = AC_STATUS_FOUND;
4317         ac->ac_pa = pa;
4318
4319         /* we don't correct pa_pstart or pa_len here to avoid a
4320          * possible race when the group is being loaded concurrently;
4321          * instead we correct the pa later, after blocks are marked
4322          * in the on-disk bitmap -- see ext4_mb_release_context().
4323          * Other CPUs are prevented from allocating from this pa by lg_mutex.
4324          */
4325         mb_debug(ac->ac_sb, "use %u/%u from group pa %p\n",
4326                  pa->pa_lstart - len, len, pa);
4327 }
4328
4329 /*
4330  * Return the prealloc space that has the minimal distance
4331  * from the goal block. @cpa is the prealloc
4332  * space with the currently known minimal distance
4333  * from the goal block.
4334  */
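/*
 * For example (illustrative): with goal_block == 1000, a current best
 * cpa at pa_pstart == 900 (distance 100) is replaced by a pa at
 * pa_pstart == 1050 (distance 50), moving the pa_count references
 * accordingly.
 */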
4335 static struct ext4_prealloc_space *
4336 ext4_mb_check_group_pa(ext4_fsblk_t goal_block,
4337                         struct ext4_prealloc_space *pa,
4338                         struct ext4_prealloc_space *cpa)
4339 {
4340         ext4_fsblk_t cur_distance, new_distance;
4341
4342         if (cpa == NULL) {
4343                 atomic_inc(&pa->pa_count);
4344                 return pa;
4345         }
4346         cur_distance = abs(goal_block - cpa->pa_pstart);
4347         new_distance = abs(goal_block - pa->pa_pstart);
4348
4349         if (cur_distance <= new_distance)
4350                 return cpa;
4351
4352         /* drop the previous reference */
4353         atomic_dec(&cpa->pa_count);
4354         atomic_inc(&pa->pa_count);
4355         return pa;
4356 }
4357
4358 /*
4359  * search goal blocks in preallocated space
4360  */
4361 static noinline_for_stack bool
4362 ext4_mb_use_preallocated(struct ext4_allocation_context *ac)
4363 {
4364         struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
4365         int order, i;
4366         struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
4367         struct ext4_locality_group *lg;
4368         struct ext4_prealloc_space *pa, *cpa = NULL;
4369         ext4_fsblk_t goal_block;
4370
4371         /* only data can be preallocated */
4372         if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
4373                 return false;
4374
4375         /* first, try per-file preallocation */
4376         rcu_read_lock();
4377         list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
4378
4379                 /* none of the fields in this condition change,
4380                  * so we can skip locking for them */
4381                 if (ac->ac_o_ex.fe_logical < pa->pa_lstart ||
4382                     ac->ac_o_ex.fe_logical >= (pa->pa_lstart +
4383                                                EXT4_C2B(sbi, pa->pa_len)))
4384                         continue;
4385
4386                 /* non-extent files can't have physical blocks past 2^32 */
4387                 if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)) &&
4388                     (pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len) >
4389                      EXT4_MAX_BLOCK_FILE_PHYS))
4390                         continue;
4391
4392                 /* found preallocated blocks, use them */
4393                 spin_lock(&pa->pa_lock);
4394                 if (pa->pa_deleted == 0 && pa->pa_free) {
4395                         atomic_inc(&pa->pa_count);
4396                         ext4_mb_use_inode_pa(ac, pa);
4397                         spin_unlock(&pa->pa_lock);
4398                         ac->ac_criteria = 10;
4399                         rcu_read_unlock();
4400                         return true;
4401                 }
4402                 spin_unlock(&pa->pa_lock);
4403         }
4404         rcu_read_unlock();
4405
4406         /* can we use group allocation? */
4407         if (!(ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC))
4408                 return false;
4409
4410         /* inode may have no locality group for some reason */
4411         lg = ac->ac_lg;
4412         if (lg == NULL)
4413                 return false;
4414         order  = fls(ac->ac_o_ex.fe_len) - 1;
4415         if (order > PREALLOC_TB_SIZE - 1)
4416                 /* The max size of hash table is PREALLOC_TB_SIZE */
4417                 order = PREALLOC_TB_SIZE - 1;
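        /*
         * e.g. (illustrative) a request of 12 clusters gives
         * order = fls(12) - 1 = 3, so the lists for order 3 and up are
         * scanned for a pa with enough free clusters.
         */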
4418
4419         goal_block = ext4_grp_offs_to_block(ac->ac_sb, &ac->ac_g_ex);
4420         /*
4421          * search for the prealloc space that has the
4422          * minimal distance from the goal block.
4423          */
4424         for (i = order; i < PREALLOC_TB_SIZE; i++) {
4425                 rcu_read_lock();
4426                 list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[i],
4427                                         pa_inode_list) {
4428                         spin_lock(&pa->pa_lock);
4429                         if (pa->pa_deleted == 0 &&
4430                                         pa->pa_free >= ac->ac_o_ex.fe_len) {
4431
4432                                 cpa = ext4_mb_check_group_pa(goal_block,
4433                                                                 pa, cpa);
4434                         }
4435                         spin_unlock(&pa->pa_lock);
4436                 }
4437                 rcu_read_unlock();
4438         }
4439         if (cpa) {
4440                 ext4_mb_use_group_pa(ac, cpa);
4441                 ac->ac_criteria = 20;
4442                 return true;
4443         }
4444         return false;
4445 }
4446
4447 /*
4448  * the function goes through all blocks freed in the group
4449  * but not yet committed and marks them used in the in-core bitmap.
4450  * The buddy must be generated from this bitmap.
4451  * Needs to be called with the ext4 group lock held.
4452  */
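/*
 * Blocks freed in a not-yet-committed transaction must not be handed
 * out again before the commit (a crash could otherwise expose stale
 * data), which is why they are kept "used" in the buddy until then.
 */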
4453 static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
4454                                                 ext4_group_t group)
4455 {
4456         struct rb_node *n;
4457         struct ext4_group_info *grp;
4458         struct ext4_free_data *entry;
4459
4460         grp = ext4_get_group_info(sb, group);
4461         n = rb_first(&(grp->bb_free_root));
4462
4463         while (n) {
4464                 entry = rb_entry(n, struct ext4_free_data, efd_node);
4465                 mb_set_bits(bitmap, entry->efd_start_cluster, entry->efd_count);
4466                 n = rb_next(n);
4467         }
4468         return;
4469 }
4470
4471 /*
4472  * the function goes through all preallocations in this group and marks them
4473  * used in the in-core bitmap. The buddy must be generated from this bitmap.
4474  * Needs to be called with the ext4 group lock held.
4475  */
4476 static noinline_for_stack
4477 void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
4478                                         ext4_group_t group)
4479 {
4480         struct ext4_group_info *grp = ext4_get_group_info(sb, group);
4481         struct ext4_prealloc_space *pa;
4482         struct list_head *cur;
4483         ext4_group_t groupnr;
4484         ext4_grpblk_t start;
4485         int preallocated = 0;
4486         int len;
4487
4488         /* all forms of preallocation discard first load the group,
4489          * so the only competing code is preallocation use.
4490          * We don't need any locking here.
4491          * Note that we do NOT ignore preallocations with pa_deleted set;
4492          * otherwise we could leave used blocks available for
4493          * allocation in the buddy when a concurrent ext4_mb_put_pa()
4494          * is dropping the preallocation.
4495          */
4496         list_for_each(cur, &grp->bb_prealloc_list) {
4497                 pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
4498                 spin_lock(&pa->pa_lock);
4499                 ext4_get_group_no_and_offset(sb, pa->pa_pstart,
4500                                              &groupnr, &start);
4501                 len = pa->pa_len;
4502                 spin_unlock(&pa->pa_lock);
4503                 if (unlikely(len == 0))
4504                         continue;
4505                 BUG_ON(groupnr != group);
4506                 mb_set_bits(bitmap, start, len);
4507                 preallocated += len;
4508         }
4509         mb_debug(sb, "preallocated %d for group %u\n", preallocated, group);
4510 }
4511
4512 static void ext4_mb_mark_pa_deleted(struct super_block *sb,
4513                                     struct ext4_prealloc_space *pa)
4514 {
4515         struct ext4_inode_info *ei;
4516
4517         if (pa->pa_deleted) {
4518                 ext4_warning(sb, "deleted pa, type:%d, pblk:%llu, lblk:%u, len:%d\n",
4519                              pa->pa_type, pa->pa_pstart, pa->pa_lstart,
4520                              pa->pa_len);
4521                 return;
4522         }
4523
4524         pa->pa_deleted = 1;
4525
4526         if (pa->pa_type == MB_INODE_PA) {
4527                 ei = EXT4_I(pa->pa_inode);
4528                 atomic_dec(&ei->i_prealloc_active);
4529         }
4530 }
4531
4532 static void ext4_mb_pa_callback(struct rcu_head *head)
4533 {
4534         struct ext4_prealloc_space *pa;
4535         pa = container_of(head, struct ext4_prealloc_space, u.pa_rcu);
4536
4537         BUG_ON(atomic_read(&pa->pa_count));
4538         BUG_ON(pa->pa_deleted == 0);
4539         kmem_cache_free(ext4_pspace_cachep, pa);
4540 }
4541
4542 /*
4543  * drops a reference to preallocated space descriptor
4544  * if this was the last reference and the space is consumed
4545  */
4546 static void ext4_mb_put_pa(struct ext4_allocation_context *ac,
4547                         struct super_block *sb, struct ext4_prealloc_space *pa)
4548 {
4549         ext4_group_t grp;
4550         ext4_fsblk_t grp_blk;
4551
4552         /* in this short window concurrent discard can set pa_deleted */
4553         spin_lock(&pa->pa_lock);
4554         if (!atomic_dec_and_test(&pa->pa_count) || pa->pa_free != 0) {
4555                 spin_unlock(&pa->pa_lock);
4556                 return;
4557         }
4558
4559         if (pa->pa_deleted == 1) {
4560                 spin_unlock(&pa->pa_lock);
4561                 return;
4562         }
4563
4564         ext4_mb_mark_pa_deleted(sb, pa);
4565         spin_unlock(&pa->pa_lock);
4566
4567         grp_blk = pa->pa_pstart;
4568         /*
4569          * If doing group-based preallocation, pa_pstart may be in the
4570          * next group when pa is used up
4571          */
4572         if (pa->pa_type == MB_GROUP_PA)
4573                 grp_blk--;
4574
4575         grp = ext4_get_group_number(sb, grp_blk);
4576
4577         /*
4578          * possible race:
4579          *
4580          *  P1 (buddy init)                     P2 (regular allocation)
4581          *                                      find block B in PA
4582          *  copy on-disk bitmap to buddy
4583          *                                      mark B in on-disk bitmap
4584          *                                      drop PA from group
4585          *  mark all PAs in buddy
4586          *
4587          * thus, P1 initializes buddy with B available. to prevent this
4588          * we make "copy" and "mark all PAs" atomic and serialize "drop PA"
4589          * against that pair
4590          */
4591         ext4_lock_group(sb, grp);
4592         list_del(&pa->pa_group_list);
4593         ext4_unlock_group(sb, grp);
4594
4595         spin_lock(pa->pa_obj_lock);
4596         list_del_rcu(&pa->pa_inode_list);
4597         spin_unlock(pa->pa_obj_lock);
4598
4599         call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
4600 }
4601
4602 /*
4603  * creates new preallocated space for given inode
4604  */
4605 static noinline_for_stack void
4606 ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
4607 {
4608         struct super_block *sb = ac->ac_sb;
4609         struct ext4_sb_info *sbi = EXT4_SB(sb);
4610         struct ext4_prealloc_space *pa;
4611         struct ext4_group_info *grp;
4612         struct ext4_inode_info *ei;
4613
4614         /* preallocate only when the found space is larger than requested */
4615         BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len);
4616         BUG_ON(ac->ac_status != AC_STATUS_FOUND);
4617         BUG_ON(!S_ISREG(ac->ac_inode->i_mode));
4618         BUG_ON(ac->ac_pa == NULL);
4619
4620         pa = ac->ac_pa;
4621
4622         if (ac->ac_b_ex.fe_len < ac->ac_g_ex.fe_len) {
4623                 int winl;
4624                 int wins;
4625                 int win;
4626                 int offs;
4627
4628                 /* we can't allocate as much as the normalizer wants,
4629                  * so the found space must get a proper lstart
4630                  * to cover the original request */
4631                 BUG_ON(ac->ac_g_ex.fe_logical > ac->ac_o_ex.fe_logical);
4632                 BUG_ON(ac->ac_g_ex.fe_len < ac->ac_o_ex.fe_len);
4633
4634                 /* we're limited by the original request in that the
4635                  * logical block must be covered in any case;
4636                  * winl is the window we can move our chunk within */
4637                 winl = ac->ac_o_ex.fe_logical - ac->ac_g_ex.fe_logical;
4638
4639                 /* also, we should cover whole original request */
4640                 wins = EXT4_C2B(sbi, ac->ac_b_ex.fe_len - ac->ac_o_ex.fe_len);
4641
4642                 /* the smallest one defines the real window */
4643                 win = min(winl, wins);
4644
4645                 offs = ac->ac_o_ex.fe_logical %
4646                         EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
4647                 if (offs && offs < win)
4648                         win = offs;
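                /*
                 * Worked example (illustrative, cluster size == block
                 * size): o_ex of 8 blocks at logical 100, g_ex at
                 * logical 96, b_ex of 16 blocks: winl = 4, wins = 8,
                 * win = 4; offs = 100 % 16 = 4 is not < win, so
                 * fe_logical becomes 100 - 4 = 96 and the pa covers
                 * [96, 112), which contains [100, 108).
                 */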
4649
4650                 ac->ac_b_ex.fe_logical = ac->ac_o_ex.fe_logical -
4651                         EXT4_NUM_B2C(sbi, win);
4652                 BUG_ON(ac->ac_o_ex.fe_logical < ac->ac_b_ex.fe_logical);
4653                 BUG_ON(ac->ac_o_ex.fe_len > ac->ac_b_ex.fe_len);
4654         }
4655
4656         /* preallocation can change ac_b_ex, thus we store the actually
4657          * allocated blocks for history */
4658         ac->ac_f_ex = ac->ac_b_ex;
4659
4660         pa->pa_lstart = ac->ac_b_ex.fe_logical;
4661         pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
4662         pa->pa_len = ac->ac_b_ex.fe_len;
4663         pa->pa_free = pa->pa_len;
4664         spin_lock_init(&pa->pa_lock);
4665         INIT_LIST_HEAD(&pa->pa_inode_list);
4666         INIT_LIST_HEAD(&pa->pa_group_list);
4667         pa->pa_deleted = 0;
4668         pa->pa_type = MB_INODE_PA;
4669
4670         mb_debug(sb, "new inode pa %p: %llu/%d for %u\n", pa, pa->pa_pstart,
4671                  pa->pa_len, pa->pa_lstart);
4672         trace_ext4_mb_new_inode_pa(ac, pa);
4673
4674         ext4_mb_use_inode_pa(ac, pa);
4675         atomic_add(pa->pa_free, &sbi->s_mb_preallocated);
4676
4677         ei = EXT4_I(ac->ac_inode);
4678         grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
4679
4680         pa->pa_obj_lock = &ei->i_prealloc_lock;
4681         pa->pa_inode = ac->ac_inode;
4682
4683         list_add(&pa->pa_group_list, &grp->bb_prealloc_list);
4684
4685         spin_lock(pa->pa_obj_lock);
4686         list_add_rcu(&pa->pa_inode_list, &ei->i_prealloc_list);
4687         spin_unlock(pa->pa_obj_lock);
4688         atomic_inc(&ei->i_prealloc_active);
4689 }
4690
4691 /*
4692  * creates new preallocated space for the locality group this inode belongs to
4693  */
4694 static noinline_for_stack void
4695 ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
4696 {
4697         struct super_block *sb = ac->ac_sb;
4698         struct ext4_locality_group *lg;
4699         struct ext4_prealloc_space *pa;
4700         struct ext4_group_info *grp;
4701
4702         /* preallocate only when the found space is larger than requested */
4703         BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len);
4704         BUG_ON(ac->ac_status != AC_STATUS_FOUND);
4705         BUG_ON(!S_ISREG(ac->ac_inode->i_mode));
4706         BUG_ON(ac->ac_pa == NULL);
4707
4708         pa = ac->ac_pa;
4709
4710         /* preallocation can change ac_b_ex, thus we store the actually
4711          * allocated blocks for history */
4712         ac->ac_f_ex = ac->ac_b_ex;
4713
4714         pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
4715         pa->pa_lstart = pa->pa_pstart;
4716         pa->pa_len = ac->ac_b_ex.fe_len;
4717         pa->pa_free = pa->pa_len;
4718         spin_lock_init(&pa->pa_lock);
4719         INIT_LIST_HEAD(&pa->pa_inode_list);
4720         INIT_LIST_HEAD(&pa->pa_group_list);
4721         pa->pa_deleted = 0;
4722         pa->pa_type = MB_GROUP_PA;
4723
4724         mb_debug(sb, "new group pa %p: %llu/%d for %u\n", pa, pa->pa_pstart,
4725                  pa->pa_len, pa->pa_lstart);
4726         trace_ext4_mb_new_group_pa(ac, pa);
4727
4728         ext4_mb_use_group_pa(ac, pa);
4729         atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
4730
4731         grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
4732         lg = ac->ac_lg;
4733         BUG_ON(lg == NULL);
4734
4735         pa->pa_obj_lock = &lg->lg_prealloc_lock;
4736         pa->pa_inode = NULL;
4737
4738         list_add(&pa->pa_group_list, &grp->bb_prealloc_list);
4739
4740         /*
4741          * We will later add the new pa to the right bucket
4742          * after updating the pa_free in ext4_mb_release_context
4743          */
4744 }
4745
4746 static void ext4_mb_new_preallocation(struct ext4_allocation_context *ac)
4747 {
4748         if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)
4749                 ext4_mb_new_group_pa(ac);
4750         else
4751                 ext4_mb_new_inode_pa(ac);
4752 }
4753
4754 /*
4755  * finds all unused blocks in the on-disk bitmap, frees them in
4756  * the in-core bitmap and buddy.
4757  * @pa must be unlinked from inode and group lists, so that
4758  * nobody else can find/use it.
4759  * the caller MUST hold group/inode locks.
4760  * TODO: optimize the case when there are no in-core structures yet
4761  */
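/*
 * Only blocks actually handed out to the file were set in the on-disk
 * bitmap, so the zero runs found below within the pa's range are the
 * never-used part of the preallocation; they are returned to the buddy
 * and summed into "free", which is expected to match pa_free.
 */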
4762 static noinline_for_stack int
4763 ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
4764                         struct ext4_prealloc_space *pa)
4765 {
4766         struct super_block *sb = e4b->bd_sb;
4767         struct ext4_sb_info *sbi = EXT4_SB(sb);
4768         unsigned int end;
4769         unsigned int next;
4770         ext4_group_t group;
4771         ext4_grpblk_t bit;
4772         unsigned long long grp_blk_start;
4773         int free = 0;
4774
4775         BUG_ON(pa->pa_deleted == 0);
4776         ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
4777         grp_blk_start = pa->pa_pstart - EXT4_C2B(sbi, bit);
4778         BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
4779         end = bit + pa->pa_len;
4780
4781         while (bit < end) {
4782                 bit = mb_find_next_zero_bit(bitmap_bh->b_data, end, bit);
4783                 if (bit >= end)
4784                         break;
4785                 next = mb_find_next_bit(bitmap_bh->b_data, end, bit);
4786                 mb_debug(sb, "free preallocated %u/%u in group %u\n",
4787                          (unsigned) ext4_group_first_block_no(sb, group) + bit,
4788                          (unsigned) next - bit, (unsigned) group);
4789                 free += next - bit;
4790
4791                 trace_ext4_mballoc_discard(sb, NULL, group, bit, next - bit);
4792                 trace_ext4_mb_release_inode_pa(pa, (grp_blk_start +
4793                                                     EXT4_C2B(sbi, bit)),
4794                                                next - bit);
4795                 mb_free_blocks(pa->pa_inode, e4b, bit, next - bit);
4796                 bit = next + 1;
4797         }
4798         if (free != pa->pa_free) {
4799                 ext4_msg(e4b->bd_sb, KERN_CRIT,
4800                          "pa %p: logic %lu, phys. %lu, len %d",
4801                          pa, (unsigned long) pa->pa_lstart,
4802                          (unsigned long) pa->pa_pstart,
4803                          pa->pa_len);
4804                 ext4_grp_locked_error(sb, group, 0, 0, "free %u, pa_free %u",
4805                                         free, pa->pa_free);
4806                 /*
4807                  * pa is already deleted so we use the value obtained
4808                  * from the bitmap and continue.
4809                  */
4810         }
4811         atomic_add(free, &sbi->s_mb_discarded);
4812
4813         return 0;
4814 }
4815
4816 static noinline_for_stack int
4817 ext4_mb_release_group_pa(struct ext4_buddy *e4b,
4818                                 struct ext4_prealloc_space *pa)
4819 {
4820         struct super_block *sb = e4b->bd_sb;
4821         ext4_group_t group;
4822         ext4_grpblk_t bit;
4823
4824         trace_ext4_mb_release_group_pa(sb, pa);
4825         BUG_ON(pa->pa_deleted == 0);
4826         ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
4827         BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
4828         mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
4829         atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
4830         trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
4831
4832         return 0;
4833 }
4834
4835 /*
4836  * releases all preallocations in the given group
4837  *
4838  * first, we need to decide the discard policy:
4839  * - when do we discard?
4840  *   1) ENOSPC
4841  * - how many do we discard?
4842  *   1) as many as requested
4843  */
4844 static noinline_for_stack int
4845 ext4_mb_discard_group_preallocations(struct super_block *sb,
4846                                      ext4_group_t group, int *busy)
4847 {
4848         struct ext4_group_info *grp = ext4_get_group_info(sb, group);
4849         struct buffer_head *bitmap_bh = NULL;
4850         struct ext4_prealloc_space *pa, *tmp;
4851         struct list_head list;
4852         struct ext4_buddy e4b;
4853         int err;
4854         int free = 0;
4855
4856         mb_debug(sb, "discard preallocation for group %u\n", group);
4857         if (list_empty(&grp->bb_prealloc_list))
4858                 goto out_dbg;
4859
4860         bitmap_bh = ext4_read_block_bitmap(sb, group);
4861         if (IS_ERR(bitmap_bh)) {
4862                 err = PTR_ERR(bitmap_bh);
4863                 ext4_error_err(sb, -err,
4864                                "Error %d reading block bitmap for %u",
4865                                err, group);
4866                 goto out_dbg;
4867         }
4868
4869         err = ext4_mb_load_buddy(sb, group, &e4b);
4870         if (err) {
4871                 ext4_warning(sb, "Error %d loading buddy information for %u",
4872                              err, group);
4873                 put_bh(bitmap_bh);
4874                 goto out_dbg;
4875         }
4876
4877         INIT_LIST_HEAD(&list);
4878         ext4_lock_group(sb, group);
4879         list_for_each_entry_safe(pa, tmp,
4880                                 &grp->bb_prealloc_list, pa_group_list) {
4881                 spin_lock(&pa->pa_lock);
4882                 if (atomic_read(&pa->pa_count)) {
4883                         spin_unlock(&pa->pa_lock);
4884                         *busy = 1;
4885                         continue;
4886                 }
4887                 if (pa->pa_deleted) {
4888                         spin_unlock(&pa->pa_lock);
4889                         continue;
4890                 }
4891
4892                 /* seems this one can be freed ... */
4893                 ext4_mb_mark_pa_deleted(sb, pa);
4894
4895                 if (!free)
4896                         this_cpu_inc(discard_pa_seq);
4897
4898                 /* we can trust pa_free ... */
4899                 free += pa->pa_free;
4900
4901                 spin_unlock(&pa->pa_lock);
4902
4903                 list_del(&pa->pa_group_list);
4904                 list_add(&pa->u.pa_tmp_list, &list);
4905         }
4906
4907         /* now free all selected PAs */
4908         list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) {
4909
4910                 /* remove from object (inode or locality group) */
4911                 spin_lock(pa->pa_obj_lock);
4912                 list_del_rcu(&pa->pa_inode_list);
4913                 spin_unlock(pa->pa_obj_lock);
4914
4915                 if (pa->pa_type == MB_GROUP_PA)
4916                         ext4_mb_release_group_pa(&e4b, pa);
4917                 else
4918                         ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa);
4919
4920                 list_del(&pa->u.pa_tmp_list);
4921                 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
4922         }
4923
4924         ext4_unlock_group(sb, group);
4925         ext4_mb_unload_buddy(&e4b);
4926         put_bh(bitmap_bh);
4927 out_dbg:
4928         mb_debug(sb, "discarded (%d) blocks preallocated for group %u bb_free (%d)\n",
4929                  free, group, grp->bb_free);
4930         return free;
4931 }
4932
4933 /*
4934  * releases all unused preallocated blocks for the given inode
4935  *
4936  * It's important to discard preallocations under i_data_sem
4937  * We don't want another block to be served from the prealloc
4938  * space when we are discarding the inode prealloc space.
4939  *
4940  * FIXME!! Make sure it is valid at all the call sites
4941  */
4942 void ext4_discard_preallocations(struct inode *inode, unsigned int needed)
4943 {
4944         struct ext4_inode_info *ei = EXT4_I(inode);
4945         struct super_block *sb = inode->i_sb;
4946         struct buffer_head *bitmap_bh = NULL;
4947         struct ext4_prealloc_space *pa, *tmp;
4948         ext4_group_t group = 0;
4949         struct list_head list;
4950         struct ext4_buddy e4b;
4951         int err;
4952
4953         if (!S_ISREG(inode->i_mode)) {
4954                 /*BUG_ON(!list_empty(&ei->i_prealloc_list));*/
4955                 return;
4956         }
4957
4958         if (EXT4_SB(sb)->s_mount_state & EXT4_FC_REPLAY)
4959                 return;
4960
4961         mb_debug(sb, "discard preallocation for inode %lu\n",
4962                  inode->i_ino);
4963         trace_ext4_discard_preallocations(inode,
4964                         atomic_read(&ei->i_prealloc_active), needed);
4965
4966         INIT_LIST_HEAD(&list);
4967
4968         if (needed == 0)
4969                 needed = UINT_MAX;
4970
4971 repeat:
4972         /* first, collect all pa's in the inode */
4973         spin_lock(&ei->i_prealloc_lock);
4974         while (!list_empty(&ei->i_prealloc_list) && needed) {
4975                 pa = list_entry(ei->i_prealloc_list.prev,
4976                                 struct ext4_prealloc_space, pa_inode_list);
4977                 BUG_ON(pa->pa_obj_lock != &ei->i_prealloc_lock);
4978                 spin_lock(&pa->pa_lock);
4979                 if (atomic_read(&pa->pa_count)) {
4980                         /* this shouldn't happen often - nobody should
4981                          * use preallocation while we're discarding it */
4982                         spin_unlock(&pa->pa_lock);
4983                         spin_unlock(&ei->i_prealloc_lock);
4984                         ext4_msg(sb, KERN_ERR,
4985                                  "uh-oh! used pa while discarding");
4986                         WARN_ON(1);
4987                         schedule_timeout_uninterruptible(HZ);
4988                         goto repeat;
4989
4990                 }
4991                 if (pa->pa_deleted == 0) {
4992                         ext4_mb_mark_pa_deleted(sb, pa);
4993                         spin_unlock(&pa->pa_lock);
4994                         list_del_rcu(&pa->pa_inode_list);
4995                         list_add(&pa->u.pa_tmp_list, &list);
4996                         needed--;
4997                         continue;
4998                 }
4999
5000                 /* someone is deleting pa right now */
5001                 spin_unlock(&pa->pa_lock);
5002                 spin_unlock(&ei->i_prealloc_lock);
5003
5004                 /* we have to wait here because pa_deleted
5005                  * doesn't mean the pa is already unlinked from
5006                  * the list. As we might be called from
5007                  * ->clear_inode(), the inode would get freed
5008                  * and a concurrent thread unlinking the pa
5009                  * from the inode's list could access already
5010                  * freed memory, bad-bad-bad */
5011
5012                 /* XXX: if this happens too often, we can
5013                  * add a flag to force wait only in case
5014                  * of ->clear_inode(), but not in case of
5015                  * regular truncate */
5016                 schedule_timeout_uninterruptible(HZ);
5017                 goto repeat;
5018         }
5019         spin_unlock(&ei->i_prealloc_lock);
5020
5021         list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) {
5022                 BUG_ON(pa->pa_type != MB_INODE_PA);
5023                 group = ext4_get_group_number(sb, pa->pa_pstart);
5024
5025                 err = ext4_mb_load_buddy_gfp(sb, group, &e4b,
5026                                              GFP_NOFS|__GFP_NOFAIL);
5027                 if (err) {
5028                         ext4_error_err(sb, -err, "Error %d loading buddy information for %u",
5029                                        err, group);
5030                         continue;
5031                 }
5032
5033                 bitmap_bh = ext4_read_block_bitmap(sb, group);
5034                 if (IS_ERR(bitmap_bh)) {
5035                         err = PTR_ERR(bitmap_bh);
5036                         ext4_error_err(sb, -err, "Error %d reading block bitmap for %u",
5037                                        err, group);
5038                         ext4_mb_unload_buddy(&e4b);
5039                         continue;
5040                 }
5041
5042                 ext4_lock_group(sb, group);
5043                 list_del(&pa->pa_group_list);
5044                 ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa);
5045                 ext4_unlock_group(sb, group);
5046
5047                 ext4_mb_unload_buddy(&e4b);
5048                 put_bh(bitmap_bh);
5049
5050                 list_del(&pa->u.pa_tmp_list);
5051                 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
5052         }
5053 }
5054
5055 static int ext4_mb_pa_alloc(struct ext4_allocation_context *ac)
5056 {
5057         struct ext4_prealloc_space *pa;
5058
5059         BUG_ON(ext4_pspace_cachep == NULL);
5060         pa = kmem_cache_zalloc(ext4_pspace_cachep, GFP_NOFS);
5061         if (!pa)
5062                 return -ENOMEM;
5063         atomic_set(&pa->pa_count, 1);
5064         ac->ac_pa = pa;
5065         return 0;
5066 }
5067
5068 static void ext4_mb_pa_free(struct ext4_allocation_context *ac)
5069 {
5070         struct ext4_prealloc_space *pa = ac->ac_pa;
5071
5072         BUG_ON(!pa);
5073         ac->ac_pa = NULL;
5074         WARN_ON(!atomic_dec_and_test(&pa->pa_count));
5075         kmem_cache_free(ext4_pspace_cachep, pa);
5076 }
5077
5078 #ifdef CONFIG_EXT4_DEBUG
5079 static inline void ext4_mb_show_pa(struct super_block *sb)
5080 {
5081         ext4_group_t i, ngroups;
5082
5083         if (ext4_test_mount_flag(sb, EXT4_MF_FS_ABORTED))
5084                 return;
5085
5086         ngroups = ext4_get_groups_count(sb);
5087         mb_debug(sb, "groups: ");
5088         for (i = 0; i < ngroups; i++) {
5089                 struct ext4_group_info *grp = ext4_get_group_info(sb, i);
5090                 struct ext4_prealloc_space *pa;
5091                 ext4_grpblk_t start;
5092                 struct list_head *cur;
5093                 ext4_lock_group(sb, i);
5094                 list_for_each(cur, &grp->bb_prealloc_list) {
5095                         pa = list_entry(cur, struct ext4_prealloc_space,
5096                                         pa_group_list);
5097                         spin_lock(&pa->pa_lock);
5098                         ext4_get_group_no_and_offset(sb, pa->pa_pstart,
5099                                                      NULL, &start);
5100                         spin_unlock(&pa->pa_lock);
5101                         mb_debug(sb, "PA:%u:%d:%d\n", i, start,
5102                                  pa->pa_len);
5103                 }
5104                 ext4_unlock_group(sb, i);
5105                 mb_debug(sb, "%u: %d/%d\n", i, grp->bb_free,
5106                          grp->bb_fragments);
5107         }
5108 }
5109
5110 static void ext4_mb_show_ac(struct ext4_allocation_context *ac)
5111 {
5112         struct super_block *sb = ac->ac_sb;
5113
5114         if (ext4_test_mount_flag(sb, EXT4_MF_FS_ABORTED))
5115                 return;
5116
5117         mb_debug(sb, "Can't allocate:"
5118                         " Allocation context details:");
5119         mb_debug(sb, "status %u flags 0x%x",
5120                         ac->ac_status, ac->ac_flags);
5121         mb_debug(sb, "orig %lu/%lu/%lu@%lu, "
5122                         "goal %lu/%lu/%lu@%lu, "
5123                         "best %lu/%lu/%lu@%lu cr %d",
5124                         (unsigned long)ac->ac_o_ex.fe_group,
5125                         (unsigned long)ac->ac_o_ex.fe_start,
5126                         (unsigned long)ac->ac_o_ex.fe_len,
5127                         (unsigned long)ac->ac_o_ex.fe_logical,
5128                         (unsigned long)ac->ac_g_ex.fe_group,
5129                         (unsigned long)ac->ac_g_ex.fe_start,
5130                         (unsigned long)ac->ac_g_ex.fe_len,
5131                         (unsigned long)ac->ac_g_ex.fe_logical,
5132                         (unsigned long)ac->ac_b_ex.fe_group,
5133                         (unsigned long)ac->ac_b_ex.fe_start,
5134                         (unsigned long)ac->ac_b_ex.fe_len,
5135                         (unsigned long)ac->ac_b_ex.fe_logical,
5136                         (int)ac->ac_criteria);
5137         mb_debug(sb, "%u found", ac->ac_found);
5138         ext4_mb_show_pa(sb);
5139 }
5140 #else
5141 static inline void ext4_mb_show_pa(struct super_block *sb)
5142 {
5143         return;
5144 }
5145 static inline void ext4_mb_show_ac(struct ext4_allocation_context *ac)
5146 {
5147         ext4_mb_show_pa(ac->ac_sb);
5148         return;
5149 }
5150 #endif
5151
5152 /*
5153  * We use locality group preallocation for small files. The size of the
5154  * file is taken to be the current size or the resulting size after
5155  * allocation, whichever is larger.
5156  *
5157  * One can tune this size via /sys/fs/ext4/<partition>/mb_stream_req
5158  */
5159 static void ext4_mb_group_or_file(struct ext4_allocation_context *ac)
5160 {
5161         struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
5162         int bsbits = ac->ac_sb->s_blocksize_bits;
5163         loff_t size, isize;
5164         bool inode_pa_eligible, group_pa_eligible;
5165
5166         if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
5167                 return;
5168
5169         if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
5170                 return;
5171
5172         group_pa_eligible = sbi->s_mb_group_prealloc > 0;
5173         inode_pa_eligible = true;
5174         size = ac->ac_o_ex.fe_logical + EXT4_C2B(sbi, ac->ac_o_ex.fe_len);
5175         isize = (i_size_read(ac->ac_inode) + ac->ac_sb->s_blocksize - 1)
5176                 >> bsbits;
5177
5178         /* No point in using inode preallocation for closed files */
5179         if ((size == isize) && !ext4_fs_is_busy(sbi) &&
5180             !inode_is_open_for_write(ac->ac_inode))
5181                 inode_pa_eligible = false;
5182
5183         size = max(size, isize);
5184         /* Don't use group allocation for large files */
5185         if (size > sbi->s_mb_stream_request)
5186                 group_pa_eligible = false;
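        /*
         * e.g. (illustrative) with s_mb_stream_request at 16 blocks, a
         * 100-block file is too large for group preallocation and, if
         * still eligible, falls through to stream (inode) allocation
         * below.
         */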
5187
5188         if (!group_pa_eligible) {
5189                 if (inode_pa_eligible)
5190                         ac->ac_flags |= EXT4_MB_STREAM_ALLOC;
5191                 else
5192                         ac->ac_flags |= EXT4_MB_HINT_NOPREALLOC;
5193                 return;
5194         }
5195
5196         BUG_ON(ac->ac_lg != NULL);
5197         /*
5198          * Locality group prealloc space is per-CPU. The reason for having
5199          * a per-CPU locality group is to reduce contention between block
5200          * requests from multiple CPUs.
5201          */
5202         ac->ac_lg = raw_cpu_ptr(sbi->s_locality_groups);
5203
5204         /* we're going to use group allocation */
5205         ac->ac_flags |= EXT4_MB_HINT_GROUP_ALLOC;
5206
5207         /* serialize all allocations in the group */
5208         mutex_lock(&ac->ac_lg->lg_mutex);
5209 }
5210
5211 static noinline_for_stack void
5212 ext4_mb_initialize_context(struct ext4_allocation_context *ac,
5213                                 struct ext4_allocation_request *ar)
5214 {
5215         struct super_block *sb = ar->inode->i_sb;
5216         struct ext4_sb_info *sbi = EXT4_SB(sb);
5217         struct ext4_super_block *es = sbi->s_es;
5218         ext4_group_t group;
5219         unsigned int len;
5220         ext4_fsblk_t goal;
5221         ext4_grpblk_t block;
5222
5223         /* we can't allocate > group size */
5224         len = ar->len;
5225
5226         /* just a dirty hack to filter out too-big requests */
5227         if (len >= EXT4_CLUSTERS_PER_GROUP(sb))
5228                 len = EXT4_CLUSTERS_PER_GROUP(sb);
5229
5230         /* start searching from the goal */
5231         goal = ar->goal;
5232         if (goal < le32_to_cpu(es->s_first_data_block) ||
5233                         goal >= ext4_blocks_count(es))
5234                 goal = le32_to_cpu(es->s_first_data_block);
5235         ext4_get_group_no_and_offset(sb, goal, &group, &block);
5236
5237         /* set up allocation goals */
5238         ac->ac_b_ex.fe_logical = EXT4_LBLK_CMASK(sbi, ar->logical);
5239         ac->ac_status = AC_STATUS_CONTINUE;
5240         ac->ac_sb = sb;
5241         ac->ac_inode = ar->inode;
5242         ac->ac_o_ex.fe_logical = ac->ac_b_ex.fe_logical;
5243         ac->ac_o_ex.fe_group = group;
5244         ac->ac_o_ex.fe_start = block;
5245         ac->ac_o_ex.fe_len = len;
5246         ac->ac_g_ex = ac->ac_o_ex;
5247         ac->ac_flags = ar->flags;
5248
5249         /* we have to define context: we'll work with a file or
5250          * locality group. this is a policy, actually */
5251         ext4_mb_group_or_file(ac);
5252
5253         mb_debug(sb, "init ac: %u blocks @ %u, goal %u, flags 0x%x, 2^%d, "
5254                         "left: %u/%u, right %u/%u to %swritable\n",
5255                         (unsigned) ar->len, (unsigned) ar->logical,
5256                         (unsigned) ar->goal, ac->ac_flags, ac->ac_2order,
5257                         (unsigned) ar->lleft, (unsigned) ar->pleft,
5258                         (unsigned) ar->lright, (unsigned) ar->pright,
5259                         inode_is_open_for_write(ar->inode) ? "" : "non-");
5260 }
5261
5262 static noinline_for_stack void
5263 ext4_mb_discard_lg_preallocations(struct super_block *sb,
5264                                         struct ext4_locality_group *lg,
5265                                         int order, int total_entries)
5266 {
5267         ext4_group_t group = 0;
5268         struct ext4_buddy e4b;
5269         struct list_head discard_list;
5270         struct ext4_prealloc_space *pa, *tmp;
5271
5272         mb_debug(sb, "discard locality group preallocation\n");
5273
5274         INIT_LIST_HEAD(&discard_list);
5275
5276         spin_lock(&lg->lg_prealloc_lock);
5277         list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[order],
5278                                 pa_inode_list,
5279                                 lockdep_is_held(&lg->lg_prealloc_lock)) {
5280                 spin_lock(&pa->pa_lock);
5281                 if (atomic_read(&pa->pa_count)) {
5282                         /*
5283                          * This is the pa that we just used
5284                          * for block allocation, so don't
5285                          * free it.
5286                          */
5287                         spin_unlock(&pa->pa_lock);
5288                         continue;
5289                 }
5290                 if (pa->pa_deleted) {
5291                         spin_unlock(&pa->pa_lock);
5292                         continue;
5293                 }
5294                 /* only lg prealloc space */
5295                 BUG_ON(pa->pa_type != MB_GROUP_PA);
5296
5297                 /* seems this one can be freed ... */
5298                 ext4_mb_mark_pa_deleted(sb, pa);
5299                 spin_unlock(&pa->pa_lock);
5300
5301                 list_del_rcu(&pa->pa_inode_list);
5302                 list_add(&pa->u.pa_tmp_list, &discard_list);
5303
5304                 total_entries--;
5305                 if (total_entries <= 5) {
5306                         /*
5307                          * we want to keep only 5 entries,
5308                          * allowing the list to grow to 8. This
5309                          * makes sure we don't call discard
5310                          * again soon for this list.
5311                          */
5312                         break;
5313                 }
5314         }
5315         spin_unlock(&lg->lg_prealloc_lock);
5316
5317         list_for_each_entry_safe(pa, tmp, &discard_list, u.pa_tmp_list) {
5318                 int err;
5319
5320                 group = ext4_get_group_number(sb, pa->pa_pstart);
5321                 err = ext4_mb_load_buddy_gfp(sb, group, &e4b,
5322                                              GFP_NOFS|__GFP_NOFAIL);
5323                 if (err) {
5324                         ext4_error_err(sb, -err, "Error %d loading buddy information for %u",
5325                                        err, group);
5326                         continue;
5327                 }
5328                 ext4_lock_group(sb, group);
5329                 list_del(&pa->pa_group_list);
5330                 ext4_mb_release_group_pa(&e4b, pa);
5331                 ext4_unlock_group(sb, group);
5332
5333                 ext4_mb_unload_buddy(&e4b);
5334                 list_del(&pa->u.pa_tmp_list);
5335                 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
5336         }
5337 }
5338
5339 /*
5340  * We have incremented pa_count. So it cannot be freed at this
5341  * point. Also we hold lg_mutex. So no parallel allocation is
5342  * possible from this lg. That means pa_free cannot be updated.
5343  *
5344  * A parallel ext4_mb_discard_group_preallocations() is possible,
5345  * however, which can cause the lg_prealloc_list to be updated.
5346  */
5347
5348 static void ext4_mb_add_n_trim(struct ext4_allocation_context *ac)
5349 {
5350         int order, added = 0, lg_prealloc_count = 1;
5351         struct super_block *sb = ac->ac_sb;
5352         struct ext4_locality_group *lg = ac->ac_lg;
5353         struct ext4_prealloc_space *tmp_pa, *pa = ac->ac_pa;
5354
5355         order = fls(pa->pa_free) - 1;
5356         if (order > PREALLOC_TB_SIZE - 1)
5357                 /* The max size of hash table is PREALLOC_TB_SIZE */
5358                 order = PREALLOC_TB_SIZE - 1;
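             /*
              * The bucket index is floor(log2(pa_free)), with pa_free
              * known to be non-zero here: e.g. pa_free == 12 gives
              * fls(12) - 1 == 3, pa_free == 1 lands in order 0, and the
              * clamp above caps it at PREALLOC_TB_SIZE - 1.
              */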
5359         /* Add the prealloc space to lg */
5360         spin_lock(&lg->lg_prealloc_lock);
5361         list_for_each_entry_rcu(tmp_pa, &lg->lg_prealloc_list[order],
5362                                 pa_inode_list,
5363                                 lockdep_is_held(&lg->lg_prealloc_lock)) {
5364                 spin_lock(&tmp_pa->pa_lock);
5365                 if (tmp_pa->pa_deleted) {
5366                         spin_unlock(&tmp_pa->pa_lock);
5367                         continue;
5368                 }
5369                 if (!added && pa->pa_free < tmp_pa->pa_free) {
5370                         /* insert just before the first larger entry */
5371                         list_add_tail_rcu(&pa->pa_inode_list,
5372                                                 &tmp_pa->pa_inode_list);
5373                         added = 1;
5374                         /*
5375                          * Keep iterating so we still count
5376                          * the total number of entries.
5377                          */
5378                 }
5379                 spin_unlock(&tmp_pa->pa_lock);
5380                 lg_prealloc_count++;
5381         }
5382         if (!added)
5383                 list_add_tail_rcu(&pa->pa_inode_list,
5384                                         &lg->lg_prealloc_list[order]);
5385         spin_unlock(&lg->lg_prealloc_lock);
5386
5387         /* Now trim the list so it holds no more than 8 elements */
5388         if (lg_prealloc_count > 8) {
5389                 ext4_mb_discard_lg_preallocations(sb, lg,
5390                                                   order, lg_prealloc_count);
5391                 return;
5392         }
5393         return;
5394 }
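
     /*
      * A minimal sketch of the ordered insert performed in
      * ext4_mb_add_n_trim() above, assuming a plain list sorted by
      * ascending free count and ignoring RCU, locking and the order
      * buckets; demo_pa and demo_insert_sorted are illustrative names
      * only, not part of mballoc.
      */
     struct demo_pa {
             int free;
             struct list_head node;
     };

     static __maybe_unused void demo_insert_sorted(struct demo_pa *pa,
                                                   struct list_head *head)
     {
             struct demo_pa *tmp;

             list_for_each_entry(tmp, head, node) {
                     if (pa->free < tmp->free) {
                             /* insert just before the first larger entry */
                             list_add_tail(&pa->node, &tmp->node);
                             return;
                     }
             }
             /* largest free count seen so far: append at the tail */
             list_add_tail(&pa->node, head);
     }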
5395
5396 /*
5397  * if per-inode prealloc list is too long, trim some PA
5398  */
5399 static void ext4_mb_trim_inode_pa(struct inode *inode)
5400 {
5401         struct ext4_inode_info *ei = EXT4_I(inode);
5402         struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
5403         int count, delta;
5404
5405         count = atomic_read(&ei->i_prealloc_active);
5406         delta = (sbi->s_mb_max_inode_prealloc >> 2) + 1;
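             /*
              * Worked example: with s_mb_max_inode_prealloc at, say, 32,
              * delta is 32/4 + 1 == 9, so trimming only starts once the
              * inode holds more than 41 PAs, and it then discards down
              * to 32. The slack avoids discarding on every allocation
              * near the limit.
              */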
5407         if (count > sbi->s_mb_max_inode_prealloc + delta) {
5408                 count -= sbi->s_mb_max_inode_prealloc;
5409                 ext4_discard_preallocations(inode, count);
5410         }
5411 }
5412
5413 /*
5414  * release all resources used in the allocation
5415  */
5416 static int ext4_mb_release_context(struct ext4_allocation_context *ac)
5417 {
5418         struct inode *inode = ac->ac_inode;
5419         struct ext4_inode_info *ei = EXT4_I(inode);
5420         struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
5421         struct ext4_prealloc_space *pa = ac->ac_pa;
5422         if (pa) {
5423                 if (pa->pa_type == MB_GROUP_PA) {
5424                         /* see comment in ext4_mb_use_group_pa() */
5425                         spin_lock(&pa->pa_lock);
5426                         pa->pa_pstart += EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
5427                         pa->pa_lstart += EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
5428                         pa->pa_free -= ac->ac_b_ex.fe_len;
5429                         pa->pa_len -= ac->ac_b_ex.fe_len;
5430                         spin_unlock(&pa->pa_lock);
5431
5432                         /*
5433                          * We want to add the pa to the right bucket.
5434                          * Remove it from the list and while adding
5435                          * make sure the list to which we are adding
5436                          * doesn't grow big.
5437                          */
5438                         if (likely(pa->pa_free)) {
5439                                 spin_lock(pa->pa_obj_lock);
5440                                 list_del_rcu(&pa->pa_inode_list);
5441                                 spin_unlock(pa->pa_obj_lock);
5442                                 ext4_mb_add_n_trim(ac);
5443                         }
5444                 }
5445
5446                 if (pa->pa_type == MB_INODE_PA) {
5447                         /*
5448                          * treat the per-inode prealloc list as an LRU list,
5449                          * then try to trim the least recently used PA.
5450                          */
5451                         spin_lock(pa->pa_obj_lock);
5452                         list_move(&pa->pa_inode_list, &ei->i_prealloc_list);
5453                         spin_unlock(pa->pa_obj_lock);
5454                 }
5455
5456                 ext4_mb_put_pa(ac, ac->ac_sb, pa);
5457         }
5458         if (ac->ac_bitmap_page)
5459                 put_page(ac->ac_bitmap_page);
5460         if (ac->ac_buddy_page)
5461                 put_page(ac->ac_buddy_page);
5462         if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)
5463                 mutex_unlock(&ac->ac_lg->lg_mutex);
5464         ext4_mb_collect_stats(ac);
5465         ext4_mb_trim_inode_pa(inode);
5466         return 0;
5467 }
5468
5469 static int ext4_mb_discard_preallocations(struct super_block *sb, int needed)
5470 {
5471         ext4_group_t i, ngroups = ext4_get_groups_count(sb);
5472         int ret;
5473         int freed = 0, busy = 0;
5474         int retry = 0;
5475
5476         trace_ext4_mb_discard_preallocations(sb, needed);
5477
5478         if (needed == 0)
5479                 needed = EXT4_CLUSTERS_PER_GROUP(sb) + 1;
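             /*
              * A "needed" of 0 was just turned into one group's worth of
              * clusters plus one, so the scan below keeps discarding
              * until that much is freed or every group has been visited.
              */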
5480  repeat:
5481         for (i = 0; i < ngroups && needed > 0; i++) {
5482                 ret = ext4_mb_discard_group_preallocations(sb, i, &busy);
5483                 freed += ret;
5484                 needed -= ret;
5485                 cond_resched();
5486         }
5487
5488         if (needed > 0 && busy && ++retry < 3) {
5489                 busy = 0;
5490                 goto repeat;
5491         }
5492
5493         return freed;
5494 }
5495
5496 static bool ext4_mb_discard_preallocations_should_retry(struct super_block *sb,
5497                         struct ext4_allocation_context *ac, u64 *seq)
5498 {
5499         int freed;
5500         u64 seq_retry = 0;
5501         bool ret = false;
5502
5503         freed = ext4_mb_discard_preallocations(sb, ac->ac_o_ex.fe_len);
5504         if (freed) {
5505                 ret = true;
5506                 goto out_dbg;
5507         }
5508         seq_retry = ext4_get_discard_pa_seq_sum();
5509         if (!(ac->ac_flags & EXT4_MB_STRICT_CHECK) || seq_retry != *seq) {
5510                 ac->ac_flags |= EXT4_MB_STRICT_CHECK;
5511                 *seq = seq_retry;
5512                 ret = true;
5513         }
5514
5515 out_dbg:
5516         mb_debug(sb, "freed %d, retry ? %s\n", freed, ret ? "yes" : "no");
5517         return ret;
5518 }
5519
5520 static ext4_fsblk_t ext4_mb_new_blocks_simple(handle_t *handle,
5521                                 struct ext4_allocation_request *ar, int *errp);
5522
5523 /*
5524  * Main entry point into mballoc to allocate blocks:
5525  * it tries to use preallocation first, then falls back
5526  * to regular allocation.
5527  */
5528 ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
5529                                 struct ext4_allocation_request *ar, int *errp)
5530 {
5531         struct ext4_allocation_context *ac = NULL;
5532         struct ext4_sb_info *sbi;
5533         struct super_block *sb;
5534         ext4_fsblk_t block = 0;
5535         unsigned int inquota = 0;
5536         unsigned int reserv_clstrs = 0;
5537         int retries = 0;
5538         u64 seq;
5539
5540         might_sleep();
5541         sb = ar->inode->i_sb;
5542         sbi = EXT4_SB(sb);
5543
5544         trace_ext4_request_blocks(ar);
5545         if (sbi->s_mount_state & EXT4_FC_REPLAY)
5546                 return ext4_mb_new_blocks_simple(handle, ar, errp);
5547
5548         /* Allow use of the superuser reservation for quota files */
5549         if (ext4_is_quota_file(ar->inode))
5550                 ar->flags |= EXT4_MB_USE_ROOT_BLOCKS;
5551
5552         if ((ar->flags & EXT4_MB_DELALLOC_RESERVED) == 0) {
5553                 /* Without delayed allocation we need to verify
5554                  * there are enough free blocks to do block allocation
5555                  * and verify allocation doesn't exceed the quota limits.
5556                  */
5557                 while (ar->len &&
5558                         ext4_claim_free_clusters(sbi, ar->len, ar->flags)) {
5559
5560                         /* let others free the space */
5561                         cond_resched();
5562                         ar->len = ar->len >> 1;
5563                 }
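                     /*
                      * Geometric backoff: e.g. a 256-cluster request may
                      * shrink 256 -> 128 -> 64 -> ... until the claim
                      * succeeds or ar->len reaches 0 and we return
                      * -ENOSPC below.
                      */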
5564                 if (!ar->len) {
5565                         ext4_mb_show_pa(sb);
5566                         *errp = -ENOSPC;
5567                         return 0;
5568                 }
5569                 reserv_clstrs = ar->len;
5570                 if (ar->flags & EXT4_MB_USE_ROOT_BLOCKS) {
5571                         dquot_alloc_block_nofail(ar->inode,
5572                                                  EXT4_C2B(sbi, ar->len));
5573                 } else {
5574                         while (ar->len &&
5575                                 dquot_alloc_block(ar->inode,
5576                                                   EXT4_C2B(sbi, ar->len))) {
5577
5578                                 ar->flags |= EXT4_MB_HINT_NOPREALLOC;
5579                                 ar->len--;
5580                         }
5581                 }
5582                 inquota = ar->len;
5583                 if (ar->len == 0) {
5584                         *errp = -EDQUOT;
5585                         goto out;
5586                 }
5587         }
5588
5589         ac = kmem_cache_zalloc(ext4_ac_cachep, GFP_NOFS);
5590         if (!ac) {
5591                 ar->len = 0;
5592                 *errp = -ENOMEM;
5593                 goto out;
5594         }
5595
5596         ext4_mb_initialize_context(ac, ar);
5597
5598         ac->ac_op = EXT4_MB_HISTORY_PREALLOC;
5599         seq = this_cpu_read(discard_pa_seq);
5600         if (!ext4_mb_use_preallocated(ac)) {
5601                 ac->ac_op = EXT4_MB_HISTORY_ALLOC;
5602                 ext4_mb_normalize_request(ac, ar);
5603
5604                 *errp = ext4_mb_pa_alloc(ac);
5605                 if (*errp)
5606                         goto errout;
5607 repeat:
5608                 /* allocate space in core */
5609                 *errp = ext4_mb_regular_allocator(ac);
5610                 /*
5611                  * The pa allocated above is added to grp->bb_prealloc_list
5612                  * only when we were able to allocate some blocks, i.e. when
5613                  * ac->ac_status == AC_STATUS_FOUND.
5614                  * An error above means ac->ac_status != AC_STATUS_FOUND,
5615                  * so we have to free this pa here ourselves.
5616                  */
5617                 if (*errp) {
5618                         ext4_mb_pa_free(ac);
5619                         ext4_discard_allocated_blocks(ac);
5620                         goto errout;
5621                 }
5622                 if (ac->ac_status == AC_STATUS_FOUND &&
5623                         ac->ac_o_ex.fe_len >= ac->ac_f_ex.fe_len)
5624                         ext4_mb_pa_free(ac);
5625         }
5626         if (likely(ac->ac_status == AC_STATUS_FOUND)) {
5627                 *errp = ext4_mb_mark_diskspace_used(ac, handle, reserv_clstrs);
5628                 if (*errp) {
5629                         ext4_discard_allocated_blocks(ac);
5630                         goto errout;
5631                 } else {
5632                         block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
5633                         ar->len = ac->ac_b_ex.fe_len;
5634                 }
5635         } else {
5636                 if (++retries < 3 &&
5637                     ext4_mb_discard_preallocations_should_retry(sb, ac, &seq))
5638                         goto repeat;
5639                 /*
5640                  * If block allocation fails then the pa allocated above
5641                  * needs to be freed here itself.
5642                  */
5643                 ext4_mb_pa_free(ac);
5644                 *errp = -ENOSPC;
5645         }
5646
5647 errout:
5648         if (*errp) {
5649                 ac->ac_b_ex.fe_len = 0;
5650                 ar->len = 0;
5651                 ext4_mb_show_ac(ac);
5652         }
5653         ext4_mb_release_context(ac);
5654 out:
5655         if (ac)
5656                 kmem_cache_free(ext4_ac_cachep, ac);
5657         if (inquota && ar->len < inquota)
5658                 dquot_free_block(ar->inode, EXT4_C2B(sbi, inquota - ar->len));
5659         if (!ar->len) {
5660                 if ((ar->flags & EXT4_MB_DELALLOC_RESERVED) == 0)
5661                         /* release all the reserved blocks if non delalloc */
5662                         percpu_counter_sub(&sbi->s_dirtyclusters_counter,
5663                                                 reserv_clstrs);
5664         }
5665
5666         trace_ext4_allocate_blocks(ar, (unsigned long long)block);
5667
5668         return block;
5669 }
5670
5671 /*
5672  * We can merge two free data extents only if the physical blocks
5673  * are contiguous, AND the extents were freed by the same transaction,
5674  * AND the blocks are associated with the same group.
5675  */
5676 static void ext4_try_merge_freed_extent(struct ext4_sb_info *sbi,
5677                                         struct ext4_free_data *entry,
5678                                         struct ext4_free_data *new_entry,
5679                                         struct rb_root *entry_rb_root)
5680 {
5681         if ((entry->efd_tid != new_entry->efd_tid) ||
5682             (entry->efd_group != new_entry->efd_group))
5683                 return;
5684         if (entry->efd_start_cluster + entry->efd_count ==
5685             new_entry->efd_start_cluster) {
5686                 new_entry->efd_start_cluster = entry->efd_start_cluster;
5687                 new_entry->efd_count += entry->efd_count;
5688         } else if (new_entry->efd_start_cluster + new_entry->efd_count ==
5689                    entry->efd_start_cluster) {
5690                 new_entry->efd_count += entry->efd_count;
5691         } else
5692                 return;
5693         spin_lock(&sbi->s_md_lock);
5694         list_del(&entry->efd_list);
5695         spin_unlock(&sbi->s_md_lock);
5696         rb_erase(&entry->efd_node, entry_rb_root);
5697         kmem_cache_free(ext4_free_data_cachep, entry);
5698 }
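
     /*
      * A minimal sketch of the adjacency test above, assuming bare
      * (start, count) extents and leaving out the tid/group checks and
      * the list/rb-tree removal; demo_extent and demo_try_merge are
      * illustrative names only.
      */
     struct demo_extent {
             unsigned int start;
             unsigned int count;
     };

     static __maybe_unused bool demo_try_merge(struct demo_extent *entry,
                                               struct demo_extent *new_entry)
     {
             if (entry->start + entry->count == new_entry->start) {
                     /* entry sits just to the left: grow new_entry leftwards */
                     new_entry->start = entry->start;
                     new_entry->count += entry->count;
                     return true;
             }
             if (new_entry->start + new_entry->count == entry->start) {
                     /* entry sits just to the right: grow new_entry rightwards */
                     new_entry->count += entry->count;
                     return true;
             }
             return false;
     }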
5699
5700 static noinline_for_stack int
5701 ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b,
5702                       struct ext4_free_data *new_entry)
5703 {
5704         ext4_group_t group = e4b->bd_group;
5705         ext4_grpblk_t cluster;
5706         ext4_grpblk_t clusters = new_entry->efd_count;
5707         struct ext4_free_data *entry;
5708         struct ext4_group_info *db = e4b->bd_info;
5709         struct super_block *sb = e4b->bd_sb;
5710         struct ext4_sb_info *sbi = EXT4_SB(sb);
5711         struct rb_node **n = &db->bb_free_root.rb_node, *node;
5712         struct rb_node *parent = NULL, *new_node;
5713
5714         BUG_ON(!ext4_handle_valid(handle));
5715         BUG_ON(e4b->bd_bitmap_page == NULL);
5716         BUG_ON(e4b->bd_buddy_page == NULL);
5717
5718         new_node = &new_entry->efd_node;
5719         cluster = new_entry->efd_start_cluster;
5720
5721         if (!*n) {
5722                 /* first free block extent. We need to
5723                  * protect the buddy cache from being freed,
5724                  * otherwise we'll refresh it from the
5725                  * on-disk bitmap and lose not-yet-available
5726                  * blocks */
5727                 get_page(e4b->bd_buddy_page);
5728                 get_page(e4b->bd_bitmap_page);
5729         }
5730         while (*n) {
5731                 parent = *n;
5732                 entry = rb_entry(parent, struct ext4_free_data, efd_node);
5733                 if (cluster < entry->efd_start_cluster)
5734                         n = &(*n)->rb_left;
5735                 else if (cluster >= (entry->efd_start_cluster + entry->efd_count))
5736                         n = &(*n)->rb_right;
5737                 else {
5738                         ext4_grp_locked_error(sb, group, 0,
5739                                 ext4_group_first_block_no(sb, group) +
5740                                 EXT4_C2B(sbi, cluster),
5741                                 "Block already on to-be-freed list");
5742                         kmem_cache_free(ext4_free_data_cachep, new_entry);
5743                         return 0;
5744                 }
5745         }
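             /*
              * Standard rbtree descent finished: *n is the empty link
              * where the new node belongs and parent is the node above
              * it (NULL for an empty tree).
              */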
5746
5747         rb_link_node(new_node, parent, n);
5748         rb_insert_color(new_node, &db->bb_free_root);
5749
5750         /* Now see if the extent can be merged to the left and right */
5751         node = rb_prev(new_node);
5752         if (node) {
5753                 entry = rb_entry(node, struct ext4_free_data, efd_node);
5754                 ext4_try_merge_freed_extent(sbi, entry, new_entry,
5755                                             &(db->bb_free_root));
5756         }
5757
5758         node = rb_next(new_node);
5759         if (node) {
5760                 entry = rb_entry(node, struct ext4_free_data, efd_node);
5761                 ext4_try_merge_freed_extent(sbi, entry, new_entry,
5762                                             &(db->bb_free_root));
5763         }
5764
5765         spin_lock(&sbi->s_md_lock);
5766         list_add_tail(&new_entry->efd_list, &sbi->s_freed_data_list);
5767         sbi->s_mb_free_pending += clusters;
5768         spin_unlock(&sbi->s_md_lock);
5769         return 0;
5770 }
5771
5772 /*
5773  * Simple allocator for Ext4 fast commit replay path. It searches for blocks
5774  * linearly starting at the goal block and also excludes the blocks which
5775  * are going to be in use after fast commit replay.
5776  */
5777 static ext4_fsblk_t ext4_mb_new_blocks_simple(handle_t *handle,
5778                                 struct ext4_allocation_request *ar, int *errp)
5779 {
5780         struct buffer_head *bitmap_bh;
5781         struct super_block *sb = ar->inode->i_sb;
5782         ext4_group_t group;
5783         ext4_grpblk_t blkoff;
5784         ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb);
5785         ext4_grpblk_t i = 0;
5786         ext4_fsblk_t goal, block;
5787         struct ext4_super_block *es = EXT4_SB(sb)->s_es;
5788
5789         goal = ar->goal;
5790         if (goal < le32_to_cpu(es->s_first_data_block) ||
5791                         goal >= ext4_blocks_count(es))
5792                 goal = le32_to_cpu(es->s_first_data_block);
5793
5794         ar->len = 0;
5795         ext4_get_group_no_and_offset(sb, goal, &group, &blkoff);
5796         for (; group < ext4_get_groups_count(sb); group++) {
5797                 bitmap_bh = ext4_read_block_bitmap(sb, group);
5798                 if (IS_ERR(bitmap_bh)) {
5799                         *errp = PTR_ERR(bitmap_bh);
5800                         pr_warn("Failed to read block bitmap\n");
5801                         return 0;
5802                 }
5803
5804                 ext4_get_group_no_and_offset(sb,
5805                         max(ext4_group_first_block_no(sb, group), goal),
5806                         NULL, &blkoff);
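                     /*
                      * The first group scanned starts at the goal's offset
                      * within the group; every later group starts at 0.
                      */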
5807                 while (1) {
5808                         i = mb_find_next_zero_bit(bitmap_bh->b_data, max,
5809                                                 blkoff);
5810                         if (i >= max)
5811                                 break;
5812                         if (ext4_fc_replay_check_excluded(sb,
5813                                 ext4_group_first_block_no(sb, group) + i)) {
5814                                 blkoff = i + 1;
5815                         } else
5816                                 break;
5817                 }
5818                 brelse(bitmap_bh);
5819                 if (i < max)
5820                         break;
5821         }
5822
5823         if (group >= ext4_get_groups_count(sb) || i >= max) {
5824                 *errp = -ENOSPC;
5825                 return 0;
5826         }
5827
5828         block = ext4_group_first_block_no(sb, group) + i;
5829         ext4_mb_mark_bb(sb, block, 1, 1);
5830         ar->len = 1;
5831
5832         return block;
5833 }
5834
5835 static void ext4_free_blocks_simple(struct inode *inode, ext4_fsblk_t block,
5836                                         unsigned long count)
5837 {
5838         struct buffer_head *bitmap_bh;
5839         struct super_block *sb = inode->i_sb;
5840         struct ext4_group_desc *gdp;
5841         struct buffer_head *gdp_bh;
5842         ext4_group_t group;
5843         ext4_grpblk_t blkoff;
5844         int already_freed = 0, err, i;
5845
5846         ext4_get_group_no_and_offset(sb, block, &group, &blkoff);
5847         bitmap_bh = ext4_read_block_bitmap(sb, group);
5848         if (IS_ERR(bitmap_bh)) {
5849                 err = PTR_ERR(bitmap_bh);
5850                 pr_warn("Failed to read block bitmap\n");
5851                 return;
5852         }
5853         gdp = ext4_get_group_desc(sb, group, &gdp_bh);
5854         if (!gdp) {
5855                 brelse(bitmap_bh);
                     return;
             }
5856
5857         for (i = 0; i < count; i++) {
5858                 if (!mb_test_bit(blkoff + i, bitmap_bh->b_data))
5859                         already_freed++;
5860         }
5861         mb_clear_bits(bitmap_bh->b_data, blkoff, count);
5862         err = ext4_handle_dirty_metadata(NULL, NULL, bitmap_bh);
5863         if (err) {
5864                 brelse(bitmap_bh);
                     return;
             }
5865         ext4_free_group_clusters_set(
5866                 sb, gdp, ext4_free_group_clusters(sb, gdp) +
5867                 count - already_freed);
5868         ext4_block_bitmap_csum_set(sb, gdp, bitmap_bh);
5869         ext4_group_desc_csum_set(sb, group, gdp);
5870         ext4_handle_dirty_metadata(NULL, NULL, gdp_bh);
5871         sync_dirty_buffer(bitmap_bh);
5872         sync_dirty_buffer(gdp_bh);
5873         brelse(bitmap_bh);
5874 }
5875
5876 /**
5877  * ext4_mb_clear_bb() -- helper function for freeing blocks.
5878  *                      Used by ext4_free_blocks()
5879  * @handle:             handle for this transaction
5880  * @inode:              inode
5881  * @block:              starting physical block to be freed
5882  * @count:              number of blocks to be freed
5883  * @flags:              flags used by ext4_free_blocks
5884  */
5885 static void ext4_mb_clear_bb(handle_t *handle, struct inode *inode,
5886                                ext4_fsblk_t block, unsigned long count,
5887                                int flags)
5888 {
5889         struct buffer_head *bitmap_bh = NULL;
5890         struct super_block *sb = inode->i_sb;
5891         struct ext4_group_desc *gdp;
5892         unsigned int overflow;
5893         ext4_grpblk_t bit;
5894         struct buffer_head *gd_bh;
5895         ext4_group_t block_group;
5896         struct ext4_sb_info *sbi;
5897         struct ext4_buddy e4b;
5898         unsigned int count_clusters;
5899         int err = 0;
5900         int ret;
5901
5902         sbi = EXT4_SB(sb);
5903
5904         if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) &&
5905             !ext4_inode_block_valid(inode, block, count)) {
5906                 ext4_error(sb, "Freeing blocks in system zone - "
5907                            "Block = %llu, count = %lu", block, count);
5908                 /* err = 0. ext4_std_error should be a no op */
5909                 goto error_return;
5910         }
5911         flags |= EXT4_FREE_BLOCKS_VALIDATED;
5912
5913 do_more:
5914         overflow = 0;
5915         ext4_get_group_no_and_offset(sb, block, &block_group, &bit);
5916
5917         if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(
5918                         ext4_get_group_info(sb, block_group))))
5919                 return;
5920
5921         /*
5922          * Check to see if we are freeing blocks across a group
5923          * boundary.
5924          */
5925         if (EXT4_C2B(sbi, bit) + count > EXT4_BLOCKS_PER_GROUP(sb)) {
5926                 overflow = EXT4_C2B(sbi, bit) + count -
5927                         EXT4_BLOCKS_PER_GROUP(sb);
5928                 count -= overflow;
5929                 /* The range changed so it's no longer validated */
5930                 flags &= ~EXT4_FREE_BLOCKS_VALIDATED;
5931         }
5932         count_clusters = EXT4_NUM_B2C(sbi, count);
5933         bitmap_bh = ext4_read_block_bitmap(sb, block_group);
5934         if (IS_ERR(bitmap_bh)) {
5935                 err = PTR_ERR(bitmap_bh);
5936                 bitmap_bh = NULL;
5937                 goto error_return;
5938         }
5939         gdp = ext4_get_group_desc(sb, block_group, &gd_bh);
5940         if (!gdp) {
5941                 err = -EIO;
5942                 goto error_return;
5943         }
5944
5945         if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) &&
5946             !ext4_inode_block_valid(inode, block, count)) {
5947                 ext4_error(sb, "Freeing blocks in system zone - "
5948                            "Block = %llu, count = %lu", block, count);
5949                 /* err = 0. ext4_std_error should be a no op */
5950                 goto error_return;
5951         }
5952
5953         BUFFER_TRACE(bitmap_bh, "getting write access");
5954         err = ext4_journal_get_write_access(handle, sb, bitmap_bh,
5955                                             EXT4_JTR_NONE);
5956         if (err)
5957                 goto error_return;
5958
5959         /*
5960          * We are about to modify some metadata.  Call the journal APIs
5961          * to unshare ->b_data if a currently-committing transaction is
5962          * using it
5963          */
5964         BUFFER_TRACE(gd_bh, "get_write_access");
5965         err = ext4_journal_get_write_access(handle, sb, gd_bh, EXT4_JTR_NONE);
5966         if (err)
5967                 goto error_return;
5968 #ifdef AGGRESSIVE_CHECK
5969         {
5970                 int i;
5971                 for (i = 0; i < count_clusters; i++)
5972                         BUG_ON(!mb_test_bit(bit + i, bitmap_bh->b_data));
5973         }
5974 #endif
5975         trace_ext4_mballoc_free(sb, inode, block_group, bit, count_clusters);
5976
5977         /* __GFP_NOFAIL: retry infinitely, ignore TIF_MEMDIE and memcg limit. */
5978         err = ext4_mb_load_buddy_gfp(sb, block_group, &e4b,
5979                                      GFP_NOFS|__GFP_NOFAIL);
5980         if (err)
5981                 goto error_return;
5982
5983         /*
5984          * We need to make sure we don't reuse the freed block until after the
5985          * transaction is committed. We make an exception if the inode is to be
5986          * written in writeback mode since writeback mode has weak data
5987          * consistency guarantees.
5988          */
5989         if (ext4_handle_valid(handle) &&
5990             ((flags & EXT4_FREE_BLOCKS_METADATA) ||
5991              !ext4_should_writeback_data(inode))) {
5992                 struct ext4_free_data *new_entry;
5993                 /*
5994                  * We use __GFP_NOFAIL because ext4_free_blocks() is not allowed
5995                  * to fail.
5996                  */
5997                 new_entry = kmem_cache_alloc(ext4_free_data_cachep,
5998                                 GFP_NOFS|__GFP_NOFAIL);
5999                 new_entry->efd_start_cluster = bit;
6000                 new_entry->efd_group = block_group;
6001                 new_entry->efd_count = count_clusters;
6002                 new_entry->efd_tid = handle->h_transaction->t_tid;
6003
6004                 ext4_lock_group(sb, block_group);
6005                 mb_clear_bits(bitmap_bh->b_data, bit, count_clusters);
6006                 ext4_mb_free_metadata(handle, &e4b, new_entry);
6007         } else {
6008                 /* need to update group_info->bb_free and bitmap
6009                  * with the group lock held. generate_buddy looks at
6010                  * them with the group lock held
6011                  */
6012                 if (test_opt(sb, DISCARD)) {
6013                         err = ext4_issue_discard(sb, block_group, bit, count,
6014                                                  NULL);
6015                         if (err && err != -EOPNOTSUPP)
6016                                 ext4_msg(sb, KERN_WARNING, "discard request in"
6017                                          " group:%u block:%d count:%lu failed"
6018                                          " with %d", block_group, bit, count,
6019                                          err);
6020                 } else
6021                         EXT4_MB_GRP_CLEAR_TRIMMED(e4b.bd_info);
6022
6023                 ext4_lock_group(sb, block_group);
6024                 mb_clear_bits(bitmap_bh->b_data, bit, count_clusters);
6025                 mb_free_blocks(inode, &e4b, bit, count_clusters);
6026         }
6027
6028         ret = ext4_free_group_clusters(sb, gdp) + count_clusters;
6029         ext4_free_group_clusters_set(sb, gdp, ret);
6030         ext4_block_bitmap_csum_set(sb, gdp, bitmap_bh);
6031         ext4_group_desc_csum_set(sb, block_group, gdp);
6032         ext4_unlock_group(sb, block_group);
6033
6034         if (sbi->s_log_groups_per_flex) {
6035                 ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
6036                 atomic64_add(count_clusters,
6037                              &sbi_array_rcu_deref(sbi, s_flex_groups,
6038                                                   flex_group)->free_clusters);
6039         }
6040
6041         /*
6042          * on a bigalloc file system, defer the s_freeclusters_counter
6043          * update to the caller (ext4_remove_space and friends) so they
6044          * can determine if a cluster freed here should be rereserved
6045          */
6046         if (!(flags & EXT4_FREE_BLOCKS_RERESERVE_CLUSTER)) {
6047                 if (!(flags & EXT4_FREE_BLOCKS_NO_QUOT_UPDATE))
6048                         dquot_free_block(inode, EXT4_C2B(sbi, count_clusters));
6049                 percpu_counter_add(&sbi->s_freeclusters_counter,
6050                                    count_clusters);
6051         }
6052
6053         ext4_mb_unload_buddy(&e4b);
6054
6055         /* We dirtied the bitmap block */
6056         BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
6057         err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
6058
6059         /* And the group descriptor block */
6060         BUFFER_TRACE(gd_bh, "dirtied group descriptor block");
6061         ret = ext4_handle_dirty_metadata(handle, NULL, gd_bh);
6062         if (!err)
6063                 err = ret;
6064
6065         if (overflow && !err) {
6066                 block += count;
6067                 count = overflow;
6068                 put_bh(bitmap_bh);
6069                 /* The range changed so it's no longer validated */
6070                 flags &= ~EXT4_FREE_BLOCKS_VALIDATED;
6071                 goto do_more;
6072         }
6073 error_return:
6074         brelse(bitmap_bh);
6075         ext4_std_error(sb, err);
6076         return;
6077 }
6078
6079 /**
6080  * ext4_free_blocks() -- Free given blocks and update quota
6081  * @handle:             handle for this transaction
6082  * @inode:              inode
6083  * @bh:                 optional buffer of the block to be freed
6084  * @block:              starting physical block to be freed
6085  * @count:              number of blocks to be freed
6086  * @flags:              flags used by ext4_free_blocks
6087  */
6088 void ext4_free_blocks(handle_t *handle, struct inode *inode,
6089                       struct buffer_head *bh, ext4_fsblk_t block,
6090                       unsigned long count, int flags)
6091 {
6092         struct super_block *sb = inode->i_sb;
6093         unsigned int overflow;
6094         struct ext4_sb_info *sbi;
6095
6096         sbi = EXT4_SB(sb);
6097
6098         if (sbi->s_mount_state & EXT4_FC_REPLAY) {
6099                 ext4_free_blocks_simple(inode, block, count);
6100                 return;
6101         }
6102
6103         might_sleep();
6104         if (bh) {
6105                 if (block)
6106                         BUG_ON(block != bh->b_blocknr);
6107                 else
6108                         block = bh->b_blocknr;
6109         }
6110
6111         if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) &&
6112             !ext4_inode_block_valid(inode, block, count)) {
6113                 ext4_error(sb, "Freeing blocks not in datazone - "
6114                            "block = %llu, count = %lu", block, count);
6115                 return;
6116         }
6117         flags |= EXT4_FREE_BLOCKS_VALIDATED;
6118
6119         ext4_debug("freeing block %llu\n", block);
6120         trace_ext4_free_blocks(inode, block, count, flags);
6121
6122         if (bh && (flags & EXT4_FREE_BLOCKS_FORGET)) {
6123                 BUG_ON(count > 1);
6124
6125                 ext4_forget(handle, flags & EXT4_FREE_BLOCKS_METADATA,
6126                             inode, bh, block);
6127         }
6128
6129         /*
6130          * If the extent to be freed does not begin on a cluster
6131          * boundary, we need to deal with partial clusters at the
6132          * beginning and end of the extent.  Normally we will free
6133          * blocks at the beginning or the end unless we are explicitly
6134          * requested to avoid doing so.
6135          */
6136         overflow = EXT4_PBLK_COFF(sbi, block);
6137         if (overflow) {
6138                 if (flags & EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER) {
6139                         overflow = sbi->s_cluster_ratio - overflow;
6140                         block += overflow;
6141                         if (count > overflow)
6142                                 count -= overflow;
6143                         else
6144                                 return;
6145                 } else {
6146                         block -= overflow;
6147                         count += overflow;
6148                 }
6149                 /* The range changed so it's no longer validated */
6150                 flags &= ~EXT4_FREE_BLOCKS_VALIDATED;
6151         }
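             /*
              * Example with a 16-block cluster (bigalloc): freeing from
              * block 100 gives overflow 4, so we either round up to
              * block 112 and drop 12 blocks (NOFREE_FIRST_CLUSTER) or
              * round down to block 96 and free 4 extra blocks; either
              * way the range now starts on a cluster boundary.
              */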
6152         overflow = EXT4_LBLK_COFF(sbi, count);
6153         if (overflow) {
6154                 if (flags & EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER) {
6155                         if (count > overflow)
6156                                 count -= overflow;
6157                         else
6158                                 return;
6159                 } else
6160                         count += sbi->s_cluster_ratio - overflow;
6161                 /* The range changed so it's no longer validated */
6162                 flags &= ~EXT4_FREE_BLOCKS_VALIDATED;
6163         }
6164
6165         if (!bh && (flags & EXT4_FREE_BLOCKS_FORGET)) {
6166                 int i;
6167                 int is_metadata = flags & EXT4_FREE_BLOCKS_METADATA;
6168
6169                 for (i = 0; i < count; i++) {
6170                         cond_resched();
6171                         if (is_metadata)
6172                                 bh = sb_find_get_block(inode->i_sb, block + i);
6173                         ext4_forget(handle, is_metadata, inode, bh, block + i);
6174                 }
6175         }
6176
6177         ext4_mb_clear_bb(handle, inode, block, count, flags);
6178         return;
6179 }
6180
6181 /**
6182  * ext4_group_add_blocks() -- Add given blocks to an existing group
6183  * @handle:                     handle to this transaction
6184  * @sb:                         super block
6185  * @block:                      start physical block to add to the block group
6186  * @count:                      number of blocks to add
6187  *
6188  * This marks the blocks as free in the bitmap and buddy.
6189  */
6190 int ext4_group_add_blocks(handle_t *handle, struct super_block *sb,
6191                          ext4_fsblk_t block, unsigned long count)
6192 {
6193         struct buffer_head *bitmap_bh = NULL;
6194         struct buffer_head *gd_bh;
6195         ext4_group_t block_group;
6196         ext4_grpblk_t bit;
6197         unsigned int i;
6198         struct ext4_group_desc *desc;
6199         struct ext4_sb_info *sbi = EXT4_SB(sb);
6200         struct ext4_buddy e4b;
6201         int err = 0, ret, free_clusters_count;
6202         ext4_grpblk_t clusters_freed;
6203         ext4_fsblk_t first_cluster = EXT4_B2C(sbi, block);
6204         ext4_fsblk_t last_cluster = EXT4_B2C(sbi, block + count - 1);
6205         unsigned long cluster_count = last_cluster - first_cluster + 1;
6206
6207         ext4_debug("Adding block(s) %llu-%llu\n", block, block + count - 1);
6208
6209         if (count == 0)
6210                 return 0;
6211
6212         ext4_get_group_no_and_offset(sb, block, &block_group, &bit);
6213         /*
6214          * Check to see if we are freeing blocks across a group
6215          * boundary.
6216          */
6217         if (bit + cluster_count > EXT4_CLUSTERS_PER_GROUP(sb)) {
6218                 ext4_warning(sb, "too many blocks added to group %u",
6219                              block_group);
6220                 err = -EINVAL;
6221                 goto error_return;
6222         }
6223
6224         bitmap_bh = ext4_read_block_bitmap(sb, block_group);
6225         if (IS_ERR(bitmap_bh)) {
6226                 err = PTR_ERR(bitmap_bh);
6227                 bitmap_bh = NULL;
6228                 goto error_return;
6229         }
6230
6231         desc = ext4_get_group_desc(sb, block_group, &gd_bh);
6232         if (!desc) {
6233                 err = -EIO;
6234                 goto error_return;
6235         }
6236
6237         if (!ext4_sb_block_valid(sb, NULL, block, count)) {
6238                 ext4_error(sb, "Adding blocks in system zones - "
6239                            "Block = %llu, count = %lu",
6240                            block, count);
6241                 err = -EINVAL;
6242                 goto error_return;
6243         }
6244
6245         BUFFER_TRACE(bitmap_bh, "getting write access");
6246         err = ext4_journal_get_write_access(handle, sb, bitmap_bh,
6247                                             EXT4_JTR_NONE);
6248         if (err)
6249                 goto error_return;
6250
6251         /*
6252          * We are about to modify some metadata.  Call the journal APIs
6253          * to unshare ->b_data if a currently-committing transaction is
6254          * using it
6255          */
6256         BUFFER_TRACE(gd_bh, "get_write_access");
6257         err = ext4_journal_get_write_access(handle, sb, gd_bh, EXT4_JTR_NONE);
6258         if (err)
6259                 goto error_return;
6260
6261         for (i = 0, clusters_freed = 0; i < cluster_count; i++) {
6262                 BUFFER_TRACE(bitmap_bh, "clear bit");
6263                 if (!mb_test_bit(bit + i, bitmap_bh->b_data)) {
6264                         ext4_error(sb, "bit already cleared for block %llu",
6265                                    (ext4_fsblk_t)(block + i));
6266                         BUFFER_TRACE(bitmap_bh, "bit already cleared");
6267                 } else {
6268                         clusters_freed++;
6269                 }
6270         }
6271
6272         err = ext4_mb_load_buddy(sb, block_group, &e4b);
6273         if (err)
6274                 goto error_return;
6275
6276         /*
6277          * need to update group_info->bb_free and bitmap
6278          * with the group lock held. generate_buddy looks at
6279          * them with the group lock held
6280          */
6281         ext4_lock_group(sb, block_group);
6282         mb_clear_bits(bitmap_bh->b_data, bit, cluster_count);
6283         mb_free_blocks(NULL, &e4b, bit, cluster_count);
6284         free_clusters_count = clusters_freed +
6285                 ext4_free_group_clusters(sb, desc);
6286         ext4_free_group_clusters_set(sb, desc, free_clusters_count);
6287         ext4_block_bitmap_csum_set(sb, desc, bitmap_bh);
6288         ext4_group_desc_csum_set(sb, block_group, desc);
6289         ext4_unlock_group(sb, block_group);
6290         percpu_counter_add(&sbi->s_freeclusters_counter,
6291                            clusters_freed);
6292
6293         if (sbi->s_log_groups_per_flex) {
6294                 ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
6295                 atomic64_add(clusters_freed,
6296                              &sbi_array_rcu_deref(sbi, s_flex_groups,
6297                                                   flex_group)->free_clusters);
6298         }
6299
6300         ext4_mb_unload_buddy(&e4b);
6301
6302         /* We dirtied the bitmap block */
6303         BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
6304         err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
6305
6306         /* And the group descriptor block */
6307         BUFFER_TRACE(gd_bh, "dirtied group descriptor block");
6308         ret = ext4_handle_dirty_metadata(handle, NULL, gd_bh);
6309         if (!err)
6310                 err = ret;
6311
6312 error_return:
6313         brelse(bitmap_bh);
6314         ext4_std_error(sb, err);
6315         return err;
6316 }
6317
6318 /**
6319  * ext4_trim_extent -- function to TRIM one single free extent in the group
6320  * @sb:         super block for the file system
6321  * @start:      starting block of the free extent in the alloc. group
6322  * @count:      number of blocks to TRIM
6323  * @e4b:        ext4 buddy for the group
6324  *
6325  * Trim "count" blocks starting at "start" in the "group". To assure that no
6326  * one will allocate those blocks, mark them as used in the buddy bitmap.
6327  * This must be called with the group lock held.
6328  */
6329 static int ext4_trim_extent(struct super_block *sb,
6330                 int start, int count, struct ext4_buddy *e4b)
6331 __releases(bitlock)
6332 __acquires(bitlock)
6333 {
6334         struct ext4_free_extent ex;
6335         ext4_group_t group = e4b->bd_group;
6336         int ret = 0;
6337
6338         trace_ext4_trim_extent(sb, group, start, count);
6339
6340         assert_spin_locked(ext4_group_lock_ptr(sb, group));
6341
6342         ex.fe_start = start;
6343         ex.fe_group = group;
6344         ex.fe_len = count;
6345
6346         /*
6347          * Mark blocks used, so no one can reuse them while
6348          * being trimmed.
6349          */
6350         mb_mark_used(e4b, &ex);
6351         ext4_unlock_group(sb, group);
6352         ret = ext4_issue_discard(sb, group, start, count, NULL);
6353         ext4_lock_group(sb, group);
6354         mb_free_blocks(NULL, e4b, start, ex.fe_len);
6355         return ret;
6356 }
6357
6358 static int ext4_try_to_trim_range(struct super_block *sb,
6359                 struct ext4_buddy *e4b, ext4_grpblk_t start,
6360                 ext4_grpblk_t max, ext4_grpblk_t minblocks)
6361 __acquires(ext4_group_lock_ptr(sb, e4b->bd_group))
6362 __releases(ext4_group_lock_ptr(sb, e4b->bd_group))
6363 {
6364         ext4_grpblk_t next, count, free_count;
6365         void *bitmap;
6366
6367         bitmap = e4b->bd_bitmap;
6368         start = (e4b->bd_info->bb_first_free > start) ?
6369                 e4b->bd_info->bb_first_free : start;
6370         count = 0;
6371         free_count = 0;
6372
6373         while (start <= max) {
6374                 start = mb_find_next_zero_bit(bitmap, max + 1, start);
6375                 if (start > max)
6376                         break;
6377                 next = mb_find_next_bit(bitmap, max + 1, start);
6378
6379                 if ((next - start) >= minblocks) {
6380                         int ret = ext4_trim_extent(sb, start, next - start, e4b);
6381
6382                         if (ret && ret != -EOPNOTSUPP)
6383                                 break;
6384                         count += next - start;
6385                 }
6386                 free_count += next - start;
6387                 start = next + 1;
6388
6389                 if (fatal_signal_pending(current)) {
6390                         count = -ERESTARTSYS;
6391                         break;
6392                 }
6393
6394                 if (need_resched()) {
6395                         ext4_unlock_group(sb, e4b->bd_group);
6396                         cond_resched();
6397                         ext4_lock_group(sb, e4b->bd_group);
6398                 }
6399
6400                 if ((e4b->bd_info->bb_free - free_count) < minblocks)
6401                         break;
6402         }
6403
6404         return count;
6405 }
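
     /*
      * A minimal sketch of the scan loop above, assuming a plain bitmap
      * of nbits bits where a clear bit means "free"; locking, minblocks
      * handling and the discard itself are left out, and
      * demo_longest_free_run is an illustrative name only.
      */
     static __maybe_unused int demo_longest_free_run(const unsigned long *bitmap,
                                                     int nbits)
     {
             int start = 0, next, best = 0;

             while (start < nbits) {
                     start = find_next_zero_bit(bitmap, nbits, start);
                     if (start >= nbits)
                             break;
                     next = find_next_bit(bitmap, nbits, start);
                     if (next - start > best)
                             best = next - start;
                     start = next + 1;
             }
             return best;
     }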
6406
6407 /**
6408  * ext4_trim_all_free -- function to trim all free space in alloc. group
6409  * @sb:                 super block for file system
6410  * @group:              group to be trimmed
6411  * @start:              first group block to examine
6412  * @max:                last group block to examine
6413  * @minblocks:          minimum extent block count
6414  * @set_trimmed:        set the trimmed flag if at least one block is trimmed
6415  *
6416  * ext4_trim_all_free walks through the group's block bitmap searching for
6417  * free extents. When a free extent is found, it is marked as used in the
6418  * group buddy bitmap, a TRIM command is issued on the extent, and the
6419  * extent is freed again in the group buddy bitmap.
6420  */
6421 static ext4_grpblk_t
6422 ext4_trim_all_free(struct super_block *sb, ext4_group_t group,
6423                    ext4_grpblk_t start, ext4_grpblk_t max,
6424                    ext4_grpblk_t minblocks, bool set_trimmed)
6425 {
6426         struct ext4_buddy e4b;
6427         int ret;
6428
6429         trace_ext4_trim_all_free(sb, group, start, max);
6430
6431         ret = ext4_mb_load_buddy(sb, group, &e4b);
6432         if (ret) {
6433                 ext4_warning(sb, "Error %d loading buddy information for %u",
6434                              ret, group);
6435                 return ret;
6436         }
6437
6438         ext4_lock_group(sb, group);
6439
6440         if (!EXT4_MB_GRP_WAS_TRIMMED(e4b.bd_info) ||
6441             minblocks < EXT4_SB(sb)->s_last_trim_minblks) {
6442                 ret = ext4_try_to_trim_range(sb, &e4b, start, max, minblocks);
6443                 if (ret >= 0 && set_trimmed)
6444                         EXT4_MB_GRP_SET_TRIMMED(e4b.bd_info);
6445         } else {
6446                 ret = 0;
6447         }
6448
6449         ext4_unlock_group(sb, group);
6450         ext4_mb_unload_buddy(&e4b);
6451
6452         ext4_debug("trimmed %d blocks in the group %d\n",
6453                 ret, group);
6454
6455         return ret;
6456 }
6457
6458 /**
6459  * ext4_trim_fs() -- trim ioctl handler
6460  * @sb:                 superblock for filesystem
6461  * @range:              fstrim_range structure
6462  *
6463  * start:       first byte to trim
6464  * len:         number of bytes to trim from start
6465  * minlen:      minimum extent length in bytes
6466  * ext4_trim_fs goes through all allocation groups containing bytes from
6467  * start to start+len. For each such group the ext4_trim_all_free function
6468  * is invoked to trim all free space.
6469  */
6470 int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
6471 {
6472         unsigned int discard_granularity = bdev_discard_granularity(sb->s_bdev);
6473         struct ext4_group_info *grp;
6474         ext4_group_t group, first_group, last_group;
6475         ext4_grpblk_t cnt = 0, first_cluster, last_cluster;
6476         uint64_t start, end, minlen, trimmed = 0;
6477         ext4_fsblk_t first_data_blk =
6478                         le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block);
6479         ext4_fsblk_t max_blks = ext4_blocks_count(EXT4_SB(sb)->s_es);
6480         bool whole_group, eof = false;
6481         int ret = 0;
6482
6483         start = range->start >> sb->s_blocksize_bits;
6484         end = start + (range->len >> sb->s_blocksize_bits) - 1;
6485         minlen = EXT4_NUM_B2C(EXT4_SB(sb),
6486                               range->minlen >> sb->s_blocksize_bits);
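             /*
              * Example: on a 4k-block filesystem with 16-block clusters,
              * a range->minlen of 1 MiB converts to 256 blocks and then
              * to 16 clusters.
              */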
6487
6488         if (minlen > EXT4_CLUSTERS_PER_GROUP(sb) ||
6489             start >= max_blks ||
6490             range->len < sb->s_blocksize)
6491                 return -EINVAL;
6492         /* No point in trying to trim less than the discard granularity */
6493         if (range->minlen < discard_granularity) {
6494                 minlen = EXT4_NUM_B2C(EXT4_SB(sb),
6495                                 discard_granularity >> sb->s_blocksize_bits);
6496                 if (minlen > EXT4_CLUSTERS_PER_GROUP(sb))
6497                         goto out;
6498         }
6499         if (end >= max_blks - 1) {
6500                 end = max_blks - 1;
6501                 eof = true;
6502         }
6503         if (end <= first_data_blk)
6504                 goto out;
6505         if (start < first_data_blk)
6506                 start = first_data_blk;
6507
6508         /* Determine first and last group to examine based on start and end */
6509         ext4_get_group_no_and_offset(sb, (ext4_fsblk_t) start,
6510                                      &first_group, &first_cluster);
6511         ext4_get_group_no_and_offset(sb, (ext4_fsblk_t) end,
6512                                      &last_group, &last_cluster);
6513
6514         /* end now represents the last cluster to discard in this group */
6515         end = EXT4_CLUSTERS_PER_GROUP(sb) - 1;
6516         whole_group = true;
6517
6518         for (group = first_group; group <= last_group; group++) {
6519                 grp = ext4_get_group_info(sb, group);
6520                 /* We only do this if the grp has never been initialized */
6521                 if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
6522                         ret = ext4_mb_init_group(sb, group, GFP_NOFS);
6523                         if (ret)
6524                                 break;
6525                 }
6526
6527                 /*
6528                  * For all the groups except the last one, last cluster will
6529                  * always be EXT4_CLUSTERS_PER_GROUP(sb)-1, so we only need to
6530                  * change it for the last group; note that last_cluster is
6531                  * already computed earlier by ext4_get_group_no_and_offset()
6532                  */
6533                 if (group == last_group) {
6534                         end = last_cluster;
6535                         whole_group = eof ? true : end == EXT4_CLUSTERS_PER_GROUP(sb) - 1;
6536                 }
6537                 if (grp->bb_free >= minlen) {
6538                         cnt = ext4_trim_all_free(sb, group, first_cluster,
6539                                                  end, minlen, whole_group);
6540                         if (cnt < 0) {
6541                                 ret = cnt;
6542                                 break;
6543                         }
6544                         trimmed += cnt;
6545                 }
6546
6547                 /*
6548                  * For every group except the first one, we are sure
6549                  * that the first cluster to discard will be cluster #0.
6550                  */
6551                 first_cluster = 0;
6552         }
6553
6554         if (!ret)
6555                 EXT4_SB(sb)->s_last_trim_minblks = minlen;
6556
6557 out:
6558         range->len = EXT4_C2B(EXT4_SB(sb), trimmed) << sb->s_blocksize_bits;
6559         return ret;
6560 }
6561
6562 /* Iterate all the free extents in the group. */
6563 int
6564 ext4_mballoc_query_range(
6565         struct super_block              *sb,
6566         ext4_group_t                    group,
6567         ext4_grpblk_t                   start,
6568         ext4_grpblk_t                   end,
6569         ext4_mballoc_query_range_fn     formatter,
6570         void                            *priv)
6571 {
6572         void                            *bitmap;
6573         ext4_grpblk_t                   next;
6574         struct ext4_buddy               e4b;
6575         int                             error;
6576
6577         error = ext4_mb_load_buddy(sb, group, &e4b);
6578         if (error)
6579                 return error;
6580         bitmap = e4b.bd_bitmap;
6581
6582         ext4_lock_group(sb, group);
6583
6584         start = (e4b.bd_info->bb_first_free > start) ?
6585                 e4b.bd_info->bb_first_free : start;
6586         if (end >= EXT4_CLUSTERS_PER_GROUP(sb))
6587                 end = EXT4_CLUSTERS_PER_GROUP(sb) - 1;
6588
6589         while (start <= end) {
6590                 start = mb_find_next_zero_bit(bitmap, end + 1, start);
6591                 if (start > end)
6592                         break;
6593                 next = mb_find_next_bit(bitmap, end + 1, start);
6594
6595                 ext4_unlock_group(sb, group);
6596                 error = formatter(sb, group, start, next - start, priv);
6597                 if (error)
6598                         goto out_unload;
6599                 ext4_lock_group(sb, group);
6600
6601                 start = next + 1;
6602         }
6603
6604         ext4_unlock_group(sb, group);
6605 out_unload:
6606         ext4_mb_unload_buddy(&e4b);
6607
6608         return error;
6609 }
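
     /*
      * A sketch of a caller-supplied formatter for the iterator above,
      * matching the (sb, group, start, len, priv) signature it invokes;
      * demo_count_free is an illustrative name only.
      */
     static __maybe_unused int demo_count_free(struct super_block *sb,
                                               ext4_group_t group,
                                               ext4_grpblk_t start,
                                               ext4_grpblk_t len, void *priv)
     {
             ext4_grpblk_t *total = priv;

             /* accumulate the free clusters reported for this group */
             *total += len;
             return 0;       /* a non-zero return would stop the iteration */
     }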