// SPDX-License-Identifier: GPL-2.0

#include "misc.h"
#include "ctree.h"
#include "space-info.h"
#include "sysfs.h"
#include "volumes.h"
#include "free-space-cache.h"
#include "ordered-data.h"
#include "transaction.h"
#include "block-group.h"

/*
 * HOW DOES SPACE RESERVATION WORK
 *
 * If you want to know about delalloc specifically, there is a separate comment
 * for that with the delalloc code.  This comment is about how the whole system
 * works generally.
 *
 * BASIC CONCEPTS
 *
 *   1) space_info.  This is the ultimate arbiter of how much space we can use.
 *   There's a description of the bytes_ fields with the struct declaration,
 *   refer to that for specifics on each field.  Suffice it to say that for
 *   reservations we care about total_bytes - SUM(space_info->bytes_) when
 *   determining if there is space to make an allocation.  There is a space_info
 *   for METADATA, SYSTEM, and DATA areas.
 *
 *   2) block_rsv's.  These are basically buckets for every different type of
 *   metadata reservation we have.  You can see the comment in the block_rsv
 *   code on the rules for each type, but generally block_rsv->reserved is how
 *   much space is accounted for in space_info->bytes_may_use.
 *
 *   3) btrfs_calc*_size.  These are the worst case calculations we use based
 *   on the number of items we will want to modify.  We have one for changing
 *   items, and one for inserting new items.  Generally we use these helpers to
 *   determine the size of the block reserves, and then use the actual bytes
 *   values to adjust the space_info counters.
 *
 * MAKING RESERVATIONS, THE NORMAL CASE
 *
 *   We call into either btrfs_reserve_data_bytes() or
 *   btrfs_reserve_metadata_bytes(), depending on which we're looking for, with
 *   num_bytes we want to reserve.
 *
 *   ->reserve
 *     space_info->bytes_may_use += num_bytes
 *
 *   ->extent allocation
 *     Call btrfs_add_reserved_bytes() which does
 *     space_info->bytes_may_use -= num_bytes
 *     space_info->bytes_reserved += extent_bytes
 *
 *   ->insert reference
 *     Call btrfs_update_block_group() which does
 *     space_info->bytes_reserved -= extent_bytes
 *     space_info->bytes_used += extent_bytes
 *
 * MAKING RESERVATIONS, FLUSHING NORMALLY (non-priority)
 *
 *   Assume we are unable to simply make the reservation because we do not have
 *   enough space.
 *
 *   -> __reserve_bytes
 *     create a reserve_ticket with ->bytes set to our reservation, add it to
 *     the tail of space_info->tickets, kick async flush thread
 *
 *   -> handle_reserve_ticket
 *     wait on ticket->wait for ->bytes to be reduced to 0, or ->error to be set
 *     on the ticket.
 *
 *   -> btrfs_async_reclaim_metadata_space/btrfs_async_reclaim_data_space
 *     Flushes various things attempting to free up space.
 *
 *   -> btrfs_try_granting_tickets()
 *     This is called by anything that either subtracts space from
 *     space_info->bytes_may_use, ->bytes_pinned, etc, or adds to the
 *     space_info->total_bytes.  This loops through the ->priority_tickets and
 *     then the ->tickets list checking to see if the reservation can be
 *     completed.  If it can the space is added to space_info->bytes_may_use and
 *     the ticket is woken up.
 *
 *   -> ticket wakeup
 *     Check if ->bytes == 0, if it does we got our reservation and we can carry
 *     on, if not return the appropriate error (ENOSPC, but can be EINTR if we
 *     were interrupted).
 *
 * MAKING RESERVATIONS, FLUSHING HIGH PRIORITY
 *
 *   Same as the above, except we add ourselves to the
 *   space_info->priority_tickets, and we do not use ticket->wait, we simply
 *   call flush_space() ourselves for the states that are safe for us to call
 *   without deadlocking and hope for the best.
 *
 * THE FLUSHING STATES
 *
 *   Generally speaking we will have two cases for each state, a "nice" state
 *   and an "ALL THE THINGS" state.  In btrfs we delay a lot of work in order to
 *   reduce the locking overhead on the various trees, and even to keep from
 *   doing any work at all in the case of delayed refs.  Each of these delayed
 *   things, however, holds reservations, and so letting them run allows us to
 *   reclaim space so we can make new reservations.
 *
 *   FLUSH_DELAYED_ITEMS
 *     Every inode has a delayed item to update the inode.  Take a simple write
 *     for example, we would update the inode item at write time to update the
 *     mtime, and then again at finish_ordered_io() time in order to update the
 *     isize or bytes.  We keep these delayed items to coalesce these operations
 *     into a single operation done on demand.  These are an easy way to reclaim
 *     metadata space.
 *
 *   FLUSH_DELALLOC
 *     Look at the delalloc comment to get an idea of how much space is reserved
 *     for delayed allocation.  We can reclaim some of this space simply by
 *     running delalloc, but usually we need to wait for ordered extents to
 *     reclaim the bulk of this space.
 *
 *   FLUSH_DELAYED_REFS
 *     We have a block reserve for the outstanding delayed refs space, and every
 *     delayed ref operation holds a reservation.  Running these is a quick way
 *     to reclaim space, but we want to hold this until the end because COW can
 *     churn a lot and we can avoid making some extent tree modifications if we
 *     are able to delay for as long as possible.
 *
 *   ALLOC_CHUNK
 *     We will skip this the first time through space reservation because of
 *     overcommit: we don't want to allocate a lot of useless metadata space
 *     when our worst case reservations will likely never come true.
 *
 *   RUN_DELAYED_IPUTS
 *     If we're freeing inodes we're likely freeing checksums, file extent
 *     items, and extent tree items.  Loads of space could be freed up by these
 *     operations, however they won't be usable until the transaction commits.
 *
 *   COMMIT_TRANS
 *     This will commit the transaction.  Historically we had a lot of logic
 *     surrounding whether or not we'd commit the transaction, but this was born
 *     out of a pre-tickets era where we could end up committing the transaction
 *     thousands of times in a row without making progress.  Now thanks to our
 *     ticketing system we know if we're not making progress and can error
 *     everybody out after a few commits rather than burning the disk hoping for
 *     a different answer.
 *
 * OVERCOMMIT
 *
 *   Because we hold so many reservations for metadata we will allow you to
 *   reserve more space than is currently free in the currently allocated
 *   metadata space.  This only happens with metadata, data does not allow
 *   overcommitting.
 *
 *   You can see the current logic for when we allow overcommit in
 *   btrfs_can_overcommit(), but it only applies to unallocated space.  If there
 *   is no unallocated space to be had, all reservations are kept within the
 *   free space in the allocated metadata chunks.
 *
 *   Because of overcommitting, you generally want to use the
 *   btrfs_can_overcommit() logic for metadata allocations, as it does the right
 *   thing with or without extra unallocated space.
 */
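
/*
 * To make the normal-case flow above concrete, here is a minimal sketch of
 * the fast path, with illustrative locals; the real entry points are
 * btrfs_reserve_metadata_bytes() and btrfs_reserve_data_bytes():
 *
 *	spin_lock(&space_info->lock);
 *	used = btrfs_space_info_used(space_info, true);
 *	if (used + num_bytes <= space_info->total_bytes ||
 *	    btrfs_can_overcommit(fs_info, space_info, num_bytes, flush)) {
 *		btrfs_space_info_update_bytes_may_use(fs_info, space_info,
 *						      num_bytes);
 *		ret = 0;
 *	} else {
 *		// otherwise queue a reserve_ticket and flush, see below
 *	}
 *	spin_unlock(&space_info->lock);
 */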

u64 __pure btrfs_space_info_used(struct btrfs_space_info *s_info,
				 bool may_use_included)
{
	ASSERT(s_info);
	return s_info->bytes_used + s_info->bytes_reserved +
		s_info->bytes_pinned + s_info->bytes_readonly +
		s_info->bytes_zone_unusable +
		(may_use_included ? s_info->bytes_may_use : 0);
}
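
/*
 * A minimal illustration of how the helper above feeds the "free space"
 * check described in the header comment.  This is only a sketch of the
 * arithmetic; the helper name space_info_would_fit() is hypothetical and not
 * part of the btrfs API:
 *
 *	static inline bool space_info_would_fit(struct btrfs_space_info *s_info,
 *						u64 num_bytes)
 *	{
 *		// total_bytes - SUM(bytes_) is what the header calls "free"
 *		return btrfs_space_info_used(s_info, true) + num_bytes <=
 *			s_info->total_bytes;
 *	}
 */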

/*
 * after adding space to the filesystem, we need to clear the full flags
 * on all the space infos.
 */
void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	list_for_each_entry(found, head, list)
		found->full = 0;
}

static int create_space_info(struct btrfs_fs_info *info, u64 flags)
{
	struct btrfs_space_info *space_info;
	int i;
	int ret;

	space_info = kzalloc(sizeof(*space_info), GFP_NOFS);
	if (!space_info)
		return -ENOMEM;

	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
		INIT_LIST_HEAD(&space_info->block_groups[i]);
	init_rwsem(&space_info->groups_sem);
	spin_lock_init(&space_info->lock);
	space_info->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
	space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
	INIT_LIST_HEAD(&space_info->ro_bgs);
	INIT_LIST_HEAD(&space_info->tickets);
	INIT_LIST_HEAD(&space_info->priority_tickets);
	space_info->clamp = 1;

	ret = btrfs_sysfs_add_space_info_type(info, space_info);
	if (ret)
		return ret;

	list_add(&space_info->list, &info->space_info);
	if (flags & BTRFS_BLOCK_GROUP_DATA)
		info->data_sinfo = space_info;

	return ret;
}

int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
{
	struct btrfs_super_block *disk_super;
	u64 features;
	u64 flags;
	int mixed = 0;
	int ret;

	disk_super = fs_info->super_copy;
	if (!btrfs_super_root(disk_super))
		return -EINVAL;

	features = btrfs_super_incompat_flags(disk_super);
	if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
		mixed = 1;

	flags = BTRFS_BLOCK_GROUP_SYSTEM;
	ret = create_space_info(fs_info, flags);
	if (ret)
		goto out;

	if (mixed) {
		flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA;
		ret = create_space_info(fs_info, flags);
	} else {
		flags = BTRFS_BLOCK_GROUP_METADATA;
		ret = create_space_info(fs_info, flags);
		if (ret)
			goto out;

		flags = BTRFS_BLOCK_GROUP_DATA;
		ret = create_space_info(fs_info, flags);
	}
out:
	return ret;
}

void btrfs_update_space_info(struct btrfs_fs_info *info, u64 flags,
			     u64 total_bytes, u64 bytes_used,
			     u64 bytes_readonly, u64 bytes_zone_unusable,
			     struct btrfs_space_info **space_info)
{
	struct btrfs_space_info *found;
	int factor;

	factor = btrfs_bg_type_to_factor(flags);

	found = btrfs_find_space_info(info, flags);
	ASSERT(found);
	spin_lock(&found->lock);
	found->total_bytes += total_bytes;
	found->disk_total += total_bytes * factor;
	found->bytes_used += bytes_used;
	found->disk_used += bytes_used * factor;
	found->bytes_readonly += bytes_readonly;
	found->bytes_zone_unusable += bytes_zone_unusable;
	if (total_bytes > 0)
		found->full = 0;
	btrfs_try_granting_tickets(info, found);
	spin_unlock(&found->lock);
	*space_info = found;
}

struct btrfs_space_info *btrfs_find_space_info(struct btrfs_fs_info *info,
					       u64 flags)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	flags &= BTRFS_BLOCK_GROUP_TYPE_MASK;

	list_for_each_entry(found, head, list) {
		if (found->flags & flags)
			return found;
	}
	return NULL;
}
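
/*
 * Usage sketch (illustrative): the flags are masked down to the TYPE bits, so
 * profile bits (dup, raid1, ...) in the passed-in flags are ignored:
 *
 *	struct btrfs_space_info *sinfo =
 *		btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
 *
 *	ASSERT(sinfo);	// all space infos exist after btrfs_init_space_info()
 */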

static u64 calc_available_free_space(struct btrfs_fs_info *fs_info,
				     struct btrfs_space_info *space_info,
				     enum btrfs_reserve_flush_enum flush)
{
	u64 profile;
	u64 avail;
	int factor;

	if (space_info->flags & BTRFS_BLOCK_GROUP_SYSTEM)
		profile = btrfs_system_alloc_profile(fs_info);
	else
		profile = btrfs_metadata_alloc_profile(fs_info);

	avail = atomic64_read(&fs_info->free_chunk_space);

	/*
	 * If we have dup, raid1 or raid10 then only half of the free
	 * space is actually usable.  For raid56, the space info used
	 * doesn't include the parity drive, so we don't have to
	 * change the math.
	 */
	factor = btrfs_bg_type_to_factor(profile);
	avail = div_u64(avail, factor);

	/*
	 * If we aren't flushing all things, let us overcommit up to
	 * half of the space.  If we can flush, don't let us overcommit
	 * too much, let it overcommit up to 1/8 of the space.
	 */
	if (flush == BTRFS_RESERVE_FLUSH_ALL)
		avail >>= 3;
	else
		avail >>= 1;
	return avail;
}
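
/*
 * Worked example of the math above (numbers are illustrative): with RAID1
 * metadata and 8GiB of unallocated device space, factor == 2, so at most
 * 8GiB / 2 = 4GiB of usable chunks could still be created.  A
 * BTRFS_RESERVE_FLUSH_ALL caller may then overcommit by 4GiB >> 3 = 512MiB,
 * while a weaker flush level may overcommit by 4GiB >> 1 = 2GiB.
 */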

int btrfs_can_overcommit(struct btrfs_fs_info *fs_info,
			 struct btrfs_space_info *space_info, u64 bytes,
			 enum btrfs_reserve_flush_enum flush)
{
	u64 avail;
	u64 used;

	/* Don't overcommit when in mixed mode */
	if (space_info->flags & BTRFS_BLOCK_GROUP_DATA)
		return 0;

	used = btrfs_space_info_used(space_info, true);
	avail = calc_available_free_space(fs_info, space_info, flush);

	if (used + bytes < space_info->total_bytes + avail)
		return 1;
	return 0;
}
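
/*
 * Example (illustrative numbers): with total_bytes = 1GiB, used including
 * bytes_may_use = 900MiB, and avail = 512MiB from the helper above, a 300MiB
 * metadata reservation overcommits fine (900M + 300M < 1G + 512M).  The same
 * request against a DATA space info always returns 0 here, since data is
 * never overcommitted.
 */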

static void remove_ticket(struct btrfs_space_info *space_info,
			  struct reserve_ticket *ticket)
{
	if (!list_empty(&ticket->list)) {
		list_del_init(&ticket->list);
		ASSERT(space_info->reclaim_size >= ticket->bytes);
		space_info->reclaim_size -= ticket->bytes;
	}
}

/*
 * This is for space we already have accounted in space_info->bytes_may_use, so
 * basically when we're returning space from block_rsv's.
 */
void btrfs_try_granting_tickets(struct btrfs_fs_info *fs_info,
				struct btrfs_space_info *space_info)
{
	struct list_head *head;
	enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_NO_FLUSH;

	lockdep_assert_held(&space_info->lock);

	head = &space_info->priority_tickets;
again:
	while (!list_empty(head)) {
		struct reserve_ticket *ticket;
		u64 used = btrfs_space_info_used(space_info, true);

		ticket = list_first_entry(head, struct reserve_ticket, list);

		/* Check and see if our ticket can be satisfied now. */
		if ((used + ticket->bytes <= space_info->total_bytes) ||
		    btrfs_can_overcommit(fs_info, space_info, ticket->bytes,
					 flush)) {
			btrfs_space_info_update_bytes_may_use(fs_info,
							      space_info,
							      ticket->bytes);
			remove_ticket(space_info, ticket);
			ticket->bytes = 0;
			space_info->tickets_id++;
			wake_up(&ticket->wait);
		} else {
			break;
		}
	}

	if (head == &space_info->priority_tickets) {
		head = &space_info->tickets;
		flush = BTRFS_RESERVE_FLUSH_ALL;
		goto again;
	}
}
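
/*
 * Caller sketch (illustrative): anything that returns space does so under the
 * space_info lock and then lets waiting tickets consume it, e.g.:
 *
 *	spin_lock(&space_info->lock);
 *	btrfs_space_info_update_bytes_may_use(fs_info, space_info, -num_bytes);
 *	btrfs_try_granting_tickets(fs_info, space_info);
 *	spin_unlock(&space_info->lock);
 */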

#define DUMP_BLOCK_RSV(fs_info, rsv_name)				\
do {									\
	struct btrfs_block_rsv *__rsv = &(fs_info)->rsv_name;		\
	spin_lock(&__rsv->lock);					\
	btrfs_info(fs_info, #rsv_name ": size %llu reserved %llu",	\
		   __rsv->size, __rsv->reserved);			\
	spin_unlock(&__rsv->lock);					\
} while (0)

static void __btrfs_dump_space_info(struct btrfs_fs_info *fs_info,
				    struct btrfs_space_info *info)
{
	lockdep_assert_held(&info->lock);

	/* The free space could be negative in case of overcommit */
	btrfs_info(fs_info, "space_info %llu has %lld free, is %sfull",
		   info->flags,
		   (s64)(info->total_bytes - btrfs_space_info_used(info, true)),
		   info->full ? "" : "not ");
	btrfs_info(fs_info,
		"space_info total=%llu, used=%llu, pinned=%llu, reserved=%llu, may_use=%llu, readonly=%llu zone_unusable=%llu",
		info->total_bytes, info->bytes_used, info->bytes_pinned,
		info->bytes_reserved, info->bytes_may_use,
		info->bytes_readonly, info->bytes_zone_unusable);

	DUMP_BLOCK_RSV(fs_info, global_block_rsv);
	DUMP_BLOCK_RSV(fs_info, trans_block_rsv);
	DUMP_BLOCK_RSV(fs_info, chunk_block_rsv);
	DUMP_BLOCK_RSV(fs_info, delayed_block_rsv);
	DUMP_BLOCK_RSV(fs_info, delayed_refs_rsv);
}

void btrfs_dump_space_info(struct btrfs_fs_info *fs_info,
			   struct btrfs_space_info *info, u64 bytes,
			   int dump_block_groups)
{
	struct btrfs_block_group *cache;
	int index = 0;

	spin_lock(&info->lock);
	__btrfs_dump_space_info(fs_info, info);
	spin_unlock(&info->lock);

	if (!dump_block_groups)
		return;

	down_read(&info->groups_sem);
again:
	list_for_each_entry(cache, &info->block_groups[index], list) {
		spin_lock(&cache->lock);
		btrfs_info(fs_info,
			"block group %llu has %llu bytes, %llu used %llu pinned %llu reserved %llu zone_unusable %s",
			cache->start, cache->length, cache->used, cache->pinned,
			cache->reserved, cache->zone_unusable,
			cache->ro ? "[readonly]" : "");
		spin_unlock(&cache->lock);
		btrfs_dump_free_space(cache, bytes);
	}
	if (++index < BTRFS_NR_RAID_TYPES)
		goto again;
	up_read(&info->groups_sem);
}

static inline u64 calc_reclaim_items_nr(struct btrfs_fs_info *fs_info,
					u64 to_reclaim)
{
	u64 bytes;
	u64 nr;

	bytes = btrfs_calc_insert_metadata_size(fs_info, 1);
	nr = div64_u64(to_reclaim, bytes);
	if (!nr)
		nr = 1;
	return nr;
}
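
/*
 * Worked example (assuming the usual worst case of nodesize * BTRFS_MAX_LEVEL
 * * 2 per inserted item, i.e. 256KiB with a 16KiB nodesize): asking to
 * reclaim 1MiB maps to nr = 1MiB / 256KiB = 4 items.
 */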

#define EXTENT_SIZE_PER_ITEM	SZ_256K

/*
 * shrink metadata reservation for delalloc
 */
static void shrink_delalloc(struct btrfs_fs_info *fs_info,
			    struct btrfs_space_info *space_info,
			    u64 to_reclaim, bool wait_ordered,
			    bool for_preempt)
{
	struct btrfs_trans_handle *trans;
	u64 delalloc_bytes;
	u64 ordered_bytes;
	u64 items;
	long time_left;
	int loops;

	delalloc_bytes = percpu_counter_sum_positive(&fs_info->delalloc_bytes);
	ordered_bytes = percpu_counter_sum_positive(&fs_info->ordered_bytes);
	if (delalloc_bytes == 0 && ordered_bytes == 0)
		return;

	/* Calculate the number of pages we need to flush for this reservation */
	if (to_reclaim == U64_MAX) {
		items = U64_MAX;
	} else {
		/*
		 * to_reclaim is set to however much metadata we need to
		 * reclaim, but reclaiming that much data doesn't really track
		 * exactly.  What we really want to do is reclaim full inodes'
		 * worth of reservations, however that's not available to us
		 * here.  We will take a fraction of the delalloc bytes for our
		 * flushing loops and hope for the best.  Delalloc will expand
		 * the amount we write to cover an entire dirty extent, which
		 * will reclaim the metadata reservation for that range.  If
		 * it's not enough subsequent flush stages will be more
		 * aggressive.
		 */
		to_reclaim = max(to_reclaim, delalloc_bytes >> 3);
		items = calc_reclaim_items_nr(fs_info, to_reclaim) * 2;
	}
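
	/*
	 * Example of the sizing above (reusing the illustrative 256KiB-per-item
	 * figure from calc_reclaim_items_nr()): with to_reclaim = 1MiB and
	 * 64MiB of delalloc, to_reclaim is bumped to max(1M, 64M >> 3) = 8MiB,
	 * so items = (8MiB / 256KiB) * 2 = 64.
	 */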

	trans = (struct btrfs_trans_handle *)current->journal_info;

	/*
	 * If we are doing more ordered than delalloc we need to just wait on
	 * ordered extents, otherwise we'll waste time trying to flush delalloc
	 * that likely won't give us the space back we need.
	 */
	if (ordered_bytes > delalloc_bytes && !for_preempt)
		wait_ordered = true;

	loops = 0;
	while ((delalloc_bytes || ordered_bytes) && loops < 3) {
		u64 temp = min(delalloc_bytes, to_reclaim) >> PAGE_SHIFT;
		long nr_pages = min_t(u64, temp, LONG_MAX);
		int async_pages;

		btrfs_start_delalloc_roots(fs_info, nr_pages, true);

		/*
		 * We need to make sure any outstanding async pages are now
		 * processed before we continue.  This is because things like
		 * sync_inode() try to be smart and skip writing if the inode is
		 * marked clean.  We don't use filemap_fdatawrite() for flushing
		 * because we want to control how many pages we write out at a
		 * time, thus this is the only safe way to make sure we've
		 * waited for outstanding compressed workers to have started
		 * their jobs and thus have ordered extents set up properly.
		 *
		 * This exists because we do not want to wait for each
		 * individual inode to finish its async work, we simply want to
		 * start the IO on everybody, and then come back here and wait
		 * for all of the async work to catch up.  Once we're done with
		 * that we know we'll have ordered extents for everything and we
		 * can decide if we wait for that or not.
		 *
		 * If we choose to replace this in the future, make absolutely
		 * sure that the proper waiting is being done in the async case,
		 * as there have been bugs in that area before.
		 */
		async_pages = atomic_read(&fs_info->async_delalloc_pages);
		if (!async_pages)
			goto skip_async;

		/*
		 * We don't want to wait forever, if we wrote fewer pages in
		 * this loop than we have outstanding, only wait for that number
		 * of pages, otherwise we can wait for all async pages to finish
		 * before continuing.
		 */
		if (async_pages > nr_pages)
			async_pages -= nr_pages;
		else
			async_pages = 0;
		wait_event(fs_info->async_submit_wait,
			   atomic_read(&fs_info->async_delalloc_pages) <=
			   async_pages);
skip_async:
		loops++;
		if (wait_ordered && !trans) {
			btrfs_wait_ordered_roots(fs_info, items, 0, (u64)-1);
		} else {
			time_left = schedule_timeout_killable(1);
			if (time_left)
				break;
		}

		/*
		 * If we are for preemption we just want a one-shot of delalloc
		 * flushing so we can stop flushing if we decide we don't need
		 * to anymore.
		 */
		if (for_preempt)
			break;

		spin_lock(&space_info->lock);
		if (list_empty(&space_info->tickets) &&
		    list_empty(&space_info->priority_tickets)) {
			spin_unlock(&space_info->lock);
			break;
		}
		spin_unlock(&space_info->lock);

		delalloc_bytes = percpu_counter_sum_positive(
						&fs_info->delalloc_bytes);
		ordered_bytes = percpu_counter_sum_positive(
						&fs_info->ordered_bytes);
	}
}

/*
 * Try to flush some data based on policy set by @state. This is only advisory
 * and may fail for various reasons. The caller is supposed to examine the
 * state of @space_info to detect the outcome.
 */
static void flush_space(struct btrfs_fs_info *fs_info,
			struct btrfs_space_info *space_info, u64 num_bytes,
			enum btrfs_flush_state state, bool for_preempt)
{
	struct btrfs_root *root = fs_info->extent_root;
	struct btrfs_trans_handle *trans;
	int nr;
	int ret = 0;

	switch (state) {
	case FLUSH_DELAYED_ITEMS_NR:
	case FLUSH_DELAYED_ITEMS:
		if (state == FLUSH_DELAYED_ITEMS_NR)
			nr = calc_reclaim_items_nr(fs_info, num_bytes) * 2;
		else
			nr = -1;

		trans = btrfs_join_transaction(root);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			break;
		}
		ret = btrfs_run_delayed_items_nr(trans, nr);
		btrfs_end_transaction(trans);
		break;
	case FLUSH_DELALLOC:
	case FLUSH_DELALLOC_WAIT:
	case FLUSH_DELALLOC_FULL:
		if (state == FLUSH_DELALLOC_FULL)
			num_bytes = U64_MAX;
		shrink_delalloc(fs_info, space_info, num_bytes,
				state != FLUSH_DELALLOC, for_preempt);
		break;
	case FLUSH_DELAYED_REFS_NR:
	case FLUSH_DELAYED_REFS:
		trans = btrfs_join_transaction(root);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			break;
		}
		if (state == FLUSH_DELAYED_REFS_NR)
			nr = calc_reclaim_items_nr(fs_info, num_bytes);
		else
			nr = 0;
		btrfs_run_delayed_refs(trans, nr);
		btrfs_end_transaction(trans);
		break;
	case ALLOC_CHUNK:
	case ALLOC_CHUNK_FORCE:
		trans = btrfs_join_transaction(root);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			break;
		}
		ret = btrfs_chunk_alloc(trans,
				btrfs_get_alloc_profile(fs_info, space_info->flags),
				(state == ALLOC_CHUNK) ? CHUNK_ALLOC_NO_FORCE :
					CHUNK_ALLOC_FORCE);
		btrfs_end_transaction(trans);
		if (ret > 0 || ret == -ENOSPC)
			ret = 0;
		break;
	case RUN_DELAYED_IPUTS:
		/*
		 * If we have pending delayed iputs then we could free up a
		 * bunch of pinned space, so make sure we run the iputs before
		 * we do our pinned bytes check below.
		 */
		btrfs_run_delayed_iputs(fs_info);
		btrfs_wait_on_delayed_iputs(fs_info);
		break;
	case COMMIT_TRANS:
		ASSERT(current->journal_info == NULL);
		trans = btrfs_join_transaction(root);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			break;
		}
		ret = btrfs_commit_transaction(trans);
		break;
	default:
		ret = -ENOSPC;
		break;
	}

	trace_btrfs_flush_space(fs_info, space_info->flags, num_bytes, state,
				ret, for_preempt);
	return;
}

static inline u64
btrfs_calc_reclaim_metadata_size(struct btrfs_fs_info *fs_info,
				 struct btrfs_space_info *space_info)
{
	u64 used;
	u64 avail;
	u64 to_reclaim = space_info->reclaim_size;

	lockdep_assert_held(&space_info->lock);

	avail = calc_available_free_space(fs_info, space_info,
					  BTRFS_RESERVE_FLUSH_ALL);
	used = btrfs_space_info_used(space_info, true);

	/*
	 * We may be flushing because suddenly we have less space than we had
	 * before, and now we're well over-committed based on our current free
	 * space.  If that's the case add in our overage so we make sure to put
	 * appropriate pressure on the flushing state machine.
	 */
	if (space_info->total_bytes + avail < used)
		to_reclaim += used - (space_info->total_bytes + avail);

	return to_reclaim;
}
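
/*
 * Overage example (illustrative numbers): with 64MiB of ticket reservations
 * queued (reclaim_size), total_bytes = 1GiB, avail = 256MiB and used =
 * 1.5GiB, we are 1.5G - (1G + 256M) = 256MiB over-committed, so to_reclaim
 * becomes 64M + 256M = 320MiB.
 */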

static bool need_preemptive_reclaim(struct btrfs_fs_info *fs_info,
				    struct btrfs_space_info *space_info)
{
	u64 global_rsv_size = fs_info->global_block_rsv.reserved;
	u64 ordered, delalloc;
	u64 thresh = div_factor_fine(space_info->total_bytes, 90);
	u64 used;

	/* If we're just plain full then async reclaim just slows us down. */
	if ((space_info->bytes_used + space_info->bytes_reserved +
	     global_rsv_size) >= thresh)
		return false;

	used = space_info->bytes_may_use + space_info->bytes_pinned;

	/* The total flushable belongs to the global rsv, don't flush. */
	if (global_rsv_size >= used)
		return false;

	/*
	 * 128MiB is 1/4 of the maximum global rsv size.  If we have less than
	 * that devoted to other reservations then there's no sense in flushing,
	 * we don't have a lot of things that need flushing.
	 */
	if (used - global_rsv_size <= SZ_128M)
		return false;

	/*
	 * We have tickets queued, bail so we don't compete with the async
	 * flushers.
	 */
	if (space_info->reclaim_size)
		return false;

	/*
	 * If we have over half of the free space occupied by reservations or
	 * pinned then we want to start flushing.
	 *
	 * We do not do the traditional thing here, which is to say
	 *
	 *   if (used >= ((total_bytes + avail) / 2))
	 *     return 1;
	 *
	 * because this doesn't quite work how we want.  If we had more than 50%
	 * of the space_info used by bytes_used and we had 0 available we'd just
	 * constantly run the background flusher.  Instead we want it to kick in
	 * if our reclaimable space exceeds our clamped free space.
	 *
	 * Our clamping range is 2^1 -> 2^8.  Practically speaking that means
	 * the following:
	 *
	 * Amount of RAM        Minimum threshold       Maximum threshold
	 *
	 *        256GiB                     1GiB                  128GiB
	 *        128GiB                   512MiB                   64GiB
	 *         64GiB                   256MiB                   32GiB
	 *         32GiB                   128MiB                   16GiB
	 *         16GiB                    64MiB                    8GiB
	 *
	 * This is the range our thresholds will fall in, corresponding to how
	 * much delalloc we need for the background flusher to kick in.
	 */

	thresh = calc_available_free_space(fs_info, space_info,
					   BTRFS_RESERVE_FLUSH_ALL);
	used = space_info->bytes_used + space_info->bytes_reserved +
	       space_info->bytes_readonly + global_rsv_size;
	if (used < space_info->total_bytes)
		thresh += space_info->total_bytes - used;
	thresh >>= space_info->clamp;

	used = space_info->bytes_pinned;

	/*
	 * If we have more ordered bytes than delalloc bytes then we're either
	 * doing a lot of DIO, or we simply don't have a lot of delalloc waiting
	 * around.  Preemptive flushing is only useful in that it can free up
	 * space before tickets need to wait for things to finish.  In the case
	 * of ordered extents, preemptively waiting on ordered extents gets us
	 * nothing, if our reservations are tied up in ordered extents we'll
	 * simply have to slow down writers by forcing them to wait on ordered
	 * extents.
	 *
	 * In the case that ordered is larger than delalloc, only include the
	 * block reserves that we would actually be able to directly reclaim
	 * from.  In this case if we're heavy on metadata operations this will
	 * clearly be heavy enough to warrant preemptive flushing.  In the case
	 * of heavy DIO or ordered reservations, preemptive flushing will just
	 * waste time and cause us to slow down.
	 *
	 * We want to make sure we truly are maxed out on ordered however, so
	 * cut ordered in half, and if it's still higher than delalloc then we
	 * can keep flushing.  This is to avoid the case where we start
	 * flushing, and now delalloc == ordered and we stop preemptively
	 * flushing when we could still have several gigs of delalloc to flush.
	 */
	ordered = percpu_counter_read_positive(&fs_info->ordered_bytes) >> 1;
	delalloc = percpu_counter_read_positive(&fs_info->delalloc_bytes);
	if (ordered >= delalloc)
		used += fs_info->delayed_refs_rsv.reserved +
			fs_info->delayed_block_rsv.reserved;
	else
		used += space_info->bytes_may_use - global_rsv_size;

	return (used >= thresh && !btrfs_fs_closing(fs_info) &&
		!test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state));
}
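
/*
 * Clamp example (illustrative numbers): with total_bytes = 10GiB, avail =
 * 2GiB and 4GiB accounted to bytes_used/reserved/readonly plus the global
 * rsv, the base is 2G + (10G - 4G) = 8GiB of "free" space; with clamp == 2
 * the preemptive threshold is 8GiB >> 2 = 2GiB of reclaimable reservations.
 */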

static bool steal_from_global_rsv(struct btrfs_fs_info *fs_info,
				  struct btrfs_space_info *space_info,
				  struct reserve_ticket *ticket)
{
	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
	u64 min_bytes;

	if (global_rsv->space_info != space_info)
		return false;

	spin_lock(&global_rsv->lock);
	min_bytes = div_factor(global_rsv->size, 1);
	if (global_rsv->reserved < min_bytes + ticket->bytes) {
		spin_unlock(&global_rsv->lock);
		return false;
	}
	global_rsv->reserved -= ticket->bytes;
	remove_ticket(space_info, ticket);
	ticket->bytes = 0;
	wake_up(&ticket->wait);
	space_info->tickets_id++;
	if (global_rsv->reserved < global_rsv->size)
		global_rsv->full = 0;
	spin_unlock(&global_rsv->lock);

	return true;
}
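
/*
 * Example of the floor above: div_factor(size, 1) keeps 10% of the global
 * rsv untouchable, so with a 512MiB global rsv a ticket can only steal if
 * ~51MiB of reserved space would still remain afterwards.
 */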

/*
 * maybe_fail_all_tickets - we've exhausted our flushing, start failing tickets
 * @fs_info:    fs_info for this fs
 * @space_info: the space info we were flushing
 *
 * We call this when we've exhausted our flushing ability and haven't made
 * progress in satisfying tickets.  The reservation code handles tickets in
 * order, so if there is a large ticket first and then smaller ones we could
 * very well satisfy the smaller tickets.  This will attempt to wake up any
 * tickets in the list to catch this case.
 *
 * This function returns true if it was able to make progress by clearing out
 * other tickets, or if it stumbles across a ticket that was smaller than the
 * first ticket.
 */
static bool maybe_fail_all_tickets(struct btrfs_fs_info *fs_info,
				   struct btrfs_space_info *space_info)
{
	struct reserve_ticket *ticket;
	u64 tickets_id = space_info->tickets_id;
	const bool aborted = BTRFS_FS_ERROR(fs_info);

	trace_btrfs_fail_all_tickets(fs_info, space_info);

	if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
		btrfs_info(fs_info, "cannot satisfy tickets, dumping space info");
		__btrfs_dump_space_info(fs_info, space_info);
	}

	while (!list_empty(&space_info->tickets) &&
	       tickets_id == space_info->tickets_id) {
		ticket = list_first_entry(&space_info->tickets,
					  struct reserve_ticket, list);

		if (!aborted && ticket->steal &&
		    steal_from_global_rsv(fs_info, space_info, ticket))
			return true;

		if (!aborted && btrfs_test_opt(fs_info, ENOSPC_DEBUG))
			btrfs_info(fs_info, "failing ticket with %llu bytes",
				   ticket->bytes);

		remove_ticket(space_info, ticket);
		if (aborted)
			ticket->error = -EIO;
		else
			ticket->error = -ENOSPC;
		wake_up(&ticket->wait);

		/*
		 * We're just throwing tickets away, so more flushing may not
		 * trip over btrfs_try_granting_tickets, so we need to call it
		 * here to see if we can make progress with the next ticket in
		 * the list.
		 */
		if (!aborted)
			btrfs_try_granting_tickets(fs_info, space_info);
	}
	return (tickets_id != space_info->tickets_id);
}

/*
 * This is for normal flushers, we can wait all goddamned day if we want to.  We
 * will loop and continuously try to flush as long as we are making progress.
 * We count progress as clearing off tickets each time we have to loop.
 */
static void btrfs_async_reclaim_metadata_space(struct work_struct *work)
{
	struct btrfs_fs_info *fs_info;
	struct btrfs_space_info *space_info;
	u64 to_reclaim;
	enum btrfs_flush_state flush_state;
	int commit_cycles = 0;
	u64 last_tickets_id;

	fs_info = container_of(work, struct btrfs_fs_info, async_reclaim_work);
	space_info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);

	spin_lock(&space_info->lock);
	to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info, space_info);
	if (!to_reclaim) {
		space_info->flush = 0;
		spin_unlock(&space_info->lock);
		return;
	}
	last_tickets_id = space_info->tickets_id;
	spin_unlock(&space_info->lock);

	flush_state = FLUSH_DELAYED_ITEMS_NR;
	do {
		flush_space(fs_info, space_info, to_reclaim, flush_state, false);
		spin_lock(&space_info->lock);
		if (list_empty(&space_info->tickets)) {
			space_info->flush = 0;
			spin_unlock(&space_info->lock);
			return;
		}
		to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info,
							      space_info);
		if (last_tickets_id == space_info->tickets_id) {
			flush_state++;
		} else {
			last_tickets_id = space_info->tickets_id;
			flush_state = FLUSH_DELAYED_ITEMS_NR;
			if (commit_cycles)
				commit_cycles--;
		}

		/*
		 * We do not want to empty the system of delalloc unless we're
		 * under heavy pressure, so allow one trip through the flushing
		 * logic before we start doing a FLUSH_DELALLOC_FULL.
		 */
		if (flush_state == FLUSH_DELALLOC_FULL && !commit_cycles)
			flush_state++;

		/*
		 * We don't want to force a chunk allocation until we've tried
		 * pretty hard to reclaim space.  Think of the case where we
		 * freed up a bunch of space and so have a lot of pinned space
		 * to reclaim.  We would rather use that than possibly create an
		 * underutilized metadata chunk.  So if this is our first run
		 * through the flushing state machine skip ALLOC_CHUNK_FORCE and
		 * commit the transaction.  If nothing has changed the next go
		 * around then we can force a chunk allocation.
		 */
		if (flush_state == ALLOC_CHUNK_FORCE && !commit_cycles)
			flush_state++;

		if (flush_state > COMMIT_TRANS) {
			commit_cycles++;
			if (commit_cycles > 2) {
				if (maybe_fail_all_tickets(fs_info, space_info)) {
					flush_state = FLUSH_DELAYED_ITEMS_NR;
					commit_cycles--;
				} else {
					space_info->flush = 0;
				}
			} else {
				flush_state = FLUSH_DELAYED_ITEMS_NR;
			}
		}
		spin_unlock(&space_info->lock);
	} while (flush_state <= COMMIT_TRANS);
}

/*
 * This handles pre-flushing of metadata space before we get to the point that
 * we need to start blocking threads on tickets.  The logic here is different
 * from the other flush paths because it doesn't rely on tickets to tell us how
 * much we need to flush, instead it attempts to keep us below the 80% full
 * watermark of space by flushing whichever reservation pool is currently the
 * largest.
 */
static void btrfs_preempt_reclaim_metadata_space(struct work_struct *work)
{
	struct btrfs_fs_info *fs_info;
	struct btrfs_space_info *space_info;
	struct btrfs_block_rsv *delayed_block_rsv;
	struct btrfs_block_rsv *delayed_refs_rsv;
	struct btrfs_block_rsv *global_rsv;
	struct btrfs_block_rsv *trans_rsv;
	int loops = 0;

	fs_info = container_of(work, struct btrfs_fs_info,
			       preempt_reclaim_work);
	space_info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
	delayed_block_rsv = &fs_info->delayed_block_rsv;
	delayed_refs_rsv = &fs_info->delayed_refs_rsv;
	global_rsv = &fs_info->global_block_rsv;
	trans_rsv = &fs_info->trans_block_rsv;

	spin_lock(&space_info->lock);
	while (need_preemptive_reclaim(fs_info, space_info)) {
		enum btrfs_flush_state flush;
		u64 delalloc_size = 0;
		u64 to_reclaim, block_rsv_size;
		u64 global_rsv_size = global_rsv->reserved;

		loops++;

		/*
		 * We don't have a precise counter for the metadata being
		 * reserved for delalloc, so we'll approximate it by subtracting
		 * out the block rsv's space from the bytes_may_use.  If that
		 * amount is higher than the individual reserves, then we can
		 * assume it's tied up in delalloc reservations.
		 */
		block_rsv_size = global_rsv_size +
			delayed_block_rsv->reserved +
			delayed_refs_rsv->reserved +
			trans_rsv->reserved;
		if (block_rsv_size < space_info->bytes_may_use)
			delalloc_size = space_info->bytes_may_use - block_rsv_size;
		spin_unlock(&space_info->lock);

		/*
		 * We don't want to include the global_rsv in our calculation,
		 * because that's space we can't touch.  Subtract it from the
		 * block_rsv_size for the next checks.
		 */
		block_rsv_size -= global_rsv_size;

		/*
		 * We really want to avoid flushing delalloc too much, as it
		 * could result in poor allocation patterns, so only flush it if
		 * it's larger than the rest of the pools combined.
		 */
		if (delalloc_size > block_rsv_size) {
			to_reclaim = delalloc_size;
			flush = FLUSH_DELALLOC;
		} else if (space_info->bytes_pinned >
			   (delayed_block_rsv->reserved +
			    delayed_refs_rsv->reserved)) {
			to_reclaim = space_info->bytes_pinned;
			flush = COMMIT_TRANS;
		} else if (delayed_block_rsv->reserved >
			   delayed_refs_rsv->reserved) {
			to_reclaim = delayed_block_rsv->reserved;
			flush = FLUSH_DELAYED_ITEMS_NR;
		} else {
			to_reclaim = delayed_refs_rsv->reserved;
			flush = FLUSH_DELAYED_REFS_NR;
		}

		/*
		 * We don't want to reclaim everything, just a portion, so scale
		 * down the to_reclaim by 1/4.  If it takes us down to 0,
		 * reclaim one item's worth.
		 */
		to_reclaim >>= 2;
		if (!to_reclaim)
			to_reclaim = btrfs_calc_insert_metadata_size(fs_info, 1);
		flush_space(fs_info, space_info, to_reclaim, flush, true);
		cond_resched();
		spin_lock(&space_info->lock);
	}

	/* We only went through once, back off our clamping. */
	if (loops == 1 && !space_info->reclaim_size)
		space_info->clamp = max(1, space_info->clamp - 1);
	trace_btrfs_done_preemptive_reclaim(fs_info, space_info);
	spin_unlock(&space_info->lock);
}

/*
 * FLUSH_DELALLOC_WAIT:
 *   Space is freed from flushing delalloc in one of two ways.
 *
 *   1) compression is on and we allocate less space than we reserved
 *   2) we are overwriting existing space
 *
 *   For #1 that extra space is reclaimed as soon as the delalloc pages are
 *   COWed, by way of btrfs_add_reserved_bytes() which adds the actual extent
 *   length to ->bytes_reserved, and subtracts the reserved space from
 *   ->bytes_may_use.
 *
 *   For #2 this is trickier.  Once the ordered extent runs we will drop the
 *   extent in the range we are overwriting, which creates a delayed ref for
 *   that freed extent.  This however is not reclaimed until the transaction
 *   commits, thus the next stages.
 *
 * RUN_DELAYED_IPUTS
 *   If we are freeing inodes, we want to make sure all delayed iputs have
 *   completed, because they could have been on an inode with i_nlink == 0, and
 *   thus have been truncated and freed up space.  But again this space is not
 *   immediately re-usable, it comes in the form of a delayed ref, which must be
 *   run and then the transaction must be committed.
 *
 * COMMIT_TRANS
 *   This is where we reclaim all of the pinned space generated by running the
 *   iputs.
 *
 * ALLOC_CHUNK_FORCE
 *   For data we start with alloc chunk force, however we could have been full
 *   before, and then the transaction commit could have freed new block groups,
 *   so if we now have space to allocate do the force chunk allocation.
 */
static const enum btrfs_flush_state data_flush_states[] = {
	FLUSH_DELALLOC_FULL,
	RUN_DELAYED_IPUTS,
	COMMIT_TRANS,
	ALLOC_CHUNK_FORCE,
};

static void btrfs_async_reclaim_data_space(struct work_struct *work)
{
	struct btrfs_fs_info *fs_info;
	struct btrfs_space_info *space_info;
	u64 last_tickets_id;
	enum btrfs_flush_state flush_state = 0;

	fs_info = container_of(work, struct btrfs_fs_info, async_data_reclaim_work);
	space_info = fs_info->data_sinfo;

	spin_lock(&space_info->lock);
	if (list_empty(&space_info->tickets)) {
		space_info->flush = 0;
		spin_unlock(&space_info->lock);
		return;
	}
	last_tickets_id = space_info->tickets_id;
	spin_unlock(&space_info->lock);

	while (!space_info->full) {
		flush_space(fs_info, space_info, U64_MAX, ALLOC_CHUNK_FORCE, false);
		spin_lock(&space_info->lock);
		if (list_empty(&space_info->tickets)) {
			space_info->flush = 0;
			spin_unlock(&space_info->lock);
			return;
		}

		/* Something happened, fail everything and bail. */
		if (BTRFS_FS_ERROR(fs_info))
			goto aborted_fs;
		last_tickets_id = space_info->tickets_id;
		spin_unlock(&space_info->lock);
	}

	while (flush_state < ARRAY_SIZE(data_flush_states)) {
		flush_space(fs_info, space_info, U64_MAX,
			    data_flush_states[flush_state], false);
		spin_lock(&space_info->lock);
		if (list_empty(&space_info->tickets)) {
			space_info->flush = 0;
			spin_unlock(&space_info->lock);
			return;
		}

		if (last_tickets_id == space_info->tickets_id) {
			flush_state++;
		} else {
			last_tickets_id = space_info->tickets_id;
			flush_state = 0;
		}

		if (flush_state >= ARRAY_SIZE(data_flush_states)) {
			if (space_info->full) {
				if (maybe_fail_all_tickets(fs_info, space_info))
					flush_state = 0;
				else
					space_info->flush = 0;
			} else {
				flush_state = 0;
			}

			/* Something happened, fail everything and bail. */
			if (BTRFS_FS_ERROR(fs_info))
				goto aborted_fs;
		}
		spin_unlock(&space_info->lock);
	}
	return;

aborted_fs:
	maybe_fail_all_tickets(fs_info, space_info);
	space_info->flush = 0;
	spin_unlock(&space_info->lock);
}
1229
1230 void btrfs_init_async_reclaim_work(struct btrfs_fs_info *fs_info)
1231 {
1232         INIT_WORK(&fs_info->async_reclaim_work, btrfs_async_reclaim_metadata_space);
1233         INIT_WORK(&fs_info->async_data_reclaim_work, btrfs_async_reclaim_data_space);
1234         INIT_WORK(&fs_info->preempt_reclaim_work,
1235                   btrfs_preempt_reclaim_metadata_space);
1236 }
1237
1238 static const enum btrfs_flush_state priority_flush_states[] = {
1239         FLUSH_DELAYED_ITEMS_NR,
1240         FLUSH_DELAYED_ITEMS,
1241         ALLOC_CHUNK,
1242 };
1243
1244 static const enum btrfs_flush_state evict_flush_states[] = {
1245         FLUSH_DELAYED_ITEMS_NR,
1246         FLUSH_DELAYED_ITEMS,
1247         FLUSH_DELAYED_REFS_NR,
1248         FLUSH_DELAYED_REFS,
1249         FLUSH_DELALLOC,
1250         FLUSH_DELALLOC_WAIT,
1251         FLUSH_DELALLOC_FULL,
1252         ALLOC_CHUNK,
1253         COMMIT_TRANS,
1254 };
1255
1256 static void priority_reclaim_metadata_space(struct btrfs_fs_info *fs_info,
1257                                 struct btrfs_space_info *space_info,
1258                                 struct reserve_ticket *ticket,
1259                                 const enum btrfs_flush_state *states,
1260                                 int states_nr)
1261 {
1262         u64 to_reclaim;
1263         int flush_state;
1264
1265         spin_lock(&space_info->lock);
1266         to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info, space_info);
1267         if (!to_reclaim) {
1268                 spin_unlock(&space_info->lock);
1269                 return;
1270         }
1271         spin_unlock(&space_info->lock);
1272
1273         flush_state = 0;
1274         do {
1275                 flush_space(fs_info, space_info, to_reclaim, states[flush_state],
1276                             false);
1277                 flush_state++;
1278                 spin_lock(&space_info->lock);
1279                 if (ticket->bytes == 0) {
1280                         spin_unlock(&space_info->lock);
1281                         return;
1282                 }
1283                 spin_unlock(&space_info->lock);
1284         } while (flush_state < states_nr);
1285 }
1286
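/*
 * Priority data reservations can only be helped by allocating new data
 * chunks, so keep force-allocating until either the ticket is granted or
 * the data space_info is marked full.
 */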
1287 static void priority_reclaim_data_space(struct btrfs_fs_info *fs_info,
1288                                         struct btrfs_space_info *space_info,
1289                                         struct reserve_ticket *ticket)
1290 {
1291         while (!space_info->full) {
1292                 flush_space(fs_info, space_info, U64_MAX, ALLOC_CHUNK_FORCE, false);
1293                 spin_lock(&space_info->lock);
1294                 if (ticket->bytes == 0) {
1295                         spin_unlock(&space_info->lock);
1296                         return;
1297                 }
1298                 spin_unlock(&space_info->lock);
1299         }
1300 }
1301
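/*
 * Sleep until the ticket is granted (ticket->bytes drops to zero), an error
 * is set on it, or a fatal signal arrives.  In the fatal signal case we
 * remove the ticket ourselves and fail it with -EINTR.
 */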
1302 static void wait_reserve_ticket(struct btrfs_fs_info *fs_info,
1303                                 struct btrfs_space_info *space_info,
1304                                 struct reserve_ticket *ticket)
1305 {
1307         DEFINE_WAIT(wait);
1308         int ret = 0;
1309
1310         spin_lock(&space_info->lock);
1311         while (ticket->bytes > 0 && ticket->error == 0) {
1312                 ret = prepare_to_wait_event(&ticket->wait, &wait, TASK_KILLABLE);
1313                 if (ret) {
1314                         /*
1315                          * Delete us from the list. After we unlock the space
1316                          * info, we don't want the async reclaim job to reserve
1317                          * space for this ticket. If that happened, the
1318                          * ticket's task would not know that space was reserved
1319                          * despite getting an error, resulting in a space leak
1320                          * (bytes_may_use counter of our space_info).
1321                          */
1322                         remove_ticket(space_info, ticket);
1323                         ticket->error = -EINTR;
1324                         break;
1325                 }
1326                 spin_unlock(&space_info->lock);
1327
1328                 schedule();
1329
1330                 finish_wait(&ticket->wait, &wait);
1331                 spin_lock(&space_info->lock);
1332         }
1333         spin_unlock(&space_info->lock);
1334 }
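
/*
 * The wake side of this handshake lives elsewhere in this file:
 * btrfs_try_granting_tickets() zeroes ticket->bytes on success and
 * maybe_fail_all_tickets() sets ticket->error, both under
 * space_info->lock, before issuing wake_up(&ticket->wait).
 */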
1335
1336 /**
1337  * handle_reserve_ticket - do the appropriate flushing and waiting for a ticket
1338  *
1339  * @fs_info:    the filesystem
1340  * @space_info: space info for the reservation
1341  * @ticket:     ticket for the reservation
1342  * @start_ns:   timestamp when the reservation started
1343  * @orig_bytes: amount of bytes originally reserved
1344  * @flush:      how much we can flush
1345  *
1346  * This does the work of figuring out how to flush for the ticket, waiting for
1347  * the reservation, and returning the appropriate error if there is one.
1348  */
1349 static int handle_reserve_ticket(struct btrfs_fs_info *fs_info,
1350                                  struct btrfs_space_info *space_info,
1351                                  struct reserve_ticket *ticket,
1352                                  u64 start_ns, u64 orig_bytes,
1353                                  enum btrfs_reserve_flush_enum flush)
1354 {
1355         int ret;
1356
1357         switch (flush) {
1358         case BTRFS_RESERVE_FLUSH_DATA:
1359         case BTRFS_RESERVE_FLUSH_ALL:
1360         case BTRFS_RESERVE_FLUSH_ALL_STEAL:
1361                 wait_reserve_ticket(fs_info, space_info, ticket);
1362                 break;
1363         case BTRFS_RESERVE_FLUSH_LIMIT:
1364                 priority_reclaim_metadata_space(fs_info, space_info, ticket,
1365                                                 priority_flush_states,
1366                                                 ARRAY_SIZE(priority_flush_states));
1367                 break;
1368         case BTRFS_RESERVE_FLUSH_EVICT:
1369                 priority_reclaim_metadata_space(fs_info, space_info, ticket,
1370                                                 evict_flush_states,
1371                                                 ARRAY_SIZE(evict_flush_states));
1372                 break;
1373         case BTRFS_RESERVE_FLUSH_FREE_SPACE_INODE:
1374                 priority_reclaim_data_space(fs_info, space_info, ticket);
1375                 break;
1376         default:
1377                 ASSERT(0);
1378                 break;
1379         }
1380
1381         spin_lock(&space_info->lock);
1382         ret = ticket->error;
1383         if (ticket->bytes || ticket->error) {
1384                 /*
1385                  * If we were a priority ticket we may still be on the list, so
1386                  * delete ourselves here.  Because we could have other priority tickets
1387                  * behind us that require less space, run
1388                  * btrfs_try_granting_tickets() to see if their reservations can
1389                  * now be made.
1390                  */
1391                 if (!list_empty(&ticket->list)) {
1392                         remove_ticket(space_info, ticket);
1393                         btrfs_try_granting_tickets(fs_info, space_info);
1394                 }
1395
1396                 if (!ret)
1397                         ret = -ENOSPC;
1398         }
1399         spin_unlock(&space_info->lock);
1400         ASSERT(list_empty(&ticket->list));
1401         /*
1402          * Check that we can't have an error set if the reservation succeeded,
1403          * as that would confuse tasks and lead them to error out without
1404          * releasing reserved space (if an error happens the expectation is that
1405          * space wasn't reserved at all).
1406          */
1407         ASSERT(!(ticket->bytes == 0 && ticket->error));
1408         trace_btrfs_reserve_ticket(fs_info, space_info->flags, orig_bytes,
1409                                    start_ns, flush, ticket->error);
1410         return ret;
1411 }
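
/*
 * A condensed view of the dispatch above (illustrative only):
 *
 *   FLUSH_DATA / FLUSH_ALL / FLUSH_ALL_STEAL
 *     -> async worker flushes for us, we wait_reserve_ticket()
 *   FLUSH_LIMIT
 *     -> priority_reclaim_metadata_space() over priority_flush_states
 *   FLUSH_EVICT
 *     -> priority_reclaim_metadata_space() over evict_flush_states
 *   FLUSH_FREE_SPACE_INODE
 *     -> priority_reclaim_data_space()
 *
 * In every case the final answer is read from ticket->bytes and
 * ticket->error under space_info->lock.
 */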
1412
1413 /*
1414  * This returns true if this flush state will go through the ordinary flushing
1415  * code.
1416  */
1417 static inline bool is_normal_flushing(enum btrfs_reserve_flush_enum flush)
1418 {
1419         return (flush == BTRFS_RESERVE_FLUSH_ALL) ||
1420                 (flush == BTRFS_RESERVE_FLUSH_ALL_STEAL);
1421 }
1422
1423 static inline void maybe_clamp_preempt(struct btrfs_fs_info *fs_info,
1424                                        struct btrfs_space_info *space_info)
1425 {
1426         u64 ordered = percpu_counter_sum_positive(&fs_info->ordered_bytes);
1427         u64 delalloc = percpu_counter_sum_positive(&fs_info->delalloc_bytes);
1428
1429         /*
1430          * If we're heavy on ordered operations then clamping won't help us.  We
1431          * need to clamp specifically to keep up with dirtying buffered
1432          * writers, because there's not a 1:1 correlation of writing delalloc
1433          * and freeing space, like there is with flushing delayed refs or
1434          * delayed nodes.  If we're already more ordered than delalloc then
1435          * we're keeping up, otherwise we aren't and should probably clamp.
1436          */
1437         if (ordered < delalloc)
1438                 space_info->clamp = min(space_info->clamp + 1, 8);
1439 }
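
/*
 * Worked example (illustrative): clamp saturates at 8 and is consumed by
 * need_preemptive_reclaim(), earlier in this file, which shifts its free
 * space threshold right by clamp.  Each bump therefore roughly halves the
 * headroom we tolerate before preemptive flushing kicks in:
 *
 *   clamp == 1  ->  preempt below thresh >> 1
 *   clamp == 4  ->  preempt below thresh >> 4
 *   clamp == 8  ->  preempt below thresh >> 8
 */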
1440
1441 /**
1442  * __reserve_bytes - try to reserve bytes from the space_info
1443  *
1444  * @fs_info:    the filesystem
1445  * @space_info: space info we want to allocate from
1446  * @orig_bytes: number of bytes we want
1447  * @flush:      whether or not we can flush to make our reservation
1448  *
1449  * This will reserve @orig_bytes number of bytes from the space info.  If
1450  * there is not enough space it will make an attempt to flush out space to
1451  * make room.  It will do this by flushing delalloc if possible or
1452  * committing the transaction.  If @flush is BTRFS_RESERVE_NO_FLUSH then no
1453  * attempts to regain reservations will be made and this will fail if there
1454  * is not enough space already.
1455  */
1456 static int __reserve_bytes(struct btrfs_fs_info *fs_info,
1457                            struct btrfs_space_info *space_info, u64 orig_bytes,
1458                            enum btrfs_reserve_flush_enum flush)
1459 {
1460         struct work_struct *async_work;
1461         struct reserve_ticket ticket;
1462         u64 start_ns = 0;
1463         u64 used;
1464         int ret = 0;
1465         bool pending_tickets;
1466
1467         ASSERT(orig_bytes);
1468         ASSERT(!current->journal_info || flush != BTRFS_RESERVE_FLUSH_ALL);
1469
1470         if (flush == BTRFS_RESERVE_FLUSH_DATA)
1471                 async_work = &fs_info->async_data_reclaim_work;
1472         else
1473                 async_work = &fs_info->async_reclaim_work;
1474
1475         spin_lock(&space_info->lock);
1476         ret = -ENOSPC;
1477         used = btrfs_space_info_used(space_info, true);
1478
1479         /*
1480          * We don't want NO_FLUSH allocations to jump ahead of everybody; they
1481          * can generally handle ENOSPC in a different way, so treat them the
1482          * same as normal flushers when it comes to skipping pending tickets.
1483          */
1484         if (is_normal_flushing(flush) || (flush == BTRFS_RESERVE_NO_FLUSH))
1485                 pending_tickets = !list_empty(&space_info->tickets) ||
1486                         !list_empty(&space_info->priority_tickets);
1487         else
1488                 pending_tickets = !list_empty(&space_info->priority_tickets);
1489
1490         /*
1491          * Carry on if we have enough space (short-circuit) OR call
1492          * btrfs_can_overcommit() to ensure we can overcommit to continue.
1493          */
1494         if (!pending_tickets &&
1495             ((used + orig_bytes <= space_info->total_bytes) ||
1496              btrfs_can_overcommit(fs_info, space_info, orig_bytes, flush))) {
1497                 btrfs_space_info_update_bytes_may_use(fs_info, space_info,
1498                                                       orig_bytes);
1499                 ret = 0;
1500         }
1501
1502         /*
1503          * If we couldn't make a reservation then setup our reservation ticket
1504          * and kick the async worker if it's not already running.
1505          *
1506          * If we are a priority flusher then we just need to add our ticket to
1507          * the list and we will do our own flushing further down.
1508          */
1509         if (ret && flush != BTRFS_RESERVE_NO_FLUSH) {
1510                 ticket.bytes = orig_bytes;
1511                 ticket.error = 0;
1512                 space_info->reclaim_size += ticket.bytes;
1513                 init_waitqueue_head(&ticket.wait);
1514                 ticket.steal = (flush == BTRFS_RESERVE_FLUSH_ALL_STEAL);
1515                 if (trace_btrfs_reserve_ticket_enabled())
1516                         start_ns = ktime_get_ns();
1517
1518                 if (flush == BTRFS_RESERVE_FLUSH_ALL ||
1519                     flush == BTRFS_RESERVE_FLUSH_ALL_STEAL ||
1520                     flush == BTRFS_RESERVE_FLUSH_DATA) {
1521                         list_add_tail(&ticket.list, &space_info->tickets);
1522                         if (!space_info->flush) {
1523                                 /*
1524                                  * We were forced to add a reserve ticket, so
1525                                  * our preemptive flushing is unable to keep
1526                                  * up.  Clamp down on the threshold for the
1527                                  * preemptive flushing in order to keep up with
1528                                  * the workload.
1529                                  */
1530                                 maybe_clamp_preempt(fs_info, space_info);
1531
1532                                 space_info->flush = 1;
1533                                 trace_btrfs_trigger_flush(fs_info,
1534                                                           space_info->flags,
1535                                                           orig_bytes, flush,
1536                                                           "enospc");
1537                                 queue_work(system_unbound_wq, async_work);
1538                         }
1539                 } else {
1540                         list_add_tail(&ticket.list,
1541                                       &space_info->priority_tickets);
1542                 }
1543         } else if (!ret && space_info->flags & BTRFS_BLOCK_GROUP_METADATA) {
1544                 used += orig_bytes;
1545                 /*
1546                  * We will do the space reservation dance during log replay,
1547                  * which means we won't have fs_info->fs_root set, so don't do
1548                  * the async reclaim as we will panic.
1549                  */
1550                 if (!test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags) &&
1551                     !work_busy(&fs_info->preempt_reclaim_work) &&
1552                     need_preemptive_reclaim(fs_info, space_info)) {
1553                         trace_btrfs_trigger_flush(fs_info, space_info->flags,
1554                                                   orig_bytes, flush, "preempt");
1555                         queue_work(system_unbound_wq,
1556                                    &fs_info->preempt_reclaim_work);
1557                 }
1558         }
1559         spin_unlock(&space_info->lock);
1560         if (!ret || flush == BTRFS_RESERVE_NO_FLUSH)
1561                 return ret;
1562
1563         return handle_reserve_ticket(fs_info, space_info, &ticket, start_ns,
1564                                      orig_bytes, flush);
1565 }
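
/*
 * The fast path of __reserve_bytes(), spelled out (illustrative): with no
 * pending tickets to honor, the reservation succeeds immediately when
 *
 *   used + orig_bytes <= space_info->total_bytes
 *
 * and may otherwise still succeed if btrfs_can_overcommit() allows
 * metadata overcommit; only when both fail do we build a ticket and enter
 * the flushing machinery.
 */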
1566
1567 /**
1568  * btrfs_reserve_metadata_bytes - try to reserve metadata bytes from the block_rsv's space
1569  *
1570  * @root:       the root we're allocating for
1571  * @block_rsv:  block_rsv we're allocating for
1572  * @orig_bytes: number of bytes we want
1573  * @flush:      whether or not we can flush to make our reservation
1574  *
1575  * This will reserve orig_bytes number of bytes from the space info associated
1576  * with the block_rsv.  If there is not enough space it will make an attempt to
1577  * flush out space to make room.  It will do this by flushing delalloc if
1578  * possible or committing the transaction.  If @flush is
1579  * BTRFS_RESERVE_NO_FLUSH then no attempts to regain reservations will be
1580  * made and this will fail if there is not enough space already.
1581  */
1582 int btrfs_reserve_metadata_bytes(struct btrfs_root *root,
1583                                  struct btrfs_block_rsv *block_rsv,
1584                                  u64 orig_bytes,
1585                                  enum btrfs_reserve_flush_enum flush)
1586 {
1587         struct btrfs_fs_info *fs_info = root->fs_info;
1588         struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
1589         int ret;
1590
1591         ret = __reserve_bytes(fs_info, block_rsv->space_info, orig_bytes, flush);
1592         if (ret == -ENOSPC &&
1593             unlikely(root->orphan_cleanup_state == ORPHAN_CLEANUP_STARTED)) {
1594                 if (block_rsv != global_rsv &&
1595                     !btrfs_block_rsv_use_bytes(global_rsv, orig_bytes))
1596                         ret = 0;
1597         }
1598         if (ret == -ENOSPC) {
1599                 trace_btrfs_space_reservation(fs_info, "space_info:enospc",
1600                                               block_rsv->space_info->flags,
1601                                               orig_bytes, 1);
1602
1603                 if (btrfs_test_opt(fs_info, ENOSPC_DEBUG))
1604                         btrfs_dump_space_info(fs_info, block_rsv->space_info,
1605                                               orig_bytes, 0);
1606         }
1607         return ret;
1608 }
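
/*
 * Hypothetical caller sketch (the helpers named here are real; the flow
 * mirrors what btrfs_block_rsv_add() does and is illustrative only):
 *
 *   num_bytes = btrfs_calc_insert_metadata_size(fs_info, 1);
 *   ret = btrfs_reserve_metadata_bytes(root, rsv, num_bytes,
 *                                      BTRFS_RESERVE_FLUSH_ALL);
 *   if (!ret)
 *           btrfs_block_rsv_add_bytes(rsv, num_bytes, true);
 */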
1609
1610 /**
1611  * btrfs_reserve_data_bytes - try to reserve data bytes for an allocation
1612  *
1613  * @fs_info: the filesystem
1614  * @bytes:   number of bytes we need
1615  * @flush:   how we are allowed to flush
1616  *
1617  * This will reserve bytes from the data space info.  If there is not enough
1618  * space then we will attempt to flush space as specified by flush.
1619  */
1620 int btrfs_reserve_data_bytes(struct btrfs_fs_info *fs_info, u64 bytes,
1621                              enum btrfs_reserve_flush_enum flush)
1622 {
1623         struct btrfs_space_info *data_sinfo = fs_info->data_sinfo;
1624         int ret;
1625
1626         ASSERT(flush == BTRFS_RESERVE_FLUSH_DATA ||
1627                flush == BTRFS_RESERVE_FLUSH_FREE_SPACE_INODE);
1628         ASSERT(!current->journal_info || flush != BTRFS_RESERVE_FLUSH_DATA);
1629
1630         ret = __reserve_bytes(fs_info, data_sinfo, bytes, flush);
1631         if (ret == -ENOSPC) {
1632                 trace_btrfs_space_reservation(fs_info, "space_info:enospc",
1633                                               data_sinfo->flags, bytes, 1);
1634                 if (btrfs_test_opt(fs_info, ENOSPC_DEBUG))
1635                         btrfs_dump_space_info(fs_info, data_sinfo, bytes, 0);
1636         }
1637         return ret;
1638 }
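
/*
 * Hypothetical caller sketch (the helpers named here are real; the flow is
 * a simplification of the buffered write path and is illustrative only):
 *
 *   ret = btrfs_reserve_data_bytes(fs_info, len, BTRFS_RESERVE_FLUSH_DATA);
 *   if (ret)
 *           return ret;
 *   ... do the write; on failure undo the reservation with ...
 *   btrfs_free_reserved_data_space_noquota(fs_info, len);
 */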