/*
 *  fs/ext4/extents_status.c
 *
 * Written by Yongqiang Yang <xiaoqiangnk@gmail.com>
 *	Allison Henderson <achender@linux.vnet.ibm.com>
 *	Hugh Dickins <hughd@google.com>
 *	Zheng Liu <wenqing.lz@taobao.com>
 *
 * Ext4 extents status tree core functions.
 */
#include <linux/rbtree.h>
#include "ext4.h"
#include "extents_status.h"
#include "ext4_extents.h"

#include <trace/events/ext4.h>

/*
 * According to previous discussion in Ext4 Developer Workshop, we
 * will introduce a new structure called io tree to track all extent
 * status in order to solve some problems that we have met
 * (e.g. Reservation space warning), and provide extent-level locking.
 * Delay extent tree is the first step to achieve this goal.  It was
 * originally built by Yongqiang Yang.  At that time it was called the
 * delay extent tree, whose goal was only to track delayed extents in
 * memory to simplify the implementation of fiemap and bigalloc, and to
 * introduce lseek SEEK_DATA/SEEK_HOLE support.  That is why it is
 * still called the delay extent tree in the first commit.  But to
 * better convey what it does, it has been renamed to extent status
 * tree.
 *
 * Currently the first step has been done.  All delayed extents are
 * tracked in the tree.  It maintains the delayed extent when a delayed
 * allocation is issued, and the delayed extent is written out or
 * invalidated.  Therefore the implementations of fiemap and bigalloc
 * are simplified, and SEEK_DATA/SEEK_HOLE are introduced.
 *
 * The following comment describes the implementation of extent
 * status tree and future works.
 *
 * In this step all extent status are tracked by the extent status tree.
 * Thus, we can first try to look up a block mapping in this tree before
 * searching the extent tree.  Hence, the single extent cache can be
 * removed because the extent status tree can do a better job.  Extents
 * in the status tree are loaded on-demand.  Therefore, the extent
 * status tree may not contain all of the extents in a file.  Meanwhile
 * we define a shrinker to reclaim memory from the extent status tree
 * because a fragmented extent tree will make the status tree cost too
 * much memory.  Written/unwritten/hole extents in the tree will be
 * reclaimed by this shrinker when we are under high memory pressure.
 * Delayed extents will not be reclaimed because fiemap, bigalloc, and
 * seek_data/hole need them.
 *
 * Extent status tree implementation for ext4.
 *
 *
 * ==========================================================================
 * Extent status tree tracks all extent status.
 *
 * 1. Why do we need to implement an extent status tree?
 *
 * Without an extent status tree, ext4 identifies a delayed extent by
 * looking up the page cache, which has several deficiencies: the code
 * is complicated, buggy, and inefficient.
 *
 * FIEMAP, SEEK_HOLE/DATA, bigalloc, and writeout all need to know if a
 * block or a range of blocks belongs to a delayed extent.
 *
 * Let us have a look at how they work without the extent status tree.
 *
 * FIEMAP looks up the page cache to identify delayed allocations from holes.
 *
 * SEEK_HOLE/DATA has the same problem as FIEMAP.
 *
 * bigalloc looks up the page cache to figure out if a block is
 * already under delayed allocation or not in order to determine
 * whether quota reservation is needed for the cluster.
 *
 * Writeout looks up the whole page cache to see if a buffer is
 * mapped.  If there are not very many delayed buffers, this is
 * time-consuming.
 *
 * With the extent status tree implementation, FIEMAP, SEEK_HOLE/DATA,
 * bigalloc and writeout can figure out if a block or a range of
 * blocks is under delayed allocation (i.e. belongs to a delayed
 * extent) or not by searching the extent status tree.
 *
 *
 * ==========================================================================
 * 2. Ext4 extent status tree implementation
 *
 * -- extent
 * An extent is a range of blocks which are contiguous logically and
 * physically.  Unlike an extent in the extent tree, this extent is an
 * in-memory struct; there is no corresponding on-disk data.  There
 * is no limit on the length of an extent, so an extent can contain as
 * many blocks as are contiguous logically and physically.
 *
 * -- extent status tree
 * Every inode has an extent status tree and all allocated blocks
 * are added to the tree with different status.  The extents in the
 * tree are ordered by logical block number.
 *
 * -- operations on an extent status tree
 * There are three important operations on an extent status tree: finding
 * the next extent, adding an extent (a range of blocks) and removing an
 * extent; an illustrative usage sketch follows this comment block.
 *
 * -- race on an extent status tree
 * The extent status tree is protected by inode->i_es_lock.
 *
 * -- memory consumption
 * A fragmented extent tree will make the extent status tree cost too
 * much memory.  Hence, we will reclaim written/unwritten/hole extents
 * from the tree under heavy memory pressure.
 *
 *
 * ==========================================================================
 * 3. Performance analysis
 *
 * 1. There is a cached extent for write access, so if writes are
 * not very random, adding space operations are in O(1) time.
 *
 * 2. Code is much simpler, more readable and more maintainable.
 *
 * -- Refactor delayed space reservation
 *
 * -- Extent-level locking
 */
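
/*
 * Illustrative sketch (not part of the original file): roughly how the
 * three operations described above fit together for a caller inside
 * ext4.  Locking, error handling and the actual block numbers are
 * simplified or made up here; this only shows the intended call order,
 * not any real caller's logic.
 *
 *	struct extent_status es;
 *
 *	(record an extent when a mapping or reservation is made)
 *	ext4_es_insert_extent(inode, lblk, len, pblk, EXTENT_STATUS_DELAYED);
 *
 *	(look the block up again, e.g. from a map_blocks-style path)
 *	if (ext4_es_lookup_extent(inode, lblk, &es))
 *		use es.es_lblk, es.es_len and es.es_pblk;
 *
 *	(drop the range once it has been written out or invalidated)
 *	ext4_es_remove_extent(inode, lblk, len);
 */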

static struct kmem_cache *ext4_es_cachep;

static int __es_insert_extent(struct inode *inode, struct extent_status *newes);
static int __es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
			      ext4_lblk_t end);
static int __es_try_to_reclaim_extents(struct ext4_inode_info *ei,
				       int nr_to_scan);

int __init ext4_init_es(void)
{
	ext4_es_cachep = kmem_cache_create("ext4_extent_status",
					   sizeof(struct extent_status),
					   0, (SLAB_RECLAIM_ACCOUNT), NULL);
	if (ext4_es_cachep == NULL)
		return -ENOMEM;
	return 0;
}

void ext4_exit_es(void)
{
	if (ext4_es_cachep)
		kmem_cache_destroy(ext4_es_cachep);
}

void ext4_es_init_tree(struct ext4_es_tree *tree)
{
	tree->root = RB_ROOT;
	tree->cache_es = NULL;
}

#ifdef ES_DEBUG__
static void ext4_es_print_tree(struct inode *inode)
{
	struct ext4_es_tree *tree;
	struct rb_node *node;

	printk(KERN_DEBUG "status extents for inode %lu:", inode->i_ino);
	tree = &EXT4_I(inode)->i_es_tree;
	node = rb_first(&tree->root);
	while (node) {
		struct extent_status *es;

		es = rb_entry(node, struct extent_status, rb_node);
		printk(KERN_DEBUG " [%u/%u) %llu %llx",
		       es->es_lblk, es->es_len,
		       ext4_es_pblock(es), ext4_es_status(es));
		node = rb_next(node);
	}
	printk(KERN_DEBUG "\n");
}
#else
#define ext4_es_print_tree(inode)
#endif

static inline ext4_lblk_t ext4_es_end(struct extent_status *es)
{
	BUG_ON(es->es_lblk + es->es_len < es->es_lblk);
	return es->es_lblk + es->es_len - 1;
}
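
/*
 * Illustrative note (not part of the original file): es_lblk and es_len
 * describe an extent as a start block plus a length, which is what the
 * "[%u/%u)" debug format above prints.  For example, an extent with
 * es_lblk == 100 and es_len == 8 covers logical blocks 100 through 107
 * inclusive, so ext4_es_end() returns 107.
 */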

/*
 * Search through the tree for an extent with a given offset.  If
 * it can't be found, try to find the next extent.
 */
static struct extent_status *__es_tree_search(struct rb_root *root,
					      ext4_lblk_t lblk)
{
	struct rb_node *node = root->rb_node;
	struct extent_status *es = NULL;

	while (node) {
		es = rb_entry(node, struct extent_status, rb_node);
		if (lblk < es->es_lblk)
			node = node->rb_left;
		else if (lblk > ext4_es_end(es))
			node = node->rb_right;
		else
			return es;
	}

	if (es && lblk < es->es_lblk)
		return es;

	if (es && lblk > ext4_es_end(es)) {
		node = rb_next(&es->rb_node);
		return node ? rb_entry(node, struct extent_status, rb_node) :
			      NULL;
	}
	return NULL;
}

/*
 * ext4_es_find_delayed_extent: find the 1st delayed extent covering @lblk
 * if it exists, otherwise, the next delayed extent after @lblk.
 *
 * @inode: the inode which owns delayed extents
 * @lblk: the offset where we start to search
 * @es: delayed extent that we found
 */
void ext4_es_find_delayed_extent(struct inode *inode, ext4_lblk_t lblk,
				 struct extent_status *es)
{
	struct ext4_es_tree *tree = NULL;
	struct extent_status *es1 = NULL;
	struct rb_node *node;

	trace_ext4_es_find_delayed_extent_enter(inode, lblk);

	read_lock(&EXT4_I(inode)->i_es_lock);
	tree = &EXT4_I(inode)->i_es_tree;

	/* find extent in cache first */
	es->es_lblk = es->es_len = es->es_pblk = 0;
	if (tree->cache_es) {
		es1 = tree->cache_es;
		if (in_range(lblk, es1->es_lblk, es1->es_len)) {
			es_debug("%u cached by [%u/%u) %llu %llx\n",
				 lblk, es1->es_lblk, es1->es_len,
				 ext4_es_pblock(es1), ext4_es_status(es1));
			goto out;
		}
	}

	es1 = __es_tree_search(&tree->root, lblk);

out:
	if (es1 && !ext4_es_is_delayed(es1)) {
		while ((node = rb_next(&es1->rb_node)) != NULL) {
			es1 = rb_entry(node, struct extent_status, rb_node);
			if (ext4_es_is_delayed(es1))
				break;
		}
	}

	if (es1 && ext4_es_is_delayed(es1)) {
		tree->cache_es = es1;
		es->es_lblk = es1->es_lblk;
		es->es_len = es1->es_len;
		es->es_pblk = es1->es_pblk;
	}

	read_unlock(&EXT4_I(inode)->i_es_lock);

	ext4_es_lru_add(inode);
	trace_ext4_es_find_delayed_extent_exit(inode, es);
}

static struct extent_status *
ext4_es_alloc_extent(struct inode *inode, ext4_lblk_t lblk, ext4_lblk_t len,
		     ext4_fsblk_t pblk)
{
	struct extent_status *es;

	es = kmem_cache_alloc(ext4_es_cachep, GFP_ATOMIC);
	if (es == NULL)
		return NULL;
	es->es_lblk = lblk;
	es->es_len = len;
	es->es_pblk = pblk;

	/*
	 * We don't count delayed extents because we never try to reclaim them.
	 */
	if (!ext4_es_is_delayed(es)) {
		EXT4_I(inode)->i_es_lru_nr++;
		percpu_counter_inc(&EXT4_SB(inode->i_sb)->s_extent_cache_cnt);
	}

	return es;
}

static void ext4_es_free_extent(struct inode *inode, struct extent_status *es)
{
	/* Decrease the lru counter when this es is not delayed */
	if (!ext4_es_is_delayed(es)) {
		BUG_ON(EXT4_I(inode)->i_es_lru_nr == 0);
		EXT4_I(inode)->i_es_lru_nr--;
		percpu_counter_dec(&EXT4_SB(inode->i_sb)->s_extent_cache_cnt);
	}

	kmem_cache_free(ext4_es_cachep, es);
}

/*
 * Check whether or not two extents can be merged:
 *  - logical block number is contiguous
 *  - physical block number is contiguous
 */
static int ext4_es_can_be_merged(struct extent_status *es1,
				 struct extent_status *es2)
{
	if (ext4_es_status(es1) != ext4_es_status(es2))
		return 0;

	if (((__u64) es1->es_len) + es2->es_len > 0xFFFFFFFFULL)
		return 0;

	if (((__u64) es1->es_lblk) + es1->es_len != es2->es_lblk)
		return 0;

	if ((ext4_es_is_written(es1) || ext4_es_is_unwritten(es1)) &&
	    (ext4_es_pblock(es1) + es1->es_len == ext4_es_pblock(es2)))
		return 1;

	if (ext4_es_is_hole(es1))
		return 1;

	/* we need to check that a delayed extent is without unwritten status */
	if (ext4_es_is_delayed(es1) && !ext4_es_is_unwritten(es1))
		return 1;

	return 0;
}

static struct extent_status *
ext4_es_try_to_merge_left(struct inode *inode, struct extent_status *es)
{
	struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree;
	struct extent_status *es1;
	struct rb_node *node;

	node = rb_prev(&es->rb_node);
	if (!node)
		return es;

	es1 = rb_entry(node, struct extent_status, rb_node);
	if (ext4_es_can_be_merged(es1, es)) {
		es1->es_len += es->es_len;
		rb_erase(&es->rb_node, &tree->root);
		ext4_es_free_extent(inode, es);
		es = es1;
	}

	return es;
}

static struct extent_status *
ext4_es_try_to_merge_right(struct inode *inode, struct extent_status *es)
{
	struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree;
	struct extent_status *es1;
	struct rb_node *node;

	node = rb_next(&es->rb_node);
	if (!node)
		return es;

	es1 = rb_entry(node, struct extent_status, rb_node);
	if (ext4_es_can_be_merged(es, es1)) {
		es->es_len += es1->es_len;
		rb_erase(node, &tree->root);
		ext4_es_free_extent(inode, es1);
	}

	return es;
}

#ifdef ES_AGGRESSIVE_TEST
static void ext4_es_insert_extent_ext_check(struct inode *inode,
					    struct extent_status *es)
{
	struct ext4_ext_path *path = NULL;
	struct ext4_extent *ex;
	ext4_lblk_t ee_block;
	ext4_fsblk_t ee_start;
	unsigned short ee_len;
	int depth, ee_status, es_status;

	path = ext4_ext_find_extent(inode, es->es_lblk, NULL);
	if (IS_ERR(path))
		return;

	depth = ext_depth(inode);
	ex = path[depth].p_ext;

	if (ex) {

		ee_block = le32_to_cpu(ex->ee_block);
		ee_start = ext4_ext_pblock(ex);
		ee_len = ext4_ext_get_actual_len(ex);

		ee_status = ext4_ext_is_uninitialized(ex) ? 1 : 0;
		es_status = ext4_es_is_unwritten(es) ? 1 : 0;

		/*
		 * Make sure ex and es do not overlap when we try to insert
		 * a delayed/hole extent.
		 */
		if (!ext4_es_is_written(es) && !ext4_es_is_unwritten(es)) {
			if (in_range(es->es_lblk, ee_block, ee_len)) {
				pr_warn("ES insert assertion failed for "
					"inode: %lu we can find an extent "
					"at block [%d/%d/%llu/%c], but we "
					"want to add a delayed/hole extent "
					"[%d/%d/%llu/%llx]\n",
					inode->i_ino, ee_block, ee_len,
					ee_start, ee_status ? 'u' : 'w',
					es->es_lblk, es->es_len,
					ext4_es_pblock(es), ext4_es_status(es));
			}
			goto out;
		}

		/*
		 * We don't check ee_block == es->es_lblk, etc. because es
		 * might be a part of whole extent, vice versa.
		 */
		if (es->es_lblk < ee_block ||
		    ext4_es_pblock(es) != ee_start + es->es_lblk - ee_block) {
			pr_warn("ES insert assertion failed for inode: %lu "
				"ex_status [%d/%d/%llu/%c] != "
				"es_status [%d/%d/%llu/%c]\n", inode->i_ino,
				ee_block, ee_len, ee_start,
				ee_status ? 'u' : 'w', es->es_lblk, es->es_len,
				ext4_es_pblock(es), es_status ? 'u' : 'w');
			goto out;
		}

		if (ee_status ^ es_status) {
			pr_warn("ES insert assertion failed for inode: %lu "
				"ex_status [%d/%d/%llu/%c] != "
				"es_status [%d/%d/%llu/%c]\n", inode->i_ino,
				ee_block, ee_len, ee_start,
				ee_status ? 'u' : 'w', es->es_lblk, es->es_len,
				ext4_es_pblock(es), es_status ? 'u' : 'w');
		}
	} else {
		/*
		 * We can't find an extent on disk.  So we need to make sure
		 * that we don't want to add a written/unwritten extent.
		 */
		if (!ext4_es_is_delayed(es) && !ext4_es_is_hole(es)) {
			pr_warn("ES insert assertion failed for inode: %lu "
				"can't find an extent at block %d but we want "
				"to add a written/unwritten extent "
				"[%d/%d/%llu/%llx]\n", inode->i_ino,
				es->es_lblk, es->es_lblk, es->es_len,
				ext4_es_pblock(es), ext4_es_status(es));
		}
	}
out:
	if (path) {
		ext4_ext_drop_refs(path);
		kfree(path);
	}
}

static void ext4_es_insert_extent_ind_check(struct inode *inode,
					    struct extent_status *es)
{
	struct ext4_map_blocks map;
	int retval;

	/*
	 * Here we call ext4_ind_map_blocks to look up a block mapping because
	 * the 'Indirect' structure is defined in indirect.c.  So we couldn't
	 * access the direct/indirect tree from outside.  It is too dirty to
	 * define this function in the indirect.c file.
	 */

	map.m_lblk = es->es_lblk;
	map.m_len = es->es_len;

	retval = ext4_ind_map_blocks(NULL, inode, &map, 0);
	if (retval > 0) {
		if (ext4_es_is_delayed(es) || ext4_es_is_hole(es)) {
			/*
			 * We want to add a delayed/hole extent but this
			 * block has been allocated.
			 */
			pr_warn("ES insert assertion failed for inode: %lu "
				"We can find blocks but we want to add a "
				"delayed/hole extent [%d/%d/%llu/%llx]\n",
				inode->i_ino, es->es_lblk, es->es_len,
				ext4_es_pblock(es), ext4_es_status(es));
		} else if (ext4_es_is_written(es)) {
			if (retval != es->es_len) {
				pr_warn("ES insert assertion failed for "
					"inode: %lu retval %d != es_len %d\n",
					inode->i_ino, retval, es->es_len);
				return;
			}
			if (map.m_pblk != ext4_es_pblock(es)) {
				pr_warn("ES insert assertion failed for "
					"inode: %lu m_pblk %llu != "
					"es_pblk %llu\n",
					inode->i_ino, map.m_pblk,
					ext4_es_pblock(es));
			}
		} else {
			/*
			 * We don't need to check unwritten extents because
			 * indirect-based files don't have them.
			 */
		}
	} else if (retval == 0) {
		if (ext4_es_is_written(es)) {
			pr_warn("ES insert assertion failed for inode: %lu "
				"We can't find the block but we want to add "
				"a written extent [%d/%d/%llu/%llx]\n",
				inode->i_ino, es->es_lblk, es->es_len,
				ext4_es_pblock(es), ext4_es_status(es));
		}
	}
}

static inline void ext4_es_insert_extent_check(struct inode *inode,
					       struct extent_status *es)
{
	/*
	 * We don't need to worry about the race condition because the
	 * caller takes i_data_sem locking.
	 */
	BUG_ON(!rwsem_is_locked(&EXT4_I(inode)->i_data_sem));
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
		ext4_es_insert_extent_ext_check(inode, es);
	else
		ext4_es_insert_extent_ind_check(inode, es);
}
#else
static inline void ext4_es_insert_extent_check(struct inode *inode,
					       struct extent_status *es)
{
}
#endif

static int __es_insert_extent(struct inode *inode, struct extent_status *newes)
{
	struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree;
	struct rb_node **p = &tree->root.rb_node;
	struct rb_node *parent = NULL;
	struct extent_status *es;

	while (*p) {
		parent = *p;
		es = rb_entry(parent, struct extent_status, rb_node);

		if (newes->es_lblk < es->es_lblk) {
			if (ext4_es_can_be_merged(newes, es)) {
				/*
				 * Here we can modify es_lblk directly
				 * because it isn't overlapped.
				 */
				es->es_lblk = newes->es_lblk;
				es->es_len += newes->es_len;
				if (ext4_es_is_written(es) ||
				    ext4_es_is_unwritten(es))
					ext4_es_store_pblock(es,
							     newes->es_pblk);
				es = ext4_es_try_to_merge_left(inode, es);
				goto out;
			}
			p = &(*p)->rb_left;
		} else if (newes->es_lblk > ext4_es_end(es)) {
			if (ext4_es_can_be_merged(es, newes)) {
				es->es_len += newes->es_len;
				es = ext4_es_try_to_merge_right(inode, es);
				goto out;
			}
			p = &(*p)->rb_right;
		} else {
			BUG_ON(1);
			return -EINVAL;
		}
	}

	es = ext4_es_alloc_extent(inode, newes->es_lblk, newes->es_len,
				  newes->es_pblk);
	if (!es)
		return -ENOMEM;
	rb_link_node(&es->rb_node, parent, p);
	rb_insert_color(&es->rb_node, &tree->root);

out:
	tree->cache_es = es;
	return 0;
}

/*
 * ext4_es_insert_extent() adds a space to an extent status tree.
 *
 * ext4_es_insert_extent() is called by ext4_da_write_begin() and
 * ext4_es_remove_extent().
 *
 * Return 0 on success, error code on failure.
 */
int ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk,
			  ext4_lblk_t len, ext4_fsblk_t pblk,
			  unsigned long long status)
{
	struct extent_status newes;
	ext4_lblk_t end = lblk + len - 1;
	int err = 0;

	es_debug("add [%u/%u) %llu %llx to extent status tree of inode %lu\n",
		 lblk, len, pblk, status, inode->i_ino);

	newes.es_lblk = lblk;
	newes.es_len = len;
	ext4_es_store_pblock(&newes, pblk);
	ext4_es_store_status(&newes, status);
	trace_ext4_es_insert_extent(inode, &newes);

	ext4_es_insert_extent_check(inode, &newes);

	write_lock(&EXT4_I(inode)->i_es_lock);
	err = __es_remove_extent(inode, lblk, end);
	if (err == 0)
		err = __es_insert_extent(inode, &newes);
	write_unlock(&EXT4_I(inode)->i_es_lock);

	ext4_es_lru_add(inode);
	ext4_es_print_tree(inode);

	return err;
}

/*
 * ext4_es_lookup_extent() looks up an extent in the extent status tree.
 *
 * ext4_es_lookup_extent() is called by ext4_map_blocks/ext4_da_map_blocks.
 *
 * Return: 1 if found, 0 if not.
 */
int ext4_es_lookup_extent(struct inode *inode, ext4_lblk_t lblk,
			  struct extent_status *es)
{
	struct ext4_es_tree *tree;
	struct extent_status *es1 = NULL;
	struct rb_node *node;
	int found = 0;

	trace_ext4_es_lookup_extent_enter(inode, lblk);
	es_debug("lookup extent in block %u\n", lblk);

	tree = &EXT4_I(inode)->i_es_tree;
	read_lock(&EXT4_I(inode)->i_es_lock);

	/* find extent in cache first */
	es->es_lblk = es->es_len = es->es_pblk = 0;
	if (tree->cache_es) {
		es1 = tree->cache_es;
		if (in_range(lblk, es1->es_lblk, es1->es_len)) {
			es_debug("%u cached by [%u/%u)\n",
				 lblk, es1->es_lblk, es1->es_len);
			found = 1;
			goto out;
		}
	}

	node = tree->root.rb_node;
	while (node) {
		es1 = rb_entry(node, struct extent_status, rb_node);
		if (lblk < es1->es_lblk)
			node = node->rb_left;
		else if (lblk > ext4_es_end(es1))
			node = node->rb_right;
		else {
			found = 1;
			break;
		}
	}

out:
	if (found) {
		es->es_lblk = es1->es_lblk;
		es->es_len = es1->es_len;
		es->es_pblk = es1->es_pblk;
	}

	read_unlock(&EXT4_I(inode)->i_es_lock);

	ext4_es_lru_add(inode);
	trace_ext4_es_lookup_extent_exit(inode, es, found);
	return found;
}
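
/*
 * Illustrative sketch (not part of the original file): how a lookup
 * caller such as ext4_map_blocks()/ext4_da_map_blocks() mentioned in the
 * comment above might use the result; the surrounding details are
 * simplified and only indicative.
 *
 *	struct extent_status es;
 *
 *	if (ext4_es_lookup_extent(inode, map->m_lblk, &es)) {
 *		(cache hit: use es.es_pblk/es.es_len according to the
 *		 extent's status bits)
 *	} else {
 *		(cache miss: read the mapping from the on-disk extent or
 *		 indirect tree, then cache it with ext4_es_insert_extent())
 *	}
 */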

static int __es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
			      ext4_lblk_t end)
{
	struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree;
	struct rb_node *node;
	struct extent_status *es;
	struct extent_status orig_es;
	ext4_lblk_t len1, len2;
	ext4_fsblk_t block;
	int err = 0;

	es = __es_tree_search(&tree->root, lblk);
	if (!es)
		goto out;
	if (es->es_lblk > end)
		goto out;

	/* Simply invalidate cache_es. */
	tree->cache_es = NULL;

	orig_es.es_lblk = es->es_lblk;
	orig_es.es_len = es->es_len;
	orig_es.es_pblk = es->es_pblk;

	len1 = lblk > es->es_lblk ? lblk - es->es_lblk : 0;
	len2 = ext4_es_end(es) > end ? ext4_es_end(es) - end : 0;
	if (len1 > 0)
		es->es_len = len1;
	if (len2 > 0) {
		if (len1 > 0) {
			struct extent_status newes;

			newes.es_lblk = end + 1;
			newes.es_len = len2;
			if (ext4_es_is_written(&orig_es) ||
			    ext4_es_is_unwritten(&orig_es)) {
				block = ext4_es_pblock(&orig_es) +
					orig_es.es_len - len2;
				ext4_es_store_pblock(&newes, block);
			}
			ext4_es_store_status(&newes, ext4_es_status(&orig_es));
			err = __es_insert_extent(inode, &newes);
			if (err) {
				es->es_lblk = orig_es.es_lblk;
				es->es_len = orig_es.es_len;
			}
		} else {
			es->es_lblk = end + 1;
			es->es_len = len2;
			if (ext4_es_is_written(es) ||
			    ext4_es_is_unwritten(es)) {
				block = orig_es.es_pblk + orig_es.es_len - len2;
				ext4_es_store_pblock(es, block);
			}
		}
		goto out;
	}

	if (len1 > 0) {
		node = rb_next(&es->rb_node);
		if (node)
			es = rb_entry(node, struct extent_status, rb_node);
		else
			es = NULL;
	}

	while (es && ext4_es_end(es) <= end) {
		node = rb_next(&es->rb_node);
		rb_erase(&es->rb_node, &tree->root);
		ext4_es_free_extent(inode, es);
		if (!node) {
			es = NULL;
			break;
		}
		es = rb_entry(node, struct extent_status, rb_node);
	}

	if (es && es->es_lblk < end + 1) {
		ext4_lblk_t orig_len = es->es_len;

		len1 = ext4_es_end(es) - end;
		es->es_lblk = end + 1;
		es->es_len = len1;
		if (ext4_es_is_written(es) || ext4_es_is_unwritten(es)) {
			block = es->es_pblk + orig_len - len1;
			ext4_es_store_pblock(es, block);
		}
	}

out:
	return err;
}

/*
 * ext4_es_remove_extent() removes a space from the extent status tree.
 *
 * Return 0 on success, error code on failure.
 */
int ext4_es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
			  ext4_lblk_t len)
{
	ext4_lblk_t end;
	int err = 0;

	trace_ext4_es_remove_extent(inode, lblk, len);
	es_debug("remove [%u/%u) from extent status tree of inode %lu\n",
		 lblk, len, inode->i_ino);

	if (!len)
		return err;

	end = lblk + len - 1;

	write_lock(&EXT4_I(inode)->i_es_lock);
	err = __es_remove_extent(inode, lblk, end);
	write_unlock(&EXT4_I(inode)->i_es_lock);
	ext4_es_print_tree(inode);
	return err;
}

int ext4_es_zeroout(struct inode *inode, struct ext4_extent *ex)
{
	ext4_lblk_t  ee_block;
	ext4_fsblk_t ee_pblock;
	unsigned int ee_len;

	ee_block  = le32_to_cpu(ex->ee_block);
	ee_len    = ext4_ext_get_actual_len(ex);
	ee_pblock = ext4_ext_pblock(ex);

	if (ee_len == 0)
		return 0;

	return ext4_es_insert_extent(inode, ee_block, ee_len, ee_pblock,
				     EXTENT_STATUS_WRITTEN);
}

static int ext4_es_shrink(struct shrinker *shrink, struct shrink_control *sc)
{
	struct ext4_sb_info *sbi = container_of(shrink,
					struct ext4_sb_info, s_es_shrinker);
	struct ext4_inode_info *ei;
	struct list_head *cur, *tmp, scanned;
	int nr_to_scan = sc->nr_to_scan;
	int ret, nr_shrunk = 0;

	ret = percpu_counter_read_positive(&sbi->s_extent_cache_cnt);
	trace_ext4_es_shrink_enter(sbi->s_sb, nr_to_scan, ret);

	if (!nr_to_scan)
		return ret;

	INIT_LIST_HEAD(&scanned);

	spin_lock(&sbi->s_es_lru_lock);
	list_for_each_safe(cur, tmp, &sbi->s_es_lru) {
		list_move_tail(cur, &scanned);

		ei = list_entry(cur, struct ext4_inode_info, i_es_lru);

		read_lock(&ei->i_es_lock);
		if (ei->i_es_lru_nr == 0) {
			read_unlock(&ei->i_es_lock);
			continue;
		}
		read_unlock(&ei->i_es_lock);

		write_lock(&ei->i_es_lock);
		ret = __es_try_to_reclaim_extents(ei, nr_to_scan);
		write_unlock(&ei->i_es_lock);

		nr_shrunk += ret;
		nr_to_scan -= ret;
		if (nr_to_scan == 0)
			break;
	}
	list_splice_tail(&scanned, &sbi->s_es_lru);
	spin_unlock(&sbi->s_es_lru_lock);

	ret = percpu_counter_read_positive(&sbi->s_extent_cache_cnt);
	trace_ext4_es_shrink_exit(sbi->s_sb, nr_shrunk, ret);
	return ret;
}

void ext4_es_register_shrinker(struct super_block *sb)
{
	struct ext4_sb_info *sbi;

	sbi = EXT4_SB(sb);
	INIT_LIST_HEAD(&sbi->s_es_lru);
	spin_lock_init(&sbi->s_es_lru_lock);
	sbi->s_es_shrinker.shrink = ext4_es_shrink;
	sbi->s_es_shrinker.seeks = DEFAULT_SEEKS;
	register_shrinker(&sbi->s_es_shrinker);
}

void ext4_es_unregister_shrinker(struct super_block *sb)
{
	unregister_shrinker(&EXT4_SB(sb)->s_es_shrinker);
}

void ext4_es_lru_add(struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

	spin_lock(&sbi->s_es_lru_lock);
	if (list_empty(&ei->i_es_lru))
		list_add_tail(&ei->i_es_lru, &sbi->s_es_lru);
	else
		list_move_tail(&ei->i_es_lru, &sbi->s_es_lru);
	spin_unlock(&sbi->s_es_lru_lock);
}

void ext4_es_lru_del(struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

	spin_lock(&sbi->s_es_lru_lock);
	if (!list_empty(&ei->i_es_lru))
		list_del_init(&ei->i_es_lru);
	spin_unlock(&sbi->s_es_lru_lock);
}

static int __es_try_to_reclaim_extents(struct ext4_inode_info *ei,
				       int nr_to_scan)
{
	struct inode *inode = &ei->vfs_inode;
	struct ext4_es_tree *tree = &ei->i_es_tree;
	struct rb_node *node;
	struct extent_status *es;
	int nr_shrunk = 0;

	if (ei->i_es_lru_nr == 0)
		return 0;

	node = rb_first(&tree->root);
	while (node != NULL) {
		es = rb_entry(node, struct extent_status, rb_node);
		node = rb_next(&es->rb_node);
		/*
		 * We can't reclaim delayed extents from the status tree
		 * because fiemap, bigalloc, and seek_data/hole need to
		 * use them.
		 */
		if (!ext4_es_is_delayed(es)) {
			rb_erase(&es->rb_node, &tree->root);
			ext4_es_free_extent(inode, es);
			nr_shrunk++;
			if (--nr_to_scan == 0)
				break;
		}
	}
	tree->cache_es = NULL;
	return nr_shrunk;
}