/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/version.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>
#include <linux/swap.h>
#include <linux/radix-tree.h>
#include <linux/writeback.h>
#include <linux/buffer_head.h> /* for block_sync_page */
#include <linux/workqueue.h>
#include <linux/kthread.h>
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
# include <linux/freezer.h>
#else
# include <linux/sched.h>
#endif
#include "transaction.h"
#include "btrfs_inode.h"
#include "print-tree.h"
#include "async-thread.h"
#include "ref-cache.h"
static int check_tree_block(struct btrfs_root *root, struct extent_buffer *buf)
{
        if (extent_buffer_blocknr(buf) != btrfs_header_blocknr(buf)) {
                printk(KERN_CRIT "buf blocknr %llu does not match header blocknr %llu\n",
                       (unsigned long long)extent_buffer_blocknr(buf),
                       (unsigned long long)btrfs_header_blocknr(buf));
                BUG();
        }
        return 0;
}
static struct extent_io_ops btree_extent_io_ops;
static void end_workqueue_fn(struct btrfs_work *work);
struct end_io_wq {
        struct bio *bio;
        bio_end_io_t *end_io;
        void *private;
        struct btrfs_fs_info *info;
        int error;
        int metadata;
        struct list_head list;
        struct btrfs_work work;
};

struct async_submit_bio {
        struct inode *inode;
        struct bio *bio;
        struct list_head list;
        extent_submit_bio_hook_t *submit_bio_hook;
        int rw;
        int mirror_num;
        struct btrfs_work work;
};
struct extent_map *btree_get_extent(struct inode *inode, struct page *page,
                                    size_t page_offset, u64 start, u64 len,
                                    int create)
{
        struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
        struct extent_map *em;
        int ret;

        spin_lock(&em_tree->lock);
        em = lookup_extent_mapping(em_tree, start, len);
        if (em) {
                em->bdev =
                        BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
                spin_unlock(&em_tree->lock);
                goto out;
        }
        spin_unlock(&em_tree->lock);

        em = alloc_extent_map(GFP_NOFS);
        if (!em) {
                em = ERR_PTR(-ENOMEM);
                goto out;
        }
        em->bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;

        spin_lock(&em_tree->lock);
        ret = add_extent_mapping(em_tree, em);
        if (ret == -EEXIST) {
                u64 failed_start = em->start;
                u64 failed_len = em->len;

                printk("failed to insert %Lu %Lu -> %Lu into tree\n",
                       em->start, em->len, em->block_start);
                free_extent_map(em);
                em = lookup_extent_mapping(em_tree, start, len);
                if (em) {
                        printk("after failing, found %Lu %Lu %Lu\n",
                               em->start, em->len, em->block_start);
                        ret = 0;
                } else {
                        em = lookup_extent_mapping(em_tree, failed_start,
                                                   failed_len);
                        if (em) {
                                printk("double failure lookup gives us "
                                       "%Lu %Lu -> %Lu\n", em->start,
                                       em->len, em->block_start);
                                free_extent_map(em);
                        }
                        ret = -EIO;
                }
        }
        spin_unlock(&em_tree->lock);
out:
        return em;
}
u32 btrfs_csum_data(struct btrfs_root *root, char *data, u32 seed, size_t len)
{
        return btrfs_crc32c(seed, data, len);
}

void btrfs_csum_final(u32 crc, char *result)
{
        *(__le32 *)result = ~cpu_to_le32(crc);
}
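
/*
 * compute the csum for a btree block, and either verify it or write it
 * into the csum field of the block.  The csum always lives in the first
 * BTRFS_CRC32_SIZE bytes of the block header; the data being summed
 * starts at offset BTRFS_CSUM_SIZE.
 */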
static int csum_tree_block(struct btrfs_root *root, struct extent_buffer *buf,
                           int verify)
{
        char result[BTRFS_CRC32_SIZE];
        unsigned long len;
        unsigned long cur_len;
        unsigned long offset = BTRFS_CSUM_SIZE;
        char *map_token = NULL;
        char *kaddr;
        unsigned long map_start;
        unsigned long map_len;
        int err;
        u32 crc = ~(u32)0;

        len = buf->len - offset;
        while (len > 0) {
                err = map_private_extent_buffer(buf, offset, 32,
                                                &map_token, &kaddr,
                                                &map_start, &map_len, KM_USER0);
                if (err) {
                        printk("failed to map extent buffer! %lu\n",
                               offset);
                        return 1;
                }
                cur_len = min(len, map_len - (offset - map_start));
                crc = btrfs_csum_data(root, kaddr + offset - map_start,
                                      crc, cur_len);
                len -= cur_len;
                offset += cur_len;
                unmap_extent_buffer(buf, map_token, KM_USER0);
        }
        btrfs_csum_final(crc, result);

        if (verify) {
                /* FIXME, this is not good */
                if (memcmp_extent_buffer(buf, result, 0, BTRFS_CRC32_SIZE)) {
                        u32 val;
                        u32 found = 0;
                        memcpy(&found, result, BTRFS_CRC32_SIZE);

                        read_extent_buffer(buf, &val, 0, BTRFS_CRC32_SIZE);
                        printk("btrfs: %s checksum verify failed on %llu "
                               "wanted %X found %X level %d\n",
                               root->fs_info->sb->s_id,
                               buf->start, val, found, btrfs_header_level(buf));
                        return 1;
                }
        } else {
                write_extent_buffer(buf, result, 0, BTRFS_CRC32_SIZE);
        }
        return 0;
}
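
/*
 * we can't consider a given block up to date unless the transid of the
 * block matches the transid in the parent node's pointer.  This is how
 * stale or misplaced writes of a tree block are detected.
 */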
static int verify_parent_transid(struct extent_io_tree *io_tree,
                                 struct extent_buffer *eb, u64 parent_transid)
{
        int ret;

        if (!parent_transid || btrfs_header_generation(eb) == parent_transid)
                return 0;

        lock_extent(io_tree, eb->start, eb->start + eb->len - 1, GFP_NOFS);
        if (extent_buffer_uptodate(io_tree, eb) &&
            btrfs_header_generation(eb) == parent_transid) {
                ret = 0;
                goto out;
        }
        printk("parent transid verify failed on %llu wanted %llu found %llu\n",
               (unsigned long long)eb->start,
               (unsigned long long)parent_transid,
               (unsigned long long)btrfs_header_generation(eb));
        ret = 1;
        clear_extent_buffer_uptodate(io_tree, eb);
out:
        unlock_extent(io_tree, eb->start, eb->start + eb->len - 1,
                      GFP_NOFS);
        return ret;
}
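
/*
 * helper to read a given tree block, retrying with the other mirrors when
 * the read or the parent transid check fails, until the copies run out.
 */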
static int btree_read_extent_buffer_pages(struct btrfs_root *root,
                                          struct extent_buffer *eb,
                                          u64 start, u64 parent_transid)
{
        struct extent_io_tree *io_tree;
        int ret;
        int num_copies = 0;
        int mirror_num = 0;

        io_tree = &BTRFS_I(root->fs_info->btree_inode)->io_tree;
        while (1) {
                ret = read_extent_buffer_pages(io_tree, eb, start, 1,
                                               btree_get_extent, mirror_num);
                if (!ret &&
                    !verify_parent_transid(io_tree, eb, parent_transid))
                        return ret;
                printk("read extent buffer pages failed with ret %d mirror no %d\n",
                       ret, mirror_num);
                num_copies = btrfs_num_copies(&root->fs_info->mapping_tree,
                                              eb->start, eb->len);
                mirror_num++;
                if (mirror_num > num_copies)
                        return ret;
        }
        return -EIO;
}
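
/*
 * checksum a dirty tree block before IO.  The extra checks make sure the
 * checksum field is only filled in from the first page of a multi-page
 * block, and that the page really belongs to this extent buffer.
 */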
int csum_dirty_buffer(struct btrfs_root *root, struct page *page)
{
        struct extent_io_tree *tree;
        u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
        u64 found_start;
        unsigned long len;
        struct extent_buffer *eb;
        int ret;
        int found_level;

        tree = &BTRFS_I(page->mapping->host)->io_tree;

        if (page->private == EXTENT_PAGE_PRIVATE)
                goto out;
        if (!page->private)
                goto out;
        len = page->private >> 2;
        WARN_ON(len == 0);

        eb = alloc_extent_buffer(tree, start, len, page, GFP_NOFS);
        ret = btree_read_extent_buffer_pages(root, eb, start + PAGE_CACHE_SIZE,
                                             btrfs_header_generation(eb));
        BUG_ON(ret);
        found_start = btrfs_header_bytenr(eb);
        if (found_start != start) {
                printk("warning: eb start incorrect %Lu buffer %Lu len %lu\n",
                       start, found_start, len);
                WARN_ON(1);
                goto err;
        }
        if (eb->first_page != page) {
                printk("bad first page %lu %lu\n", eb->first_page->index,
                       page->index);
                WARN_ON(1);
                goto err;
        }
        if (!PageUptodate(page)) {
                printk("csum not up to date page %lu\n", page->index);
                WARN_ON(1);
                goto err;
        }
        found_level = btrfs_header_level(eb);

        csum_tree_block(root, eb, 0);
err:
        free_extent_buffer(eb);
out:
        return 0;
}
int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end,
                               struct extent_state *state)
{
        struct extent_io_tree *tree;
        u64 found_start;
        int found_level;
        unsigned long len;
        struct extent_buffer *eb;
        struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
        int ret = 0;

        tree = &BTRFS_I(page->mapping->host)->io_tree;
        if (page->private == EXTENT_PAGE_PRIVATE)
                goto out;
        if (!page->private)
                goto out;
        len = page->private >> 2;
        WARN_ON(len == 0);

        eb = alloc_extent_buffer(tree, start, len, page, GFP_NOFS);

        found_start = btrfs_header_bytenr(eb);
        if (found_start != start) {
                printk("bad tree block start %llu %llu\n",
                       (unsigned long long)found_start,
                       (unsigned long long)eb->start);
                ret = -EIO;
                goto err;
        }
        if (eb->first_page != page) {
                printk("bad first page %lu %lu\n", eb->first_page->index,
                       page->index);
                WARN_ON(1);
                ret = -EIO;
                goto err;
        }
        if (memcmp_extent_buffer(eb, root->fs_info->fsid,
                                 (unsigned long)btrfs_header_fsid(eb),
                                 BTRFS_FSID_SIZE)) {
                printk("bad fsid on block %Lu\n", eb->start);
                ret = -EIO;
                goto err;
        }
        found_level = btrfs_header_level(eb);

        ret = csum_tree_block(root, eb, 1);
        if (ret)
                ret = -EIO;

        end = min_t(u64, eb->len, PAGE_CACHE_SIZE);
        end = eb->start + end - 1;
err:
        free_extent_buffer(eb);
out:
        return ret;
}
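
/*
 * bio completion handler for btree metadata IO.  It runs at interrupt
 * time, so the real work is handed off to the end_io worker threads;
 * writes go to a separate pool so they cannot starve read completions.
 */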
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
static void end_workqueue_bio(struct bio *bio, int err)
#else
static int end_workqueue_bio(struct bio *bio,
                             unsigned int bytes_done, int err)
#endif
{
        struct end_io_wq *end_io_wq = bio->bi_private;
        struct btrfs_fs_info *fs_info;

#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
        if (bio->bi_size)
                return 1;
#endif

        fs_info = end_io_wq->info;
        end_io_wq->error = err;
        end_io_wq->work.func = end_workqueue_fn;
        end_io_wq->work.flags = 0;
        if (bio->bi_rw & (1 << BIO_RW))
                btrfs_queue_worker(&fs_info->endio_write_workers,
                                   &end_io_wq->work);
        else
                btrfs_queue_worker(&fs_info->endio_workers, &end_io_wq->work);

#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
        return 0;
#endif
}
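
/*
 * wrap a bio so that its completion is deferred to the end_io worker
 * threads.  The original bi_end_io and bi_private are stashed in the
 * end_io_wq struct and restored before the real completion runs.
 */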
int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
                        int metadata)
{
        struct end_io_wq *end_io_wq;
        end_io_wq = kmalloc(sizeof(*end_io_wq), GFP_NOFS);
        if (!end_io_wq)
                return -ENOMEM;

        end_io_wq->private = bio->bi_private;
        end_io_wq->end_io = bio->bi_end_io;
        end_io_wq->info = info;
        end_io_wq->error = 0;
        end_io_wq->bio = bio;
        end_io_wq->metadata = metadata;

        bio->bi_private = end_io_wq;
        bio->bi_end_io = end_workqueue_bio;
        return 0;
}
unsigned long btrfs_async_submit_limit(struct btrfs_fs_info *info)
{
        unsigned long limit = min_t(unsigned long,
                                    info->workers.max_workers,
                                    info->fs_devices->open_devices);
        return 256 * limit;
}

int btrfs_congested_async(struct btrfs_fs_info *info, int iodone)
{
        return atomic_read(&info->nr_async_bios) >
                btrfs_async_submit_limit(info);
}
static void run_one_async_submit(struct btrfs_work *work)
{
        struct btrfs_fs_info *fs_info;
        struct async_submit_bio *async;
        int limit;

        async = container_of(work, struct async_submit_bio, work);
        fs_info = BTRFS_I(async->inode)->root->fs_info;

        limit = btrfs_async_submit_limit(fs_info);
        limit = limit * 2 / 3;

        atomic_dec(&fs_info->nr_async_submits);

        if (atomic_read(&fs_info->nr_async_submits) < limit &&
            waitqueue_active(&fs_info->async_submit_wait))
                wake_up(&fs_info->async_submit_wait);

        async->submit_bio_hook(async->inode, async->rw, async->bio,
                               async->mirror_num);
        kfree(async);
}
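
/*
 * queue a bio for submission by the worker threads.  If too many async
 * submits are already pending, wait briefly so the caller is throttled
 * instead of letting the queue grow without bound.
 */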
int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
                        int rw, struct bio *bio, int mirror_num,
                        extent_submit_bio_hook_t *submit_bio_hook)
{
        struct async_submit_bio *async;
        int limit = btrfs_async_submit_limit(fs_info);

        async = kmalloc(sizeof(*async), GFP_NOFS);
        if (!async)
                return -ENOMEM;

        async->inode = inode;
        async->rw = rw;
        async->bio = bio;
        async->mirror_num = mirror_num;
        async->submit_bio_hook = submit_bio_hook;
        async->work.func = run_one_async_submit;
        async->work.flags = 0;
        atomic_inc(&fs_info->nr_async_submits);
        btrfs_queue_worker(&fs_info->workers, &async->work);

        if (atomic_read(&fs_info->nr_async_submits) > limit) {
                wait_event_timeout(fs_info->async_submit_wait,
                        (atomic_read(&fs_info->nr_async_submits) < limit),
                        HZ/10);

                wait_event_timeout(fs_info->async_submit_wait,
                        (atomic_read(&fs_info->nr_async_bios) < limit),
                        HZ/10);
        }
        return 0;
}
static int btree_csum_one_bio(struct bio *bio)
{
        struct bio_vec *bvec = bio->bi_io_vec;
        int bio_index = 0;
        struct btrfs_root *root;

        WARN_ON(bio->bi_vcnt <= 0);
        while (bio_index < bio->bi_vcnt) {
                root = BTRFS_I(bvec->bv_page->mapping->host)->root;
                csum_dirty_buffer(root, bvec->bv_page);
                bio_index++;
                bvec++;
        }
        return 0;
}
static int __btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
                                   int mirror_num)
{
        struct btrfs_root *root = BTRFS_I(inode)->root;
        u64 offset;
        int ret;

        offset = bio->bi_sector << 9;

        /*
         * when we're called for a write, we're already in the async
         * submission context.  Just jump into btrfs_map_bio
         */
        if (rw & (1 << BIO_RW)) {
                btree_csum_one_bio(bio);
                return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio,
                                     mirror_num, 1);
        }

        /*
         * called for a read, do the setup so that checksum validation
         * can happen in the async kernel threads
         */
        ret = btrfs_bio_wq_end_io(root->fs_info, bio, 1);
        BUG_ON(ret);

        return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio, mirror_num, 1);
}

static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
                                 int mirror_num)
{
        /*
         * kthread helpers are used to submit writes so that checksumming
         * can happen in parallel across all CPUs
         */
        if (!(rw & (1 << BIO_RW))) {
                return __btree_submit_bio_hook(inode, rw, bio, mirror_num);
        }
        return btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
                                   inode, rw, bio, mirror_num,
                                   __btree_submit_bio_hook);
}
static int btree_writepage(struct page *page, struct writeback_control *wbc)
{
        struct extent_io_tree *tree;
        tree = &BTRFS_I(page->mapping->host)->io_tree;

        if (current->flags & PF_MEMALLOC) {
                redirty_page_for_writepage(wbc, page);
                unlock_page(page);
                return 0;
        }
        return extent_write_full_page(tree, page, btree_get_extent, wbc);
}
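
/*
 * for WB_SYNC_NONE writeback, skip the flush unless a reasonable amount
 * of btree data (8MB here) is dirty, so background writeback does not
 * constantly rewrite small tree updates.
 */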
static int btree_writepages(struct address_space *mapping,
                            struct writeback_control *wbc)
{
        struct extent_io_tree *tree;
        tree = &BTRFS_I(mapping->host)->io_tree;
        if (wbc->sync_mode == WB_SYNC_NONE) {
                u64 num_dirty;
                u64 start = 0;
                unsigned long thresh = 8 * 1024 * 1024;

                if (wbc->for_kupdate)
                        return 0;

                num_dirty = count_range_bits(tree, &start, (u64)-1,
                                             thresh, EXTENT_DIRTY);
                if (num_dirty < thresh) {
                        return 0;
                }
        }
        return extent_writepages(tree, mapping, btree_get_extent, wbc);
}
int btree_readpage(struct file *file, struct page *page)
{
        struct extent_io_tree *tree;
        tree = &BTRFS_I(page->mapping->host)->io_tree;
        return extent_read_full_page(tree, page, btree_get_extent);
}
static int btree_releasepage(struct page *page, gfp_t gfp_flags)
{
        struct extent_io_tree *tree;
        struct extent_map_tree *map;
        int ret;

        if (PageWriteback(page) || PageDirty(page))
                return 0;

        tree = &BTRFS_I(page->mapping->host)->io_tree;
        map = &BTRFS_I(page->mapping->host)->extent_tree;

        ret = try_release_extent_state(map, tree, page, gfp_flags);
        if (!ret)
                return 0;

        ret = try_release_extent_buffer(tree, page);
        if (ret == 1) {
                ClearPagePrivate(page);
                set_page_private(page, 0);
                page_cache_release(page);
        }
        return ret;
}
static void btree_invalidatepage(struct page *page, unsigned long offset)
{
        struct extent_io_tree *tree;
        tree = &BTRFS_I(page->mapping->host)->io_tree;
        extent_invalidatepage(tree, page, offset);
        btree_releasepage(page, GFP_NOFS);
        if (PagePrivate(page)) {
                printk("warning page private not zero on page %Lu\n",
                       page_offset(page));
                ClearPagePrivate(page);
                set_page_private(page, 0);
                page_cache_release(page);
        }
}
#if 0
static int btree_writepage(struct page *page, struct writeback_control *wbc)
{
        struct buffer_head *bh;
        struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
        struct buffer_head *head;
        if (!page_has_buffers(page)) {
                create_empty_buffers(page, root->fs_info->sb->s_blocksize,
                                     (1 << BH_Dirty)|(1 << BH_Uptodate));
        }
        head = page_buffers(page);
        bh = head;
        do {
                if (buffer_dirty(bh))
                        csum_tree_block(root, bh, 0);
                bh = bh->b_this_page;
        } while (bh != head);
        return block_write_full_page(page, btree_get_block, wbc);
}
#endif
static struct address_space_operations btree_aops = {
        .readpage       = btree_readpage,
        .writepage      = btree_writepage,
        .writepages     = btree_writepages,
        .releasepage    = btree_releasepage,
        .invalidatepage = btree_invalidatepage,
        .sync_page      = block_sync_page,
};
int readahead_tree_block(struct btrfs_root *root, u64 bytenr, u32 blocksize,
                         u64 parent_transid)
{
        struct extent_buffer *buf = NULL;
        struct inode *btree_inode = root->fs_info->btree_inode;
        int ret = 0;

        buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
        if (!buf)
                return 0;
        read_extent_buffer_pages(&BTRFS_I(btree_inode)->io_tree,
                                 buf, 0, 0, btree_get_extent, 0);
        free_extent_buffer(buf);
        return ret;
}
struct extent_buffer *btrfs_find_tree_block(struct btrfs_root *root,
                                            u64 bytenr, u32 blocksize)
{
        struct inode *btree_inode = root->fs_info->btree_inode;
        struct extent_buffer *eb;
        eb = find_extent_buffer(&BTRFS_I(btree_inode)->io_tree,
                                bytenr, blocksize, GFP_NOFS);
        return eb;
}

struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root,
                                                   u64 bytenr, u32 blocksize)
{
        struct inode *btree_inode = root->fs_info->btree_inode;
        struct extent_buffer *eb;

        eb = alloc_extent_buffer(&BTRFS_I(btree_inode)->io_tree,
                                 bytenr, blocksize, NULL, GFP_NOFS);
        return eb;
}
int btrfs_write_tree_block(struct extent_buffer *buf)
{
        return btrfs_fdatawrite_range(buf->first_page->mapping, buf->start,
                                      buf->start + buf->len - 1, WB_SYNC_NONE);
}

int btrfs_wait_tree_block_writeback(struct extent_buffer *buf)
{
        return btrfs_wait_on_page_writeback_range(buf->first_page->mapping,
                                                  buf->start,
                                                  buf->start + buf->len - 1);
}
struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr,
                                      u32 blocksize, u64 parent_transid)
{
        struct extent_buffer *buf = NULL;
        struct inode *btree_inode = root->fs_info->btree_inode;
        struct extent_io_tree *io_tree;
        int ret;

        io_tree = &BTRFS_I(btree_inode)->io_tree;

        buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
        if (!buf)
                return NULL;

        ret = btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
        if (ret == 0) {
                buf->flags |= EXTENT_UPTODATE;
        }
        return buf;
}
int clean_tree_block(struct btrfs_trans_handle *trans, struct btrfs_root *root,
                     struct extent_buffer *buf)
{
        struct inode *btree_inode = root->fs_info->btree_inode;
        if (btrfs_header_generation(buf) ==
            root->fs_info->running_transaction->transid) {
                WARN_ON(!btrfs_tree_locked(buf));
                clear_extent_buffer_dirty(&BTRFS_I(btree_inode)->io_tree,
                                          buf);
        }
        return 0;
}
static int __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize,
                        u32 stripesize, struct btrfs_root *root,
                        struct btrfs_fs_info *fs_info,
                        u64 objectid)
{
        root->commit_root = NULL;
        root->ref_tree = NULL;
        root->sectorsize = sectorsize;
        root->nodesize = nodesize;
        root->leafsize = leafsize;
        root->stripesize = stripesize;
        root->track_dirty = 0;

        root->fs_info = fs_info;
        root->objectid = objectid;
        root->last_trans = 0;
        root->highest_inode = 0;
        root->last_inode_alloc = 0;

        INIT_LIST_HEAD(&root->dirty_list);
        INIT_LIST_HEAD(&root->orphan_list);
        INIT_LIST_HEAD(&root->dead_list);
        spin_lock_init(&root->node_lock);
        spin_lock_init(&root->list_lock);
        mutex_init(&root->objectid_mutex);
        mutex_init(&root->log_mutex);
        extent_io_tree_init(&root->dirty_log_pages,
                            fs_info->btree_inode->i_mapping, GFP_NOFS);

        btrfs_leaf_ref_tree_init(&root->ref_tree_struct);
        root->ref_tree = &root->ref_tree_struct;

        memset(&root->root_key, 0, sizeof(root->root_key));
        memset(&root->root_item, 0, sizeof(root->root_item));
        memset(&root->defrag_progress, 0, sizeof(root->defrag_progress));
        memset(&root->root_kobj, 0, sizeof(root->root_kobj));
        root->defrag_trans_start = fs_info->generation;
        init_completion(&root->kobj_unregister);
        root->defrag_running = 0;
        root->defrag_level = 0;
        root->root_key.objectid = objectid;
        return 0;
}
static int find_and_setup_root(struct btrfs_root *tree_root,
                               struct btrfs_fs_info *fs_info,
                               u64 objectid,
                               struct btrfs_root *root)
{
        int ret;
        u32 blocksize;

        __setup_root(tree_root->nodesize, tree_root->leafsize,
                     tree_root->sectorsize, tree_root->stripesize,
                     root, fs_info, objectid);
        ret = btrfs_find_last_root(tree_root, objectid,
                                   &root->root_item, &root->root_key);
        BUG_ON(ret);

        blocksize = btrfs_level_size(root, btrfs_root_level(&root->root_item));
        root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item),
                                     blocksize, 0);
        BUG_ON(!root->node);
        return 0;
}
int btrfs_free_log_root_tree(struct btrfs_trans_handle *trans,
                             struct btrfs_fs_info *fs_info)
{
        struct extent_buffer *eb;
        struct btrfs_root *log_root_tree = fs_info->log_root_tree;
        u64 start = 0;
        u64 end = 0;
        int ret;

        if (!log_root_tree)
                return 0;

        while (1) {
                ret = find_first_extent_bit(&log_root_tree->dirty_log_pages,
                                            0, &start, &end, EXTENT_DIRTY);
                if (ret)
                        break;
                clear_extent_dirty(&log_root_tree->dirty_log_pages,
                                   start, end, GFP_NOFS);
        }
        eb = fs_info->log_root_tree->node;

        WARN_ON(btrfs_header_level(eb) != 0);
        WARN_ON(btrfs_header_nritems(eb) != 0);

        ret = btrfs_free_reserved_extent(fs_info->tree_root,
                                         eb->start, eb->len);
        BUG_ON(ret);

        free_extent_buffer(eb);
        kfree(fs_info->log_root_tree);
        fs_info->log_root_tree = NULL;
        return 0;
}
int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans,
                             struct btrfs_fs_info *fs_info)
{
        struct btrfs_root *root;
        struct btrfs_root *tree_root = fs_info->tree_root;

        root = kzalloc(sizeof(*root), GFP_NOFS);
        if (!root)
                return -ENOMEM;

        __setup_root(tree_root->nodesize, tree_root->leafsize,
                     tree_root->sectorsize, tree_root->stripesize,
                     root, fs_info, BTRFS_TREE_LOG_OBJECTID);

        root->root_key.objectid = BTRFS_TREE_LOG_OBJECTID;
        root->root_key.type = BTRFS_ROOT_ITEM_KEY;
        root->root_key.offset = BTRFS_TREE_LOG_OBJECTID;

        root->node = btrfs_alloc_free_block(trans, root, root->leafsize,
                                            0, BTRFS_TREE_LOG_OBJECTID,
                                            trans->transid, 0, 0, 0);

        btrfs_set_header_nritems(root->node, 0);
        btrfs_set_header_level(root->node, 0);
        btrfs_set_header_bytenr(root->node, root->node->start);
        btrfs_set_header_generation(root->node, trans->transid);
        btrfs_set_header_owner(root->node, BTRFS_TREE_LOG_OBJECTID);

        write_extent_buffer(root->node, root->fs_info->fsid,
                            (unsigned long)btrfs_header_fsid(root->node),
                            BTRFS_FSID_SIZE);
        btrfs_mark_buffer_dirty(root->node);
        btrfs_tree_unlock(root->node);
        fs_info->log_root_tree = root;
        return 0;
}
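
/*
 * read a subvolume root from the root tree.  A root_key offset of
 * (u64)-1 means the latest version of the root, found via
 * btrfs_find_last_root; otherwise the exact root item is looked up
 * with a regular tree search.
 */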
struct btrfs_root *btrfs_read_fs_root_no_radix(struct btrfs_root *tree_root,
                                               struct btrfs_key *location)
{
        struct btrfs_root *root;
        struct btrfs_fs_info *fs_info = tree_root->fs_info;
        struct btrfs_path *path;
        struct extent_buffer *l;
        u64 highest_inode;
        u32 blocksize;
        int ret = 0;

        root = kzalloc(sizeof(*root), GFP_NOFS);
        if (!root)
                return ERR_PTR(-ENOMEM);
        if (location->offset == (u64)-1) {
                ret = find_and_setup_root(tree_root, fs_info,
                                          location->objectid, root);
                if (ret) {
                        kfree(root);
                        return ERR_PTR(ret);
                }
                goto insert;
        }

        __setup_root(tree_root->nodesize, tree_root->leafsize,
                     tree_root->sectorsize, tree_root->stripesize,
                     root, fs_info, location->objectid);

        path = btrfs_alloc_path();
        BUG_ON(!path);
        ret = btrfs_search_slot(NULL, tree_root, location, path, 0, 0);
        if (ret != 0) {
                if (ret > 0)
                        ret = -ENOENT;
                goto out;
        }
        l = path->nodes[0];
        read_extent_buffer(l, &root->root_item,
                           btrfs_item_ptr_offset(l, path->slots[0]),
                           sizeof(root->root_item));
        memcpy(&root->root_key, location, sizeof(*location));
        ret = 0;
out:
        btrfs_release_path(root, path);
        btrfs_free_path(path);
        if (ret) {
                kfree(root);
                return ERR_PTR(ret);
        }
        blocksize = btrfs_level_size(root, btrfs_root_level(&root->root_item));
        root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item),
                                     blocksize, 0);
        BUG_ON(!root->node);
insert:
        if (location->objectid != BTRFS_TREE_LOG_OBJECTID) {
                ret = btrfs_find_highest_inode(root, &highest_inode);
                if (ret == 0) {
                        root->highest_inode = highest_inode;
                        root->last_inode_alloc = highest_inode;
                }
        }
        return root;
}
struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info,
                                        u64 root_objectid)
{
        struct btrfs_root *root;

        if (root_objectid == BTRFS_ROOT_TREE_OBJECTID)
                return fs_info->tree_root;
        if (root_objectid == BTRFS_EXTENT_TREE_OBJECTID)
                return fs_info->extent_root;

        root = radix_tree_lookup(&fs_info->fs_roots_radix,
                                 (unsigned long)root_objectid);
        return root;
}
struct btrfs_root *btrfs_read_fs_root_no_name(struct btrfs_fs_info *fs_info,
                                              struct btrfs_key *location)
{
        struct btrfs_root *root;
        int ret;

        if (location->objectid == BTRFS_ROOT_TREE_OBJECTID)
                return fs_info->tree_root;
        if (location->objectid == BTRFS_EXTENT_TREE_OBJECTID)
                return fs_info->extent_root;
        if (location->objectid == BTRFS_CHUNK_TREE_OBJECTID)
                return fs_info->chunk_root;
        if (location->objectid == BTRFS_DEV_TREE_OBJECTID)
                return fs_info->dev_root;

        root = radix_tree_lookup(&fs_info->fs_roots_radix,
                                 (unsigned long)location->objectid);
        if (root)
                return root;

        root = btrfs_read_fs_root_no_radix(fs_info->tree_root, location);
        if (IS_ERR(root))
                return root;
        ret = radix_tree_insert(&fs_info->fs_roots_radix,
                                (unsigned long)root->root_key.objectid,
                                root);
        if (ret) {
                free_extent_buffer(root->node);
                kfree(root);
                return ERR_PTR(ret);
        }
        ret = btrfs_find_dead_roots(fs_info->tree_root,
                                    root->root_key.objectid, root);
        BUG_ON(ret);

        return root;
}
struct btrfs_root *btrfs_read_fs_root(struct btrfs_fs_info *fs_info,
                                      struct btrfs_key *location,
                                      const char *name, int namelen)
{
        struct btrfs_root *root;
        int ret;

        root = btrfs_read_fs_root_no_name(fs_info, location);
        if (!root)
                return NULL;

        ret = btrfs_set_root_name(root, name, namelen);
        if (ret) {
                free_extent_buffer(root->node);
                kfree(root);
                return ERR_PTR(ret);
        }

        ret = btrfs_sysfs_add_root(root);
        if (ret) {
                free_extent_buffer(root->node);
                kfree(root->name);
                kfree(root);
                return ERR_PTR(ret);
        }
        return root;
}
static int add_hasher(struct btrfs_fs_info *info, char *type)
{
        struct btrfs_hasher *hasher;

        hasher = kmalloc(sizeof(*hasher), GFP_NOFS);
        if (!hasher)
                return -ENOMEM;
        hasher->hash_tfm = crypto_alloc_hash(type, 0, CRYPTO_ALG_ASYNC);
        if (!hasher->hash_tfm) {
                kfree(hasher);
                return -EINVAL;
        }
        spin_lock(&info->hash_lock);
        list_add(&hasher->hashers, &info->hashers);
        spin_unlock(&info->hash_lock);
        return 0;
}
static int btrfs_congested_fn(void *congested_data, int bdi_bits)
{
        struct btrfs_fs_info *info = (struct btrfs_fs_info *)congested_data;
        int ret = 0;
        struct list_head *cur;
        struct btrfs_device *device;
        struct backing_dev_info *bdi;

        if ((bdi_bits & (1 << BDI_write_congested)) &&
            btrfs_congested_async(info, 0))
                return 1;

        list_for_each(cur, &info->fs_devices->devices) {
                device = list_entry(cur, struct btrfs_device, dev_list);
                if (!device->bdev)
                        continue;
                bdi = blk_get_backing_dev_info(device->bdev);
                if (bdi && bdi_congested(bdi, bdi_bits)) {
                        ret = 1;
                        break;
                }
        }
        return ret;
}
/*
 * this unplugs every device on the box, and it is only used when page
 * is accounted for
 */
static void __unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
{
        struct list_head *cur;
        struct btrfs_device *device;
        struct btrfs_fs_info *info;

        info = (struct btrfs_fs_info *)bdi->unplug_io_data;
        list_for_each(cur, &info->fs_devices->devices) {
                device = list_entry(cur, struct btrfs_device, dev_list);
                bdi = blk_get_backing_dev_info(device->bdev);
                if (bdi->unplug_io_fn) {
                        bdi->unplug_io_fn(bdi, page);
                }
        }
}
void btrfs_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
{
        struct inode *inode;
        struct extent_map_tree *em_tree;
        struct extent_map *em;
        struct address_space *mapping;
        u64 offset;

        /* the generic O_DIRECT read code does this */
        if (!page) {
                __unplug_io_fn(bdi, page);
                return;
        }

        /*
         * page->mapping may change at any time.  Get a consistent copy
         * and use that for everything below
         */
        smp_mb();
        mapping = page->mapping;
        if (!mapping)
                return;

        inode = mapping->host;
        offset = page_offset(page);

        em_tree = &BTRFS_I(inode)->extent_tree;
        spin_lock(&em_tree->lock);
        em = lookup_extent_mapping(em_tree, offset, PAGE_CACHE_SIZE);
        spin_unlock(&em_tree->lock);
        if (!em) {
                __unplug_io_fn(bdi, page);
                return;
        }

        if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
                free_extent_map(em);
                __unplug_io_fn(bdi, page);
                return;
        }
        offset = offset - em->start;
        btrfs_unplug_page(&BTRFS_I(inode)->root->fs_info->mapping_tree,
                          em->block_start + offset, page);
        free_extent_map(em);
}
static int setup_bdi(struct btrfs_fs_info *info, struct backing_dev_info *bdi)
{
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
        bdi_init(bdi);
#endif
        bdi->ra_pages = default_backing_dev_info.ra_pages;
        bdi->capabilities = default_backing_dev_info.capabilities;
        bdi->unplug_io_fn = btrfs_unplug_io_fn;
        bdi->unplug_io_data = info;
        bdi->congested_fn = btrfs_congested_fn;
        bdi->congested_data = info;
        return 0;
}
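
/*
 * a tree block can span multiple pages, but a bio may only cover part of
 * it.  Walk the bio and decide whether the whole extent buffer is in ram
 * and up to date, so the checksum can be verified now instead of being
 * requeued until the remaining pages arrive.
 */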
static int bio_ready_for_csum(struct bio *bio)
{
        u64 length = 0;
        u64 buf_len = 0;
        u64 start = 0;
        struct page *page;
        struct extent_io_tree *io_tree = NULL;
        struct btrfs_fs_info *info = NULL;
        struct bio_vec *bvec;
        int i;
        int ret;

        bio_for_each_segment(bvec, bio, i) {
                page = bvec->bv_page;
                if (page->private == EXTENT_PAGE_PRIVATE) {
                        length += bvec->bv_len;
                        continue;
                }
                if (!page->private) {
                        length += bvec->bv_len;
                        continue;
                }
                length = bvec->bv_len;
                buf_len = page->private >> 2;
                start = page_offset(page) + bvec->bv_offset;
                io_tree = &BTRFS_I(page->mapping->host)->io_tree;
                info = BTRFS_I(page->mapping->host)->root->fs_info;
        }
        /* are we fully contained in this bio? */
        if (buf_len <= length)
                return 1;

        ret = extent_range_uptodate(io_tree, start + length,
                                    start + buf_len - 1);
        return ret;
}
/*
 * called by the kthread helper functions to finally call the bio end_io
 * functions.  This is where read checksum verification actually happens
 */
static void end_workqueue_fn(struct btrfs_work *work)
{
        struct bio *bio;
        struct end_io_wq *end_io_wq;
        struct btrfs_fs_info *fs_info;
        int error;

        end_io_wq = container_of(work, struct end_io_wq, work);
        bio = end_io_wq->bio;
        fs_info = end_io_wq->info;

        /* metadata bios are special because the whole tree block must
         * be checksummed at once.  This makes sure the entire block is in
         * ram and up to date before trying to verify things.  For
         * blocksize <= pagesize, it is basically a noop
         */
        if (end_io_wq->metadata && !bio_ready_for_csum(bio)) {
                btrfs_queue_worker(&fs_info->endio_workers,
                                   &end_io_wq->work);
                return;
        }
        error = end_io_wq->error;
        bio->bi_private = end_io_wq->private;
        bio->bi_end_io = end_io_wq->end_io;
        kfree(end_io_wq);
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
        bio_endio(bio, bio->bi_size, error);
#else
        bio_endio(bio, error);
#endif
}
static int cleaner_kthread(void *arg)
{
        struct btrfs_root *root = arg;

        do {
                smp_mb();
                if (root->fs_info->closing)
                        break;

                vfs_check_frozen(root->fs_info->sb, SB_FREEZE_WRITE);
                mutex_lock(&root->fs_info->cleaner_mutex);
                btrfs_clean_old_snapshots(root);
                mutex_unlock(&root->fs_info->cleaner_mutex);

                if (freezing(current)) {
                        refrigerator();
                } else {
                        smp_mb();
                        if (root->fs_info->closing)
                                break;
                        set_current_state(TASK_INTERRUPTIBLE);
                        schedule();
                        __set_current_state(TASK_RUNNING);
                }
        } while (!kthread_should_stop());
        return 0;
}
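
/*
 * background thread that forces a transaction commit roughly every 30
 * seconds.  Commits are skipped while the running transaction is younger
 * than that, and the cleaner thread is kicked after every pass.
 */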
static int transaction_kthread(void *arg)
{
        struct btrfs_root *root = arg;
        struct btrfs_trans_handle *trans;
        struct btrfs_transaction *cur;
        unsigned long now;
        unsigned long delay;
        int ret;

        do {
                smp_mb();
                if (root->fs_info->closing)
                        break;

                delay = HZ * 30;
                vfs_check_frozen(root->fs_info->sb, SB_FREEZE_WRITE);
                mutex_lock(&root->fs_info->transaction_kthread_mutex);

                if (root->fs_info->total_ref_cache_size > 20 * 1024 * 1024) {
                        printk("btrfs: total reference cache size %Lu\n",
                               root->fs_info->total_ref_cache_size);
                }

                mutex_lock(&root->fs_info->trans_mutex);
                cur = root->fs_info->running_transaction;
                if (!cur) {
                        mutex_unlock(&root->fs_info->trans_mutex);
                        goto sleep;
                }

                now = get_seconds();
                if (now < cur->start_time || now - cur->start_time < 30) {
                        mutex_unlock(&root->fs_info->trans_mutex);
                        delay = HZ * 5;
                        goto sleep;
                }
                mutex_unlock(&root->fs_info->trans_mutex);
                trans = btrfs_start_transaction(root, 1);
                ret = btrfs_commit_transaction(trans, root);
sleep:
                wake_up_process(root->fs_info->cleaner_kthread);
                mutex_unlock(&root->fs_info->transaction_kthread_mutex);

                if (freezing(current)) {
                        refrigerator();
                } else {
                        if (root->fs_info->closing)
                                break;
                        set_current_state(TASK_INTERRUPTIBLE);
                        schedule_timeout(delay);
                        __set_current_state(TASK_RUNNING);
                }
        } while (!kthread_should_stop());
        return 0;
}
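
/*
 * main entry point for bringing up a filesystem: read the super block,
 * start the worker threads, read the chunk, root, extent and device
 * trees, and replay the tree log if one was left behind.
 */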
struct btrfs_root *open_ctree(struct super_block *sb,
                              struct btrfs_fs_devices *fs_devices,
                              char *options)
{
        u32 sectorsize;
        u32 nodesize;
        u32 leafsize;
        u32 blocksize;
        u32 stripesize;
        struct buffer_head *bh;
        struct btrfs_root *extent_root = kzalloc(sizeof(struct btrfs_root),
                                                 GFP_NOFS);
        struct btrfs_root *tree_root = kzalloc(sizeof(struct btrfs_root),
                                               GFP_NOFS);
        struct btrfs_fs_info *fs_info = kzalloc(sizeof(*fs_info),
                                                GFP_NOFS);
        struct btrfs_root *chunk_root = kzalloc(sizeof(struct btrfs_root),
                                                GFP_NOFS);
        struct btrfs_root *dev_root = kzalloc(sizeof(struct btrfs_root),
                                              GFP_NOFS);
        struct btrfs_root *log_tree_root;

        int ret;
        int err = -EINVAL;

        struct btrfs_super_block *disk_super;

        if (!extent_root || !tree_root || !fs_info) {
                err = -ENOMEM;
                goto fail;
        }
        INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_NOFS);
        INIT_LIST_HEAD(&fs_info->trans_list);
        INIT_LIST_HEAD(&fs_info->dead_roots);
        INIT_LIST_HEAD(&fs_info->hashers);
        INIT_LIST_HEAD(&fs_info->delalloc_inodes);
        spin_lock_init(&fs_info->hash_lock);
        spin_lock_init(&fs_info->delalloc_lock);
        spin_lock_init(&fs_info->new_trans_lock);
        spin_lock_init(&fs_info->ref_cache_lock);

        init_completion(&fs_info->kobj_unregister);
        fs_info->tree_root = tree_root;
        fs_info->extent_root = extent_root;
        fs_info->chunk_root = chunk_root;
        fs_info->dev_root = dev_root;
        fs_info->fs_devices = fs_devices;
        INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots);
        INIT_LIST_HEAD(&fs_info->space_info);
        btrfs_mapping_init(&fs_info->mapping_tree);
        atomic_set(&fs_info->nr_async_submits, 0);
        atomic_set(&fs_info->nr_async_bios, 0);
        atomic_set(&fs_info->throttles, 0);
        atomic_set(&fs_info->throttle_gen, 0);
        fs_info->max_extent = (u64)-1;
        fs_info->max_inline = 8192 * 1024;
        setup_bdi(fs_info, &fs_info->bdi);
        fs_info->btree_inode = new_inode(sb);
        fs_info->btree_inode->i_ino = 1;
        fs_info->btree_inode->i_nlink = 1;
        fs_info->thread_pool_size = min(num_online_cpus() + 2, 8);

        INIT_LIST_HEAD(&fs_info->ordered_extents);
        spin_lock_init(&fs_info->ordered_extent_lock);

        sb->s_blocksize = 4096;
        sb->s_blocksize_bits = blksize_bits(4096);

        /*
         * we set the i_size on the btree inode to the max possible int.
         * the real end of the address space is determined by all of
         * the devices in the system
         */
        fs_info->btree_inode->i_size = OFFSET_MAX;
        fs_info->btree_inode->i_mapping->a_ops = &btree_aops;
        fs_info->btree_inode->i_mapping->backing_dev_info = &fs_info->bdi;

        extent_io_tree_init(&BTRFS_I(fs_info->btree_inode)->io_tree,
                            fs_info->btree_inode->i_mapping,
                            GFP_NOFS);
        extent_map_tree_init(&BTRFS_I(fs_info->btree_inode)->extent_tree,
                             GFP_NOFS);

        BTRFS_I(fs_info->btree_inode)->io_tree.ops = &btree_extent_io_ops;

        spin_lock_init(&fs_info->block_group_cache_lock);
        fs_info->block_group_cache_tree.rb_node = NULL;

        extent_io_tree_init(&fs_info->pinned_extents,
                            fs_info->btree_inode->i_mapping, GFP_NOFS);
        extent_io_tree_init(&fs_info->pending_del,
                            fs_info->btree_inode->i_mapping, GFP_NOFS);
        extent_io_tree_init(&fs_info->extent_ins,
                            fs_info->btree_inode->i_mapping, GFP_NOFS);
        fs_info->do_barriers = 1;

        BTRFS_I(fs_info->btree_inode)->root = tree_root;
        memset(&BTRFS_I(fs_info->btree_inode)->location, 0,
               sizeof(struct btrfs_key));
        insert_inode_hash(fs_info->btree_inode);

        mutex_init(&fs_info->trans_mutex);
        mutex_init(&fs_info->tree_log_mutex);
        mutex_init(&fs_info->drop_mutex);
        mutex_init(&fs_info->alloc_mutex);
        mutex_init(&fs_info->chunk_mutex);
        mutex_init(&fs_info->transaction_kthread_mutex);
        mutex_init(&fs_info->cleaner_mutex);
        mutex_init(&fs_info->volume_mutex);
        init_waitqueue_head(&fs_info->transaction_throttle);
        init_waitqueue_head(&fs_info->transaction_wait);
        init_waitqueue_head(&fs_info->async_submit_wait);
        init_waitqueue_head(&fs_info->tree_log_wait);
        atomic_set(&fs_info->tree_log_commit, 0);
        atomic_set(&fs_info->tree_log_writers, 0);
        fs_info->tree_log_transid = 0;

        ret = add_hasher(fs_info, "crc32c");
        if (ret) {
                printk("btrfs: failed hash setup, modprobe cryptomgr?\n");
                err = -ENOMEM;
                goto fail_iput;
        }

        __setup_root(4096, 4096, 4096, 4096, tree_root,
                     fs_info, BTRFS_ROOT_TREE_OBJECTID);

        bh = __bread(fs_devices->latest_bdev,
                     BTRFS_SUPER_INFO_OFFSET / 4096, 4096);
        if (!bh)
                goto fail_iput;

        memcpy(&fs_info->super_copy, bh->b_data, sizeof(fs_info->super_copy));
        brelse(bh);

        memcpy(fs_info->fsid, fs_info->super_copy.fsid, BTRFS_FSID_SIZE);

        disk_super = &fs_info->super_copy;
        if (!btrfs_super_root(disk_super))
                goto fail_sb_buffer;

        err = btrfs_parse_options(tree_root, options);
        if (err)
                goto fail_sb_buffer;

        /*
         * we need to start all the end_io workers up front because the
         * queue work function gets called at interrupt time, and so it
         * cannot dynamically grow.
         */
        btrfs_init_workers(&fs_info->workers, "worker",
                           fs_info->thread_pool_size);
        btrfs_init_workers(&fs_info->submit_workers, "submit",
                           min_t(u64, fs_devices->num_devices,
                           fs_info->thread_pool_size));

        /* a higher idle thresh on the submit workers makes it much more
         * likely that bios will be sent down in a sane order to the
         * devices
         */
        fs_info->submit_workers.idle_thresh = 64;

        /* fs_info->workers is responsible for checksumming file data
         * blocks and metadata.  Using a larger idle thresh allows each
         * worker thread to operate on things in roughly the order they
         * were sent by the writeback daemons, improving overall locality
         * of the IO going down the pipe.
         */
        fs_info->workers.idle_thresh = 128;

        btrfs_init_workers(&fs_info->fixup_workers, "fixup", 1);
        btrfs_init_workers(&fs_info->endio_workers, "endio",
                           fs_info->thread_pool_size);
        btrfs_init_workers(&fs_info->endio_write_workers, "endio-write",
                           fs_info->thread_pool_size);

        /*
         * endios are largely parallel and should have a very
         * low idle thresh
         */
        fs_info->endio_workers.idle_thresh = 4;
        fs_info->endio_write_workers.idle_thresh = 64;

        btrfs_start_workers(&fs_info->workers, 1);
        btrfs_start_workers(&fs_info->submit_workers, 1);
        btrfs_start_workers(&fs_info->fixup_workers, 1);
        btrfs_start_workers(&fs_info->endio_workers, fs_info->thread_pool_size);
        btrfs_start_workers(&fs_info->endio_write_workers,
                            fs_info->thread_pool_size);

        if (btrfs_super_num_devices(disk_super) > fs_devices->open_devices) {
                printk("Btrfs: wanted %llu devices, but found %llu\n",
                       (unsigned long long)btrfs_super_num_devices(disk_super),
                       (unsigned long long)fs_devices->open_devices);
                if (btrfs_test_opt(tree_root, DEGRADED))
                        printk("continuing in degraded mode\n");
                else
                        goto fail_sb_buffer;
        }

        fs_info->bdi.ra_pages *= btrfs_super_num_devices(disk_super);

        nodesize = btrfs_super_nodesize(disk_super);
        leafsize = btrfs_super_leafsize(disk_super);
        sectorsize = btrfs_super_sectorsize(disk_super);
        stripesize = btrfs_super_stripesize(disk_super);
        tree_root->nodesize = nodesize;
        tree_root->leafsize = leafsize;
        tree_root->sectorsize = sectorsize;
        tree_root->stripesize = stripesize;

        sb->s_blocksize = sectorsize;
        sb->s_blocksize_bits = blksize_bits(sectorsize);

        if (strncmp((char *)(&disk_super->magic), BTRFS_MAGIC,
                    sizeof(disk_super->magic))) {
                printk("btrfs: valid FS not found on %s\n", sb->s_id);
                goto fail_sb_buffer;
        }

        mutex_lock(&fs_info->chunk_mutex);
        ret = btrfs_read_sys_array(tree_root);
        mutex_unlock(&fs_info->chunk_mutex);
        if (ret) {
                printk("btrfs: failed to read the system array on %s\n",
                       sb->s_id);
                goto fail_sys_array;
        }

        blocksize = btrfs_level_size(tree_root,
                                     btrfs_super_chunk_root_level(disk_super));

        __setup_root(nodesize, leafsize, sectorsize, stripesize,
                     chunk_root, fs_info, BTRFS_CHUNK_TREE_OBJECTID);

        chunk_root->node = read_tree_block(chunk_root,
                                           btrfs_super_chunk_root(disk_super),
                                           blocksize, 0);
        BUG_ON(!chunk_root->node);

        read_extent_buffer(chunk_root->node, fs_info->chunk_tree_uuid,
                 (unsigned long)btrfs_header_chunk_tree_uuid(chunk_root->node),
                 BTRFS_UUID_SIZE);

        mutex_lock(&fs_info->chunk_mutex);
        ret = btrfs_read_chunk_tree(chunk_root);
        mutex_unlock(&fs_info->chunk_mutex);
        BUG_ON(ret);

        btrfs_close_extra_devices(fs_devices);

        blocksize = btrfs_level_size(tree_root,
                                     btrfs_super_root_level(disk_super));

        tree_root->node = read_tree_block(tree_root,
                                          btrfs_super_root(disk_super),
                                          blocksize, 0);
        if (!tree_root->node)
                goto fail_sb_buffer;

        ret = find_and_setup_root(tree_root, fs_info,
                                  BTRFS_EXTENT_TREE_OBJECTID, extent_root);
        if (ret)
                goto fail_tree_root;
        extent_root->track_dirty = 1;

        ret = find_and_setup_root(tree_root, fs_info,
                                  BTRFS_DEV_TREE_OBJECTID, dev_root);
        dev_root->track_dirty = 1;

        if (ret)
                goto fail_extent_root;

        btrfs_read_block_groups(extent_root);

        fs_info->generation = btrfs_super_generation(disk_super) + 1;
        fs_info->data_alloc_profile = (u64)-1;
        fs_info->metadata_alloc_profile = (u64)-1;
        fs_info->system_alloc_profile = fs_info->metadata_alloc_profile;
        fs_info->cleaner_kthread = kthread_run(cleaner_kthread, tree_root,
                                               "btrfs-cleaner");
        if (!fs_info->cleaner_kthread)
                goto fail_extent_root;

        fs_info->transaction_kthread = kthread_run(transaction_kthread,
                                                   tree_root,
                                                   "btrfs-transaction");
        if (!fs_info->transaction_kthread)
                goto fail_cleaner;

        if (btrfs_super_log_root(disk_super) != 0) {
                u32 blocksize;
                u64 bytenr = btrfs_super_log_root(disk_super);

                blocksize =
                        btrfs_level_size(tree_root,
                                         btrfs_super_log_root_level(disk_super));

                log_tree_root = kzalloc(sizeof(struct btrfs_root),
                                        GFP_NOFS);

                __setup_root(nodesize, leafsize, sectorsize, stripesize,
                             log_tree_root, fs_info, BTRFS_TREE_LOG_OBJECTID);

                log_tree_root->node = read_tree_block(tree_root, bytenr,
                                                      blocksize, 0);
                ret = btrfs_recover_log_trees(log_tree_root);
                BUG_ON(ret);
        }
        fs_info->last_trans_committed = btrfs_super_generation(disk_super);
        return tree_root;

fail_cleaner:
        kthread_stop(fs_info->cleaner_kthread);
fail_extent_root:
        free_extent_buffer(extent_root->node);
fail_tree_root:
        free_extent_buffer(tree_root->node);
fail_sys_array:
fail_sb_buffer:
        btrfs_stop_workers(&fs_info->fixup_workers);
        btrfs_stop_workers(&fs_info->workers);
        btrfs_stop_workers(&fs_info->endio_workers);
        btrfs_stop_workers(&fs_info->endio_write_workers);
        btrfs_stop_workers(&fs_info->submit_workers);
fail_iput:
        iput(fs_info->btree_inode);
fail:
        btrfs_close_devices(fs_info->fs_devices);
        btrfs_mapping_tree_free(&fs_info->mapping_tree);

        kfree(extent_root);
        kfree(tree_root);
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
        bdi_destroy(&fs_info->bdi);
#endif
        kfree(fs_info);
        return ERR_PTR(err);
}
static void btrfs_end_buffer_write_sync(struct buffer_head *bh, int uptodate)
{
        char b[BDEVNAME_SIZE];

        if (uptodate) {
                set_buffer_uptodate(bh);
        } else {
                if (!buffer_eopnotsupp(bh) && printk_ratelimit()) {
                        printk(KERN_WARNING "lost page write due to "
                               "I/O error on %s\n",
                               bdevname(bh->b_bdev, b));
                }
                /* note, we don't set_buffer_write_io_error because we have
                 * our own ways of dealing with the IO errors
                 */
                clear_buffer_uptodate(bh);
        }
        unlock_buffer(bh);
        put_bh(bh);
}
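
/*
 * write a copy of the super block to every device in the filesystem,
 * using write barriers where the device supports them.  Up to max_errors
 * device failures are tolerated before the write is considered fatal.
 */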
int write_all_supers(struct btrfs_root *root)
{
        struct list_head *cur;
        struct list_head *head = &root->fs_info->fs_devices->devices;
        struct btrfs_device *dev;
        struct btrfs_super_block *sb;
        struct btrfs_dev_item *dev_item;
        struct buffer_head *bh;
        int ret;
        int do_barriers;
        int max_errors;
        int total_errors = 0;
        u32 crc;
        u64 flags;

        max_errors = btrfs_super_num_devices(&root->fs_info->super_copy) - 1;
        do_barriers = !btrfs_test_opt(root, NOBARRIER);

        sb = &root->fs_info->super_for_commit;
        dev_item = &sb->dev_item;
        list_for_each(cur, head) {
                dev = list_entry(cur, struct btrfs_device, dev_list);
                if (!dev->bdev) {
                        total_errors++;
                        continue;
                }
                if (!dev->in_fs_metadata)
                        continue;

                btrfs_set_stack_device_type(dev_item, dev->type);
                btrfs_set_stack_device_id(dev_item, dev->devid);
                btrfs_set_stack_device_total_bytes(dev_item, dev->total_bytes);
                btrfs_set_stack_device_bytes_used(dev_item, dev->bytes_used);
                btrfs_set_stack_device_io_align(dev_item, dev->io_align);
                btrfs_set_stack_device_io_width(dev_item, dev->io_width);
                btrfs_set_stack_device_sector_size(dev_item, dev->sector_size);
                memcpy(dev_item->uuid, dev->uuid, BTRFS_UUID_SIZE);
                flags = btrfs_super_flags(sb);
                btrfs_set_super_flags(sb, flags | BTRFS_HEADER_FLAG_WRITTEN);

                crc = ~(u32)0;
                crc = btrfs_csum_data(root, (char *)sb + BTRFS_CSUM_SIZE, crc,
                                      BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE);
                btrfs_csum_final(crc, sb->csum);

                bh = __getblk(dev->bdev, BTRFS_SUPER_INFO_OFFSET / 4096,
                              BTRFS_SUPER_INFO_SIZE);

                memcpy(bh->b_data, sb, BTRFS_SUPER_INFO_SIZE);
                dev->pending_io = bh;

                get_bh(bh);
                set_buffer_uptodate(bh);
                lock_buffer(bh);
                bh->b_end_io = btrfs_end_buffer_write_sync;

                if (do_barriers && dev->barriers) {
                        ret = submit_bh(WRITE_BARRIER, bh);
                        if (ret == -EOPNOTSUPP) {
                                printk("btrfs: disabling barriers on dev %s\n",
                                       dev->name);
                                set_buffer_uptodate(bh);
                                dev->barriers = 0;
                                get_bh(bh);
                                lock_buffer(bh);
                                ret = submit_bh(WRITE, bh);
                        }
                } else {
                        ret = submit_bh(WRITE, bh);
                }
                if (ret)
                        total_errors++;
        }
        if (total_errors > max_errors) {
                printk("btrfs: %d errors while writing supers\n", total_errors);
                BUG();
        }

        total_errors = 0;
        list_for_each(cur, head) {
                dev = list_entry(cur, struct btrfs_device, dev_list);
                if (!dev->bdev)
                        continue;
                if (!dev->in_fs_metadata)
                        continue;

                BUG_ON(!dev->pending_io);
                bh = dev->pending_io;
                wait_on_buffer(bh);
                if (!buffer_uptodate(dev->pending_io)) {
                        if (do_barriers && dev->barriers) {
                                printk("btrfs: disabling barriers on dev %s\n",
                                       dev->name);
                                set_buffer_uptodate(bh);
                                get_bh(bh);
                                lock_buffer(bh);
                                dev->barriers = 0;
                                ret = submit_bh(WRITE, bh);
                                wait_on_buffer(bh);
                                if (!buffer_uptodate(bh))
                                        total_errors++;
                        } else {
                                total_errors++;
                        }
                }
                dev->pending_io = NULL;
                brelse(bh);
        }
        if (total_errors > max_errors) {
                printk("btrfs: %d errors while writing supers\n", total_errors);
                BUG();
        }
        return 0;
}
int write_ctree_super(struct btrfs_trans_handle *trans, struct btrfs_root
                      *root)
{
        int ret;

        ret = write_all_supers(root);
        return ret;
}
int btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
{
        radix_tree_delete(&fs_info->fs_roots_radix,
                          (unsigned long)root->root_key.objectid);
        btrfs_sysfs_del_root(root);
        if (root->node)
                free_extent_buffer(root->node);
        if (root->commit_root)
                free_extent_buffer(root->commit_root);
        kfree(root->name);
        kfree(root);
        return 0;
}
static int del_fs_roots(struct btrfs_fs_info *fs_info)
{
        int ret;
        int i;
        struct btrfs_root *gang[8];

        while (1) {
                ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
                                             (void **)gang, 0,
                                             ARRAY_SIZE(gang));
                if (!ret)
                        break;
                for (i = 0; i < ret; i++)
                        btrfs_free_fs_root(fs_info, gang[i]);
        }
        return 0;
}
int close_ctree(struct btrfs_root *root)
{
        int ret;
        struct btrfs_trans_handle *trans;
        struct btrfs_fs_info *fs_info = root->fs_info;

        fs_info->closing = 1;
        smp_mb();

        kthread_stop(root->fs_info->transaction_kthread);
        kthread_stop(root->fs_info->cleaner_kthread);

        btrfs_clean_old_snapshots(root);
        trans = btrfs_start_transaction(root, 1);
        ret = btrfs_commit_transaction(trans, root);
        /* run commit again to drop the original snapshot */
        trans = btrfs_start_transaction(root, 1);
        btrfs_commit_transaction(trans, root);
        ret = btrfs_write_and_wait_transaction(NULL, root);
        BUG_ON(ret);

        write_ctree_super(NULL, root);

        if (fs_info->delalloc_bytes) {
                printk("btrfs: at unmount delalloc count %Lu\n",
                       fs_info->delalloc_bytes);
        }
        if (fs_info->total_ref_cache_size) {
                printk("btrfs: at unmount reference cache size %Lu\n",
                       fs_info->total_ref_cache_size);
        }

        if (fs_info->extent_root->node)
                free_extent_buffer(fs_info->extent_root->node);

        if (fs_info->tree_root->node)
                free_extent_buffer(fs_info->tree_root->node);

        if (root->fs_info->chunk_root->node)
                free_extent_buffer(root->fs_info->chunk_root->node);

        if (root->fs_info->dev_root->node)
                free_extent_buffer(root->fs_info->dev_root->node);

        btrfs_free_block_groups(root->fs_info);
        fs_info->closing = 2;
        del_fs_roots(fs_info);

        filemap_write_and_wait(fs_info->btree_inode->i_mapping);

        truncate_inode_pages(fs_info->btree_inode->i_mapping, 0);

        btrfs_stop_workers(&fs_info->fixup_workers);
        btrfs_stop_workers(&fs_info->workers);
        btrfs_stop_workers(&fs_info->endio_workers);
        btrfs_stop_workers(&fs_info->endio_write_workers);
        btrfs_stop_workers(&fs_info->submit_workers);

        iput(fs_info->btree_inode);

        while (!list_empty(&fs_info->hashers)) {
                struct btrfs_hasher *hasher;
                hasher = list_entry(fs_info->hashers.next, struct btrfs_hasher,
                                    hashers);
                list_del(&hasher->hashers);
                crypto_free_hash(hasher->hash_tfm);
                kfree(hasher);
        }
        btrfs_close_devices(fs_info->fs_devices);
        btrfs_mapping_tree_free(&fs_info->mapping_tree);

#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
        bdi_destroy(&fs_info->bdi);
#endif

        kfree(fs_info->extent_root);
        kfree(fs_info->tree_root);
        kfree(fs_info->chunk_root);
        kfree(fs_info->dev_root);
        return 0;
}
int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid)
{
        int ret;
        struct inode *btree_inode = buf->first_page->mapping->host;

        ret = extent_buffer_uptodate(&BTRFS_I(btree_inode)->io_tree, buf);
        if (!ret)
                return ret;
        ret = verify_parent_transid(&BTRFS_I(btree_inode)->io_tree, buf,
                                    parent_transid);
        return !ret;
}

int btrfs_set_buffer_uptodate(struct extent_buffer *buf)
{
        struct inode *btree_inode = buf->first_page->mapping->host;
        return set_extent_buffer_uptodate(&BTRFS_I(btree_inode)->io_tree,
                                          buf);
}
void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
{
        struct btrfs_root *root = BTRFS_I(buf->first_page->mapping->host)->root;
        u64 transid = btrfs_header_generation(buf);
        struct inode *btree_inode = root->fs_info->btree_inode;

        WARN_ON(!btrfs_tree_locked(buf));
        if (transid != root->fs_info->generation) {
                printk(KERN_CRIT "transid mismatch buffer %llu, found %Lu running %Lu\n",
                       (unsigned long long)buf->start,
                       transid, root->fs_info->generation);
                WARN_ON(1);
        }
        set_extent_buffer_dirty(&BTRFS_I(btree_inode)->io_tree, buf);
}
void btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr)
{
        /*
         * looks as though older kernels can get into trouble with
         * this code, they end up stuck in balance_dirty_pages forever
         */
        struct extent_io_tree *tree;
        u64 num_dirty;
        u64 start = 0;
        unsigned long thresh = 96 * 1024 * 1024;
        tree = &BTRFS_I(root->fs_info->btree_inode)->io_tree;

        if (current_is_pdflush() || current->flags & PF_MEMALLOC)
                return;

        num_dirty = count_range_bits(tree, &start, (u64)-1,
                                     thresh, EXTENT_DIRTY);
        if (num_dirty > thresh) {
                balance_dirty_pages_ratelimited_nr(
                                   root->fs_info->btree_inode->i_mapping, 1);
        }
        return;
}
int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid)
{
        struct btrfs_root *root = BTRFS_I(buf->first_page->mapping->host)->root;
        int ret;
        ret = btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
        if (ret == 0) {
                buf->flags |= EXTENT_UPTODATE;
        }
        return ret;
}
int btree_lock_page_hook(struct page *page)
{
        struct inode *inode = page->mapping->host;
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
        struct extent_buffer *eb;
        unsigned long len;
        u64 bytenr = page_offset(page);

        if (page->private == EXTENT_PAGE_PRIVATE)
                goto out;

        len = page->private >> 2;
        eb = find_extent_buffer(io_tree, bytenr, len, GFP_NOFS);
        if (!eb)
                goto out;

        btrfs_tree_lock(eb);
        spin_lock(&root->fs_info->hash_lock);
        btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
        spin_unlock(&root->fs_info->hash_lock);
        btrfs_tree_unlock(eb);
        free_extent_buffer(eb);
out:
        lock_page(page);
        return 0;
}
static struct extent_io_ops btree_extent_io_ops = {
        .write_cache_pages_lock_hook = btree_lock_page_hook,
        .readpage_end_io_hook = btree_readpage_end_io_hook,
        .submit_bio_hook = btree_submit_bio_hook,
        /* note we're sharing with inode.c for the merge bio hook */
        .merge_bio_hook = btrfs_merge_bio_hook,
};