Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux...
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 7ae51de..e1890b1 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -44,6 +44,7 @@
 #include "free-space-cache.h"
 #include "inode-map.h"
 #include "check-integrity.h"
+#include "rcu-string.h"
 
 static struct extent_io_ops btree_extent_io_ops;
 static void end_workqueue_fn(struct btrfs_work *work);
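
The new include backs the device-name changes later in this diff: btrfs device names become RCU-protected strings, so a reader only needs rcu_read_lock() while the name may be replaced or freed concurrently. A minimal sketch of the kind of helper such a header provides (the struct layout here is an assumption for illustration, not copied from fs/btrfs/rcu-string.h; only rcu_str_deref() and the printk wrappers are confirmed by the callers below):

#include <linux/rcupdate.h>

/* Sketch: an RCU-managed string.  Readers dereference it under
 * rcu_read_lock(); writers publish a new copy and free the old one
 * once all readers have drained (e.g. via kfree_rcu()).
 */
struct rcu_string {
        struct rcu_head rcu;
        char str[];                     /* NUL-terminated name */
};

/* Valid only inside an RCU read-side critical section. */
#define rcu_str_deref(rcu_str) ({                               \
        struct rcu_string *__s = rcu_dereference(rcu_str);      \
        __s->str;                                               \
})
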
@@ -2118,7 +2119,7 @@ int open_ctree(struct super_block *sb,
 
        features = btrfs_super_incompat_flags(disk_super);
        features |= BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF;
-       if (tree_root->fs_info->compress_type & BTRFS_COMPRESS_LZO)
+       if (tree_root->fs_info->compress_type == BTRFS_COMPRESS_LZO)
                features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO;
 
        /*
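
compress_type holds a single enum value (BTRFS_COMPRESS_NONE/ZLIB/LZO), not a bit mask, so equality is the correct test. With the old '&' test, any future compression type whose numeric value happens to share a bit with BTRFS_COMPRESS_LZO would wrongly set the LZO incompat flag. A tiny standalone illustration of the failure mode (the ZSTD value is hypothetical here, used only to show the pitfall):

#include <stdio.h>

enum compress { NONE = 0, ZLIB = 1, LZO = 2, ZSTD = 3 };

int main(void)
{
        for (int t = NONE; t <= ZSTD; t++)
                printf("type %d: '& LZO' -> %d, '== LZO' -> %d\n",
                       t, (t & LZO) != 0, t == LZO);
        /* type 3 matches the '&' test even though it is not LZO */
        return 0;
}
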
@@ -2575,8 +2576,9 @@ static void btrfs_end_buffer_write_sync(struct buffer_head *bh, int uptodate)
                struct btrfs_device *device = (struct btrfs_device *)
                        bh->b_private;
 
-               printk_ratelimited(KERN_WARNING "lost page write due to "
-                                  "I/O error on %s\n", device->name);
+               printk_ratelimited_in_rcu(KERN_WARNING "lost page write due to "
+                                         "I/O error on %s\n",
+                                         rcu_str_deref(device->name));
                /* note, we don't set_buffer_write_io_error because we have
                 * our own ways of dealing with the IO errors
                 */
@@ -2749,8 +2751,8 @@ static int write_dev_flush(struct btrfs_device *device, int wait)
                wait_for_completion(&device->flush_wait);
 
                if (bio_flagged(bio, BIO_EOPNOTSUPP)) {
-                       printk("btrfs: disabling barriers on dev %s\n",
-                              device->name);
+                       printk_in_rcu("btrfs: disabling barriers on dev %s\n",
+                                     rcu_str_deref(device->name));
                        device->nobarriers = 1;
                }
                if (!bio_flagged(bio, BIO_UPTODATE)) {
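
Both messages above now print the device name through rcu_str_deref(), which is only safe inside an RCU read-side critical section, so the plain printk calls are swapped for *_in_rcu wrappers. Presumably those wrappers are just printk bracketed by rcu_read_lock()/rcu_read_unlock(); a sketch of that shape (not the verbatim btrfs macros):

/* Sketch: keep the RCU read lock held across the printk so the pointer
 * returned by rcu_str_deref() cannot be freed mid-format.
 */
#define printk_in_rcu(fmt, ...)                                 \
do {                                                            \
        rcu_read_lock();                                        \
        printk(fmt, ##__VA_ARGS__);                             \
        rcu_read_unlock();                                      \
} while (0)

#define printk_ratelimited_in_rcu(fmt, ...)                     \
do {                                                            \
        rcu_read_lock();                                        \
        printk_ratelimited(fmt, ##__VA_ARGS__);                 \
        rcu_read_unlock();                                      \
} while (0)
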
@@ -3400,7 +3402,6 @@ int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
 
        delayed_refs = &trans->delayed_refs;
 
-again:
        spin_lock(&delayed_refs->lock);
        if (delayed_refs->num_entries == 0) {
                spin_unlock(&delayed_refs->lock);
@@ -3408,31 +3409,36 @@ again:
                return ret;
        }
 
-       node = rb_first(&delayed_refs->root);
-       while (node) {
+       while ((node = rb_first(&delayed_refs->root)) != NULL) {
                ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
-               node = rb_next(node);
-
-               ref->in_tree = 0;
-               rb_erase(&ref->rb_node, &delayed_refs->root);
-               delayed_refs->num_entries--;
 
                atomic_set(&ref->refs, 1);
                if (btrfs_delayed_ref_is_head(ref)) {
                        struct btrfs_delayed_ref_head *head;
 
                        head = btrfs_delayed_node_to_head(ref);
-                       spin_unlock(&delayed_refs->lock);
-                       mutex_lock(&head->mutex);
+                       if (!mutex_trylock(&head->mutex)) {
+                               atomic_inc(&ref->refs);
+                               spin_unlock(&delayed_refs->lock);
+
+                               /* Need to wait for the delayed ref to run */
+                               mutex_lock(&head->mutex);
+                               mutex_unlock(&head->mutex);
+                               btrfs_put_delayed_ref(ref);
+
+                               continue;
+                       }
+
                        kfree(head->extent_op);
                        delayed_refs->num_heads--;
                        if (list_empty(&head->cluster))
                                delayed_refs->num_heads_ready--;
                        list_del_init(&head->cluster);
-                       mutex_unlock(&head->mutex);
-                       btrfs_put_delayed_ref(ref);
-                       goto again;
                }
+               ref->in_tree = 0;
+               rb_erase(&ref->rb_node, &delayed_refs->root);
+               delayed_refs->num_entries--;
+
                spin_unlock(&delayed_refs->lock);
                btrfs_put_delayed_ref(ref);
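
The old loop detached every ref under the spinlock and, for head refs, dropped the lock, took head->mutex unconditionally, and then restarted the whole scan with `goto again`. The new loop re-reads rb_first() on each pass and uses mutex_trylock(): if the head is currently being run elsewhere, it grabs an extra reference, drops the spinlock, waits by locking and immediately unlocking the mutex, puts the reference, and retries; a ref is only erased from the tree once its mutex is actually held. The same trylock-or-wait shape in a runnable userspace analogue (pthreads; all names here are illustrative, not kernel API):

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t head_mutex = PTHREAD_MUTEX_INITIALIZER; /* per-head mutex */
static pthread_mutex_t tree_lock  = PTHREAD_MUTEX_INITIALIZER; /* delayed_refs->lock */

static void *run_delayed_ref(void *arg)
{
        /* Simulates a worker currently processing the head. */
        pthread_mutex_lock(&head_mutex);
        sleep(1);
        pthread_mutex_unlock(&head_mutex);
        return NULL;
}

int main(void)
{
        pthread_t worker;

        pthread_create(&worker, NULL, run_delayed_ref, NULL);
        usleep(100 * 1000);                     /* let the worker win the race */

        pthread_mutex_lock(&tree_lock);
        if (pthread_mutex_trylock(&head_mutex) != 0) {
                /* Busy: drop the tree lock, wait for the current holder to
                 * finish, then go around and pick up the entry again.
                 */
                pthread_mutex_unlock(&tree_lock);
                pthread_mutex_lock(&head_mutex);
                pthread_mutex_unlock(&head_mutex);
                printf("head was busy; waited, would retry the entry\n");
        } else {
                /* We own the mutex: safe to tear the head down. */
                pthread_mutex_unlock(&head_mutex);
                pthread_mutex_unlock(&tree_lock);
                printf("head was idle; cleaned up directly\n");
        }
        pthread_join(worker, NULL);
        return 0;
}
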
 
@@ -3520,11 +3526,9 @@ static int btrfs_destroy_marked_extents(struct btrfs_root *root,
                             &(&BTRFS_I(page->mapping->host)->io_tree)->buffer,
                                               offset >> PAGE_CACHE_SHIFT);
                        spin_unlock(&dirty_pages->buffer_lock);
-                       if (eb) {
+                       if (eb)
                                ret = test_and_clear_bit(EXTENT_BUFFER_DIRTY,
                                                         &eb->bflags);
-                               atomic_set(&eb->refs, 1);
-                       }
                        if (PageWriteback(page))
                                end_page_writeback(page);
 
@@ -3538,8 +3542,8 @@ static int btrfs_destroy_marked_extents(struct btrfs_root *root,
                                spin_unlock_irq(&page->mapping->tree_lock);
                        }
 
-                       page->mapping->a_ops->invalidatepage(page, 0);
                        unlock_page(page);
+                       page_cache_release(page);
                }
        }
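
Two changes in the page walk above: clearing EXTENT_BUFFER_DIRTY no longer forces the extent buffer's refcount to 1 (the normal release path is left to drop the remaining references), and instead of calling invalidatepage() the loop now just unlocks the page and drops the reference taken by the find_get_page() lookup earlier in this function. A kernel-style sketch of the get/put balance being restored (illustrative helper name, not btrfs code):

#include <linux/pagemap.h>

/* Sketch: every find_get_page() reference must be paired with a
 * page_cache_release() once the page is unlocked again.
 */
static void drop_one_btree_page(struct address_space *mapping, pgoff_t index)
{
        struct page *page = find_get_page(mapping, index);

        if (!page)
                return;
        lock_page(page);
        /* ... clear dirty/writeback state as the loop above does ... */
        unlock_page(page);
        page_cache_release(page);       /* balance the find_get_page() ref */
}
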
 
@@ -3553,8 +3557,10 @@ static int btrfs_destroy_pinned_extent(struct btrfs_root *root,
        u64 start;
        u64 end;
        int ret;
+       bool loop = true;
 
        unpin = pinned_extents;
+again:
        while (1) {
                ret = find_first_extent_bit(unpin, 0, &start, &end,
                                            EXTENT_DIRTY);
@@ -3572,6 +3578,15 @@ static int btrfs_destroy_pinned_extent(struct btrfs_root *root,
                cond_resched();
        }
 
+       if (loop) {
+               if (unpin == &root->fs_info->freed_extents[0])
+                       unpin = &root->fs_info->freed_extents[1];
+               else
+                       unpin = &root->fs_info->freed_extents[0];
+               loop = false;
+               goto again;
+       }
+
        return 0;
 }
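
fs_info keeps two pinned-extent trees, freed_extents[0] and freed_extents[1], and pinned_extents points at whichever one the running transaction is using; the other may still hold extents pinned by the previous transaction. The new bool-plus-goto walk therefore drains the tree it was handed first and then the other one, so an aborted transaction does not leak whatever sat in the inactive tree. The control flow, modelled as a small standalone program (all names illustrative):

#include <stdbool.h>
#include <stdio.h>

struct fake_fs_info {
        int freed_extents[2];           /* stand-ins for the two extent_io_trees */
};

static void drain(const int *unpin)
{
        printf("draining tree %d\n", *unpin);
}

int main(void)
{
        struct fake_fs_info fs = { .freed_extents = { 0, 1 } };
        const int *unpin = &fs.freed_extents[0];        /* "pinned_extents" */
        bool loop = true;

again:
        drain(unpin);
        if (loop) {
                unpin = (unpin == &fs.freed_extents[0]) ?
                        &fs.freed_extents[1] : &fs.freed_extents[0];
                loop = false;
                goto again;
        }
        return 0;
}

A plain two-iteration for loop would do the same job; the goto form mirrors the structure of the function, which already loops until find_first_extent_bit() finds nothing.
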
 
@@ -3585,21 +3600,23 @@ void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans,
        /* FIXME: cleanup wait for commit */
        cur_trans->in_commit = 1;
        cur_trans->blocked = 1;
-       if (waitqueue_active(&root->fs_info->transaction_blocked_wait))
-               wake_up(&root->fs_info->transaction_blocked_wait);
+       wake_up(&root->fs_info->transaction_blocked_wait);
 
        cur_trans->blocked = 0;
-       if (waitqueue_active(&root->fs_info->transaction_wait))
-               wake_up(&root->fs_info->transaction_wait);
+       wake_up(&root->fs_info->transaction_wait);
 
        cur_trans->commit_done = 1;
-       if (waitqueue_active(&cur_trans->commit_wait))
-               wake_up(&cur_trans->commit_wait);
+       wake_up(&cur_trans->commit_wait);
+
+       btrfs_destroy_delayed_inodes(root);
+       btrfs_assert_delayed_root_empty(root);
 
        btrfs_destroy_pending_snapshots(cur_trans);
 
        btrfs_destroy_marked_extents(root, &cur_trans->dirty_pages,
                                     EXTENT_DIRTY);
+       btrfs_destroy_pinned_extent(root,
+                                   root->fs_info->pinned_extents);
 
        /*
        memset(cur_trans, 0, sizeof(*cur_trans));
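
The forced-cleanup path now wakes each wait queue unconditionally. waitqueue_active() is only an optimization and, without a memory barrier between storing the condition and checking the queue, it can race with a task that is in the middle of adding itself, leaving that task asleep; wake_up() on an empty queue is cheap, so dropping the check is the simpler choice here (that is the usual rationale for this pattern; the hunk itself does not spell it out). The hunk also brings the single-transaction cleanup in line with btrfs_cleanup_transaction() below by destroying delayed inodes and draining pinned extents. A kernel-style fragment contrasting the two wake styles (fs_info/cur_trans stand in for the fields used above):

/* Checked form: needs an explicit barrier to pair with the barrier in the
 * waiter's prepare_to_wait(), otherwise the queue can look empty at the
 * wrong moment:
 */
cur_trans->blocked = 0;
smp_mb();
if (waitqueue_active(&fs_info->transaction_wait))
        wake_up(&fs_info->transaction_wait);

/* Unconditional form, as used above: wake_up() serializes on the queue
 * lock itself and never misses a waiter already on the queue.
 */
cur_trans->blocked = 0;
wake_up(&fs_info->transaction_wait);
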
@@ -3648,6 +3665,9 @@ int btrfs_cleanup_transaction(struct btrfs_root *root)
                if (waitqueue_active(&t->commit_wait))
                        wake_up(&t->commit_wait);
 
+               btrfs_destroy_delayed_inodes(root);
+               btrfs_assert_delayed_root_empty(root);
+
                btrfs_destroy_pending_snapshots(t);
 
                btrfs_destroy_delalloc_inodes(root);