Merge branch 'for-linus-4.6' of git://git.kernel.org/pub/scm/linux/kernel/git/mason...
authorLinus Torvalds <torvalds@linux-foundation.org>
Sat, 9 Apr 2016 17:41:34 +0000 (10:41 -0700)
committerLinus Torvalds <torvalds@linux-foundation.org>
Sat, 9 Apr 2016 17:41:34 +0000 (10:41 -0700)
Pull btrfs fixes from Chris Mason:
 "These are bug fixes, including a really old fsync bug, and a few trace
  points to help us track down problems in the quota code"

* 'for-linus-4.6' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs:
  Btrfs: fix file/data loss caused by fsync after rename and new inode
  btrfs: Reset IO error counters before start of device replacing
  btrfs: Add qgroup tracing
  Btrfs: don't use src fd for printk
  btrfs: fallback to vmalloc in btrfs_compare_tree
  btrfs: handle non-fatal errors in btrfs_qgroup_inherit()
  btrfs: Output more info for enospc_debug mount option
  Btrfs: fix invalid reference in replace_path
  Btrfs: Improve FL_KEEP_SIZE handling in fallocate

1  2 
fs/btrfs/extent-tree.c
fs/btrfs/file.c
fs/btrfs/ioctl.c
fs/btrfs/relocation.c
include/trace/events/btrfs.h

diff --combined fs/btrfs/extent-tree.c
@@@ -3452,7 -3452,7 +3452,7 @@@ again
                num_pages = 1;
  
        num_pages *= 16;
 -      num_pages *= PAGE_CACHE_SIZE;
 +      num_pages *= PAGE_SIZE;
  
        ret = btrfs_check_data_free_space(inode, 0, num_pages);
        if (ret)
@@@ -4639,7 -4639,7 +4639,7 @@@ static void shrink_delalloc(struct btrf
        loops = 0;
        while (delalloc_bytes && loops < 3) {
                max_reclaim = min(delalloc_bytes, to_reclaim);
 -              nr_pages = max_reclaim >> PAGE_CACHE_SHIFT;
 +              nr_pages = max_reclaim >> PAGE_SHIFT;
                btrfs_writeback_inodes_sb_nr(root, nr_pages, items);
                /*
                 * We need to wait for the async pages to actually start before
@@@ -9386,15 -9386,23 +9386,23 @@@ int btrfs_can_relocate(struct btrfs_roo
        u64 dev_min = 1;
        u64 dev_nr = 0;
        u64 target;
+       int debug;
        int index;
        int full = 0;
        int ret = 0;
  
+       debug = btrfs_test_opt(root, ENOSPC_DEBUG);
        block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
  
        /* odd, couldn't find the block group, leave it alone */
-       if (!block_group)
+       if (!block_group) {
+               if (debug)
+                       btrfs_warn(root->fs_info,
+                                  "can't find block group for bytenr %llu",
+                                  bytenr);
                return -1;
+       }
  
        min_free = btrfs_block_group_used(&block_group->item);
  
                 * this is just a balance, so if we were marked as full
                 * we know there is no space for a new chunk
                 */
-               if (full)
+               if (full) {
+                       if (debug)
+                               btrfs_warn(root->fs_info,
+                                       "no space to alloc new chunk for block group %llu",
+                                       block_group->key.objectid);
                        goto out;
+               }
  
                index = get_block_group_index(block_group);
        }
                        ret = -1;
                }
        }
+       if (debug && ret == -1)
+               btrfs_warn(root->fs_info,
+                       "no space to allocate a new chunk for block group %llu",
+                       block_group->key.objectid);
        mutex_unlock(&root->fs_info->chunk_mutex);
        btrfs_end_transaction(trans, root);
  out:
diff --combined fs/btrfs/file.c
@@@ -414,11 -414,11 +414,11 @@@ static noinline int btrfs_copy_from_use
        size_t copied = 0;
        size_t total_copied = 0;
        int pg = 0;
 -      int offset = pos & (PAGE_CACHE_SIZE - 1);
 +      int offset = pos & (PAGE_SIZE - 1);
  
        while (write_bytes > 0) {
                size_t count = min_t(size_t,
 -                                   PAGE_CACHE_SIZE - offset, write_bytes);
 +                                   PAGE_SIZE - offset, write_bytes);
                struct page *page = prepared_pages[pg];
                /*
                 * Copy data from userspace to the current page
                if (unlikely(copied == 0))
                        break;
  
 -              if (copied < PAGE_CACHE_SIZE - offset) {
 +              if (copied < PAGE_SIZE - offset) {
                        offset += copied;
                } else {
                        pg++;
@@@ -473,7 -473,7 +473,7 @@@ static void btrfs_drop_pages(struct pag
                 */
                ClearPageChecked(pages[i]);
                unlock_page(pages[i]);
 -              page_cache_release(pages[i]);
 +              put_page(pages[i]);
        }
  }
  
@@@ -1297,7 -1297,7 +1297,7 @@@ static int prepare_uptodate_page(struc
  {
        int ret = 0;
  
 -      if (((pos & (PAGE_CACHE_SIZE - 1)) || force_uptodate) &&
 +      if (((pos & (PAGE_SIZE - 1)) || force_uptodate) &&
            !PageUptodate(page)) {
                ret = btrfs_readpage(NULL, page);
                if (ret)
@@@ -1323,7 -1323,7 +1323,7 @@@ static noinline int prepare_pages(struc
                                  size_t write_bytes, bool force_uptodate)
  {
        int i;
 -      unsigned long index = pos >> PAGE_CACHE_SHIFT;
 +      unsigned long index = pos >> PAGE_SHIFT;
        gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
        int err = 0;
        int faili;
@@@ -1345,7 -1345,7 +1345,7 @@@ again
                        err = prepare_uptodate_page(inode, pages[i],
                                                    pos + write_bytes, false);
                if (err) {
 -                      page_cache_release(pages[i]);
 +                      put_page(pages[i]);
                        if (err == -EAGAIN) {
                                err = 0;
                                goto again;
  fail:
        while (faili >= 0) {
                unlock_page(pages[faili]);
 -              page_cache_release(pages[faili]);
 +              put_page(pages[faili]);
                faili--;
        }
        return err;
@@@ -1408,7 -1408,7 +1408,7 @@@ lock_and_cleanup_extent_if_need(struct 
                                             cached_state, GFP_NOFS);
                        for (i = 0; i < num_pages; i++) {
                                unlock_page(pages[i]);
 -                              page_cache_release(pages[i]);
 +                              put_page(pages[i]);
                        }
                        btrfs_start_ordered_extent(inode, ordered, 1);
                        btrfs_put_ordered_extent(ordered);
@@@ -1497,8 -1497,8 +1497,8 @@@ static noinline ssize_t __btrfs_buffere
        bool force_page_uptodate = false;
        bool need_unlock;
  
 -      nrptrs = min(DIV_ROUND_UP(iov_iter_count(i), PAGE_CACHE_SIZE),
 -                      PAGE_CACHE_SIZE / (sizeof(struct page *)));
 +      nrptrs = min(DIV_ROUND_UP(iov_iter_count(i), PAGE_SIZE),
 +                      PAGE_SIZE / (sizeof(struct page *)));
        nrptrs = min(nrptrs, current->nr_dirtied_pause - current->nr_dirtied);
        nrptrs = max(nrptrs, 8);
        pages = kmalloc_array(nrptrs, sizeof(struct page *), GFP_KERNEL);
                return -ENOMEM;
  
        while (iov_iter_count(i) > 0) {
 -              size_t offset = pos & (PAGE_CACHE_SIZE - 1);
 +              size_t offset = pos & (PAGE_SIZE - 1);
                size_t sector_offset;
                size_t write_bytes = min(iov_iter_count(i),
 -                                       nrptrs * (size_t)PAGE_CACHE_SIZE -
 +                                       nrptrs * (size_t)PAGE_SIZE -
                                         offset);
                size_t num_pages = DIV_ROUND_UP(write_bytes + offset,
 -                                              PAGE_CACHE_SIZE);
 +                                              PAGE_SIZE);
                size_t reserve_bytes;
                size_t dirty_pages;
                size_t copied;
                         * write_bytes, so scale down.
                         */
                        num_pages = DIV_ROUND_UP(write_bytes + offset,
 -                                               PAGE_CACHE_SIZE);
 +                                               PAGE_SIZE);
                        reserve_bytes = round_up(write_bytes + sector_offset,
                                        root->sectorsize);
                        goto reserve_metadata;
@@@ -1609,7 -1609,7 +1609,7 @@@ again
                } else {
                        force_page_uptodate = false;
                        dirty_pages = DIV_ROUND_UP(copied + offset,
 -                                                 PAGE_CACHE_SIZE);
 +                                                 PAGE_SIZE);
                }
  
                /*
                                u64 __pos;
  
                                __pos = round_down(pos, root->sectorsize) +
 -                                      (dirty_pages << PAGE_CACHE_SHIFT);
 +                                      (dirty_pages << PAGE_SHIFT);
                                btrfs_delalloc_release_space(inode, __pos,
                                                             release_bytes);
                        }
                cond_resched();
  
                balance_dirty_pages_ratelimited(inode->i_mapping);
 -              if (dirty_pages < (root->nodesize >> PAGE_CACHE_SHIFT) + 1)
 +              if (dirty_pages < (root->nodesize >> PAGE_SHIFT) + 1)
                        btrfs_btree_balance_dirty(root);
  
                pos += copied;
@@@ -1738,8 -1738,8 +1738,8 @@@ static ssize_t __btrfs_direct_write(str
                goto out;
        written += written_buffered;
        iocb->ki_pos = pos + written_buffered;
 -      invalidate_mapping_pages(file->f_mapping, pos >> PAGE_CACHE_SHIFT,
 -                               endbyte >> PAGE_CACHE_SHIFT);
 +      invalidate_mapping_pages(file->f_mapping, pos >> PAGE_SHIFT,
 +                               endbyte >> PAGE_SHIFT);
  out:
        return written ? written : err;
  }
@@@ -1905,7 -1905,7 +1905,7 @@@ static int start_ordered_ops(struct ino
   */
  int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
  {
 -      struct dentry *dentry = file->f_path.dentry;
 +      struct dentry *dentry = file_dentry(file);
        struct inode *inode = d_inode(dentry);
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct btrfs_trans_handle *trans;
@@@ -2682,9 -2682,12 +2682,12 @@@ static long btrfs_fallocate(struct fil
                return ret;
  
        inode_lock(inode);
-       ret = inode_newsize_ok(inode, alloc_end);
-       if (ret)
-               goto out;
+       if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size) {
+               ret = inode_newsize_ok(inode, offset + len);
+               if (ret)
+                       goto out;
+       }
  
        /*
         * TODO: Move these two operations after we have checked
diff --combined fs/btrfs/ioctl.c
@@@ -898,7 -898,7 +898,7 @@@ static int check_defrag_in_cache(struc
        u64 end;
  
        read_lock(&em_tree->lock);
 -      em = lookup_extent_mapping(em_tree, offset, PAGE_CACHE_SIZE);
 +      em = lookup_extent_mapping(em_tree, offset, PAGE_SIZE);
        read_unlock(&em_tree->lock);
  
        if (em) {
@@@ -988,7 -988,7 +988,7 @@@ static struct extent_map *defrag_lookup
        struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
        struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
        struct extent_map *em;
 -      u64 len = PAGE_CACHE_SIZE;
 +      u64 len = PAGE_SIZE;
  
        /*
         * hopefully we have this extent in the tree already, try without
@@@ -1124,15 -1124,15 +1124,15 @@@ static int cluster_pages_for_defrag(str
        struct extent_io_tree *tree;
        gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
  
 -      file_end = (isize - 1) >> PAGE_CACHE_SHIFT;
 +      file_end = (isize - 1) >> PAGE_SHIFT;
        if (!isize || start_index > file_end)
                return 0;
  
        page_cnt = min_t(u64, (u64)num_pages, (u64)file_end - start_index + 1);
  
        ret = btrfs_delalloc_reserve_space(inode,
 -                      start_index << PAGE_CACHE_SHIFT,
 -                      page_cnt << PAGE_CACHE_SHIFT);
 +                      start_index << PAGE_SHIFT,
 +                      page_cnt << PAGE_SHIFT);
        if (ret)
                return ret;
        i_done = 0;
@@@ -1148,7 -1148,7 +1148,7 @@@ again
                        break;
  
                page_start = page_offset(page);
 -              page_end = page_start + PAGE_CACHE_SIZE - 1;
 +              page_end = page_start + PAGE_SIZE - 1;
                while (1) {
                        lock_extent_bits(tree, page_start, page_end,
                                         &cached_state);
                         */
                        if (page->mapping != inode->i_mapping) {
                                unlock_page(page);
 -                              page_cache_release(page);
 +                              put_page(page);
                                goto again;
                        }
                }
                        lock_page(page);
                        if (!PageUptodate(page)) {
                                unlock_page(page);
 -                              page_cache_release(page);
 +                              put_page(page);
                                ret = -EIO;
                                break;
                        }
  
                if (page->mapping != inode->i_mapping) {
                        unlock_page(page);
 -                      page_cache_release(page);
 +                      put_page(page);
                        goto again;
                }
  
                wait_on_page_writeback(pages[i]);
  
        page_start = page_offset(pages[0]);
 -      page_end = page_offset(pages[i_done - 1]) + PAGE_CACHE_SIZE;
 +      page_end = page_offset(pages[i_done - 1]) + PAGE_SIZE;
  
        lock_extent_bits(&BTRFS_I(inode)->io_tree,
                         page_start, page_end - 1, &cached_state);
                BTRFS_I(inode)->outstanding_extents++;
                spin_unlock(&BTRFS_I(inode)->lock);
                btrfs_delalloc_release_space(inode,
 -                              start_index << PAGE_CACHE_SHIFT,
 -                              (page_cnt - i_done) << PAGE_CACHE_SHIFT);
 +                              start_index << PAGE_SHIFT,
 +                              (page_cnt - i_done) << PAGE_SHIFT);
        }
  
  
                set_page_extent_mapped(pages[i]);
                set_page_dirty(pages[i]);
                unlock_page(pages[i]);
 -              page_cache_release(pages[i]);
 +              put_page(pages[i]);
        }
        return i_done;
  out:
        for (i = 0; i < i_done; i++) {
                unlock_page(pages[i]);
 -              page_cache_release(pages[i]);
 +              put_page(pages[i]);
        }
        btrfs_delalloc_release_space(inode,
 -                      start_index << PAGE_CACHE_SHIFT,
 -                      page_cnt << PAGE_CACHE_SHIFT);
 +                      start_index << PAGE_SHIFT,
 +                      page_cnt << PAGE_SHIFT);
        return ret;
  
  }
@@@ -1273,7 -1273,7 +1273,7 @@@ int btrfs_defrag_file(struct inode *ino
        int defrag_count = 0;
        int compress_type = BTRFS_COMPRESS_ZLIB;
        u32 extent_thresh = range->extent_thresh;
 -      unsigned long max_cluster = SZ_256K >> PAGE_CACHE_SHIFT;
 +      unsigned long max_cluster = SZ_256K >> PAGE_SHIFT;
        unsigned long cluster = max_cluster;
        u64 new_align = ~((u64)SZ_128K - 1);
        struct page **pages = NULL;
        /* find the last page to defrag */
        if (range->start + range->len > range->start) {
                last_index = min_t(u64, isize - 1,
 -                       range->start + range->len - 1) >> PAGE_CACHE_SHIFT;
 +                       range->start + range->len - 1) >> PAGE_SHIFT;
        } else {
 -              last_index = (isize - 1) >> PAGE_CACHE_SHIFT;
 +              last_index = (isize - 1) >> PAGE_SHIFT;
        }
  
        if (newer_than) {
                         * we always align our defrag to help keep
                         * the extents in the file evenly spaced
                         */
 -                      i = (newer_off & new_align) >> PAGE_CACHE_SHIFT;
 +                      i = (newer_off & new_align) >> PAGE_SHIFT;
                } else
                        goto out_ra;
        } else {
 -              i = range->start >> PAGE_CACHE_SHIFT;
 +              i = range->start >> PAGE_SHIFT;
        }
        if (!max_to_defrag)
                max_to_defrag = last_index - i + 1;
                inode->i_mapping->writeback_index = i;
  
        while (i <= last_index && defrag_count < max_to_defrag &&
 -             (i < DIV_ROUND_UP(i_size_read(inode), PAGE_CACHE_SIZE))) {
 +             (i < DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE))) {
                /*
                 * make sure we stop running if someone unmounts
                 * the FS
                        break;
                }
  
 -              if (!should_defrag_range(inode, (u64)i << PAGE_CACHE_SHIFT,
 +              if (!should_defrag_range(inode, (u64)i << PAGE_SHIFT,
                                         extent_thresh, &last_len, &skip,
                                         &defrag_end, range->flags &
                                         BTRFS_DEFRAG_RANGE_COMPRESS)) {
                         * the should_defrag function tells us how much to skip
                         * bump our counter by the suggested amount
                         */
 -                      next = DIV_ROUND_UP(skip, PAGE_CACHE_SIZE);
 +                      next = DIV_ROUND_UP(skip, PAGE_SIZE);
                        i = max(i + 1, next);
                        continue;
                }
  
                if (!newer_than) {
 -                      cluster = (PAGE_CACHE_ALIGN(defrag_end) >>
 -                                 PAGE_CACHE_SHIFT) - i;
 +                      cluster = (PAGE_ALIGN(defrag_end) >>
 +                                 PAGE_SHIFT) - i;
                        cluster = min(cluster, max_cluster);
                } else {
                        cluster = max_cluster;
                                i += ret;
  
                        newer_off = max(newer_off + 1,
 -                                      (u64)i << PAGE_CACHE_SHIFT);
 +                                      (u64)i << PAGE_SHIFT);
  
                        ret = find_new_extents(root, inode, newer_than,
                                               &newer_off, SZ_64K);
                        if (!ret) {
                                range->start = newer_off;
 -                              i = (newer_off & new_align) >> PAGE_CACHE_SHIFT;
 +                              i = (newer_off & new_align) >> PAGE_SHIFT;
                        } else {
                                break;
                        }
                } else {
                        if (ret > 0) {
                                i += ret;
 -                              last_len += ret << PAGE_CACHE_SHIFT;
 +                              last_len += ret << PAGE_SHIFT;
                        } else {
                                i++;
                                last_len = 0;
@@@ -1654,7 -1654,7 +1654,7 @@@ static noinline int btrfs_ioctl_snap_cr
  
                src_inode = file_inode(src.file);
                if (src_inode->i_sb != file_inode(file)->i_sb) {
-                       btrfs_info(BTRFS_I(src_inode)->root->fs_info,
+                       btrfs_info(BTRFS_I(file_inode(file))->root->fs_info,
                                   "Snapshot src from another FS");
                        ret = -EXDEV;
                } else if (!inode_owner_or_capable(src_inode)) {
@@@ -1722,7 -1722,7 +1722,7 @@@ static noinline int btrfs_ioctl_snap_cr
        if (vol_args->flags & BTRFS_SUBVOL_RDONLY)
                readonly = true;
        if (vol_args->flags & BTRFS_SUBVOL_QGROUP_INHERIT) {
 -              if (vol_args->size > PAGE_CACHE_SIZE) {
 +              if (vol_args->size > PAGE_SIZE) {
                        ret = -EINVAL;
                        goto free_args;
                }
@@@ -2806,12 -2806,12 +2806,12 @@@ static struct page *extent_same_get_pag
                lock_page(page);
                if (!PageUptodate(page)) {
                        unlock_page(page);
 -                      page_cache_release(page);
 +                      put_page(page);
                        return ERR_PTR(-EIO);
                }
                if (page->mapping != inode->i_mapping) {
                        unlock_page(page);
 -                      page_cache_release(page);
 +                      put_page(page);
                        return ERR_PTR(-EAGAIN);
                }
        }
@@@ -2823,7 -2823,7 +2823,7 @@@ static int gather_extent_pages(struct i
                               int num_pages, u64 off)
  {
        int i;
 -      pgoff_t index = off >> PAGE_CACHE_SHIFT;
 +      pgoff_t index = off >> PAGE_SHIFT;
  
        for (i = 0; i < num_pages; i++) {
  again:
@@@ -2932,12 -2932,12 +2932,12 @@@ static void btrfs_cmp_data_free(struct 
                pg = cmp->src_pages[i];
                if (pg) {
                        unlock_page(pg);
 -                      page_cache_release(pg);
 +                      put_page(pg);
                }
                pg = cmp->dst_pages[i];
                if (pg) {
                        unlock_page(pg);
 -                      page_cache_release(pg);
 +                      put_page(pg);
                }
        }
        kfree(cmp->src_pages);
@@@ -2949,7 -2949,7 +2949,7 @@@ static int btrfs_cmp_data_prepare(struc
                                  u64 len, struct cmp_pages *cmp)
  {
        int ret;
 -      int num_pages = PAGE_CACHE_ALIGN(len) >> PAGE_CACHE_SHIFT;
 +      int num_pages = PAGE_ALIGN(len) >> PAGE_SHIFT;
        struct page **src_pgarr, **dst_pgarr;
  
        /*
@@@ -2987,12 -2987,12 +2987,12 @@@ static int btrfs_cmp_data(struct inode 
        int ret = 0;
        int i;
        struct page *src_page, *dst_page;
 -      unsigned int cmp_len = PAGE_CACHE_SIZE;
 +      unsigned int cmp_len = PAGE_SIZE;
        void *addr, *dst_addr;
  
        i = 0;
        while (len) {
 -              if (len < PAGE_CACHE_SIZE)
 +              if (len < PAGE_SIZE)
                        cmp_len = len;
  
                BUG_ON(i >= cmp->num_pages);
@@@ -3191,7 -3191,7 +3191,7 @@@ ssize_t btrfs_dedupe_file_range(struct 
        if (olen > BTRFS_MAX_DEDUPE_LEN)
                olen = BTRFS_MAX_DEDUPE_LEN;
  
 -      if (WARN_ON_ONCE(bs < PAGE_CACHE_SIZE)) {
 +      if (WARN_ON_ONCE(bs < PAGE_SIZE)) {
                /*
                 * Btrfs does not support blocksize < page_size. As a
                 * result, btrfs_cmp_data() won't correctly handle
@@@ -3891,8 -3891,8 +3891,8 @@@ static noinline int btrfs_clone_files(s
         * data immediately and not the previous data.
         */
        truncate_inode_pages_range(&inode->i_data,
 -                              round_down(destoff, PAGE_CACHE_SIZE),
 -                              round_up(destoff + len, PAGE_CACHE_SIZE) - 1);
 +                              round_down(destoff, PAGE_SIZE),
 +                              round_up(destoff + len, PAGE_SIZE) - 1);
  out_unlock:
        if (!same_inode)
                btrfs_double_inode_unlock(src, inode);
@@@ -4124,7 -4124,7 +4124,7 @@@ static long btrfs_ioctl_space_info(stru
        /* we generally have at most 6 or so space infos, one for each raid
         * level.  So, a whole page should be more than enough for everyone
         */
 -      if (alloc_size > PAGE_CACHE_SIZE)
 +      if (alloc_size > PAGE_SIZE)
                return -ENOMEM;
  
        space_args.total_spaces = 0;
diff --combined fs/btrfs/relocation.c
@@@ -1850,6 -1850,7 +1850,7 @@@ again
                        eb = read_tree_block(dest, old_bytenr, old_ptr_gen);
                        if (IS_ERR(eb)) {
                                ret = PTR_ERR(eb);
+                               break;
                        } else if (!extent_buffer_uptodate(eb)) {
                                ret = -EIO;
                                free_extent_buffer(eb);
@@@ -3129,10 -3130,10 +3130,10 @@@ static int relocate_file_extent_cluster
        if (ret)
                goto out;
  
 -      index = (cluster->start - offset) >> PAGE_CACHE_SHIFT;
 -      last_index = (cluster->end - offset) >> PAGE_CACHE_SHIFT;
 +      index = (cluster->start - offset) >> PAGE_SHIFT;
 +      last_index = (cluster->end - offset) >> PAGE_SHIFT;
        while (index <= last_index) {
 -              ret = btrfs_delalloc_reserve_metadata(inode, PAGE_CACHE_SIZE);
 +              ret = btrfs_delalloc_reserve_metadata(inode, PAGE_SIZE);
                if (ret)
                        goto out;
  
                                                   mask);
                        if (!page) {
                                btrfs_delalloc_release_metadata(inode,
 -                                                      PAGE_CACHE_SIZE);
 +                                                      PAGE_SIZE);
                                ret = -ENOMEM;
                                goto out;
                        }
                        lock_page(page);
                        if (!PageUptodate(page)) {
                                unlock_page(page);
 -                              page_cache_release(page);
 +                              put_page(page);
                                btrfs_delalloc_release_metadata(inode,
 -                                                      PAGE_CACHE_SIZE);
 +                                                      PAGE_SIZE);
                                ret = -EIO;
                                goto out;
                        }
                }
  
                page_start = page_offset(page);
 -              page_end = page_start + PAGE_CACHE_SIZE - 1;
 +              page_end = page_start + PAGE_SIZE - 1;
  
                lock_extent(&BTRFS_I(inode)->io_tree, page_start, page_end);
  
                unlock_extent(&BTRFS_I(inode)->io_tree,
                              page_start, page_end);
                unlock_page(page);
 -              page_cache_release(page);
 +              put_page(page);
  
                index++;
                balance_dirty_pages_ratelimited(inode->i_mapping);
@@@ -6,7 -6,7 +6,7 @@@
  
  #include <linux/writeback.h>
  #include <linux/tracepoint.h>
 -#include <trace/events/gfpflags.h>
 +#include <trace/events/mmflags.h>
  
  struct btrfs_root;
  struct btrfs_fs_info;
@@@ -23,7 -23,7 +23,7 @@@ struct map_lookup
  struct extent_buffer;
  struct btrfs_work;
  struct __btrfs_workqueue;
- struct btrfs_qgroup_operation;
+ struct btrfs_qgroup_extent_record;
  
  #define show_ref_type(type)                                           \
        __print_symbolic(type,                                          \
@@@ -1231,6 -1231,93 +1231,93 @@@ DEFINE_EVENT(btrfs__qgroup_delayed_ref
  
        TP_ARGS(ref_root, reserved)
  );
+ DECLARE_EVENT_CLASS(btrfs_qgroup_extent,
+       TP_PROTO(struct btrfs_qgroup_extent_record *rec),
+       TP_ARGS(rec),
+       TP_STRUCT__entry(
+               __field(        u64,  bytenr            )
+               __field(        u64,  num_bytes         )
+       ),
+       TP_fast_assign(
+               __entry->bytenr         = rec->bytenr;
+               __entry->num_bytes      = rec->num_bytes;
+       ),
+       TP_printk("bytenr = %llu, num_bytes = %llu",
+                 (unsigned long long)__entry->bytenr,
+                 (unsigned long long)__entry->num_bytes)
+ );
+ DEFINE_EVENT(btrfs_qgroup_extent, btrfs_qgroup_account_extents,
+       TP_PROTO(struct btrfs_qgroup_extent_record *rec),
+       TP_ARGS(rec)
+ );
+ DEFINE_EVENT(btrfs_qgroup_extent, btrfs_qgroup_insert_dirty_extent,
+       TP_PROTO(struct btrfs_qgroup_extent_record *rec),
+       TP_ARGS(rec)
+ );
+ TRACE_EVENT(btrfs_qgroup_account_extent,
+       TP_PROTO(u64 bytenr, u64 num_bytes, u64 nr_old_roots, u64 nr_new_roots),
+       TP_ARGS(bytenr, num_bytes, nr_old_roots, nr_new_roots),
+       TP_STRUCT__entry(
+               __field(        u64,  bytenr                    )
+               __field(        u64,  num_bytes                 )
+               __field(        u64,  nr_old_roots              )
+               __field(        u64,  nr_new_roots              )
+       ),
+       TP_fast_assign(
+               __entry->bytenr         = bytenr;
+               __entry->num_bytes      = num_bytes;
+               __entry->nr_old_roots   = nr_old_roots;
+               __entry->nr_new_roots   = nr_new_roots;
+       ),
+       TP_printk("bytenr = %llu, num_bytes = %llu, nr_old_roots = %llu, "
+                 "nr_new_roots = %llu",
+                 __entry->bytenr,
+                 __entry->num_bytes,
+                 __entry->nr_old_roots,
+                 __entry->nr_new_roots)
+ );
+ TRACE_EVENT(qgroup_update_counters,
+       TP_PROTO(u64 qgid, u64 cur_old_count, u64 cur_new_count),
+       TP_ARGS(qgid, cur_old_count, cur_new_count),
+       TP_STRUCT__entry(
+               __field(        u64,  qgid                      )
+               __field(        u64,  cur_old_count             )
+               __field(        u64,  cur_new_count             )
+       ),
+       TP_fast_assign(
+               __entry->qgid           = qgid;
+               __entry->cur_old_count  = cur_old_count;
+               __entry->cur_new_count  = cur_new_count;
+       ),
+       TP_printk("qgid = %llu, cur_old_count = %llu, cur_new_count = %llu",
+                 __entry->qgid,
+                 __entry->cur_old_count,
+                 __entry->cur_new_count)
+ );
  #endif /* _TRACE_BTRFS_H */
  
  /* This part must be outside protection */