}
}
-#define btrfs_debug_check_extent_io_range(inode, start, end) \
- __btrfs_debug_check_extent_io_range(__func__, (inode), (start), (end))
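+/* Pass the calling function's name (__func__) through to the helper below. */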
+#define btrfs_debug_check_extent_io_range(tree, start, end) \
+ __btrfs_debug_check_extent_io_range(__func__, (tree), (start), (end))
static inline void __btrfs_debug_check_extent_io_range(const char *caller,
- struct inode *inode, u64 start, u64 end)
+ struct extent_io_tree *tree, u64 start, u64 end)
{
- u64 isize = i_size_read(inode);
+ struct inode *inode;
+ u64 isize;
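+
+ /* An io tree without a ->mapping has no inode to read i_size from. */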
+ if (!tree->mapping)
+ return;
+
+ inode = tree->mapping->host;
+ isize = i_size_read(inode);
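+ /* Beyond the first page, a sane inclusive range end is odd or i_size - 1. */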
if (end >= PAGE_SIZE && (end % 2) == 0 && end != isize - 1) {
printk_ratelimited(KERN_DEBUG
"btrfs: %s: ino %llu isize %llu odd range [%llu,%llu]\n",
static inline struct btrfs_fs_info *
tree_fs_info(struct extent_io_tree *tree)
{
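+ /* Without a ->mapping there is no inode or superblock to look up. */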
+ if (!tree->mapping)
+ return NULL;
return btrfs_sb(tree->mapping->host->i_sb);
}
int err;
int clear = 0;
- btrfs_debug_check_extent_io_range(tree->mapping->host, start, end);
+ btrfs_debug_check_extent_io_range(tree, start, end);
if (bits & EXTENT_DELALLOC)
bits |= EXTENT_NORESERVE;
struct extent_state *state;
struct rb_node *node;
- btrfs_debug_check_extent_io_range(tree->mapping->host, start, end);
+ btrfs_debug_check_extent_io_range(tree, start, end);
spin_lock(&tree->lock);
again:
u64 last_start;
u64 last_end;
- btrfs_debug_check_extent_io_range(tree->mapping->host, start, end);
+ btrfs_debug_check_extent_io_range(tree, start, end);
bits |= EXTENT_FIRST_DELALLOC;
again:
u64 last_start;
u64 last_end;
- btrfs_debug_check_extent_io_range(tree->mapping->host, start, end);
+ btrfs_debug_check_extent_io_range(tree, start, end);
again:
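+ /* With a sleepable mask, preallocate an extent_state before taking the tree lock. */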
if (!prealloc && (mask & __GFP_WAIT)) {