/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/smp_lock.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/statfs.h>
#include <linux/compat.h>
#include <linux/version.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "ioctl.h"
#include "print-tree.h"
#include "tree-log.h"
static int noinline btrfs_copy_from_user(loff_t pos, int num_pages,
					 int write_bytes,
					 struct page **prepared_pages,
					 const char __user *buf)
{
	int page_fault = 0;
	int i;
	int offset = pos & (PAGE_CACHE_SIZE - 1);
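	/*
	 * Illustrative walk-through (editor's example, not original code),
	 * assuming 4096 byte pages: a 6000 byte write at pos 5000 starts at
	 * offset 904 in the first prepared page, so 3192 bytes are copied
	 * there; offset then resets to 0 and the remaining 2808 bytes land
	 * at the start of the second page.
	 */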
	for (i = 0; i < num_pages && write_bytes > 0; i++, offset = 0) {
		size_t count = min_t(size_t,
				     PAGE_CACHE_SIZE - offset, write_bytes);
		struct page *page = prepared_pages[i];
		fault_in_pages_readable(buf, count);

		/* Copy data from userspace to the current page */
		page_fault = __copy_from_user(page_address(page) + offset,
					      buf, count);
		/* Flush processor's dcache for this page */
		flush_dcache_page(page);
		buf += count;
		write_bytes -= count;
		if (page_fault)
			break;
	}
	return page_fault ? -EFAULT : 0;
}
static void noinline btrfs_drop_pages(struct page **pages, size_t num_pages)
{
	size_t i;
	for (i = 0; i < num_pages; i++) {
		ClearPageChecked(pages[i]);
		unlock_page(pages[i]);
		mark_page_accessed(pages[i]);
		page_cache_release(pages[i]);
	}
}
static int noinline insert_inline_extent(struct btrfs_trans_handle *trans,
				struct btrfs_root *root, struct inode *inode,
				u64 offset, size_t size,
				struct page **pages, size_t page_offset,
				int num_pages)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_file_extent_item *ei;

	path = btrfs_alloc_path();
	btrfs_set_trans_block_group(trans, inode);

	key.objectid = inode->i_ino;
	btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	struct btrfs_key found_key;

	if (path->slots[0] == 0)
	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

	if (found_key.objectid != inode->i_ino)
	if (found_key.type != BTRFS_EXTENT_DATA_KEY)
	ei = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);

	if (btrfs_file_extent_type(leaf, ei) !=
	    BTRFS_FILE_EXTENT_INLINE) {
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	if (btrfs_file_extent_type(leaf, ei) !=
	    BTRFS_FILE_EXTENT_INLINE) {
		btrfs_print_leaf(root, leaf);
		printk("found wasn't inline offset %Lu inode %lu\n",
		       offset, inode->i_ino);
	found_size = btrfs_file_extent_inline_len(leaf,
					btrfs_item_nr(leaf, path->slots[0]));
	found_end = key.offset + found_size;

	if (found_end < offset + size) {
		btrfs_release_path(root, path);
		ret = btrfs_search_slot(trans, root, &key, path,
					offset + size - found_end, 1);
		ret = btrfs_extend_item(trans, root, path,
					offset + size - found_end);
		leaf = path->nodes[0];
		ei = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		inode->i_blocks += (offset + size - found_end) >> 9;
	if (found_end < offset) {
		ptr = btrfs_file_extent_inline_start(ei) + found_size;
		memset_extent_buffer(leaf, 0, ptr, offset - found_end);
	btrfs_release_path(root, path);
	datasize = offset + size - key.offset;
	inode->i_blocks += datasize >> 9;
	datasize = btrfs_file_extent_calc_inline_size(datasize);
	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      datasize);
	printk("got bad ret %d\n", ret);
	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	btrfs_set_file_extent_generation(leaf, ei, trans->transid);
	btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE);
	ptr = btrfs_file_extent_inline_start(ei) + offset - key.offset;

	kaddr = kmap_atomic(page, KM_USER0);
	cur_size = min_t(size_t, PAGE_CACHE_SIZE - page_offset, size);
	write_extent_buffer(leaf, kaddr + page_offset, ptr, cur_size);
	kunmap_atomic(kaddr, KM_USER0);
	if (i >= num_pages) {
		printk("i %d num_pages %d\n", i, num_pages);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_free_path(path);
}
static int noinline dirty_and_release_pages(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct file *file,
				   struct page **pages,
				   size_t num_pages,
				   loff_t pos,
				   size_t write_bytes)
{
	struct inode *inode = fdentry(file)->d_inode;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	u64 num_bytes;
	u64 start_pos;
	u64 end_of_last_block;
	u64 end_pos = pos + write_bytes;
	loff_t isize = i_size_read(inode);

	start_pos = pos & ~((u64)root->sectorsize - 1);
	num_bytes = (write_bytes + pos - start_pos +
		     root->sectorsize - 1) & ~((u64)root->sectorsize - 1);

	end_of_last_block = start_pos + num_bytes - 1;
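	/*
	 * Illustrative numbers (editor's example, not original code),
	 * assuming a 4096 byte sectorsize: a 100 byte write at pos 5000
	 * gives start_pos = 4096 and num_bytes = 4096, so the locked and
	 * dirtied range always covers whole sectors on both ends.
	 */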
	lock_extent(io_tree, start_pos, end_of_last_block, GFP_NOFS);
	trans = btrfs_join_transaction(root, 1);
	btrfs_set_trans_block_group(trans, inode);

	if ((end_of_last_block & 4095) == 0) {
		printk("strange end of last %Lu %zu %Lu\n", start_pos,
		       write_bytes, end_of_last_block);
	}
	set_extent_uptodate(io_tree, start_pos, end_of_last_block, GFP_NOFS);

	/* FIXME...EIEIO, ENOSPC and more */
	/* insert any holes we need to create */
	if (isize < start_pos) {
		u64 last_pos_in_file;
		u64 hole_size;
		u64 mask = root->sectorsize - 1;

		last_pos_in_file = (isize + mask) & ~mask;
		hole_size = (start_pos - last_pos_in_file + mask) & ~mask;
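		/*
		 * Editor's example (not original code), 4096 byte sectors:
		 * if the file currently ends at isize = 6000 and the new
		 * write begins at start_pos = 16384, then last_pos_in_file
		 * = 8192 and hole_size = 8192, i.e. the gap between the old
		 * tail sector and the new write is filled with a hole
		 * extent below.
		 */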
		btrfs_wait_ordered_range(inode, last_pos_in_file,
					 last_pos_in_file + hole_size);
		mutex_lock(&BTRFS_I(inode)->extent_mutex);
		err = btrfs_drop_extents(trans, root, inode,
					 last_pos_in_file + hole_size,
		err = btrfs_insert_file_extent(trans, root,
		btrfs_drop_extent_cache(inode, last_pos_in_file,
					last_pos_in_file + hole_size - 1);
		mutex_unlock(&BTRFS_I(inode)->extent_mutex);
		btrfs_check_file(root, inode);
	}

	/*
	 * either allocate an extent for the new bytes or setup the key
	 * to show we are doing inline data in the extent
	 */
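	/*
	 * Editor's note with an illustrative case (not original code): with
	 * the default 4096 byte sectorsize, a small write that keeps end_pos
	 * under BTRFS_MAX_INLINE_DATA_SIZE() and under max_inline is packed
	 * inline into the btree leaf; anything larger, or anything ending on
	 * a sector boundary, falls through to the delalloc path below.
	 */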
	inline_size = end_pos;
	if (isize >= BTRFS_MAX_INLINE_DATA_SIZE(root) ||
	    inline_size > root->fs_info->max_inline ||
	    (inline_size & (root->sectorsize - 1)) == 0 ||
	    inline_size >= BTRFS_MAX_INLINE_DATA_SIZE(root)) {
		/* check for reserved extents on each page, we don't want
		 * to reset the delalloc bit on things that already have
		 * extents reserved.
		 */
		btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block);
		for (i = 0; i < num_pages; i++) {
			struct page *p = pages[i];
			SetPageUptodate(p);
			set_page_dirty(p);
		}
	} else {
		/* step one, delete the existing extents in this range */
		aligned_end = (pos + write_bytes + root->sectorsize - 1) &
			~((u64)root->sectorsize - 1);
		mutex_lock(&BTRFS_I(inode)->extent_mutex);
		err = btrfs_drop_extents(trans, root, inode, start_pos,
					 aligned_end, aligned_end, &hint_byte);
		if (isize > inline_size)
			inline_size = min_t(u64, isize, aligned_end);
		inline_size -= start_pos;
		err = insert_inline_extent(trans, root, inode, start_pos,
					   inline_size, pages, 0, num_pages);
		btrfs_drop_extent_cache(inode, start_pos, aligned_end - 1);
		mutex_unlock(&BTRFS_I(inode)->extent_mutex);

		/*
		 * an ugly way to do all the prop accounting around
		 * the page bits and mapping tags
		 */
		set_page_writeback(pages[0]);
		end_page_writeback(pages[0]);
	}
	if (end_pos > isize) {
		i_size_write(inode, end_pos);
		BTRFS_I(inode)->disk_i_size = end_pos;
		btrfs_update_inode(trans, root, inode);
	}
	err = btrfs_end_transaction(trans, root);
	unlock_extent(io_tree, start_pos, end_of_last_block, GFP_NOFS);
	return err;
}
int noinline btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end)
{
	struct extent_map *em;
	struct extent_map *split = NULL;
	struct extent_map *split2 = NULL;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	u64 len = end - start + 1;

	WARN_ON(end < start);
	if (end == (u64)-1) {
	split = alloc_extent_map(GFP_NOFS);
	split2 = alloc_extent_map(GFP_NOFS);

	spin_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, start, len);
	spin_unlock(&em_tree->lock);

	clear_bit(EXTENT_FLAG_PINNED, &em->flags);
	remove_extent_mapping(em_tree, em);

	if (em->block_start < EXTENT_MAP_LAST_BYTE &&
	    em->start < start) {
		split->start = em->start;
		split->len = start - em->start;
		split->block_start = em->block_start;
		split->bdev = em->bdev;
		split->flags = em->flags;
		ret = add_extent_mapping(em_tree, split);
		free_extent_map(split);
	}

	if (em->block_start < EXTENT_MAP_LAST_BYTE &&
	    testend && em->start + em->len > start + len) {
		u64 diff = start + len - em->start;

		split->start = start + len;
		split->len = em->start + em->len - (start + len);
		split->bdev = em->bdev;
		split->flags = em->flags;

		split->block_start = em->block_start + diff;

		ret = add_extent_mapping(em_tree, split);
		free_extent_map(split);
	}
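	/*
	 * Editor's example (not original code): dropping [8192, 12287] from
	 * a cached mapping that covers [4096, 16383] leaves the two split
	 * mappings added above, [4096, 8191] in front and [12288, 16383]
	 * behind, while the original em is removed from the tree.
	 */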
	spin_unlock(&em_tree->lock);

	/* once for the tree */
	free_extent_map(split);
	free_extent_map(split2);
	return 0;
}
int btrfs_check_file(struct btrfs_root *root, struct inode *inode)
{
	struct btrfs_path *path;
	struct btrfs_key found_key;
	struct extent_buffer *leaf;
	struct btrfs_file_extent_item *extent;

	path = btrfs_alloc_path();
	ret = btrfs_lookup_file_extent(NULL, root, path, inode->i_ino,
	nritems = btrfs_header_nritems(path->nodes[0]);
	if (path->slots[0] >= nritems) {
		ret = btrfs_next_leaf(root, path);
		nritems = btrfs_header_nritems(path->nodes[0]);
	}
	slot = path->slots[0];
	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &found_key, slot);
	if (found_key.objectid != inode->i_ino)
	if (found_key.type != BTRFS_EXTENT_DATA_KEY)

	if (found_key.offset < last_offset) {
		btrfs_print_leaf(root, leaf);
		printk("inode %lu found offset %Lu expected %Lu\n",
		       inode->i_ino, found_key.offset, last_offset);
	}
	extent = btrfs_item_ptr(leaf, slot,
				struct btrfs_file_extent_item);
	found_type = btrfs_file_extent_type(leaf, extent);
	if (found_type == BTRFS_FILE_EXTENT_REG) {
		extent_end = found_key.offset +
			btrfs_file_extent_num_bytes(leaf, extent);
	} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
		struct btrfs_item *item;
		item = btrfs_item_nr(leaf, slot);
		extent_end = found_key.offset +
			btrfs_file_extent_inline_len(leaf, item);
		extent_end = (extent_end + root->sectorsize - 1) &
			~((u64)root->sectorsize - 1);
	}
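	/*
	 * Editor's example (not original code), 4096 byte sectors: an
	 * inline extent at offset 0 holding 300 bytes gives extent_end =
	 * 300, rounded up above to 4096, so the next extent is expected to
	 * start on a sector boundary.
	 */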
	last_offset = extent_end;
	if (0 && last_offset < inode->i_size) {
		btrfs_print_leaf(root, leaf);
		printk("inode %lu found offset %Lu size %Lu\n", inode->i_ino,
		       last_offset, inode->i_size);
	}
	btrfs_free_path(path);
	return 0;
}
/*
 * this is very complex, but the basic idea is to drop all extents
 * in the range start - end.  hint_block is filled in with a block number
 * that would be a good hint to the block allocator for this file.
 *
 * If an extent intersects the range but is not entirely inside the range
 * it is either truncated or split.  Anything entirely inside the range
 * is deleted from the tree.
 */
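/*
 * Editor's illustrative scenario (not part of the original comment): with
 * 4096 byte sectors, dropping [4096, 12288) from a file whose only extent
 * covers [0, 16384) truncates that extent item to [0, 4096) and then
 * inserts a "bookend" item for [12288, 16384) that points into the tail
 * of the same on-disk extent, bumping its reference count rather than
 * copying any data.
 */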
int noinline btrfs_drop_extents(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct inode *inode,
		       u64 start, u64 end, u64 inline_limit, u64 *hint_byte)
{
	u64 search_start = start;
	struct extent_buffer *leaf;
	struct btrfs_file_extent_item *extent;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_file_extent_item old;

	btrfs_drop_extent_cache(inode, start, end - 1);

	path = btrfs_alloc_path();
	btrfs_release_path(root, path);
	ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino,
	if (path->slots[0] == 0) {
	leaf = path->nodes[0];
	slot = path->slots[0];

	btrfs_item_key_to_cpu(leaf, &key, slot);
	if (btrfs_key_type(&key) == BTRFS_EXTENT_DATA_KEY &&
	if (btrfs_key_type(&key) > BTRFS_EXTENT_DATA_KEY ||
	    key.objectid != inode->i_ino) {
	search_start = key.offset;

	if (btrfs_key_type(&key) == BTRFS_EXTENT_DATA_KEY) {
		extent = btrfs_item_ptr(leaf, slot,
					struct btrfs_file_extent_item);
		found_type = btrfs_file_extent_type(leaf, extent);
		if (found_type == BTRFS_FILE_EXTENT_REG) {
			     btrfs_file_extent_disk_bytenr(leaf,
				*hint_byte = extent_end;

			extent_end = key.offset +
				btrfs_file_extent_num_bytes(leaf, extent);
		} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
			struct btrfs_item *item;
			item = btrfs_item_nr(leaf, slot);
			extent_end = key.offset +
				btrfs_file_extent_inline_len(leaf, item);
		extent_end = search_start;

	/* we found nothing we can drop */
	if ((!found_extent && !found_inline) ||
	    search_start >= extent_end) {
		nritems = btrfs_header_nritems(leaf);
		if (slot >= nritems - 1) {
			nextret = btrfs_next_leaf(root, path);
		u64 mask = root->sectorsize - 1;
		search_start = (extent_end + mask) & ~mask;
		search_start = extent_end;
	if (end <= extent_end && start >= key.offset && found_inline) {
		*hint_byte = EXTENT_MAP_INLINE;
	read_extent_buffer(leaf, &old, (unsigned long)extent,
			   sizeof(old));
	root_gen = btrfs_header_generation(leaf);
	root_owner = btrfs_header_owner(leaf);
	leaf_start = leaf->start;

	if (end < extent_end && end >= key.offset) {
		if (found_inline && start <= key.offset)

	/* truncate existing extent */
	if (start > key.offset) {
		WARN_ON(start & (root->sectorsize - 1));

		new_num = start - key.offset;
		old_num = btrfs_file_extent_num_bytes(leaf, extent);
			btrfs_file_extent_disk_bytenr(leaf,
		if (btrfs_file_extent_disk_bytenr(leaf, extent))
			dec_i_blocks(inode, old_num - new_num);
		btrfs_set_file_extent_num_bytes(leaf, extent, new_num);
		btrfs_mark_buffer_dirty(leaf);
	} else if (key.offset < inline_limit &&
		   (end > extent_end) &&
		   (inline_limit < extent_end)) {
		new_size = btrfs_file_extent_calc_inline_size(
					inline_limit - key.offset);
		dec_i_blocks(inode, (extent_end - key.offset) -
				    (inline_limit - key.offset));
		btrfs_truncate_item(trans, root, path,
	/* delete the entire extent */
	ret = btrfs_del_item(trans, root, path);
	/* TODO update progress marker and return */
	btrfs_release_path(root, path);
	/* the extent will be freed later */

	if (bookend && found_inline && start <= key.offset) {
		new_size = btrfs_file_extent_calc_inline_size(
		dec_i_blocks(inode, (extent_end - key.offset) -
		ret = btrfs_truncate_item(trans, root, path,

	/* create bookend, splitting the extent in two */
	if (bookend && found_extent) {
		struct btrfs_key ins;
		ins.objectid = inode->i_ino;
		btrfs_set_key_type(&ins, BTRFS_EXTENT_DATA_KEY);
		btrfs_release_path(root, path);
		ret = btrfs_insert_empty_item(trans, root, path, &ins,
		leaf = path->nodes[0];
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_file_extent_item);
		write_extent_buffer(leaf, &old,
				    (unsigned long)extent, sizeof(old));

		btrfs_set_file_extent_offset(leaf, extent,
				le64_to_cpu(old.offset) + end - key.offset);
		WARN_ON(le64_to_cpu(old.num_bytes) <
		btrfs_set_file_extent_num_bytes(leaf, extent,
		btrfs_set_file_extent_type(leaf, extent,
					   BTRFS_FILE_EXTENT_REG);

		btrfs_mark_buffer_dirty(path->nodes[0]);

		disk_bytenr = le64_to_cpu(old.disk_bytenr);
		if (disk_bytenr != 0) {
			ret = btrfs_inc_extent_ref(trans, root,
					le64_to_cpu(old.disk_num_bytes),
					root->root_key.objectid,
					ins.objectid, ins.offset);
		btrfs_release_path(root, path);
		if (disk_bytenr != 0) {
			btrfs_file_extent_num_bytes(leaf,

	if (found_extent && !keep) {
		u64 disk_bytenr = le64_to_cpu(old.disk_bytenr);

		if (disk_bytenr != 0) {
			dec_i_blocks(inode, le64_to_cpu(old.num_bytes));
			ret = btrfs_free_extent(trans, root,
					le64_to_cpu(old.disk_num_bytes),
					leaf_start, root_owner,
					root_gen, key.objectid,
			*hint_byte = disk_bytenr;

	if (search_start >= end) {
	btrfs_free_path(path);
	btrfs_check_file(root, inode);
}
/*
 * this gets pages into the page cache and locks them down
 */
static int noinline prepare_pages(struct btrfs_root *root, struct file *file,
			 struct page **pages, size_t num_pages,
			 loff_t pos, unsigned long first_index,
			 unsigned long last_index, size_t write_bytes)
{
	int i;
	unsigned long index = pos >> PAGE_CACHE_SHIFT;
	struct inode *inode = fdentry(file)->d_inode;
	u64 start_pos;
	u64 last_pos;

	start_pos = pos & ~((u64)root->sectorsize - 1);
	last_pos = ((u64)index + num_pages) << PAGE_CACHE_SHIFT;
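	/*
	 * Editor's example (not original code), 4096 byte pages: pos 5000
	 * with num_pages = 2 gives index = 1, start_pos = 4096 and
	 * last_pos = 12288, i.e. the byte just past the last page that will
	 * be grabbed below.
	 */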
	memset(pages, 0, num_pages * sizeof(struct page *));

	for (i = 0; i < num_pages; i++) {
		pages[i] = grab_cache_page(inode->i_mapping, index + i);
		wait_on_page_writeback(pages[i]);
	}
	if (start_pos < inode->i_size) {
		struct btrfs_ordered_extent *ordered;
		lock_extent(&BTRFS_I(inode)->io_tree,
			    start_pos, last_pos - 1, GFP_NOFS);
		ordered = btrfs_lookup_first_ordered_extent(inode,
							    last_pos - 1);
		if (ordered &&
		    ordered->file_offset + ordered->len > start_pos &&
		    ordered->file_offset < last_pos) {
			btrfs_put_ordered_extent(ordered);
			unlock_extent(&BTRFS_I(inode)->io_tree,
				      start_pos, last_pos - 1, GFP_NOFS);
			for (i = 0; i < num_pages; i++) {
				unlock_page(pages[i]);
				page_cache_release(pages[i]);
			}
			btrfs_wait_ordered_range(inode, start_pos,
						 last_pos - start_pos);
		}
		btrfs_put_ordered_extent(ordered);

		clear_extent_bits(&BTRFS_I(inode)->io_tree, start_pos,
				  last_pos - 1, EXTENT_DIRTY | EXTENT_DELALLOC,
				  GFP_NOFS);
		unlock_extent(&BTRFS_I(inode)->io_tree,
			      start_pos, last_pos - 1, GFP_NOFS);
	}
	for (i = 0; i < num_pages; i++) {
		clear_page_dirty_for_io(pages[i]);
		set_page_extent_mapped(pages[i]);
		WARN_ON(!PageLocked(pages[i]));
	}
	return 0;
}
static ssize_t btrfs_file_write(struct file *file, const char __user *buf,
				size_t count, loff_t *ppos)
{
	loff_t pos;
	loff_t start_pos;
	ssize_t num_written = 0;
	ssize_t err = 0;
	int ret = 0;
	struct inode *inode = fdentry(file)->d_inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct page **pages = NULL;
	int nrptrs;
	struct page *pinned[2];
	unsigned long first_index;
	unsigned long last_index;

	nrptrs = min((count + PAGE_CACHE_SIZE - 1) / PAGE_CACHE_SIZE,
		     PAGE_CACHE_SIZE / (sizeof(struct page *)));
	pinned[0] = NULL;
	pinned[1] = NULL;
	pos = *ppos;
	start_pos = pos;
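	/*
	 * Editor's example (not original code), 4096 byte pages and 8 byte
	 * pointers: a 1 MiB write would need 256 page pointers, while the
	 * cap here is 4096 / 8 = 512 entries, so nrptrs is whichever of the
	 * two is smaller.
	 */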
	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
	current->backing_dev_info = inode->i_mapping->backing_dev_info;
	err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
#ifdef REMOVE_SUID_PATH
	err = remove_suid(&file->f_path);
#else
# if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,26)
	err = file_remove_suid(file);
# else
	err = remove_suid(fdentry(file));
# endif
#endif
	file_update_time(file);

	pages = kmalloc(nrptrs * sizeof(struct page *), GFP_KERNEL);

	mutex_lock(&inode->i_mutex);
	first_index = pos >> PAGE_CACHE_SHIFT;
	last_index = (pos + count) >> PAGE_CACHE_SHIFT;

	/*
	 * if this is a nodatasum mount, force summing off for the inode
	 * all the time. That way a later mount with summing on won't
	 * get confused
	 */
	if (btrfs_test_opt(root, NODATASUM))
		btrfs_set_flag(inode, NODATASUM);

	/*
	 * there are lots of better ways to do this, but this code
	 * makes sure the first and last page in the file range are
	 * up to date and ready for cow
	 */
	if ((pos & (PAGE_CACHE_SIZE - 1))) {
		pinned[0] = grab_cache_page(inode->i_mapping, first_index);
		if (!PageUptodate(pinned[0])) {
			ret = btrfs_readpage(NULL, pinned[0]);
			wait_on_page_locked(pinned[0]);
		} else {
			unlock_page(pinned[0]);
		}
	}
	if ((pos + count) & (PAGE_CACHE_SIZE - 1)) {
		pinned[1] = grab_cache_page(inode->i_mapping, last_index);
		if (!PageUptodate(pinned[1])) {
			ret = btrfs_readpage(NULL, pinned[1]);
			wait_on_page_locked(pinned[1]);
		} else {
			unlock_page(pinned[1]);
		}
	}

	while (count > 0) {
		size_t offset = pos & (PAGE_CACHE_SIZE - 1);
		size_t write_bytes = min(count, nrptrs *
					 (size_t)PAGE_CACHE_SIZE -
					 offset);
		size_t num_pages = (write_bytes + PAGE_CACHE_SIZE - 1) >>
					PAGE_CACHE_SHIFT;

		WARN_ON(num_pages > nrptrs);
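		/*
		 * Editor's example (not original code), 4096 byte pages: for
		 * a 10000 byte write at pos 100, nrptrs is 3, offset = 100,
		 * write_bytes = min(10000, 3 * 4096 - 100) = 10000 and
		 * num_pages = 3, so three pages are prepared and filled
		 * before the loop advances pos and count.
		 */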
		memset(pages, 0, num_pages * sizeof(struct page *));

		ret = btrfs_check_free_space(root, write_bytes, 0);

		ret = prepare_pages(root, file, pages, num_pages,
				    pos, first_index, last_index,
				    write_bytes);

		ret = btrfs_copy_from_user(pos, num_pages,
					   write_bytes, pages, buf);
		btrfs_drop_pages(pages, num_pages);

		ret = dirty_and_release_pages(NULL, root, file, pages,
					      num_pages, pos, write_bytes);
		btrfs_drop_pages(pages, num_pages);

		buf += write_bytes;
		count -= write_bytes;
		pos += write_bytes;
		num_written += write_bytes;

		balance_dirty_pages_ratelimited_nr(inode->i_mapping, num_pages);
		if (num_pages < (root->leafsize >> PAGE_CACHE_SHIFT) + 1)
			btrfs_btree_balance_dirty(root, 1);
		btrfs_throttle(root);
	}

	mutex_unlock(&inode->i_mutex);
	kfree(pages);
	if (pinned[0])
		page_cache_release(pinned[0]);
	if (pinned[1])
		page_cache_release(pinned[1]);

	if (num_written > 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
		struct btrfs_trans_handle *trans;

		err = btrfs_fdatawrite_range(inode->i_mapping, start_pos,
					     start_pos + num_written - 1,
		err = btrfs_wait_on_page_writeback_range(inode->i_mapping,
					start_pos, start_pos + num_written - 1);
		trans = btrfs_start_transaction(root, 1);
		ret = btrfs_log_dentry_safe(trans, root, file->f_dentry);
		if (ret == 0) {
			btrfs_sync_log(trans, root);
			btrfs_end_transaction(trans, root);
		} else {
			btrfs_commit_transaction(trans, root);
		}
	} else if (num_written > 0 && (file->f_flags & O_DIRECT)) {
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)
		do_sync_file_range(file, start_pos,
				   start_pos + num_written - 1,
				   SYNC_FILE_RANGE_WRITE |
				   SYNC_FILE_RANGE_WAIT_AFTER);
#else
		do_sync_mapping_range(inode->i_mapping, start_pos,
				      start_pos + num_written - 1,
				      SYNC_FILE_RANGE_WRITE |
				      SYNC_FILE_RANGE_WAIT_AFTER);
#endif
		invalidate_mapping_pages(inode->i_mapping,
			  start_pos >> PAGE_CACHE_SHIFT,
			  (start_pos + num_written - 1) >> PAGE_CACHE_SHIFT);
	}
	current->backing_dev_info = NULL;
	return num_written ? num_written : err;
}
int btrfs_release_file(struct inode *inode, struct file *filp)
{
	if (filp->private_data)
		btrfs_ioctl_trans_end(filp);
	return 0;
}
int btrfs_sync_file(struct file *file, struct dentry *dentry, int datasync)
{
	struct inode *inode = dentry->d_inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret = 0;
	struct btrfs_trans_handle *trans;

	/*
	 * check the transaction that last modified this inode
	 * and see if it's already been committed
	 */
	if (!BTRFS_I(inode)->last_trans)
		goto out;

	mutex_lock(&root->fs_info->trans_mutex);
	if (BTRFS_I(inode)->last_trans <=
	    root->fs_info->last_trans_committed) {
		BTRFS_I(inode)->last_trans = 0;
		mutex_unlock(&root->fs_info->trans_mutex);
		goto out;
	}
	mutex_unlock(&root->fs_info->trans_mutex);

	root->fs_info->tree_log_batch++;
	filemap_fdatawait(inode->i_mapping);
	root->fs_info->tree_log_batch++;

	/*
	 * ok we haven't committed the transaction yet, let's do a commit
	 */
	if (file->private_data)
		btrfs_ioctl_trans_end(file);

	trans = btrfs_start_transaction(root, 1);

	ret = btrfs_log_dentry_safe(trans, root, file->f_dentry);

	/* we've logged all the items and now have a consistent
	 * version of the file in the log.  It is possible that
	 * someone will come in and modify the file, but that's
	 * fine because the log is consistent on disk, and we
	 * have references to all of the file's extents
	 *
	 * It is possible that someone will come in and log the
	 * file again, but that will end up using the synchronization
	 * inside btrfs_sync_log to keep things safe.
	 */
	mutex_unlock(&file->f_dentry->d_inode->i_mutex);

	if (ret > 0) {
		ret = btrfs_commit_transaction(trans, root);
	} else {
		btrfs_sync_log(trans, root);
		ret = btrfs_end_transaction(trans, root);
	}
	mutex_lock(&file->f_dentry->d_inode->i_mutex);
out:
	return ret > 0 ? -EIO : ret;
}
static struct vm_operations_struct btrfs_file_vm_ops = {
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
	.nopage		= filemap_nopage,
	.populate	= filemap_populate,
#else
	.fault		= filemap_fault,
#endif
	.page_mkwrite	= btrfs_page_mkwrite,
};
static int btrfs_file_mmap(struct file *filp, struct vm_area_struct *vma)
{
	vma->vm_ops = &btrfs_file_vm_ops;
	file_accessed(filp);
	return 0;
}
struct file_operations btrfs_file_operations = {
	.llseek = generic_file_llseek,
	.read = do_sync_read,
	.aio_read = generic_file_aio_read,
	.splice_read = generic_file_splice_read,
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18)
	.sendfile = generic_file_sendfile,
#endif
	.write = btrfs_file_write,
	.mmap = btrfs_file_mmap,
	.open = generic_file_open,
	.release = btrfs_release_file,
	.fsync = btrfs_sync_file,
	.unlocked_ioctl	= btrfs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = btrfs_ioctl,
#endif
};