2 * Copyright (C) 2012 Alexander Block. All rights reserved.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 02111-1307, USA.
19 #include <linux/bsearch.h>
21 #include <linux/file.h>
22 #include <linux/sort.h>
23 #include <linux/mount.h>
24 #include <linux/xattr.h>
25 #include <linux/posix_acl_xattr.h>
26 #include <linux/radix-tree.h>
27 #include <linux/crc32c.h>
28 #include <linux/vmalloc.h>
29 #include <linux/string.h>
35 #include "btrfs_inode.h"
36 #include "transaction.h"
38 static int g_verbose = 0;
40 #define verbose_printk(...) do { if (g_verbose) printk(__VA_ARGS__); } while (0)
43 * A fs_path is a helper to dynamically build path names with unknown size.
44 * It reallocates the internal buffer on demand.
45 * It allows fast adding of path elements on the right side (normal path) and
46 * fast adding to the left side (reversed path). A reversed path can also be
47 * unreversed if needed.
58 unsigned int reversed:1;
59 unsigned int virtual_mem:1;
65 #define FS_PATH_INLINE_SIZE \
66 (sizeof(struct fs_path) - offsetof(struct fs_path, inline_buf))
69 /* reused for each extent */
71 struct btrfs_root *root;
78 #define SEND_CTX_MAX_NAME_CACHE_SIZE 128
79 #define SEND_CTX_NAME_CACHE_CLEAN_SIZE (SEND_CTX_MAX_NAME_CACHE_SIZE * 2)
82 struct file *send_filp;
88 u64 cmd_send_size[BTRFS_SEND_C_MAX + 1];
89 u64 flags; /* 'flags' member of btrfs_ioctl_send_args is u64 */
91 struct btrfs_root *send_root;
92 struct btrfs_root *parent_root;
93 struct clone_root *clone_roots;
96 /* current state of the compare_tree call */
97 struct btrfs_path *left_path;
98 struct btrfs_path *right_path;
99 struct btrfs_key *cmp_key;
102 * info about the currently processed inode. In case of deleted inodes,
103 * these are the values from the deleted inode.
108 int cur_inode_new_gen;
109 int cur_inode_deleted;
112 u64 cur_inode_last_extent;
116 struct list_head new_refs;
117 struct list_head deleted_refs;
119 struct radix_tree_root name_cache;
120 struct list_head name_cache_list;
126 struct name_cache_entry {
127 struct list_head list;
129 * radix_tree has only 32bit entries but we need to handle 64bit inums.
130 * We use the lower 32bit of the 64bit inum to store it in the tree. If
131 * more than one inum would fall into the same entry, we use radix_list
132 * to store the additional entries. radix_list is also used to store
133 * entries where two entries have the same inum but different
136 struct list_head radix_list;
142 int need_later_update;
147 static int need_send_hole(struct send_ctx *sctx)
149 return (sctx->parent_root && !sctx->cur_inode_new &&
150 !sctx->cur_inode_new_gen && !sctx->cur_inode_deleted &&
151 S_ISREG(sctx->cur_inode_mode));
154 static void fs_path_reset(struct fs_path *p)
157 p->start = p->buf + p->buf_len - 1;
167 static struct fs_path *fs_path_alloc(void)
171 p = kmalloc(sizeof(*p), GFP_NOFS);
176 p->buf = p->inline_buf;
177 p->buf_len = FS_PATH_INLINE_SIZE;
182 static struct fs_path *fs_path_alloc_reversed(void)
194 static void fs_path_free(struct fs_path *p)
198 if (p->buf != p->inline_buf) {
207 static int fs_path_len(struct fs_path *p)
209 return p->end - p->start;
212 static int fs_path_ensure_buf(struct fs_path *p, int len)
220 if (p->buf_len >= len)
223 path_len = p->end - p->start;
224 old_buf_len = p->buf_len;
225 len = PAGE_ALIGN(len);
227 if (p->buf == p->inline_buf) {
228 tmp_buf = kmalloc(len, GFP_NOFS | __GFP_NOWARN);
230 tmp_buf = vmalloc(len);
235 memcpy(tmp_buf, p->buf, p->buf_len);
239 if (p->virtual_mem) {
240 tmp_buf = vmalloc(len);
243 memcpy(tmp_buf, p->buf, p->buf_len);
246 tmp_buf = krealloc(p->buf, len, GFP_NOFS);
248 tmp_buf = vmalloc(len);
251 memcpy(tmp_buf, p->buf, p->buf_len);
260 tmp_buf = p->buf + old_buf_len - path_len - 1;
261 p->end = p->buf + p->buf_len - 1;
262 p->start = p->end - path_len;
263 memmove(p->start, tmp_buf, path_len + 1);
266 p->end = p->start + path_len;
271 static int fs_path_prepare_for_add(struct fs_path *p, int name_len)
276 new_len = p->end - p->start + name_len;
277 if (p->start != p->end)
279 ret = fs_path_ensure_buf(p, new_len);
284 if (p->start != p->end)
286 p->start -= name_len;
287 p->prepared = p->start;
289 if (p->start != p->end)
291 p->prepared = p->end;
300 static int fs_path_add(struct fs_path *p, const char *name, int name_len)
304 ret = fs_path_prepare_for_add(p, name_len);
307 memcpy(p->prepared, name, name_len);
314 static int fs_path_add_path(struct fs_path *p, struct fs_path *p2)
318 ret = fs_path_prepare_for_add(p, p2->end - p2->start);
321 memcpy(p->prepared, p2->start, p2->end - p2->start);
328 static int fs_path_add_from_extent_buffer(struct fs_path *p,
329 struct extent_buffer *eb,
330 unsigned long off, int len)
334 ret = fs_path_prepare_for_add(p, len);
338 read_extent_buffer(eb, p->prepared, off, len);
345 static int fs_path_copy(struct fs_path *p, struct fs_path *from)
349 p->reversed = from->reversed;
352 ret = fs_path_add_path(p, from);
358 static void fs_path_unreverse(struct fs_path *p)
367 len = p->end - p->start;
369 p->end = p->start + len;
370 memmove(p->start, tmp, len + 1);
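/*
 * For illustration, a minimal usage sketch of the fs_path helpers above:
 * allocate a path, append components (fs_path_prepare_for_add inserts the
 * '/' separators) and consume the NUL-terminated string at ->start:
 *
 *	struct fs_path *p = fs_path_alloc();
 *	if (!p)
 *		return -ENOMEM;
 *	ret = fs_path_add(p, "dir", 3);
 *	if (!ret)
 *		ret = fs_path_add(p, "file", 4);
 *	if (!ret)
 *		printk(KERN_DEBUG "built: %s\n", p->start);   (prints "dir/file")
 *	fs_path_free(p);
 *
 * A path allocated with fs_path_alloc_reversed() grows right to left
 * instead, which is how get_cur_path() assembles a path while walking from
 * an inode up to the subvolume root before calling fs_path_unreverse().
 */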
374 static struct btrfs_path *alloc_path_for_send(void)
376 struct btrfs_path *path;
378 path = btrfs_alloc_path();
381 path->search_commit_root = 1;
382 path->skip_locking = 1;
386 static int write_buf(struct file *filp, const void *buf, u32 len, loff_t *off)
396 ret = vfs_write(filp, (char *)buf + pos, len - pos, off);
397 /* TODO handle that correctly */
398 /*if (ret == -ERESTARTSYS) {
417 static int tlv_put(struct send_ctx *sctx, u16 attr, const void *data, int len)
419 struct btrfs_tlv_header *hdr;
420 int total_len = sizeof(*hdr) + len;
421 int left = sctx->send_max_size - sctx->send_size;
423 if (unlikely(left < total_len))
426 hdr = (struct btrfs_tlv_header *) (sctx->send_buf + sctx->send_size);
427 hdr->tlv_type = cpu_to_le16(attr);
428 hdr->tlv_len = cpu_to_le16(len);
429 memcpy(hdr + 1, data, len);
430 sctx->send_size += total_len;
435 #define TLV_PUT_DEFINE_INT(bits) \
436 static int tlv_put_u##bits(struct send_ctx *sctx, \
437 u##bits attr, u##bits value) \
439 __le##bits __tmp = cpu_to_le##bits(value); \
440 return tlv_put(sctx, attr, &__tmp, sizeof(__tmp)); \
443 TLV_PUT_DEFINE_INT(64)
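/*
 * For illustration, the attribute encoding produced by tlv_put() is simply:
 *
 *	__le16 tlv_type;     BTRFS_SEND_A_* attribute id
 *	__le16 tlv_len;      number of data bytes that follow
 *	u8     data[tlv_len];
 *
 * The tlv_put_* helpers only convert their argument to this wire
 * representation (little-endian integers, raw strings, UUIDs, btrfs
 * timespecs) before handing it to tlv_put().
 */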
445 static int tlv_put_string(struct send_ctx *sctx, u16 attr,
446 const char *str, int len)
450 return tlv_put(sctx, attr, str, len);
453 static int tlv_put_uuid(struct send_ctx *sctx, u16 attr,
456 return tlv_put(sctx, attr, uuid, BTRFS_UUID_SIZE);
459 static int tlv_put_btrfs_timespec(struct send_ctx *sctx, u16 attr,
460 struct extent_buffer *eb,
461 struct btrfs_timespec *ts)
463 struct btrfs_timespec bts;
464 read_extent_buffer(eb, &bts, (unsigned long)ts, sizeof(bts));
465 return tlv_put(sctx, attr, &bts, sizeof(bts));
469 #define TLV_PUT(sctx, attrtype, attrlen, data) \
471 ret = tlv_put(sctx, attrtype, attrlen, data); \
473 goto tlv_put_failure; \
476 #define TLV_PUT_INT(sctx, attrtype, bits, value) \
478 ret = tlv_put_u##bits(sctx, attrtype, value); \
480 goto tlv_put_failure; \
483 #define TLV_PUT_U8(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 8, data)
484 #define TLV_PUT_U16(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 16, data)
485 #define TLV_PUT_U32(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 32, data)
486 #define TLV_PUT_U64(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 64, data)
487 #define TLV_PUT_STRING(sctx, attrtype, str, len) \
489 ret = tlv_put_string(sctx, attrtype, str, len); \
491 goto tlv_put_failure; \
493 #define TLV_PUT_PATH(sctx, attrtype, p) \
495 ret = tlv_put_string(sctx, attrtype, p->start, \
496 p->end - p->start); \
498 goto tlv_put_failure; \
500 #define TLV_PUT_UUID(sctx, attrtype, uuid) \
502 ret = tlv_put_uuid(sctx, attrtype, uuid); \
504 goto tlv_put_failure; \
506 #define TLV_PUT_BTRFS_TIMESPEC(sctx, attrtype, eb, ts) \
508 ret = tlv_put_btrfs_timespec(sctx, attrtype, eb, ts); \
510 goto tlv_put_failure; \
513 static int send_header(struct send_ctx *sctx)
515 struct btrfs_stream_header hdr;
517 strcpy(hdr.magic, BTRFS_SEND_STREAM_MAGIC);
518 hdr.version = cpu_to_le32(BTRFS_SEND_STREAM_VERSION);
520 return write_buf(sctx->send_filp, &hdr, sizeof(hdr),
525 * For each command/item we want to send to userspace, we call this function.
527 static int begin_cmd(struct send_ctx *sctx, int cmd)
529 struct btrfs_cmd_header *hdr;
531 if (WARN_ON(!sctx->send_buf))
534 BUG_ON(sctx->send_size);
536 sctx->send_size += sizeof(*hdr);
537 hdr = (struct btrfs_cmd_header *)sctx->send_buf;
538 hdr->cmd = cpu_to_le16(cmd);
543 static int send_cmd(struct send_ctx *sctx)
546 struct btrfs_cmd_header *hdr;
549 hdr = (struct btrfs_cmd_header *)sctx->send_buf;
550 hdr->len = cpu_to_le32(sctx->send_size - sizeof(*hdr));
553 crc = crc32c(0, (unsigned char *)sctx->send_buf, sctx->send_size);
554 hdr->crc = cpu_to_le32(crc);
556 ret = write_buf(sctx->send_filp, sctx->send_buf, sctx->send_size,
559 sctx->total_send_size += sctx->send_size;
560 sctx->cmd_send_size[le16_to_cpu(hdr->cmd)] += sctx->send_size;
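/*
 * For illustration, the stream that ends up in send_filp is one
 * btrfs_stream_header (magic + version, written by send_header()) followed
 * by a sequence of commands. Each command is a btrfs_cmd_header (length of
 * the attribute data, command id and a crc32c over the whole command, with
 * the crc field itself zeroed while the checksum is computed) followed by
 * the TLV encoded attributes. A rename, for example, is built as
 * begin_cmd(BTRFS_SEND_C_RENAME), a PATH and a PATH_TO attribute, and a
 * final send_cmd() that fills in length and crc.
 */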
567 * Sends a move instruction to user space
569 static int send_rename(struct send_ctx *sctx,
570 struct fs_path *from, struct fs_path *to)
574 verbose_printk("btrfs: send_rename %s -> %s\n", from->start, to->start);
576 ret = begin_cmd(sctx, BTRFS_SEND_C_RENAME);
580 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, from);
581 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_TO, to);
583 ret = send_cmd(sctx);
591 * Sends a link instruction to user space
593 static int send_link(struct send_ctx *sctx,
594 struct fs_path *path, struct fs_path *lnk)
598 verbose_printk("btrfs: send_link %s -> %s\n", path->start, lnk->start);
600 ret = begin_cmd(sctx, BTRFS_SEND_C_LINK);
604 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
605 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_LINK, lnk);
607 ret = send_cmd(sctx);
615 * Sends an unlink instruction to user space
617 static int send_unlink(struct send_ctx *sctx, struct fs_path *path)
621 verbose_printk("btrfs: send_unlink %s\n", path->start);
623 ret = begin_cmd(sctx, BTRFS_SEND_C_UNLINK);
627 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
629 ret = send_cmd(sctx);
637 * Sends a rmdir instruction to user space
639 static int send_rmdir(struct send_ctx *sctx, struct fs_path *path)
643 verbose_printk("btrfs: send_rmdir %s\n", path->start);
645 ret = begin_cmd(sctx, BTRFS_SEND_C_RMDIR);
649 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
651 ret = send_cmd(sctx);
659 * Helper function to retrieve some fields from an inode item.
661 static int get_inode_info(struct btrfs_root *root,
662 u64 ino, u64 *size, u64 *gen,
663 u64 *mode, u64 *uid, u64 *gid,
667 struct btrfs_inode_item *ii;
668 struct btrfs_key key;
669 struct btrfs_path *path;
671 path = alloc_path_for_send();
676 key.type = BTRFS_INODE_ITEM_KEY;
678 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
686 ii = btrfs_item_ptr(path->nodes[0], path->slots[0],
687 struct btrfs_inode_item);
689 *size = btrfs_inode_size(path->nodes[0], ii);
691 *gen = btrfs_inode_generation(path->nodes[0], ii);
693 *mode = btrfs_inode_mode(path->nodes[0], ii);
695 *uid = btrfs_inode_uid(path->nodes[0], ii);
697 *gid = btrfs_inode_gid(path->nodes[0], ii);
699 *rdev = btrfs_inode_rdev(path->nodes[0], ii);
702 btrfs_free_path(path);
706 typedef int (*iterate_inode_ref_t)(int num, u64 dir, int index,
711 * Helper function to iterate the entries in ONE btrfs_inode_ref or
712 * btrfs_inode_extref.
713 * The iterate callback may return a non-zero value to stop iteration. This can
714 * be a negative value for error codes or 1 to simply stop it.
716 * path must point to the INODE_REF or INODE_EXTREF when called.
718 static int iterate_inode_ref(struct btrfs_root *root, struct btrfs_path *path,
719 struct btrfs_key *found_key, int resolve,
720 iterate_inode_ref_t iterate, void *ctx)
722 struct extent_buffer *eb = path->nodes[0];
723 struct btrfs_item *item;
724 struct btrfs_inode_ref *iref;
725 struct btrfs_inode_extref *extref;
726 struct btrfs_path *tmp_path;
730 int slot = path->slots[0];
737 unsigned long name_off;
738 unsigned long elem_size;
741 p = fs_path_alloc_reversed();
745 tmp_path = alloc_path_for_send();
752 if (found_key->type == BTRFS_INODE_REF_KEY) {
753 ptr = (unsigned long)btrfs_item_ptr(eb, slot,
754 struct btrfs_inode_ref);
755 item = btrfs_item_nr(slot);
756 total = btrfs_item_size(eb, item);
757 elem_size = sizeof(*iref);
759 ptr = btrfs_item_ptr_offset(eb, slot);
760 total = btrfs_item_size_nr(eb, slot);
761 elem_size = sizeof(*extref);
764 while (cur < total) {
767 if (found_key->type == BTRFS_INODE_REF_KEY) {
768 iref = (struct btrfs_inode_ref *)(ptr + cur);
769 name_len = btrfs_inode_ref_name_len(eb, iref);
770 name_off = (unsigned long)(iref + 1);
771 index = btrfs_inode_ref_index(eb, iref);
772 dir = found_key->offset;
774 extref = (struct btrfs_inode_extref *)(ptr + cur);
775 name_len = btrfs_inode_extref_name_len(eb, extref);
776 name_off = (unsigned long)&extref->name;
777 index = btrfs_inode_extref_index(eb, extref);
778 dir = btrfs_inode_extref_parent(eb, extref);
782 start = btrfs_ref_to_path(root, tmp_path, name_len,
786 ret = PTR_ERR(start);
789 if (start < p->buf) {
790 /* overflow, try again with a larger buffer */
791 ret = fs_path_ensure_buf(p,
792 p->buf_len + p->buf - start);
795 start = btrfs_ref_to_path(root, tmp_path,
800 ret = PTR_ERR(start);
803 BUG_ON(start < p->buf);
807 ret = fs_path_add_from_extent_buffer(p, eb, name_off,
813 cur += elem_size + name_len;
814 ret = iterate(num, dir, index, p, ctx);
821 btrfs_free_path(tmp_path);
826 typedef int (*iterate_dir_item_t)(int num, struct btrfs_key *di_key,
827 const char *name, int name_len,
828 const char *data, int data_len,
832 * Helper function to iterate the entries in ONE btrfs_dir_item.
833 * The iterate callback may return a non-zero value to stop iteration. This can
834 * be a negative value for error codes or 1 to simply stop it.
836 * path must point to the dir item when called.
838 static int iterate_dir_item(struct btrfs_root *root, struct btrfs_path *path,
839 struct btrfs_key *found_key,
840 iterate_dir_item_t iterate, void *ctx)
843 struct extent_buffer *eb;
844 struct btrfs_item *item;
845 struct btrfs_dir_item *di;
846 struct btrfs_key di_key;
861 buf = kmalloc(buf_len, GFP_NOFS);
868 slot = path->slots[0];
869 item = btrfs_item_nr(slot);
870 di = btrfs_item_ptr(eb, slot, struct btrfs_dir_item);
873 total = btrfs_item_size(eb, item);
876 while (cur < total) {
877 name_len = btrfs_dir_name_len(eb, di);
878 data_len = btrfs_dir_data_len(eb, di);
879 type = btrfs_dir_type(eb, di);
880 btrfs_dir_item_key_to_cpu(eb, di, &di_key);
882 if (name_len + data_len > buf_len) {
883 buf_len = PAGE_ALIGN(name_len + data_len);
885 buf2 = vmalloc(buf_len);
892 buf2 = krealloc(buf, buf_len, GFP_NOFS);
894 buf2 = vmalloc(buf_len);
908 read_extent_buffer(eb, buf, (unsigned long)(di + 1),
909 name_len + data_len);
911 len = sizeof(*di) + name_len + data_len;
912 di = (struct btrfs_dir_item *)((char *)di + len);
915 ret = iterate(num, &di_key, buf, name_len, buf + name_len,
916 data_len, type, ctx);
935 static int __copy_first_ref(int num, u64 dir, int index,
936 struct fs_path *p, void *ctx)
939 struct fs_path *pt = ctx;
941 ret = fs_path_copy(pt, p);
945 /* we want the first only */
950 * Retrieve the first path of an inode. If an inode has more than one
951 * ref/hardlink, this is ignored.
953 static int get_inode_path(struct btrfs_root *root,
954 u64 ino, struct fs_path *path)
957 struct btrfs_key key, found_key;
958 struct btrfs_path *p;
960 p = alloc_path_for_send();
967 key.type = BTRFS_INODE_REF_KEY;
970 ret = btrfs_search_slot_for_read(root, &key, p, 1, 0);
977 btrfs_item_key_to_cpu(p->nodes[0], &found_key, p->slots[0]);
978 if (found_key.objectid != ino ||
979 (found_key.type != BTRFS_INODE_REF_KEY &&
980 found_key.type != BTRFS_INODE_EXTREF_KEY)) {
985 ret = iterate_inode_ref(root, p, &found_key, 1,
986 __copy_first_ref, path);
997 struct send_ctx *sctx;
999 /* number of total found references */
1003 * used for clones found in send_root. clones found behind cur_objectid
1004 * and cur_offset are not considered as allowed clones.
1009 /* may be truncated in case it's the last extent in a file */
1012 /* Just to check for bugs in backref resolving */
1016 static int __clone_root_cmp_bsearch(const void *key, const void *elt)
1018 u64 root = (u64)(uintptr_t)key;
1019 struct clone_root *cr = (struct clone_root *)elt;
1021 if (root < cr->root->objectid)
1023 if (root > cr->root->objectid)
1028 static int __clone_root_cmp_sort(const void *e1, const void *e2)
1030 struct clone_root *cr1 = (struct clone_root *)e1;
1031 struct clone_root *cr2 = (struct clone_root *)e2;
1033 if (cr1->root->objectid < cr2->root->objectid)
1035 if (cr1->root->objectid > cr2->root->objectid)
1041 * Called for every backref that is found for the current extent.
1042 * Results are collected in sctx->clone_roots->ino/offset/found_refs
1044 static int __iterate_backrefs(u64 ino, u64 offset, u64 root, void *ctx_)
1046 struct backref_ctx *bctx = ctx_;
1047 struct clone_root *found;
1051 /* First check if the root is in the list of accepted clone sources */
1052 found = bsearch((void *)(uintptr_t)root, bctx->sctx->clone_roots,
1053 bctx->sctx->clone_roots_cnt,
1054 sizeof(struct clone_root),
1055 __clone_root_cmp_bsearch);
1059 if (found->root == bctx->sctx->send_root &&
1060 ino == bctx->cur_objectid &&
1061 offset == bctx->cur_offset) {
1062 bctx->found_itself = 1;
1066 * There are inodes that have extents lying beyond their i_size. Don't
1067 * accept clones from these extents.
1069 ret = get_inode_info(found->root, ino, &i_size, NULL, NULL, NULL, NULL,
1074 if (offset + bctx->extent_len > i_size)
1078 * Make sure we don't consider clones from send_root that are
1079 * behind the current inode/offset.
1081 if (found->root == bctx->sctx->send_root) {
1083 * TODO for the moment we don't accept clones from the inode
1084 * that is currently being sent. We may change this when
1085 * BTRFS_IOC_CLONE_RANGE supports cloning from and to the same
1088 if (ino >= bctx->cur_objectid)
1091 if (ino > bctx->cur_objectid)
1093 if (offset + bctx->extent_len > bctx->cur_offset)
1099 found->found_refs++;
1100 if (ino < found->ino) {
1102 found->offset = offset;
1103 } else if (found->ino == ino) {
1105 * same extent found more than once in the same file.
1107 if (found->offset > offset + bctx->extent_len)
1108 found->offset = offset;
1115 * Given an inode, offset and extent item, it finds a good clone for a clone
1116 * instruction. Returns -ENOENT when none could be found. The function makes
1117 * sure that the returned clone is usable at the point where sending is at the
1118 * moment. This means that no clones are accepted which lie behind the current
1121 * path must point to the extent item when called.
1123 static int find_extent_clone(struct send_ctx *sctx,
1124 struct btrfs_path *path,
1125 u64 ino, u64 data_offset,
1127 struct clone_root **found)
1134 u64 extent_item_pos;
1136 struct btrfs_file_extent_item *fi;
1137 struct extent_buffer *eb = path->nodes[0];
1138 struct backref_ctx *backref_ctx = NULL;
1139 struct clone_root *cur_clone_root;
1140 struct btrfs_key found_key;
1141 struct btrfs_path *tmp_path;
1145 tmp_path = alloc_path_for_send();
1149 backref_ctx = kmalloc(sizeof(*backref_ctx), GFP_NOFS);
1155 if (data_offset >= ino_size) {
1157 * There may be extents that lie behind the file's size.
1158 * I at least had this in combination with snapshotting while
1159 * writing large files.
1165 fi = btrfs_item_ptr(eb, path->slots[0],
1166 struct btrfs_file_extent_item);
1167 extent_type = btrfs_file_extent_type(eb, fi);
1168 if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
1172 compressed = btrfs_file_extent_compression(eb, fi);
1174 num_bytes = btrfs_file_extent_num_bytes(eb, fi);
1175 disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
1176 if (disk_byte == 0) {
1180 logical = disk_byte + btrfs_file_extent_offset(eb, fi);
1182 ret = extent_from_logical(sctx->send_root->fs_info, disk_byte, tmp_path,
1183 &found_key, &flags);
1184 btrfs_release_path(tmp_path);
1188 if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
1194 * Setup the clone roots.
1196 for (i = 0; i < sctx->clone_roots_cnt; i++) {
1197 cur_clone_root = sctx->clone_roots + i;
1198 cur_clone_root->ino = (u64)-1;
1199 cur_clone_root->offset = 0;
1200 cur_clone_root->found_refs = 0;
1203 backref_ctx->sctx = sctx;
1204 backref_ctx->found = 0;
1205 backref_ctx->cur_objectid = ino;
1206 backref_ctx->cur_offset = data_offset;
1207 backref_ctx->found_itself = 0;
1208 backref_ctx->extent_len = num_bytes;
1211 * The last extent of a file may be too large due to page alignment.
1212 * We need to adjust extent_len in this case so that the checks in
1213 * __iterate_backrefs work.
1215 if (data_offset + num_bytes >= ino_size)
1216 backref_ctx->extent_len = ino_size - data_offset;
1219 * Now collect all backrefs.
1221 if (compressed == BTRFS_COMPRESS_NONE)
1222 extent_item_pos = logical - found_key.objectid;
1224 extent_item_pos = 0;
1227 ret = iterate_extent_inodes(sctx->send_root->fs_info,
1228 found_key.objectid, extent_item_pos, 1,
1229 __iterate_backrefs, backref_ctx);
1234 if (!backref_ctx->found_itself) {
1235 /* found a bug in backref code? */
1237 btrfs_err(sctx->send_root->fs_info, "did not find backref in "
1238 "send_root. inode=%llu, offset=%llu, "
1239 "disk_byte=%llu found extent=%llu\n",
1240 ino, data_offset, disk_byte, found_key.objectid);
1244 verbose_printk(KERN_DEBUG "btrfs: find_extent_clone: data_offset=%llu, "
1246 "num_bytes=%llu, logical=%llu\n",
1247 data_offset, ino, num_bytes, logical);
1249 if (!backref_ctx->found)
1250 verbose_printk("btrfs: no clones found\n");
1252 cur_clone_root = NULL;
1253 for (i = 0; i < sctx->clone_roots_cnt; i++) {
1254 if (sctx->clone_roots[i].found_refs) {
1255 if (!cur_clone_root)
1256 cur_clone_root = sctx->clone_roots + i;
1257 else if (sctx->clone_roots[i].root == sctx->send_root)
1258 /* prefer clones from send_root over others */
1259 cur_clone_root = sctx->clone_roots + i;
1264 if (cur_clone_root) {
1265 *found = cur_clone_root;
1272 btrfs_free_path(tmp_path);
1277 static int read_symlink(struct btrfs_root *root,
1279 struct fs_path *dest)
1282 struct btrfs_path *path;
1283 struct btrfs_key key;
1284 struct btrfs_file_extent_item *ei;
1290 path = alloc_path_for_send();
1295 key.type = BTRFS_EXTENT_DATA_KEY;
1297 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1302 ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
1303 struct btrfs_file_extent_item);
1304 type = btrfs_file_extent_type(path->nodes[0], ei);
1305 compression = btrfs_file_extent_compression(path->nodes[0], ei);
1306 BUG_ON(type != BTRFS_FILE_EXTENT_INLINE);
1307 BUG_ON(compression);
1309 off = btrfs_file_extent_inline_start(ei);
1310 len = btrfs_file_extent_inline_len(path->nodes[0], ei);
1312 ret = fs_path_add_from_extent_buffer(dest, path->nodes[0], off, len);
1315 btrfs_free_path(path);
1320 * Helper function to generate a file name that is unique in the root of
1321 * send_root and parent_root. This is used to generate names for orphan inodes.
1323 static int gen_unique_name(struct send_ctx *sctx,
1325 struct fs_path *dest)
1328 struct btrfs_path *path;
1329 struct btrfs_dir_item *di;
1334 path = alloc_path_for_send();
1339 len = snprintf(tmp, sizeof(tmp) - 1, "o%llu-%llu-%llu",
1341 if (len >= sizeof(tmp)) {
1342 /* should really not happen */
1347 di = btrfs_lookup_dir_item(NULL, sctx->send_root,
1348 path, BTRFS_FIRST_FREE_OBJECTID,
1349 tmp, strlen(tmp), 0);
1350 btrfs_release_path(path);
1356 /* not unique, try again */
1361 if (!sctx->parent_root) {
1367 di = btrfs_lookup_dir_item(NULL, sctx->parent_root,
1368 path, BTRFS_FIRST_FREE_OBJECTID,
1369 tmp, strlen(tmp), 0);
1370 btrfs_release_path(path);
1376 /* not unique, try again */
1384 ret = fs_path_add(dest, tmp, strlen(tmp));
1387 btrfs_free_path(path);
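/*
 * For illustration: the unique names generated above have the form
 * "o<ino>-<gen>-<idx>", e.g. "o259-7-0" for inode 259 with generation 7 on
 * the first attempt; the counter is bumped until the name exists in neither
 * send_root nor parent_root.
 */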
1392 inode_state_no_change,
1393 inode_state_will_create,
1394 inode_state_did_create,
1395 inode_state_will_delete,
1396 inode_state_did_delete,
1399 static int get_cur_inode_state(struct send_ctx *sctx, u64 ino, u64 gen)
1407 ret = get_inode_info(sctx->send_root, ino, NULL, &left_gen, NULL, NULL,
1409 if (ret < 0 && ret != -ENOENT)
1413 if (!sctx->parent_root) {
1414 right_ret = -ENOENT;
1416 ret = get_inode_info(sctx->parent_root, ino, NULL, &right_gen,
1417 NULL, NULL, NULL, NULL);
1418 if (ret < 0 && ret != -ENOENT)
1423 if (!left_ret && !right_ret) {
1424 if (left_gen == gen && right_gen == gen) {
1425 ret = inode_state_no_change;
1426 } else if (left_gen == gen) {
1427 if (ino < sctx->send_progress)
1428 ret = inode_state_did_create;
1430 ret = inode_state_will_create;
1431 } else if (right_gen == gen) {
1432 if (ino < sctx->send_progress)
1433 ret = inode_state_did_delete;
1435 ret = inode_state_will_delete;
1439 } else if (!left_ret) {
1440 if (left_gen == gen) {
1441 if (ino < sctx->send_progress)
1442 ret = inode_state_did_create;
1444 ret = inode_state_will_create;
1448 } else if (!right_ret) {
1449 if (right_gen == gen) {
1450 if (ino < sctx->send_progress)
1451 ret = inode_state_did_delete;
1453 ret = inode_state_will_delete;
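/*
 * For illustration, the result computed above boils down to: if the given
 * generation matches the inode in both roots, the inode is unchanged; if it
 * matches only in send_root, the inode was (or will be) created; if it
 * matches only in parent_root, it was (or will be) deleted. Whether "was"
 * or "will be" applies depends on ino < sctx->send_progress, i.e. on
 * whether the inode has already been processed.
 */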
1465 static int is_inode_existent(struct send_ctx *sctx, u64 ino, u64 gen)
1469 ret = get_cur_inode_state(sctx, ino, gen);
1473 if (ret == inode_state_no_change ||
1474 ret == inode_state_did_create ||
1475 ret == inode_state_will_delete)
1485 * Helper function to lookup a dir item in a dir.
1487 static int lookup_dir_item_inode(struct btrfs_root *root,
1488 u64 dir, const char *name, int name_len,
1493 struct btrfs_dir_item *di;
1494 struct btrfs_key key;
1495 struct btrfs_path *path;
1497 path = alloc_path_for_send();
1501 di = btrfs_lookup_dir_item(NULL, root, path,
1502 dir, name, name_len, 0);
1511 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &key);
1512 *found_inode = key.objectid;
1513 *found_type = btrfs_dir_type(path->nodes[0], di);
1516 btrfs_free_path(path);
1521 * Looks up the first btrfs_inode_ref of a given ino. It returns the parent dir,
1522 * generation of the parent dir and the name of the dir entry.
1524 static int get_first_ref(struct btrfs_root *root, u64 ino,
1525 u64 *dir, u64 *dir_gen, struct fs_path *name)
1528 struct btrfs_key key;
1529 struct btrfs_key found_key;
1530 struct btrfs_path *path;
1534 path = alloc_path_for_send();
1539 key.type = BTRFS_INODE_REF_KEY;
1542 ret = btrfs_search_slot_for_read(root, &key, path, 1, 0);
1546 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
1548 if (ret || found_key.objectid != ino ||
1549 (found_key.type != BTRFS_INODE_REF_KEY &&
1550 found_key.type != BTRFS_INODE_EXTREF_KEY)) {
1555 if (key.type == BTRFS_INODE_REF_KEY) {
1556 struct btrfs_inode_ref *iref;
1557 iref = btrfs_item_ptr(path->nodes[0], path->slots[0],
1558 struct btrfs_inode_ref);
1559 len = btrfs_inode_ref_name_len(path->nodes[0], iref);
1560 ret = fs_path_add_from_extent_buffer(name, path->nodes[0],
1561 (unsigned long)(iref + 1),
1563 parent_dir = found_key.offset;
1565 struct btrfs_inode_extref *extref;
1566 extref = btrfs_item_ptr(path->nodes[0], path->slots[0],
1567 struct btrfs_inode_extref);
1568 len = btrfs_inode_extref_name_len(path->nodes[0], extref);
1569 ret = fs_path_add_from_extent_buffer(name, path->nodes[0],
1570 (unsigned long)&extref->name, len);
1571 parent_dir = btrfs_inode_extref_parent(path->nodes[0], extref);
1575 btrfs_release_path(path);
1577 ret = get_inode_info(root, parent_dir, NULL, dir_gen, NULL, NULL,
1585 btrfs_free_path(path);
1589 static int is_first_ref(struct btrfs_root *root,
1591 const char *name, int name_len)
1594 struct fs_path *tmp_name;
1598 tmp_name = fs_path_alloc();
1602 ret = get_first_ref(root, ino, &tmp_dir, &tmp_dir_gen, tmp_name);
1606 if (dir != tmp_dir || name_len != fs_path_len(tmp_name)) {
1611 ret = !memcmp(tmp_name->start, name, name_len);
1614 fs_path_free(tmp_name);
1619 * Used by process_recorded_refs to determine if a new ref would overwrite an
1620 * already existing ref. In case it detects an overwrite, it returns the
1621 * inode/gen in who_ino/who_gen.
1622 * When an overwrite is detected, process_recorded_refs does proper orphanizing
1623 * to make sure later references to the overwritten inode are possible.
1624 * Orphanizing is however only required for the first ref of an inode.
1625 * process_recorded_refs does an additional is_first_ref check to see if
1626 * orphanizing is really required.
1628 static int will_overwrite_ref(struct send_ctx *sctx, u64 dir, u64 dir_gen,
1629 const char *name, int name_len,
1630 u64 *who_ino, u64 *who_gen)
1634 u64 other_inode = 0;
1637 if (!sctx->parent_root)
1640 ret = is_inode_existent(sctx, dir, dir_gen);
1645 * If we have a parent root we need to verify that the parent dir was
1646 * not deleted and then re-created; if it was, then we have no overwrite
1647 * and we can just unlink this entry.
1649 if (sctx->parent_root) {
1650 ret = get_inode_info(sctx->parent_root, dir, NULL, &gen, NULL,
1652 if (ret < 0 && ret != -ENOENT)
1662 ret = lookup_dir_item_inode(sctx->parent_root, dir, name, name_len,
1663 &other_inode, &other_type);
1664 if (ret < 0 && ret != -ENOENT)
1672 * Check if the overwritten ref was already processed. If yes, the ref
1673 * was already unlinked/moved, so we can safely assume that we will not
1674 * overwrite anything at this point in time.
1676 if (other_inode > sctx->send_progress) {
1677 ret = get_inode_info(sctx->parent_root, other_inode, NULL,
1678 who_gen, NULL, NULL, NULL, NULL);
1683 *who_ino = other_inode;
1693 * Checks if the ref was overwritten by an already processed inode. This is
1694 * used by __get_cur_name_and_parent to find out if the ref was orphanized and
1695 * thus the orphan name needs to be used.
1696 * process_recorded_refs also uses it to avoid unlinking of refs that were
1699 static int did_overwrite_ref(struct send_ctx *sctx,
1700 u64 dir, u64 dir_gen,
1701 u64 ino, u64 ino_gen,
1702 const char *name, int name_len)
1709 if (!sctx->parent_root)
1712 ret = is_inode_existent(sctx, dir, dir_gen);
1716 /* check if the ref was overwritten by another ref */
1717 ret = lookup_dir_item_inode(sctx->send_root, dir, name, name_len,
1718 &ow_inode, &other_type);
1719 if (ret < 0 && ret != -ENOENT)
1722 /* was never and will never be overwritten */
1727 ret = get_inode_info(sctx->send_root, ow_inode, NULL, &gen, NULL, NULL,
1732 if (ow_inode == ino && gen == ino_gen) {
1737 /* we know that it is or will be overwritten. check this now */
1738 if (ow_inode < sctx->send_progress)
1748 * Same as did_overwrite_ref, but also checks if it is the first ref of an inode
1749 * that got overwritten. This is used by process_recorded_refs to determine
1750 * if it has to use the path as returned by get_cur_path or the orphan name.
1752 static int did_overwrite_first_ref(struct send_ctx *sctx, u64 ino, u64 gen)
1755 struct fs_path *name = NULL;
1759 if (!sctx->parent_root)
1762 name = fs_path_alloc();
1766 ret = get_first_ref(sctx->parent_root, ino, &dir, &dir_gen, name);
1770 ret = did_overwrite_ref(sctx, dir, dir_gen, ino, gen,
1771 name->start, fs_path_len(name));
1779 * Insert a name cache entry. On 32bit kernels the radix tree index is 32bit,
1780 * so we need to do some special handling in case we have clashes. This function
1781 * takes care of this with the help of name_cache_entry::radix_list.
1782 * In case of error, nce is kfreed.
1784 static int name_cache_insert(struct send_ctx *sctx,
1785 struct name_cache_entry *nce)
1788 struct list_head *nce_head;
1790 nce_head = radix_tree_lookup(&sctx->name_cache,
1791 (unsigned long)nce->ino);
1793 nce_head = kmalloc(sizeof(*nce_head), GFP_NOFS);
1798 INIT_LIST_HEAD(nce_head);
1800 ret = radix_tree_insert(&sctx->name_cache, nce->ino, nce_head);
1807 list_add_tail(&nce->radix_list, nce_head);
1808 list_add_tail(&nce->list, &sctx->name_cache_list);
1809 sctx->name_cache_size++;
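/*
 * For illustration: on a 32bit kernel the cast to unsigned long above
 * truncates the inum, so inodes 0x100000100 and 0x100 both land under radix
 * tree index 0x100. Their entries then share one nce_head list, chained via
 * name_cache_entry::radix_list, and name_cache_search() disambiguates them
 * by comparing the full ino and gen.
 */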
1814 static void name_cache_delete(struct send_ctx *sctx,
1815 struct name_cache_entry *nce)
1817 struct list_head *nce_head;
1819 nce_head = radix_tree_lookup(&sctx->name_cache,
1820 (unsigned long)nce->ino);
1823 list_del(&nce->radix_list);
1824 list_del(&nce->list);
1825 sctx->name_cache_size--;
1827 if (list_empty(nce_head)) {
1828 radix_tree_delete(&sctx->name_cache, (unsigned long)nce->ino);
1833 static struct name_cache_entry *name_cache_search(struct send_ctx *sctx,
1836 struct list_head *nce_head;
1837 struct name_cache_entry *cur;
1839 nce_head = radix_tree_lookup(&sctx->name_cache, (unsigned long)ino);
1843 list_for_each_entry(cur, nce_head, radix_list) {
1844 if (cur->ino == ino && cur->gen == gen)
1851 * Removes the entry from the list and adds it back to the end. This marks the
1852 * entry as recently used so that name_cache_clean_unused does not remove it.
1854 static void name_cache_used(struct send_ctx *sctx, struct name_cache_entry *nce)
1856 list_del(&nce->list);
1857 list_add_tail(&nce->list, &sctx->name_cache_list);
1861 * Remove some entries from the beginning of name_cache_list.
1863 static void name_cache_clean_unused(struct send_ctx *sctx)
1865 struct name_cache_entry *nce;
1867 if (sctx->name_cache_size < SEND_CTX_NAME_CACHE_CLEAN_SIZE)
1870 while (sctx->name_cache_size > SEND_CTX_MAX_NAME_CACHE_SIZE) {
1871 nce = list_entry(sctx->name_cache_list.next,
1872 struct name_cache_entry, list);
1873 name_cache_delete(sctx, nce);
1878 static void name_cache_free(struct send_ctx *sctx)
1880 struct name_cache_entry *nce;
1882 while (!list_empty(&sctx->name_cache_list)) {
1883 nce = list_entry(sctx->name_cache_list.next,
1884 struct name_cache_entry, list);
1885 name_cache_delete(sctx, nce);
1891 * Used by get_cur_path for each ref up to the root.
1892 * Returns 0 if it succeeded.
1893 * Returns 1 if the inode does not exist or got overwritten. In that case, the
1894 * name is an orphan name. This instructs get_cur_path to stop iterating. If 1
1895 * is returned, parent_ino/parent_gen are not guaranteed to be valid.
1896 * Returns <0 in case of error.
1898 static int __get_cur_name_and_parent(struct send_ctx *sctx,
1902 struct fs_path *dest)
1906 struct btrfs_path *path = NULL;
1907 struct name_cache_entry *nce = NULL;
1910 * First check if we already did a call to this function with the same
1911 * ino/gen. If yes, check if the cache entry is still up-to-date. If so,
1912 * return the cached result.
1914 nce = name_cache_search(sctx, ino, gen);
1916 if (ino < sctx->send_progress && nce->need_later_update) {
1917 name_cache_delete(sctx, nce);
1921 name_cache_used(sctx, nce);
1922 *parent_ino = nce->parent_ino;
1923 *parent_gen = nce->parent_gen;
1924 ret = fs_path_add(dest, nce->name, nce->name_len);
1932 path = alloc_path_for_send();
1937 * If the inode does not exist yet, add the orphan name and return 1.
1938 * This should only happen for the parent dir that we determine in
1941 ret = is_inode_existent(sctx, ino, gen);
1946 ret = gen_unique_name(sctx, ino, gen, dest);
1954 * Depending on whether the inode was already processed or not, use
1955 * send_root or parent_root for ref lookup.
1957 if (ino < sctx->send_progress)
1958 ret = get_first_ref(sctx->send_root, ino,
1959 parent_ino, parent_gen, dest);
1961 ret = get_first_ref(sctx->parent_root, ino,
1962 parent_ino, parent_gen, dest);
1967 * Check if the ref was overwritten by an inode's ref that was processed
1968 * earlier. If yes, treat as orphan and return 1.
1970 ret = did_overwrite_ref(sctx, *parent_ino, *parent_gen, ino, gen,
1971 dest->start, dest->end - dest->start);
1975 fs_path_reset(dest);
1976 ret = gen_unique_name(sctx, ino, gen, dest);
1984 * Store the result of the lookup in the name cache.
1986 nce = kmalloc(sizeof(*nce) + fs_path_len(dest) + 1, GFP_NOFS);
1994 nce->parent_ino = *parent_ino;
1995 nce->parent_gen = *parent_gen;
1996 nce->name_len = fs_path_len(dest);
1998 strcpy(nce->name, dest->start);
2000 if (ino < sctx->send_progress)
2001 nce->need_later_update = 0;
2003 nce->need_later_update = 1;
2005 nce_ret = name_cache_insert(sctx, nce);
2008 name_cache_clean_unused(sctx);
2011 btrfs_free_path(path);
2016 * Magic happens here. This function returns the first ref to an inode as it
2017 * would look while receiving the stream at this point in time.
2018 * We walk the path up to the root. For every inode in between, we check if it
2019 * was already processed/sent. If yes, we continue with the parent as found
2020 * in send_root. If not, we continue with the parent as found in parent_root.
2021 * If we encounter an inode that was deleted at this point in time, we use the
2022 * inodes "orphan" name instead of the real name and stop. Same with new inodes
2023 * that were not created yet and overwritten inodes/refs.
2025 * When do we have orphan inodes:
2026 * 1. When an inode is freshly created and thus no valid refs are available yet
2027 * 2. When a directory lost all its refs (deleted) but still has dir items
2028 * inside which were not processed yet (pending for move/delete). If anyone
2029 * tried to get the path to the dir items, it would get a path inside that
2031 * 3. When an inode is moved around or gets new links, it may overwrite the ref
2032 * of an unprocessed inode. If in that case the first ref would be
2033 * overwritten, the overwritten inode gets "orphanized". Later when we
2034 * process this overwritten inode, it is restored at a new place by moving
2037 * sctx->send_progress tells this function at which point in time receiving
2040 static int get_cur_path(struct send_ctx *sctx, u64 ino, u64 gen,
2041 struct fs_path *dest)
2044 struct fs_path *name = NULL;
2045 u64 parent_inode = 0;
2049 name = fs_path_alloc();
2056 fs_path_reset(dest);
2058 while (!stop && ino != BTRFS_FIRST_FREE_OBJECTID) {
2059 fs_path_reset(name);
2061 ret = __get_cur_name_and_parent(sctx, ino, gen,
2062 &parent_inode, &parent_gen, name);
2068 ret = fs_path_add_path(dest, name);
2079 fs_path_unreverse(dest);
2084 * Sends a BTRFS_SEND_C_SUBVOL command/item to userspace
2086 static int send_subvol_begin(struct send_ctx *sctx)
2089 struct btrfs_root *send_root = sctx->send_root;
2090 struct btrfs_root *parent_root = sctx->parent_root;
2091 struct btrfs_path *path;
2092 struct btrfs_key key;
2093 struct btrfs_root_ref *ref;
2094 struct extent_buffer *leaf;
2098 path = alloc_path_for_send();
2102 name = kmalloc(BTRFS_PATH_NAME_MAX, GFP_NOFS);
2104 btrfs_free_path(path);
2108 key.objectid = send_root->objectid;
2109 key.type = BTRFS_ROOT_BACKREF_KEY;
2112 ret = btrfs_search_slot_for_read(send_root->fs_info->tree_root,
2121 leaf = path->nodes[0];
2122 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2123 if (key.type != BTRFS_ROOT_BACKREF_KEY ||
2124 key.objectid != send_root->objectid) {
2128 ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref);
2129 namelen = btrfs_root_ref_name_len(leaf, ref);
2130 read_extent_buffer(leaf, name, (unsigned long)(ref + 1), namelen);
2131 btrfs_release_path(path);
2134 ret = begin_cmd(sctx, BTRFS_SEND_C_SNAPSHOT);
2138 ret = begin_cmd(sctx, BTRFS_SEND_C_SUBVOL);
2143 TLV_PUT_STRING(sctx, BTRFS_SEND_A_PATH, name, namelen);
2144 TLV_PUT_UUID(sctx, BTRFS_SEND_A_UUID,
2145 sctx->send_root->root_item.uuid);
2146 TLV_PUT_U64(sctx, BTRFS_SEND_A_CTRANSID,
2147 le64_to_cpu(sctx->send_root->root_item.ctransid));
2149 TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID,
2150 sctx->parent_root->root_item.uuid);
2151 TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_CTRANSID,
2152 le64_to_cpu(sctx->parent_root->root_item.ctransid));
2155 ret = send_cmd(sctx);
2159 btrfs_free_path(path);
2164 static int send_truncate(struct send_ctx *sctx, u64 ino, u64 gen, u64 size)
2169 verbose_printk("btrfs: send_truncate %llu size=%llu\n", ino, size);
2171 p = fs_path_alloc();
2175 ret = begin_cmd(sctx, BTRFS_SEND_C_TRUNCATE);
2179 ret = get_cur_path(sctx, ino, gen, p);
2182 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
2183 TLV_PUT_U64(sctx, BTRFS_SEND_A_SIZE, size);
2185 ret = send_cmd(sctx);
2193 static int send_chmod(struct send_ctx *sctx, u64 ino, u64 gen, u64 mode)
2198 verbose_printk("btrfs: send_chmod %llu mode=%llu\n", ino, mode);
2200 p = fs_path_alloc();
2204 ret = begin_cmd(sctx, BTRFS_SEND_C_CHMOD);
2208 ret = get_cur_path(sctx, ino, gen, p);
2211 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
2212 TLV_PUT_U64(sctx, BTRFS_SEND_A_MODE, mode & 07777);
2214 ret = send_cmd(sctx);
2222 static int send_chown(struct send_ctx *sctx, u64 ino, u64 gen, u64 uid, u64 gid)
2227 verbose_printk("btrfs: send_chown %llu uid=%llu, gid=%llu\n", ino, uid, gid);
2229 p = fs_path_alloc();
2233 ret = begin_cmd(sctx, BTRFS_SEND_C_CHOWN);
2237 ret = get_cur_path(sctx, ino, gen, p);
2240 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
2241 TLV_PUT_U64(sctx, BTRFS_SEND_A_UID, uid);
2242 TLV_PUT_U64(sctx, BTRFS_SEND_A_GID, gid);
2244 ret = send_cmd(sctx);
2252 static int send_utimes(struct send_ctx *sctx, u64 ino, u64 gen)
2255 struct fs_path *p = NULL;
2256 struct btrfs_inode_item *ii;
2257 struct btrfs_path *path = NULL;
2258 struct extent_buffer *eb;
2259 struct btrfs_key key;
2262 verbose_printk("btrfs: send_utimes %llu\n", ino);
2264 p = fs_path_alloc();
2268 path = alloc_path_for_send();
2275 key.type = BTRFS_INODE_ITEM_KEY;
2277 ret = btrfs_search_slot(NULL, sctx->send_root, &key, path, 0, 0);
2281 eb = path->nodes[0];
2282 slot = path->slots[0];
2283 ii = btrfs_item_ptr(eb, slot, struct btrfs_inode_item);
2285 ret = begin_cmd(sctx, BTRFS_SEND_C_UTIMES);
2289 ret = get_cur_path(sctx, ino, gen, p);
2292 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
2293 TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_ATIME, eb,
2294 btrfs_inode_atime(ii));
2295 TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_MTIME, eb,
2296 btrfs_inode_mtime(ii));
2297 TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_CTIME, eb,
2298 btrfs_inode_ctime(ii));
2299 /* TODO Add otime support when the otime patches get into upstream */
2301 ret = send_cmd(sctx);
2306 btrfs_free_path(path);
2311 * Sends a BTRFS_SEND_C_MKXXX or SYMLINK command to user space. We don't have
2312 * a valid path yet because we did not process the refs yet. So, the inode
2313 * is created as an orphan.
2315 static int send_create_inode(struct send_ctx *sctx, u64 ino)
2324 verbose_printk("btrfs: send_create_inode %llu\n", ino);
2326 p = fs_path_alloc();
2330 ret = get_inode_info(sctx->send_root, ino, NULL, &gen, &mode, NULL,
2335 if (S_ISREG(mode)) {
2336 cmd = BTRFS_SEND_C_MKFILE;
2337 } else if (S_ISDIR(mode)) {
2338 cmd = BTRFS_SEND_C_MKDIR;
2339 } else if (S_ISLNK(mode)) {
2340 cmd = BTRFS_SEND_C_SYMLINK;
2341 } else if (S_ISCHR(mode) || S_ISBLK(mode)) {
2342 cmd = BTRFS_SEND_C_MKNOD;
2343 } else if (S_ISFIFO(mode)) {
2344 cmd = BTRFS_SEND_C_MKFIFO;
2345 } else if (S_ISSOCK(mode)) {
2346 cmd = BTRFS_SEND_C_MKSOCK;
2348 printk(KERN_WARNING "btrfs: unexpected inode type %o\n",
2349 (int)(mode & S_IFMT));
2354 ret = begin_cmd(sctx, cmd);
2358 ret = gen_unique_name(sctx, ino, gen, p);
2362 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
2363 TLV_PUT_U64(sctx, BTRFS_SEND_A_INO, ino);
2365 if (S_ISLNK(mode)) {
2367 ret = read_symlink(sctx->send_root, ino, p);
2370 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_LINK, p);
2371 } else if (S_ISCHR(mode) || S_ISBLK(mode) ||
2372 S_ISFIFO(mode) || S_ISSOCK(mode)) {
2373 TLV_PUT_U64(sctx, BTRFS_SEND_A_RDEV, new_encode_dev(rdev));
2374 TLV_PUT_U64(sctx, BTRFS_SEND_A_MODE, mode);
2377 ret = send_cmd(sctx);
2389 * We need some special handling for inodes that get processed before the parent
2390 * directory got created. See process_recorded_refs for details.
2391 * This function checks whether we already created the dir out of order.
2393 static int did_create_dir(struct send_ctx *sctx, u64 dir)
2396 struct btrfs_path *path = NULL;
2397 struct btrfs_key key;
2398 struct btrfs_key found_key;
2399 struct btrfs_key di_key;
2400 struct extent_buffer *eb;
2401 struct btrfs_dir_item *di;
2404 path = alloc_path_for_send();
2411 key.type = BTRFS_DIR_INDEX_KEY;
2414 ret = btrfs_search_slot_for_read(sctx->send_root, &key, path,
2419 eb = path->nodes[0];
2420 slot = path->slots[0];
2421 btrfs_item_key_to_cpu(eb, &found_key, slot);
2423 if (ret || found_key.objectid != key.objectid ||
2424 found_key.type != key.type) {
2429 di = btrfs_item_ptr(eb, slot, struct btrfs_dir_item);
2430 btrfs_dir_item_key_to_cpu(eb, di, &di_key);
2432 if (di_key.type != BTRFS_ROOT_ITEM_KEY &&
2433 di_key.objectid < sctx->send_progress) {
2438 key.offset = found_key.offset + 1;
2439 btrfs_release_path(path);
2443 btrfs_free_path(path);
2448 * Only creates the inode if it is:
2449 * 1. Not a directory
2450 * 2. Or a directory which was not created already due to out of order
2451 * directories. See did_create_dir and process_recorded_refs for details.
2453 static int send_create_inode_if_needed(struct send_ctx *sctx)
2457 if (S_ISDIR(sctx->cur_inode_mode)) {
2458 ret = did_create_dir(sctx, sctx->cur_ino);
2467 ret = send_create_inode(sctx, sctx->cur_ino);
2475 struct recorded_ref {
2476 struct list_head list;
2479 struct fs_path *full_path;
2487 * We need to process new refs before deleted refs, but compare_tree gives us
2488 * everything mixed. So we first record all refs and later process them.
2489 * This function is a helper to record one ref.
2491 static int record_ref(struct list_head *head, u64 dir,
2492 u64 dir_gen, struct fs_path *path)
2494 struct recorded_ref *ref;
2496 ref = kmalloc(sizeof(*ref), GFP_NOFS);
2501 ref->dir_gen = dir_gen;
2502 ref->full_path = path;
2504 ref->name = (char *)kbasename(ref->full_path->start);
2505 ref->name_len = ref->full_path->end - ref->name;
2506 ref->dir_path = ref->full_path->start;
2507 if (ref->name == ref->full_path->start)
2508 ref->dir_path_len = 0;
2510 ref->dir_path_len = ref->full_path->end -
2511 ref->full_path->start - 1 - ref->name_len;
2513 list_add_tail(&ref->list, head);
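/*
 * For illustration: with a recorded full_path of "a/b/c" the fields above
 * end up as name = "c" (name_len 1) and dir_path = "a/b" (dir_path_len 3),
 * while a top level name like "c" alone gets dir_path_len = 0.
 */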
2517 static int dup_ref(struct recorded_ref *ref, struct list_head *list)
2519 struct recorded_ref *new;
2521 new = kmalloc(sizeof(*ref), GFP_NOFS);
2525 new->dir = ref->dir;
2526 new->dir_gen = ref->dir_gen;
2527 new->full_path = NULL;
2528 INIT_LIST_HEAD(&new->list);
2529 list_add_tail(&new->list, list);
2533 static void __free_recorded_refs(struct list_head *head)
2535 struct recorded_ref *cur;
2537 while (!list_empty(head)) {
2538 cur = list_entry(head->next, struct recorded_ref, list);
2539 fs_path_free(cur->full_path);
2540 list_del(&cur->list);
2545 static void free_recorded_refs(struct send_ctx *sctx)
2547 __free_recorded_refs(&sctx->new_refs);
2548 __free_recorded_refs(&sctx->deleted_refs);
2552 * Renames/moves a file/dir to its orphan name. Used when the first
2553 * ref of an unprocessed inode gets overwritten and for all non-empty
2556 static int orphanize_inode(struct send_ctx *sctx, u64 ino, u64 gen,
2557 struct fs_path *path)
2560 struct fs_path *orphan;
2562 orphan = fs_path_alloc();
2566 ret = gen_unique_name(sctx, ino, gen, orphan);
2570 ret = send_rename(sctx, path, orphan);
2573 fs_path_free(orphan);
2578 * Returns 1 if a directory can be removed at this point in time.
2579 * We check this by iterating all dir items and checking if the inode behind
2580 * the dir item was already processed.
2582 static int can_rmdir(struct send_ctx *sctx, u64 dir, u64 send_progress)
2585 struct btrfs_root *root = sctx->parent_root;
2586 struct btrfs_path *path;
2587 struct btrfs_key key;
2588 struct btrfs_key found_key;
2589 struct btrfs_key loc;
2590 struct btrfs_dir_item *di;
2593 * Don't try to rmdir the top/root subvolume dir.
2595 if (dir == BTRFS_FIRST_FREE_OBJECTID)
2598 path = alloc_path_for_send();
2603 key.type = BTRFS_DIR_INDEX_KEY;
2607 ret = btrfs_search_slot_for_read(root, &key, path, 1, 0);
2611 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
2614 if (ret || found_key.objectid != key.objectid ||
2615 found_key.type != key.type) {
2619 di = btrfs_item_ptr(path->nodes[0], path->slots[0],
2620 struct btrfs_dir_item);
2621 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &loc);
2623 if (loc.objectid > send_progress) {
2628 btrfs_release_path(path);
2629 key.offset = found_key.offset + 1;
2635 btrfs_free_path(path);
2640 * This does all the move/link/unlink/rmdir magic.
2642 static int process_recorded_refs(struct send_ctx *sctx)
2645 struct recorded_ref *cur;
2646 struct recorded_ref *cur2;
2647 struct list_head check_dirs;
2648 struct fs_path *valid_path = NULL;
2651 int did_overwrite = 0;
2654 verbose_printk("btrfs: process_recorded_refs %llu\n", sctx->cur_ino);
2657 * This should never happen as the root dir always has the same ref
2658 * which is always '..'
2660 BUG_ON(sctx->cur_ino <= BTRFS_FIRST_FREE_OBJECTID);
2661 INIT_LIST_HEAD(&check_dirs);
2663 valid_path = fs_path_alloc();
2670 * First, check if the first ref of the current inode was overwritten
2671 * before. If yes, we know that the current inode was already orphanized
2672 * and thus use the orphan name. If not, we can use get_cur_path to
2673 * get the path of the first ref as it would look while receiving at
2674 * this point in time.
2675 * New inodes are always orphans at the beginning, so we force the use of the
2676 * orphan name in this case.
2677 * The first ref is stored in valid_path and will be updated if it
2678 * gets moved around.
2680 if (!sctx->cur_inode_new) {
2681 ret = did_overwrite_first_ref(sctx, sctx->cur_ino,
2682 sctx->cur_inode_gen);
2688 if (sctx->cur_inode_new || did_overwrite) {
2689 ret = gen_unique_name(sctx, sctx->cur_ino,
2690 sctx->cur_inode_gen, valid_path);
2695 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen,
2701 list_for_each_entry(cur, &sctx->new_refs, list) {
2703 * We may have refs where the parent directory does not exist
2704 * yet. This happens if the parent directory's inum is higher
2705 * than the current inum. To handle this case, we create the
2706 * parent directory out of order. But we need to check if this
2707 * did already happen before due to other refs in the same dir.
2709 ret = get_cur_inode_state(sctx, cur->dir, cur->dir_gen);
2712 if (ret == inode_state_will_create) {
2715 * First check if any of the current inode's refs did
2716 * already create the dir.
2718 list_for_each_entry(cur2, &sctx->new_refs, list) {
2721 if (cur2->dir == cur->dir) {
2728 * If that did not happen, check if a previous inode
2729 * did already create the dir.
2732 ret = did_create_dir(sctx, cur->dir);
2736 ret = send_create_inode(sctx, cur->dir);
2743 * Check if this new ref would overwrite the first ref of
2744 * another unprocessed inode. If yes, orphanize the
2745 * overwritten inode. If we find an overwritten ref that is
2746 * not the first ref, simply unlink it.
2748 ret = will_overwrite_ref(sctx, cur->dir, cur->dir_gen,
2749 cur->name, cur->name_len,
2750 &ow_inode, &ow_gen);
2754 ret = is_first_ref(sctx->parent_root,
2755 ow_inode, cur->dir, cur->name,
2760 ret = orphanize_inode(sctx, ow_inode, ow_gen,
2765 ret = send_unlink(sctx, cur->full_path);
2772 * link/move the ref to the new place. If we have an orphan
2773 * inode, move it and update valid_path. If not, link or move
2774 * it depending on the inode mode.
2777 ret = send_rename(sctx, valid_path, cur->full_path);
2781 ret = fs_path_copy(valid_path, cur->full_path);
2785 if (S_ISDIR(sctx->cur_inode_mode)) {
2787 * Dirs can't be linked, so move it. For moved
2788 * dirs, we always have one new and one deleted
2789 * ref. The deleted ref is ignored later.
2791 ret = send_rename(sctx, valid_path,
2795 ret = fs_path_copy(valid_path, cur->full_path);
2799 ret = send_link(sctx, cur->full_path,
2805 ret = dup_ref(cur, &check_dirs);
2810 if (S_ISDIR(sctx->cur_inode_mode) && sctx->cur_inode_deleted) {
2812 * Check if we can already rmdir the directory. If not,
2813 * orphanize it. For every dir item inside that gets deleted
2814 * later, we do this check again and rmdir it then if possible.
2815 * See the use of check_dirs for more details.
2817 ret = can_rmdir(sctx, sctx->cur_ino, sctx->cur_ino);
2821 ret = send_rmdir(sctx, valid_path);
2824 } else if (!is_orphan) {
2825 ret = orphanize_inode(sctx, sctx->cur_ino,
2826 sctx->cur_inode_gen, valid_path);
2832 list_for_each_entry(cur, &sctx->deleted_refs, list) {
2833 ret = dup_ref(cur, &check_dirs);
2837 } else if (S_ISDIR(sctx->cur_inode_mode) &&
2838 !list_empty(&sctx->deleted_refs)) {
2840 * We have a moved dir. Add the old parent to check_dirs
2842 cur = list_entry(sctx->deleted_refs.next, struct recorded_ref,
2844 ret = dup_ref(cur, &check_dirs);
2847 } else if (!S_ISDIR(sctx->cur_inode_mode)) {
2849 * We have a non-dir inode. Go through all deleted refs and
2850 * unlink them if they were not already overwritten by other
2853 list_for_each_entry(cur, &sctx->deleted_refs, list) {
2854 ret = did_overwrite_ref(sctx, cur->dir, cur->dir_gen,
2855 sctx->cur_ino, sctx->cur_inode_gen,
2856 cur->name, cur->name_len);
2860 ret = send_unlink(sctx, cur->full_path);
2864 ret = dup_ref(cur, &check_dirs);
2869 * If the inode is still orphan, unlink the orphan. This may
2870 * happen when a previous inode did overwrite the first ref
2871 * of this inode and no new refs were added for the current
2872 * inode. Unlinking does not mean that the inode is deleted in
2873 * all cases. There may still be links to this inode in other
2877 ret = send_unlink(sctx, valid_path);
2884 * We did collect all parent dirs where cur_inode was once located. We
2885 * now go through all these dirs and check if they are pending for
2886 * deletion and if it's finally possible to perform the rmdir now.
2887 * We also update the inode stats of the parent dirs here.
2889 list_for_each_entry(cur, &check_dirs, list) {
2891 * In case we had refs into dirs that were not processed yet,
2892 * we don't need to do the utime and rmdir logic for these dirs.
2893 * The dir will be processed later.
2895 if (cur->dir > sctx->cur_ino)
2898 ret = get_cur_inode_state(sctx, cur->dir, cur->dir_gen);
2902 if (ret == inode_state_did_create ||
2903 ret == inode_state_no_change) {
2904 /* TODO delayed utimes */
2905 ret = send_utimes(sctx, cur->dir, cur->dir_gen);
2908 } else if (ret == inode_state_did_delete) {
2909 ret = can_rmdir(sctx, cur->dir, sctx->cur_ino);
2913 ret = get_cur_path(sctx, cur->dir,
2914 cur->dir_gen, valid_path);
2917 ret = send_rmdir(sctx, valid_path);
2927 __free_recorded_refs(&check_dirs);
2928 free_recorded_refs(sctx);
2929 fs_path_free(valid_path);
2933 static int __record_new_ref(int num, u64 dir, int index,
2934 struct fs_path *name,
2938 struct send_ctx *sctx = ctx;
2942 p = fs_path_alloc();
2946 ret = get_inode_info(sctx->send_root, dir, NULL, &gen, NULL, NULL,
2951 ret = get_cur_path(sctx, dir, gen, p);
2954 ret = fs_path_add_path(p, name);
2958 ret = record_ref(&sctx->new_refs, dir, gen, p);
2966 static int __record_deleted_ref(int num, u64 dir, int index,
2967 struct fs_path *name,
2971 struct send_ctx *sctx = ctx;
2975 p = fs_path_alloc();
2979 ret = get_inode_info(sctx->parent_root, dir, NULL, &gen, NULL, NULL,
2984 ret = get_cur_path(sctx, dir, gen, p);
2987 ret = fs_path_add_path(p, name);
2991 ret = record_ref(&sctx->deleted_refs, dir, gen, p);
2999 static int record_new_ref(struct send_ctx *sctx)
3003 ret = iterate_inode_ref(sctx->send_root, sctx->left_path,
3004 sctx->cmp_key, 0, __record_new_ref, sctx);
3013 static int record_deleted_ref(struct send_ctx *sctx)
3017 ret = iterate_inode_ref(sctx->parent_root, sctx->right_path,
3018 sctx->cmp_key, 0, __record_deleted_ref, sctx);
3027 struct find_ref_ctx {
3030 struct btrfs_root *root;
3031 struct fs_path *name;
3035 static int __find_iref(int num, u64 dir, int index,
3036 struct fs_path *name,
3039 struct find_ref_ctx *ctx = ctx_;
3043 if (dir == ctx->dir && fs_path_len(name) == fs_path_len(ctx->name) &&
3044 strncmp(name->start, ctx->name->start, fs_path_len(name)) == 0) {
3046 * To avoid doing extra lookups we'll only do this if everything
3049 ret = get_inode_info(ctx->root, dir, NULL, &dir_gen, NULL,
3053 if (dir_gen != ctx->dir_gen)
3055 ctx->found_idx = num;
3061 static int find_iref(struct btrfs_root *root,
3062 struct btrfs_path *path,
3063 struct btrfs_key *key,
3064 u64 dir, u64 dir_gen, struct fs_path *name)
3067 struct find_ref_ctx ctx;
3071 ctx.dir_gen = dir_gen;
3075 ret = iterate_inode_ref(root, path, key, 0, __find_iref, &ctx);
3079 if (ctx.found_idx == -1)
3082 return ctx.found_idx;
3085 static int __record_changed_new_ref(int num, u64 dir, int index,
3086 struct fs_path *name,
3091 struct send_ctx *sctx = ctx;
3093 ret = get_inode_info(sctx->send_root, dir, NULL, &dir_gen, NULL,
3098 ret = find_iref(sctx->parent_root, sctx->right_path,
3099 sctx->cmp_key, dir, dir_gen, name);
3101 ret = __record_new_ref(num, dir, index, name, sctx);
3108 static int __record_changed_deleted_ref(int num, u64 dir, int index,
3109 struct fs_path *name,
3114 struct send_ctx *sctx = ctx;
3116 ret = get_inode_info(sctx->parent_root, dir, NULL, &dir_gen, NULL,
3121 ret = find_iref(sctx->send_root, sctx->left_path, sctx->cmp_key,
3122 dir, dir_gen, name);
3124 ret = __record_deleted_ref(num, dir, index, name, sctx);
3131 static int record_changed_ref(struct send_ctx *sctx)
3135 ret = iterate_inode_ref(sctx->send_root, sctx->left_path,
3136 sctx->cmp_key, 0, __record_changed_new_ref, sctx);
3139 ret = iterate_inode_ref(sctx->parent_root, sctx->right_path,
3140 sctx->cmp_key, 0, __record_changed_deleted_ref, sctx);
3150 * Record and process all refs at once. Needed when an inode changes the
3151 * generation number, which means that it was deleted and recreated.
3153 static int process_all_refs(struct send_ctx *sctx,
3154 enum btrfs_compare_tree_result cmd)
3157 struct btrfs_root *root;
3158 struct btrfs_path *path;
3159 struct btrfs_key key;
3160 struct btrfs_key found_key;
3161 struct extent_buffer *eb;
3163 iterate_inode_ref_t cb;
3165 path = alloc_path_for_send();
3169 if (cmd == BTRFS_COMPARE_TREE_NEW) {
3170 root = sctx->send_root;
3171 cb = __record_new_ref;
3172 } else if (cmd == BTRFS_COMPARE_TREE_DELETED) {
3173 root = sctx->parent_root;
3174 cb = __record_deleted_ref;
3179 key.objectid = sctx->cmp_key->objectid;
3180 key.type = BTRFS_INODE_REF_KEY;
3183 ret = btrfs_search_slot_for_read(root, &key, path, 1, 0);
3189 eb = path->nodes[0];
3190 slot = path->slots[0];
3191 btrfs_item_key_to_cpu(eb, &found_key, slot);
3193 if (found_key.objectid != key.objectid ||
3194 (found_key.type != BTRFS_INODE_REF_KEY &&
3195 found_key.type != BTRFS_INODE_EXTREF_KEY))
3198 ret = iterate_inode_ref(root, path, &found_key, 0, cb, sctx);
3199 btrfs_release_path(path);
3203 key.offset = found_key.offset + 1;
3205 btrfs_release_path(path);
3207 ret = process_recorded_refs(sctx);
3210 btrfs_free_path(path);
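/*
 * Emit a SET_XATTR command: the inode path, the xattr name and the xattr
 * data are sent as TLV attributes of the command.
 */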
3214 static int send_set_xattr(struct send_ctx *sctx,
3215 struct fs_path *path,
3216 const char *name, int name_len,
3217 const char *data, int data_len)
3221 ret = begin_cmd(sctx, BTRFS_SEND_C_SET_XATTR);
3225 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
3226 TLV_PUT_STRING(sctx, BTRFS_SEND_A_XATTR_NAME, name, name_len);
3227 TLV_PUT(sctx, BTRFS_SEND_A_XATTR_DATA, data, data_len);
3229 ret = send_cmd(sctx);
3236 static int send_remove_xattr(struct send_ctx *sctx,
3237 struct fs_path *path,
3238 const char *name, int name_len)
3242 ret = begin_cmd(sctx, BTRFS_SEND_C_REMOVE_XATTR);
3246 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
3247 TLV_PUT_STRING(sctx, BTRFS_SEND_A_XATTR_NAME, name, name_len);
3249 ret = send_cmd(sctx);
3256 static int __process_new_xattr(int num, struct btrfs_key *di_key,
3257 const char *name, int name_len,
3258 const char *data, int data_len,
3262 struct send_ctx *sctx = ctx;
3264 posix_acl_xattr_header dummy_acl;
3266 p = fs_path_alloc();
3271	 * This hack is needed because empty ACLs are stored as zero byte
3272	 * data in xattrs. The problem is that receiving these zero byte
3273	 * ACLs will fail later. To work around this, we send a dummy ACL
3274	 * list that contains only the version number and no entries.
3276 if (!strncmp(name, XATTR_NAME_POSIX_ACL_ACCESS, name_len) ||
3277 !strncmp(name, XATTR_NAME_POSIX_ACL_DEFAULT, name_len)) {
3278 if (data_len == 0) {
3279 dummy_acl.a_version =
3280 cpu_to_le32(POSIX_ACL_XATTR_VERSION);
3281 data = (char *)&dummy_acl;
3282 data_len = sizeof(dummy_acl);
3286 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
3290 ret = send_set_xattr(sctx, p, name, name_len, data, data_len);
3297 static int __process_deleted_xattr(int num, struct btrfs_key *di_key,
3298 const char *name, int name_len,
3299 const char *data, int data_len,
3303 struct send_ctx *sctx = ctx;
3306 p = fs_path_alloc();
3310 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
3314 ret = send_remove_xattr(sctx, p, name, name_len);
3321 static int process_new_xattr(struct send_ctx *sctx)
3325 ret = iterate_dir_item(sctx->send_root, sctx->left_path,
3326 sctx->cmp_key, __process_new_xattr, sctx);
3331 static int process_deleted_xattr(struct send_ctx *sctx)
3335 ret = iterate_dir_item(sctx->parent_root, sctx->right_path,
3336 sctx->cmp_key, __process_deleted_xattr, sctx);
3341 struct find_xattr_ctx {
3349 static int __find_xattr(int num, struct btrfs_key *di_key,
3350 const char *name, int name_len,
3351 const char *data, int data_len,
3352 u8 type, void *vctx)
3354 struct find_xattr_ctx *ctx = vctx;
3356 if (name_len == ctx->name_len &&
3357 strncmp(name, ctx->name, name_len) == 0) {
3358 ctx->found_idx = num;
3359 ctx->found_data_len = data_len;
3360 ctx->found_data = kmemdup(data, data_len, GFP_NOFS);
3361 if (!ctx->found_data)
3368 static int find_xattr(struct btrfs_root *root,
3369 struct btrfs_path *path,
3370 struct btrfs_key *key,
3371 const char *name, int name_len,
3372 char **data, int *data_len)
3375 struct find_xattr_ctx ctx;
3378 ctx.name_len = name_len;
3380 ctx.found_data = NULL;
3381 ctx.found_data_len = 0;
3383 ret = iterate_dir_item(root, path, key, __find_xattr, &ctx);
3387 if (ctx.found_idx == -1)
3390 *data = ctx.found_data;
3391 *data_len = ctx.found_data_len;
3393 kfree(ctx.found_data);
3395 return ctx.found_idx;
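/*
 * A changed xattr only needs to be re-sent if it does not exist in the
 * parent root or if its data differs from the parent root's copy.
 */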
3399 static int __process_changed_new_xattr(int num, struct btrfs_key *di_key,
3400 const char *name, int name_len,
3401 const char *data, int data_len,
3405 struct send_ctx *sctx = ctx;
3406 char *found_data = NULL;
3407 int found_data_len = 0;
3409 ret = find_xattr(sctx->parent_root, sctx->right_path,
3410 sctx->cmp_key, name, name_len, &found_data,
3412 if (ret == -ENOENT) {
3413 ret = __process_new_xattr(num, di_key, name, name_len, data,
3414 data_len, type, ctx);
3415 } else if (ret >= 0) {
3416 if (data_len != found_data_len ||
3417 memcmp(data, found_data, data_len)) {
3418 ret = __process_new_xattr(num, di_key, name, name_len,
3419 data, data_len, type, ctx);
3429 static int __process_changed_deleted_xattr(int num, struct btrfs_key *di_key,
3430 const char *name, int name_len,
3431 const char *data, int data_len,
3435 struct send_ctx *sctx = ctx;
3437 ret = find_xattr(sctx->send_root, sctx->left_path, sctx->cmp_key,
3438 name, name_len, NULL, NULL);
3440 ret = __process_deleted_xattr(num, di_key, name, name_len, data,
3441 data_len, type, ctx);
3448 static int process_changed_xattr(struct send_ctx *sctx)
3452 ret = iterate_dir_item(sctx->send_root, sctx->left_path,
3453 sctx->cmp_key, __process_changed_new_xattr, sctx);
3456 ret = iterate_dir_item(sctx->parent_root, sctx->right_path,
3457 sctx->cmp_key, __process_changed_deleted_xattr, sctx);
3463 static int process_all_new_xattrs(struct send_ctx *sctx)
3466 struct btrfs_root *root;
3467 struct btrfs_path *path;
3468 struct btrfs_key key;
3469 struct btrfs_key found_key;
3470 struct extent_buffer *eb;
3473 path = alloc_path_for_send();
3477 root = sctx->send_root;
3479 key.objectid = sctx->cmp_key->objectid;
3480 key.type = BTRFS_XATTR_ITEM_KEY;
3483 ret = btrfs_search_slot_for_read(root, &key, path, 1, 0);
3491 eb = path->nodes[0];
3492 slot = path->slots[0];
3493 btrfs_item_key_to_cpu(eb, &found_key, slot);
3495 if (found_key.objectid != key.objectid ||
3496 found_key.type != key.type) {
3501 ret = iterate_dir_item(root, path, &found_key,
3502 __process_new_xattr, sctx);
3506 btrfs_release_path(path);
3507 key.offset = found_key.offset + 1;
3511 btrfs_free_path(path);
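/*
 * Read len bytes starting at offset from the currently processed inode into
 * sctx->read_buf, going through the page cache page by page. The length is
 * clamped to the inode size and the number of bytes actually read is
 * returned.
 */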
3515 static ssize_t fill_read_buf(struct send_ctx *sctx, u64 offset, u32 len)
3517 struct btrfs_root *root = sctx->send_root;
3518 struct btrfs_fs_info *fs_info = root->fs_info;
3519 struct inode *inode;
3522 struct btrfs_key key;
3523 pgoff_t index = offset >> PAGE_CACHE_SHIFT;
3525 unsigned pg_offset = offset & ~PAGE_CACHE_MASK;
3528 key.objectid = sctx->cur_ino;
3529 key.type = BTRFS_INODE_ITEM_KEY;
3532 inode = btrfs_iget(fs_info->sb, &key, root, NULL);
3534 return PTR_ERR(inode);
3536 if (offset + len > i_size_read(inode)) {
3537 if (offset > i_size_read(inode))
3540		len = i_size_read(inode) - offset;
3545 last_index = (offset + len - 1) >> PAGE_CACHE_SHIFT;
3546 while (index <= last_index) {
3547 unsigned cur_len = min_t(unsigned, len,
3548 PAGE_CACHE_SIZE - pg_offset);
3549 page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
3555 if (!PageUptodate(page)) {
3556 btrfs_readpage(NULL, page);
3558 if (!PageUptodate(page)) {
3560 page_cache_release(page);
3567 memcpy(sctx->read_buf + ret, addr + pg_offset, cur_len);
3570 page_cache_release(page);
3582 * Read some bytes from the current inode/file and send a write command to
3585 static int send_write(struct send_ctx *sctx, u64 offset, u32 len)
3589 ssize_t num_read = 0;
3591 p = fs_path_alloc();
3595 verbose_printk("btrfs: send_write offset=%llu, len=%d\n", offset, len);
3597 num_read = fill_read_buf(sctx, offset, len);
3598 if (num_read <= 0) {
3604 ret = begin_cmd(sctx, BTRFS_SEND_C_WRITE);
3608 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
3612 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
3613 TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
3614 TLV_PUT(sctx, BTRFS_SEND_A_DATA, sctx->read_buf, num_read);
3616 ret = send_cmd(sctx);
3627 * Send a clone command to user space.
3629 static int send_clone(struct send_ctx *sctx,
3630 u64 offset, u32 len,
3631 struct clone_root *clone_root)
3637 verbose_printk("btrfs: send_clone offset=%llu, len=%d, clone_root=%llu, "
3638 "clone_inode=%llu, clone_offset=%llu\n", offset, len,
3639 clone_root->root->objectid, clone_root->ino,
3640 clone_root->offset);
3642 p = fs_path_alloc();
3646 ret = begin_cmd(sctx, BTRFS_SEND_C_CLONE);
3650 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
3654 TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
3655 TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_LEN, len);
3656 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
3658 if (clone_root->root == sctx->send_root) {
3659 ret = get_inode_info(sctx->send_root, clone_root->ino, NULL,
3660 &gen, NULL, NULL, NULL, NULL);
3663 ret = get_cur_path(sctx, clone_root->ino, gen, p);
3665 ret = get_inode_path(clone_root->root, clone_root->ino, p);
3670 TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID,
3671 clone_root->root->root_item.uuid);
3672 TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_CTRANSID,
3673 le64_to_cpu(clone_root->root->root_item.ctransid));
3674 TLV_PUT_PATH(sctx, BTRFS_SEND_A_CLONE_PATH, p);
3675 TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_OFFSET,
3676 clone_root->offset);
3678 ret = send_cmd(sctx);
3687 * Send an update extent command to user space.
3689 static int send_update_extent(struct send_ctx *sctx,
3690 u64 offset, u32 len)
3695 p = fs_path_alloc();
3699 ret = begin_cmd(sctx, BTRFS_SEND_C_UPDATE_EXTENT);
3703 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
3707 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
3708 TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
3709 TLV_PUT_U64(sctx, BTRFS_SEND_A_SIZE, len);
3711 ret = send_cmd(sctx);
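/*
 * Send a hole as a sequence of write commands whose payload is the zeroed
 * read_buf, covering the range from cur_inode_last_extent up to 'end' in
 * chunks of at most BTRFS_SEND_READ_SIZE bytes.
 */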
3719 static int send_hole(struct send_ctx *sctx, u64 end)
3721 struct fs_path *p = NULL;
3722 u64 offset = sctx->cur_inode_last_extent;
3726 p = fs_path_alloc();
3729 memset(sctx->read_buf, 0, BTRFS_SEND_READ_SIZE);
3730 while (offset < end) {
3731 len = min_t(u64, end - offset, BTRFS_SEND_READ_SIZE);
3733 ret = begin_cmd(sctx, BTRFS_SEND_C_WRITE);
3736 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
3739 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
3740 TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
3741 TLV_PUT(sctx, BTRFS_SEND_A_DATA, sctx->read_buf, len);
3742 ret = send_cmd(sctx);
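/*
 * Transfer one file extent: clone it when a usable clone source was found,
 * send only an update extent command when BTRFS_SEND_FLAG_NO_FILE_DATA is
 * set, otherwise send the data with write commands of at most
 * BTRFS_SEND_READ_SIZE bytes each. Inline extents are rounded up to the page
 * size and the length is clamped to the inode size.
 */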
3752 static int send_write_or_clone(struct send_ctx *sctx,
3753 struct btrfs_path *path,
3754 struct btrfs_key *key,
3755 struct clone_root *clone_root)
3758 struct btrfs_file_extent_item *ei;
3759 u64 offset = key->offset;
3765 ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
3766 struct btrfs_file_extent_item);
3767 type = btrfs_file_extent_type(path->nodes[0], ei);
3768 if (type == BTRFS_FILE_EXTENT_INLINE) {
3769 len = btrfs_file_extent_inline_len(path->nodes[0], ei);
3771		 * It is possible the inline item won't cover the whole page,
3772		 * but there may be items after this page. Make sure to send
3773		 * the whole thing.
3775 len = PAGE_CACHE_ALIGN(len);
3777 len = btrfs_file_extent_num_bytes(path->nodes[0], ei);
3780 if (offset + len > sctx->cur_inode_size)
3781 len = sctx->cur_inode_size - offset;
3788 ret = send_clone(sctx, offset, len, clone_root);
3789 } else if (sctx->flags & BTRFS_SEND_FLAG_NO_FILE_DATA) {
3790 ret = send_update_extent(sctx, offset, len);
3794 if (l > BTRFS_SEND_READ_SIZE)
3795 l = BTRFS_SEND_READ_SIZE;
3796 ret = send_write(sctx, pos + offset, l);
3809 static int is_extent_unchanged(struct send_ctx *sctx,
3810 struct btrfs_path *left_path,
3811 struct btrfs_key *ekey)
3814 struct btrfs_key key;
3815 struct btrfs_path *path = NULL;
3816 struct extent_buffer *eb;
3818 struct btrfs_key found_key;
3819 struct btrfs_file_extent_item *ei;
3824 u64 left_offset_fixed;
3832 path = alloc_path_for_send();
3836 eb = left_path->nodes[0];
3837 slot = left_path->slots[0];
3838 ei = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
3839 left_type = btrfs_file_extent_type(eb, ei);
3841 if (left_type != BTRFS_FILE_EXTENT_REG) {
3845 left_disknr = btrfs_file_extent_disk_bytenr(eb, ei);
3846 left_len = btrfs_file_extent_num_bytes(eb, ei);
3847 left_offset = btrfs_file_extent_offset(eb, ei);
3848 left_gen = btrfs_file_extent_generation(eb, ei);
3851	 * The following comments refer to this diagram. L is the left
3852	 * extent that we are checking at the moment. 1-8 are the right
3853	 * extents that we iterate over.
3856 * |-1-|-2a-|-3-|-4-|-5-|-6-|
3859 * |--1--|-2b-|...(same as above)
3861 * Alternative situation. Happens on files where extents got split.
3863 * |-----------7-----------|-6-|
3865 * Alternative situation. Happens on files which got larger.
3868 * Nothing follows after 8.
3871 key.objectid = ekey->objectid;
3872 key.type = BTRFS_EXTENT_DATA_KEY;
3873 key.offset = ekey->offset;
3874 ret = btrfs_search_slot_for_read(sctx->parent_root, &key, path, 0, 0);
3883 * Handle special case where the right side has no extents at all.
3885 eb = path->nodes[0];
3886 slot = path->slots[0];
3887 btrfs_item_key_to_cpu(eb, &found_key, slot);
3888 if (found_key.objectid != key.objectid ||
3889 found_key.type != key.type) {
3890 /* If we're a hole then just pretend nothing changed */
3891 ret = (left_disknr) ? 0 : 1;
3896 * We're now on 2a, 2b or 7.
3899 while (key.offset < ekey->offset + left_len) {
3900 ei = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
3901 right_type = btrfs_file_extent_type(eb, ei);
3902 if (right_type != BTRFS_FILE_EXTENT_REG) {
3907 right_disknr = btrfs_file_extent_disk_bytenr(eb, ei);
3908 right_len = btrfs_file_extent_num_bytes(eb, ei);
3909 right_offset = btrfs_file_extent_offset(eb, ei);
3910 right_gen = btrfs_file_extent_generation(eb, ei);
3913 * Are we at extent 8? If yes, we know the extent is changed.
3914 * This may only happen on the first iteration.
3916 if (found_key.offset + right_len <= ekey->offset) {
3917 /* If we're a hole just pretend nothing changed */
3918 ret = (left_disknr) ? 0 : 1;
3922 left_offset_fixed = left_offset;
3923 if (key.offset < ekey->offset) {
3924 /* Fix the right offset for 2a and 7. */
3925 right_offset += ekey->offset - key.offset;
3927			/* Fix the left offset for everything behind 2a and 2b */
3928 left_offset_fixed += key.offset - ekey->offset;
3932 * Check if we have the same extent.
3934 if (left_disknr != right_disknr ||
3935 left_offset_fixed != right_offset ||
3936 left_gen != right_gen) {
3942 * Go to the next extent.
3944 ret = btrfs_next_item(sctx->parent_root, path);
3948 eb = path->nodes[0];
3949 slot = path->slots[0];
3950 btrfs_item_key_to_cpu(eb, &found_key, slot);
3952 if (ret || found_key.objectid != key.objectid ||
3953 found_key.type != key.type) {
3954 key.offset += right_len;
3957 if (found_key.offset != key.offset + right_len) {
3965 * We're now behind the left extent (treat as unchanged) or at the end
3966 * of the right side (treat as changed).
3968 if (key.offset >= ekey->offset + left_len)
3975 btrfs_free_path(path);
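/*
 * Look up the file extent item covering the given offset in the send root
 * and record its end offset (aligned to the sectorsize for inline extents)
 * in sctx->cur_inode_last_extent.
 */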
3979 static int get_last_extent(struct send_ctx *sctx, u64 offset)
3981 struct btrfs_path *path;
3982 struct btrfs_root *root = sctx->send_root;
3983 struct btrfs_file_extent_item *fi;
3984 struct btrfs_key key;
3989 path = alloc_path_for_send();
3993 sctx->cur_inode_last_extent = 0;
3995 key.objectid = sctx->cur_ino;
3996 key.type = BTRFS_EXTENT_DATA_KEY;
3997 key.offset = offset;
3998 ret = btrfs_search_slot_for_read(root, &key, path, 0, 1);
4002 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
4003 if (key.objectid != sctx->cur_ino || key.type != BTRFS_EXTENT_DATA_KEY)
4006 fi = btrfs_item_ptr(path->nodes[0], path->slots[0],
4007 struct btrfs_file_extent_item);
4008 type = btrfs_file_extent_type(path->nodes[0], fi);
4009 if (type == BTRFS_FILE_EXTENT_INLINE) {
4010 u64 size = btrfs_file_extent_inline_len(path->nodes[0], fi);
4011 extent_end = ALIGN(key.offset + size,
4012 sctx->send_root->sectorsize);
4014 extent_end = key.offset +
4015 btrfs_file_extent_num_bytes(path->nodes[0], fi);
4017 sctx->cur_inode_last_extent = extent_end;
4019 btrfs_free_path(path);
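/*
 * When hole detection is needed, check for a gap between the end of the last
 * processed extent and the start of the current one and fill it with
 * send_hole(); afterwards remember the end of the current extent.
 */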
4023 static int maybe_send_hole(struct send_ctx *sctx, struct btrfs_path *path,
4024 struct btrfs_key *key)
4026 struct btrfs_file_extent_item *fi;
4031 if (sctx->cur_ino != key->objectid || !need_send_hole(sctx))
4034 if (sctx->cur_inode_last_extent == (u64)-1) {
4035 ret = get_last_extent(sctx, key->offset - 1);
4040 fi = btrfs_item_ptr(path->nodes[0], path->slots[0],
4041 struct btrfs_file_extent_item);
4042 type = btrfs_file_extent_type(path->nodes[0], fi);
4043 if (type == BTRFS_FILE_EXTENT_INLINE) {
4044 u64 size = btrfs_file_extent_inline_len(path->nodes[0], fi);
4045 extent_end = ALIGN(key->offset + size,
4046 sctx->send_root->sectorsize);
4048 extent_end = key->offset +
4049 btrfs_file_extent_num_bytes(path->nodes[0], fi);
4051 if (sctx->cur_inode_last_extent < key->offset)
4052 ret = send_hole(sctx, key->offset);
4053 sctx->cur_inode_last_extent = extent_end;
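/*
 * Process one file extent item: symlinks carry no data, extents unchanged
 * relative to the parent root are skipped, preallocated extents and holes
 * are left alone, and everything else is cloned or written via
 * send_write_or_clone().
 */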
4057 static int process_extent(struct send_ctx *sctx,
4058 struct btrfs_path *path,
4059 struct btrfs_key *key)
4061 struct clone_root *found_clone = NULL;
4064 if (S_ISLNK(sctx->cur_inode_mode))
4067 if (sctx->parent_root && !sctx->cur_inode_new) {
4068 ret = is_extent_unchanged(sctx, path, key);
4076 struct btrfs_file_extent_item *ei;
4079 ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
4080 struct btrfs_file_extent_item);
4081 type = btrfs_file_extent_type(path->nodes[0], ei);
4082 if (type == BTRFS_FILE_EXTENT_PREALLOC ||
4083 type == BTRFS_FILE_EXTENT_REG) {
4085 * The send spec does not have a prealloc command yet,
4086 * so just leave a hole for prealloc'ed extents until
4087 * we have enough commands queued up to justify rev'ing
4090 if (type == BTRFS_FILE_EXTENT_PREALLOC) {
4095 /* Have a hole, just skip it. */
4096 if (btrfs_file_extent_disk_bytenr(path->nodes[0], ei) == 0) {
4103 ret = find_extent_clone(sctx, path, key->objectid, key->offset,
4104 sctx->cur_inode_size, &found_clone);
4105 if (ret != -ENOENT && ret < 0)
4108 ret = send_write_or_clone(sctx, path, key, found_clone);
4112 ret = maybe_send_hole(sctx, path, key);
4117 static int process_all_extents(struct send_ctx *sctx)
4120 struct btrfs_root *root;
4121 struct btrfs_path *path;
4122 struct btrfs_key key;
4123 struct btrfs_key found_key;
4124 struct extent_buffer *eb;
4127 root = sctx->send_root;
4128 path = alloc_path_for_send();
4132 key.objectid = sctx->cmp_key->objectid;
4133 key.type = BTRFS_EXTENT_DATA_KEY;
4136 ret = btrfs_search_slot_for_read(root, &key, path, 1, 0);
4144 eb = path->nodes[0];
4145 slot = path->slots[0];
4146 btrfs_item_key_to_cpu(eb, &found_key, slot);
4148 if (found_key.objectid != key.objectid ||
4149 found_key.type != key.type) {
4154 ret = process_extent(sctx, path, &found_key);
4158 btrfs_release_path(path);
4159 key.offset = found_key.offset + 1;
4163 btrfs_free_path(path);
4167 static int process_recorded_refs_if_needed(struct send_ctx *sctx, int at_end)
4171 if (sctx->cur_ino == 0)
4173 if (!at_end && sctx->cur_ino == sctx->cmp_key->objectid &&
4174 sctx->cmp_key->type <= BTRFS_INODE_EXTREF_KEY)
4176 if (list_empty(&sctx->new_refs) && list_empty(&sctx->deleted_refs))
4179 ret = process_recorded_refs(sctx);
4184 * We have processed the refs and thus need to advance send_progress.
4185 * Now, calls to get_cur_xxx will take the updated refs of the current
4186 * inode into account.
4188 sctx->send_progress = sctx->cur_ino + 1;
4194 static int finish_inode_if_needed(struct send_ctx *sctx, int at_end)
4206 ret = process_recorded_refs_if_needed(sctx, at_end);
4210 if (sctx->cur_ino == 0 || sctx->cur_inode_deleted)
4212 if (!at_end && sctx->cmp_key->objectid == sctx->cur_ino)
4215 ret = get_inode_info(sctx->send_root, sctx->cur_ino, NULL, NULL,
4216 &left_mode, &left_uid, &left_gid, NULL);
4220 if (!sctx->parent_root || sctx->cur_inode_new) {
4222 if (!S_ISLNK(sctx->cur_inode_mode))
4225 ret = get_inode_info(sctx->parent_root, sctx->cur_ino,
4226 NULL, NULL, &right_mode, &right_uid,
4231 if (left_uid != right_uid || left_gid != right_gid)
4233 if (!S_ISLNK(sctx->cur_inode_mode) && left_mode != right_mode)
4237 if (S_ISREG(sctx->cur_inode_mode)) {
4238 if (need_send_hole(sctx)) {
4239 if (sctx->cur_inode_last_extent == (u64)-1) {
4240 ret = get_last_extent(sctx, (u64)-1);
4244 if (sctx->cur_inode_last_extent <
4245 sctx->cur_inode_size) {
4246 ret = send_hole(sctx, sctx->cur_inode_size);
4251 ret = send_truncate(sctx, sctx->cur_ino, sctx->cur_inode_gen,
4252 sctx->cur_inode_size);
4258 ret = send_chown(sctx, sctx->cur_ino, sctx->cur_inode_gen,
4259 left_uid, left_gid);
4264 ret = send_chmod(sctx, sctx->cur_ino, sctx->cur_inode_gen,
4271	 * We need to send the utimes every time, no matter if they actually
4272	 * changed between the two trees, as we have made changes to the inode before.
4274 ret = send_utimes(sctx, sctx->cur_ino, sctx->cur_inode_gen);
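/*
 * Called for changes of the inode item: initialize the per-inode state in
 * sctx (generation, size, mode, new/deleted/new_gen flags) and, when the
 * inode was deleted and recreated with a new generation, process all refs,
 * extents and xattrs of both versions right here.
 */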
4282 static int changed_inode(struct send_ctx *sctx,
4283 enum btrfs_compare_tree_result result)
4286 struct btrfs_key *key = sctx->cmp_key;
4287 struct btrfs_inode_item *left_ii = NULL;
4288 struct btrfs_inode_item *right_ii = NULL;
4292 sctx->cur_ino = key->objectid;
4293 sctx->cur_inode_new_gen = 0;
4294 sctx->cur_inode_last_extent = (u64)-1;
4297 * Set send_progress to current inode. This will tell all get_cur_xxx
4298 * functions that the current inode's refs are not updated yet. Later,
4299 * when process_recorded_refs is finished, it is set to cur_ino + 1.
4301 sctx->send_progress = sctx->cur_ino;
4303 if (result == BTRFS_COMPARE_TREE_NEW ||
4304 result == BTRFS_COMPARE_TREE_CHANGED) {
4305 left_ii = btrfs_item_ptr(sctx->left_path->nodes[0],
4306 sctx->left_path->slots[0],
4307 struct btrfs_inode_item);
4308 left_gen = btrfs_inode_generation(sctx->left_path->nodes[0],
4311 right_ii = btrfs_item_ptr(sctx->right_path->nodes[0],
4312 sctx->right_path->slots[0],
4313 struct btrfs_inode_item);
4314 right_gen = btrfs_inode_generation(sctx->right_path->nodes[0],
4317 if (result == BTRFS_COMPARE_TREE_CHANGED) {
4318 right_ii = btrfs_item_ptr(sctx->right_path->nodes[0],
4319 sctx->right_path->slots[0],
4320 struct btrfs_inode_item);
4322 right_gen = btrfs_inode_generation(sctx->right_path->nodes[0],
4326 * The cur_ino = root dir case is special here. We can't treat
4327 * the inode as deleted+reused because it would generate a
4328 * stream that tries to delete/mkdir the root dir.
4330 if (left_gen != right_gen &&
4331 sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID)
4332 sctx->cur_inode_new_gen = 1;
4335 if (result == BTRFS_COMPARE_TREE_NEW) {
4336 sctx->cur_inode_gen = left_gen;
4337 sctx->cur_inode_new = 1;
4338 sctx->cur_inode_deleted = 0;
4339 sctx->cur_inode_size = btrfs_inode_size(
4340 sctx->left_path->nodes[0], left_ii);
4341 sctx->cur_inode_mode = btrfs_inode_mode(
4342 sctx->left_path->nodes[0], left_ii);
4343 if (sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID)
4344 ret = send_create_inode_if_needed(sctx);
4345 } else if (result == BTRFS_COMPARE_TREE_DELETED) {
4346 sctx->cur_inode_gen = right_gen;
4347 sctx->cur_inode_new = 0;
4348 sctx->cur_inode_deleted = 1;
4349 sctx->cur_inode_size = btrfs_inode_size(
4350 sctx->right_path->nodes[0], right_ii);
4351 sctx->cur_inode_mode = btrfs_inode_mode(
4352 sctx->right_path->nodes[0], right_ii);
4353 } else if (result == BTRFS_COMPARE_TREE_CHANGED) {
4355		 * We need to do some special handling in case the inode was
4356		 * reported as changed with a changed generation number. This
4357		 * means that the original inode was deleted and a new inode
4358		 * reused the same inum. So we have to treat the old inode as
4359		 * deleted and the new one as new.
4361 if (sctx->cur_inode_new_gen) {
4363 * First, process the inode as if it was deleted.
4365 sctx->cur_inode_gen = right_gen;
4366 sctx->cur_inode_new = 0;
4367 sctx->cur_inode_deleted = 1;
4368 sctx->cur_inode_size = btrfs_inode_size(
4369 sctx->right_path->nodes[0], right_ii);
4370 sctx->cur_inode_mode = btrfs_inode_mode(
4371 sctx->right_path->nodes[0], right_ii);
4372 ret = process_all_refs(sctx,
4373 BTRFS_COMPARE_TREE_DELETED);
4378 * Now process the inode as if it was new.
4380 sctx->cur_inode_gen = left_gen;
4381 sctx->cur_inode_new = 1;
4382 sctx->cur_inode_deleted = 0;
4383 sctx->cur_inode_size = btrfs_inode_size(
4384 sctx->left_path->nodes[0], left_ii);
4385 sctx->cur_inode_mode = btrfs_inode_mode(
4386 sctx->left_path->nodes[0], left_ii);
4387 ret = send_create_inode_if_needed(sctx);
4391 ret = process_all_refs(sctx, BTRFS_COMPARE_TREE_NEW);
4395 * Advance send_progress now as we did not get into
4396 * process_recorded_refs_if_needed in the new_gen case.
4398 sctx->send_progress = sctx->cur_ino + 1;
4401 * Now process all extents and xattrs of the inode as if
4402 * they were all new.
4404 ret = process_all_extents(sctx);
4407 ret = process_all_new_xattrs(sctx);
4411 sctx->cur_inode_gen = left_gen;
4412 sctx->cur_inode_new = 0;
4413 sctx->cur_inode_new_gen = 0;
4414 sctx->cur_inode_deleted = 0;
4415 sctx->cur_inode_size = btrfs_inode_size(
4416 sctx->left_path->nodes[0], left_ii);
4417 sctx->cur_inode_mode = btrfs_inode_mode(
4418 sctx->left_path->nodes[0], left_ii);
4427	 * We have to process new refs before deleted refs, but compare_trees gives us
4428	 * the new and deleted refs mixed. To fix this, we record the new/deleted refs
4429	 * first and later process them in process_recorded_refs.
4430	 * For the cur_inode_new_gen case, we skip recording completely because
4431	 * changed_inode already initiated processing of refs. The reason for this is
4432	 * that in this case, compare_trees actually compares the refs of 2 different
4433	 * inodes. To handle this, process_all_refs is used in changed_inode to treat
4434	 * all refs of the right tree as deleted and all refs of the left tree as new.
4436 static int changed_ref(struct send_ctx *sctx,
4437 enum btrfs_compare_tree_result result)
4441 BUG_ON(sctx->cur_ino != sctx->cmp_key->objectid);
4443 if (!sctx->cur_inode_new_gen &&
4444 sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID) {
4445 if (result == BTRFS_COMPARE_TREE_NEW)
4446 ret = record_new_ref(sctx);
4447 else if (result == BTRFS_COMPARE_TREE_DELETED)
4448 ret = record_deleted_ref(sctx);
4449 else if (result == BTRFS_COMPARE_TREE_CHANGED)
4450 ret = record_changed_ref(sctx);
4457	 * Process new/deleted/changed xattrs. We skip processing in the
4458	 * cur_inode_new_gen case because changed_inode already initiated processing
4459	 * of xattrs. The reason is the same as in changed_ref.
4461 static int changed_xattr(struct send_ctx *sctx,
4462 enum btrfs_compare_tree_result result)
4466 BUG_ON(sctx->cur_ino != sctx->cmp_key->objectid);
4468 if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) {
4469 if (result == BTRFS_COMPARE_TREE_NEW)
4470 ret = process_new_xattr(sctx);
4471 else if (result == BTRFS_COMPARE_TREE_DELETED)
4472 ret = process_deleted_xattr(sctx);
4473 else if (result == BTRFS_COMPARE_TREE_CHANGED)
4474 ret = process_changed_xattr(sctx);
4481	 * Process new/deleted/changed extents. We skip processing in the
4482	 * cur_inode_new_gen case because changed_inode already initiated processing
4483	 * of extents. The reason is the same as in changed_ref.
4485 static int changed_extent(struct send_ctx *sctx,
4486 enum btrfs_compare_tree_result result)
4490 BUG_ON(sctx->cur_ino != sctx->cmp_key->objectid);
4492 if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) {
4493 if (result != BTRFS_COMPARE_TREE_DELETED)
4494 ret = process_extent(sctx, sctx->left_path,
4501 static int dir_changed(struct send_ctx *sctx, u64 dir)
4503 u64 orig_gen, new_gen;
4506 ret = get_inode_info(sctx->send_root, dir, NULL, &new_gen, NULL, NULL,
4511 ret = get_inode_info(sctx->parent_root, dir, NULL, &orig_gen, NULL,
4516 return (orig_gen != new_gen) ? 1 : 0;
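/*
 * For ref items that compare as identical we still have to check whether any
 * parent directory was deleted and recreated (generation change), because in
 * that case the refs must still be processed.
 */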
4519 static int compare_refs(struct send_ctx *sctx, struct btrfs_path *path,
4520 struct btrfs_key *key)
4522 struct btrfs_inode_extref *extref;
4523 struct extent_buffer *leaf;
4524 u64 dirid = 0, last_dirid = 0;
4531 /* Easy case, just check this one dirid */
4532 if (key->type == BTRFS_INODE_REF_KEY) {
4533 dirid = key->offset;
4535 ret = dir_changed(sctx, dirid);
4539 leaf = path->nodes[0];
4540 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
4541 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
4542 while (cur_offset < item_size) {
4543 extref = (struct btrfs_inode_extref *)(ptr +
4545 dirid = btrfs_inode_extref_parent(leaf, extref);
4546 ref_name_len = btrfs_inode_extref_name_len(leaf, extref);
4547 cur_offset += ref_name_len + sizeof(*extref);
4548 if (dirid == last_dirid)
4550 ret = dir_changed(sctx, dirid);
4560	 * Updates the compare-related fields in sctx and simply forwards to the
4561	 * actual changed_xxx functions.
4563 static int changed_cb(struct btrfs_root *left_root,
4564 struct btrfs_root *right_root,
4565 struct btrfs_path *left_path,
4566 struct btrfs_path *right_path,
4567 struct btrfs_key *key,
4568 enum btrfs_compare_tree_result result,
4572 struct send_ctx *sctx = ctx;
4574 if (result == BTRFS_COMPARE_TREE_SAME) {
4575 if (key->type == BTRFS_INODE_REF_KEY ||
4576 key->type == BTRFS_INODE_EXTREF_KEY) {
4577 ret = compare_refs(sctx, left_path, key);
4582 } else if (key->type == BTRFS_EXTENT_DATA_KEY) {
4583 return maybe_send_hole(sctx, left_path, key);
4587 result = BTRFS_COMPARE_TREE_CHANGED;
4591 sctx->left_path = left_path;
4592 sctx->right_path = right_path;
4593 sctx->cmp_key = key;
4595 ret = finish_inode_if_needed(sctx, 0);
4599 /* Ignore non-FS objects */
4600 if (key->objectid == BTRFS_FREE_INO_OBJECTID ||
4601 key->objectid == BTRFS_FREE_SPACE_OBJECTID)
4604 if (key->type == BTRFS_INODE_ITEM_KEY)
4605 ret = changed_inode(sctx, result);
4606 else if (key->type == BTRFS_INODE_REF_KEY ||
4607 key->type == BTRFS_INODE_EXTREF_KEY)
4608 ret = changed_ref(sctx, result);
4609 else if (key->type == BTRFS_XATTR_ITEM_KEY)
4610 ret = changed_xattr(sctx, result);
4611 else if (key->type == BTRFS_EXTENT_DATA_KEY)
4612 ret = changed_extent(sctx, result);
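/*
 * Full send without a parent snapshot: iterate over every item of the send
 * root, feed it to changed_cb() as BTRFS_COMPARE_TREE_NEW and finish the
 * last processed inode at the end.
 */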
4618 static int full_send_tree(struct send_ctx *sctx)
4621 struct btrfs_root *send_root = sctx->send_root;
4622 struct btrfs_key key;
4623 struct btrfs_key found_key;
4624 struct btrfs_path *path;
4625 struct extent_buffer *eb;
4630 path = alloc_path_for_send();
4634 spin_lock(&send_root->root_item_lock);
4635 start_ctransid = btrfs_root_ctransid(&send_root->root_item);
4636 spin_unlock(&send_root->root_item_lock);
4638 key.objectid = BTRFS_FIRST_FREE_OBJECTID;
4639 key.type = BTRFS_INODE_ITEM_KEY;
4643 * Make sure the tree has not changed after re-joining. We detect this
4644 * by comparing start_ctransid and ctransid. They should always match.
4646 spin_lock(&send_root->root_item_lock);
4647 ctransid = btrfs_root_ctransid(&send_root->root_item);
4648 spin_unlock(&send_root->root_item_lock);
4650 if (ctransid != start_ctransid) {
4651 WARN(1, KERN_WARNING "BTRFS: the root that you're trying to "
4652 "send was modified in between. This is "
4653 "probably a bug.\n");
4658 ret = btrfs_search_slot_for_read(send_root, &key, path, 1, 0);
4665 eb = path->nodes[0];
4666 slot = path->slots[0];
4667 btrfs_item_key_to_cpu(eb, &found_key, slot);
4669 ret = changed_cb(send_root, NULL, path, NULL,
4670 &found_key, BTRFS_COMPARE_TREE_NEW, sctx);
4674 key.objectid = found_key.objectid;
4675 key.type = found_key.type;
4676 key.offset = found_key.offset + 1;
4678 ret = btrfs_next_item(send_root, path);
4688 ret = finish_inode_if_needed(sctx, 1);
4691 btrfs_free_path(path);
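/*
 * Top level of a send operation: emit the stream header and the subvol begin
 * command, then either diff against the parent root with
 * btrfs_compare_trees() or fall back to full_send_tree(), and finally flush
 * any pending work for the last inode.
 */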
4695 static int send_subvol(struct send_ctx *sctx)
4699 if (!(sctx->flags & BTRFS_SEND_FLAG_OMIT_STREAM_HEADER)) {
4700 ret = send_header(sctx);
4705 ret = send_subvol_begin(sctx);
4709 if (sctx->parent_root) {
4710 ret = btrfs_compare_trees(sctx->send_root, sctx->parent_root,
4714 ret = finish_inode_if_needed(sctx, 1);
4718 ret = full_send_tree(sctx);
4724 free_recorded_refs(sctx);
4728 static void btrfs_root_dec_send_in_progress(struct btrfs_root* root)
4730 spin_lock(&root->root_item_lock);
4731 root->send_in_progress--;
4733 * Not much left to do, we don't know why it's unbalanced and
4734 * can't blindly reset it to 0.
4736 if (root->send_in_progress < 0)
4737 btrfs_err(root->fs_info,
4738 "send_in_progres unbalanced %d root %llu\n",
4739 root->send_in_progress, root->root_key.objectid);
4740 spin_unlock(&root->root_item_lock);
4743 long btrfs_ioctl_send(struct file *mnt_file, void __user *arg_)
4746 struct btrfs_root *send_root;
4747 struct btrfs_root *clone_root;
4748 struct btrfs_fs_info *fs_info;
4749 struct btrfs_ioctl_send_args *arg = NULL;
4750 struct btrfs_key key;
4751 struct send_ctx *sctx = NULL;
4753 u64 *clone_sources_tmp = NULL;
4754 int clone_sources_to_rollback = 0;
4755 int sort_clone_roots = 0;
4758 if (!capable(CAP_SYS_ADMIN))
4761 send_root = BTRFS_I(file_inode(mnt_file))->root;
4762 fs_info = send_root->fs_info;
4765 * The subvolume must remain read-only during send, protect against
4768 spin_lock(&send_root->root_item_lock);
4769 send_root->send_in_progress++;
4770 spin_unlock(&send_root->root_item_lock);
4773	 * This is done when we look up the root; it should already be complete
4774	 * by the time we get here.
4776 WARN_ON(send_root->orphan_cleanup_state != ORPHAN_CLEANUP_DONE);
4779	 * If we just created this root we need to make sure that the orphan
4780	 * cleanup has been done and committed, since we search the commit root.
4781	 * So check the commit root's transid against our otransid, and if they
4782	 * match, commit the transaction to make sure everything is updated.
4784 down_read(&send_root->fs_info->extent_commit_sem);
4785 if (btrfs_header_generation(send_root->commit_root) ==
4786 btrfs_root_otransid(&send_root->root_item)) {
4787 struct btrfs_trans_handle *trans;
4789 up_read(&send_root->fs_info->extent_commit_sem);
4791 trans = btrfs_attach_transaction_barrier(send_root);
4792 if (IS_ERR(trans)) {
4793 if (PTR_ERR(trans) != -ENOENT) {
4794 ret = PTR_ERR(trans);
4797			/* ENOENT means there's no transaction */
4799 ret = btrfs_commit_transaction(trans, send_root);
4804 up_read(&send_root->fs_info->extent_commit_sem);
4808 * Userspace tools do the checks and warn the user if it's
4811 if (!btrfs_root_readonly(send_root)) {
4816 arg = memdup_user(arg_, sizeof(*arg));
4823 if (!access_ok(VERIFY_READ, arg->clone_sources,
4824 sizeof(*arg->clone_sources) *
4825 arg->clone_sources_count)) {
4830 if (arg->flags & ~BTRFS_SEND_FLAG_MASK) {
4835 sctx = kzalloc(sizeof(struct send_ctx), GFP_NOFS);
4841 INIT_LIST_HEAD(&sctx->new_refs);
4842 INIT_LIST_HEAD(&sctx->deleted_refs);
4843 INIT_RADIX_TREE(&sctx->name_cache, GFP_NOFS);
4844 INIT_LIST_HEAD(&sctx->name_cache_list);
4846 sctx->flags = arg->flags;
4848 sctx->send_filp = fget(arg->send_fd);
4849 if (!sctx->send_filp) {
4854 sctx->send_root = send_root;
4855 sctx->clone_roots_cnt = arg->clone_sources_count;
4857 sctx->send_max_size = BTRFS_SEND_BUF_SIZE;
4858 sctx->send_buf = vmalloc(sctx->send_max_size);
4859 if (!sctx->send_buf) {
4864 sctx->read_buf = vmalloc(BTRFS_SEND_READ_SIZE);
4865 if (!sctx->read_buf) {
4870 sctx->clone_roots = vzalloc(sizeof(struct clone_root) *
4871 (arg->clone_sources_count + 1));
4872 if (!sctx->clone_roots) {
4877 if (arg->clone_sources_count) {
4878 clone_sources_tmp = vmalloc(arg->clone_sources_count *
4879 sizeof(*arg->clone_sources));
4880 if (!clone_sources_tmp) {
4885 ret = copy_from_user(clone_sources_tmp, arg->clone_sources,
4886 arg->clone_sources_count *
4887 sizeof(*arg->clone_sources));
4893 for (i = 0; i < arg->clone_sources_count; i++) {
4894 key.objectid = clone_sources_tmp[i];
4895 key.type = BTRFS_ROOT_ITEM_KEY;
4896 key.offset = (u64)-1;
4898 index = srcu_read_lock(&fs_info->subvol_srcu);
4900 clone_root = btrfs_read_fs_root_no_name(fs_info, &key);
4901 if (IS_ERR(clone_root)) {
4902 srcu_read_unlock(&fs_info->subvol_srcu, index);
4903 ret = PTR_ERR(clone_root);
4906 clone_sources_to_rollback = i + 1;
4907 spin_lock(&clone_root->root_item_lock);
4908 clone_root->send_in_progress++;
4909 if (!btrfs_root_readonly(clone_root)) {
4910 spin_unlock(&clone_root->root_item_lock);
4911 srcu_read_unlock(&fs_info->subvol_srcu, index);
4915 spin_unlock(&clone_root->root_item_lock);
4916 srcu_read_unlock(&fs_info->subvol_srcu, index);
4918 sctx->clone_roots[i].root = clone_root;
4920 vfree(clone_sources_tmp);
4921 clone_sources_tmp = NULL;
4924 if (arg->parent_root) {
4925 key.objectid = arg->parent_root;
4926 key.type = BTRFS_ROOT_ITEM_KEY;
4927 key.offset = (u64)-1;
4929 index = srcu_read_lock(&fs_info->subvol_srcu);
4931 sctx->parent_root = btrfs_read_fs_root_no_name(fs_info, &key);
4932 if (IS_ERR(sctx->parent_root)) {
4933 srcu_read_unlock(&fs_info->subvol_srcu, index);
4934 ret = PTR_ERR(sctx->parent_root);
4938 spin_lock(&sctx->parent_root->root_item_lock);
4939 sctx->parent_root->send_in_progress++;
4940 if (!btrfs_root_readonly(sctx->parent_root)) {
4941 spin_unlock(&sctx->parent_root->root_item_lock);
4942 srcu_read_unlock(&fs_info->subvol_srcu, index);
4946 spin_unlock(&sctx->parent_root->root_item_lock);
4948 srcu_read_unlock(&fs_info->subvol_srcu, index);
4952 * Clones from send_root are allowed, but only if the clone source
4953 * is behind the current send position. This is checked while searching
4954 * for possible clone sources.
4956 sctx->clone_roots[sctx->clone_roots_cnt++].root = sctx->send_root;
4958 /* We do a bsearch later */
4959 sort(sctx->clone_roots, sctx->clone_roots_cnt,
4960 sizeof(*sctx->clone_roots), __clone_root_cmp_sort,
4962 sort_clone_roots = 1;
4964 ret = send_subvol(sctx);
4968 if (!(sctx->flags & BTRFS_SEND_FLAG_OMIT_END_CMD)) {
4969 ret = begin_cmd(sctx, BTRFS_SEND_C_END);
4972 ret = send_cmd(sctx);
4978 if (sort_clone_roots) {
4979 for (i = 0; i < sctx->clone_roots_cnt; i++)
4980 btrfs_root_dec_send_in_progress(
4981 sctx->clone_roots[i].root);
4983 for (i = 0; sctx && i < clone_sources_to_rollback; i++)
4984 btrfs_root_dec_send_in_progress(
4985 sctx->clone_roots[i].root);
4987 btrfs_root_dec_send_in_progress(send_root);
4989 if (sctx && !IS_ERR_OR_NULL(sctx->parent_root))
4990 btrfs_root_dec_send_in_progress(sctx->parent_root);
4993 vfree(clone_sources_tmp);
4996 if (sctx->send_filp)
4997 fput(sctx->send_filp);
4999 vfree(sctx->clone_roots);
5000 vfree(sctx->send_buf);
5001 vfree(sctx->read_buf);
5003 name_cache_free(sctx);