/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_FORMAT_H
#define _BCACHEFS_FORMAT_H

/*
 * bcachefs on disk data structures
 *
 * There are three main types of on disk data structures in bcachefs (this is
 * reduced from 5 in bcache)
 *
 * The btree is the primary structure; most metadata exists as keys in the
 * various btrees. There are only a small number of btrees, they're not
 * sharded - we have one btree for extents, another for inodes, et cetera.
 *
 * The superblock contains the location of the journal, the list of devices in
 * the filesystem, and in general any metadata we need in order to decide
 * whether we can start a filesystem, or that we need prior to reading the
 * journal/btree roots.
 *
 * The superblock is extensible, and most of the contents of the superblock are
 * in variable length, type tagged fields; see struct bch_sb_field.
 * Backup superblocks do not reside in a fixed location; also, superblocks do
 * not have a fixed size. To locate backup superblocks we have struct
 * bch_sb_layout; we store a copy of this inside every superblock, and also
 * before the first superblock.
 *
 * The journal primarily records btree updates in the order they occurred;
 * journal replay consists of just iterating over all the keys in the open
 * journal entries and re-inserting them into the btrees.
 *
 * The journal also contains entry types for the btree roots, and blacklisted
 * journal sequence numbers (see journal_seq_blacklist.c).
 * bcachefs btrees are copy on write b+ trees, where nodes are big (typically
 * 128k-256k) and log structured. We use struct btree_node for writing the first
 * entry in a given node (offset 0), and struct btree_node_entry for all
 * subsequent writes.
 *
 * After the header, btree node entries contain a list of keys in sorted order.
 * Values are stored inline with the keys; since values are variable length (and
 * keys effectively are variable length too, due to packing) we can't do random
 * access without building up additional in memory tables in the btree node read
 * path.
 * BTREE KEYS (struct bkey):
 *
 * The various btrees share a common format for the key - so as to avoid
 * switching in fastpath lookup/comparison code - but define their own
 * structures for the key values.
 *
 * The size of a key/value pair is stored as a u8 in units of u64s, so the max
 * size is just under 2k. The common part also contains a type tag for the
 * value, and a format field indicating whether the key is packed or not (and
 * also meant to allow adding new key fields in the future, if desired).
 *
 * bkeys, when stored within a btree node, may also be packed. In that case, the
 * bkey_format in that node is used to unpack it. Packed bkeys mean that we can
 * be generous with field sizes in the common part of the key format (64 bit
 * inode number, 64 bit offset, 96 bit version field, etc.) for negligible cost.
 */
#include <asm/types.h>
#include <asm/byteorder.h>
#include <linux/kernel.h>
#include <linux/uuid.h>

#ifdef __KERNEL__
typedef uuid_t __uuid_t;
#endif
#define BITMASK(name, type, field, offset, end)				\
static const __maybe_unused unsigned	name##_OFFSET = offset;		\
static const __maybe_unused unsigned	name##_BITS = (end - offset);	\
									\
static inline __u64 name(const type *k)					\
{									\
	return (k->field >> offset) & ~(~0ULL << (end - offset));	\
}									\
									\
static inline void SET_##name(type *k, __u64 v)				\
{									\
	k->field &= ~(~(~0ULL << (end - offset)) << offset);		\
	k->field |= (v & ~(~0ULL << (end - offset))) << offset;	\
}
#define LE_BITMASK(_bits, name, type, field, offset, end)		\
static const __maybe_unused unsigned	name##_OFFSET = offset;		\
static const __maybe_unused unsigned	name##_BITS = (end - offset);	\
static const __maybe_unused __u##_bits	name##_MAX = (1ULL << (end - offset)) - 1;\
									\
static inline __u64 name(const type *k)				\
{									\
	return (__le##_bits##_to_cpu(k->field) >> offset) &		\
		~(~0ULL << (end - offset));				\
}									\
									\
static inline void SET_##name(type *k, __u64 v)				\
{									\
	__u##_bits new = __le##_bits##_to_cpu(k->field);		\
									\
	new &= ~(~(~0ULL << (end - offset)) << offset);			\
	new |= (v & ~(~0ULL << (end - offset))) << offset;		\
	k->field = __cpu_to_le##_bits(new);				\
}

#define LE16_BITMASK(n, t, f, o, e)	LE_BITMASK(16, n, t, f, o, e)
#define LE32_BITMASK(n, t, f, o, e)	LE_BITMASK(32, n, t, f, o, e)
#define LE64_BITMASK(n, t, f, o, e)	LE_BITMASK(64, n, t, f, o, e)
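/*
 * Example (illustration only, mirroring real users further down in this file):
 * declaring
 *
 *	LE64_BITMASK(BCH_SB_CLEAN, struct bch_sb, flags[0], 1, 2);
 *
 * generates BCH_SB_CLEAN()/SET_BCH_SB_CLEAN() accessors that read and update
 * bit 1 of the little-endian flags[0] word, converting to and from CPU byte
 * order on every access:
 *
 *	if (BCH_SB_CLEAN(sb))
 *		...				// was shut down cleanly
 *	SET_BCH_SB_CLEAN(sb, false);		// mark dirty before writing
 */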
struct bkey_format {
	__u8		key_u64s;
	__u8		nr_fields;
	/* One unused slot for now: */
	__u8		bits_per_field[6];
	__le64		field_offset[6];
};

/* Btree keys - all units are in sectors */
struct bpos {
	/*
	 * Word order matches machine byte order - btree code treats a bpos as a
	 * single large integer, for search/comparison purposes
	 *
	 * Note that wherever a bpos is embedded in another on disk data
	 * structure, it has to be byte swabbed when reading in metadata that
	 * wasn't written in native endian order:
	 */
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	__u32		snapshot;
	__u64		offset;
	__u64		inode;
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	__u64		inode;
	__u64		offset;		/* Points to end of extent - sectors */
	__u32		snapshot;
#else
#error edit for your odd byteorder.
#endif
} __packed
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
__aligned(4)
#endif
;

#define KEY_INODE_MAX			((__u64)~0ULL)
#define KEY_OFFSET_MAX			((__u64)~0ULL)
#define KEY_SNAPSHOT_MAX		((__u32)~0U)
#define KEY_SIZE_MAX			((__u32)~0U)
static inline struct bpos SPOS(__u64 inode, __u64 offset, __u32 snapshot)
{
	return (struct bpos) {
		.inode		= inode,
		.offset		= offset,
		.snapshot	= snapshot,
	};
}

#define POS_MIN				SPOS(0, 0, 0)
#define POS_MAX				SPOS(KEY_INODE_MAX, KEY_OFFSET_MAX, 0)
#define SPOS_MAX			SPOS(KEY_INODE_MAX, KEY_OFFSET_MAX, KEY_SNAPSHOT_MAX)
#define POS(_inode, _offset)		SPOS(_inode, _offset, 0)
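/*
 * A minimal sketch (not part of the on-disk format) of what "treating a bpos
 * as a single large integer" means for comparisons; the real comparison
 * helpers live in bkey.h, this one exists purely for illustration:
 */
static inline int bpos_cmp_sketch(struct bpos l, struct bpos r)
{
	/* compare inode, then offset, then snapshot - highest word first */
	return  l.inode    != r.inode    ? (l.inode    < r.inode    ? -1 : 1) :
		l.offset   != r.offset   ? (l.offset   < r.offset   ? -1 : 1) :
		l.snapshot != r.snapshot ? (l.snapshot < r.snapshot ? -1 : 1) : 0;
}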
/* Empty placeholder struct, for container_of() */
struct bch_val {
	__u64		__nothing[0];
};

struct bversion {
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	__u64		lo;
	__u32		hi;
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	__u32		hi;
	__u64		lo;
#endif
} __packed __aligned(4);
struct bkey {
	/* Size of combined key and value, in u64s */
	__u8		u64s;

	/* Format of key (0 for format local to btree node) */
#if defined(__LITTLE_ENDIAN_BITFIELD)
	__u8		format:7,
			needs_whiteout:1;
#elif defined (__BIG_ENDIAN_BITFIELD)
	__u8		needs_whiteout:1,
			format:7;
#else
#error edit for your odd byteorder.
#endif

	/* Type of the value */
	__u8		type;

#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	__u8		pad[1];

	struct bversion	version;
	__u32		size;		/* extent size, in sectors */
	struct bpos	p;
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	struct bpos	p;
	__u32		size;		/* extent size, in sectors */
	struct bversion	version;

	__u8		pad[1];
#endif
} __packed __aligned(8);
struct bkey_packed {
	__u64		_data[0];

	/* Size of combined key and value, in u64s */
	__u8		u64s;

	/* Format of key (0 for format local to btree node) */
	/*
	 * XXX: next incompat on disk format change, switch format and
	 * needs_whiteout - bkey_packed() will be cheaper if format is the high
	 * bits of the bitfield
	 */
#if defined(__LITTLE_ENDIAN_BITFIELD)
	__u8		format:7,
			needs_whiteout:1;
#elif defined (__BIG_ENDIAN_BITFIELD)
	__u8		needs_whiteout:1,
			format:7;
#endif

	/* Type of the value */
	__u8		type;
	__u8		key_start[0];

	/*
	 * We copy bkeys with struct assignment in various places, and while
	 * that shouldn't be done with packed bkeys we can't disallow it in C,
	 * and it's legal to cast a bkey to a bkey_packed - so padding it out
	 * to the same size as struct bkey should hopefully be safest.
	 */
	__u8		pad[sizeof(struct bkey) - 3];
} __packed __aligned(8);
#define BKEY_U64s			(sizeof(struct bkey) / sizeof(__u64))
#define BKEY_U64s_MAX			U8_MAX
#define BKEY_VAL_U64s_MAX		(BKEY_U64s_MAX - BKEY_U64s)
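/*
 * Illustration (a sketch, not in the original header): since u64s is a u8
 * counting 64-bit words, the largest possible key/value pair is
 * U8_MAX * sizeof(__u64) = 2040 bytes - the "just under 2k" mentioned above:
 */
_Static_assert(BKEY_U64s_MAX * sizeof(__u64) == 2040,
	       "max bkey size is just under 2k");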
#define KEY_PACKED_BITS_START		24

#define KEY_FORMAT_LOCAL_BTREE		0
#define KEY_FORMAT_CURRENT		1
enum bch_bkey_fields {
	BKEY_FIELD_INODE,
	BKEY_FIELD_OFFSET,
	BKEY_FIELD_SNAPSHOT,
	BKEY_FIELD_SIZE,
	BKEY_FIELD_VERSION_HI,
	BKEY_FIELD_VERSION_LO,
	BKEY_NR_FIELDS,
};
#define bkey_format_field(name, field)					\
	[BKEY_FIELD_##name] = (sizeof(((struct bkey *) NULL)->field) * 8)

#define BKEY_FORMAT_CURRENT						\
((struct bkey_format) {							\
	.key_u64s	= BKEY_U64s,					\
	.nr_fields	= BKEY_NR_FIELDS,				\
	.bits_per_field = {						\
		bkey_format_field(INODE,	p.inode),		\
		bkey_format_field(OFFSET,	p.offset),		\
		bkey_format_field(SNAPSHOT,	p.snapshot),		\
		bkey_format_field(SIZE,		size),			\
		bkey_format_field(VERSION_HI,	version.hi),		\
		bkey_format_field(VERSION_LO,	version.lo),		\
	},								\
})
/* bkey with inline value */
struct bkey_i {
	__u64			_data[0];

	struct bkey	k;
	struct bch_val	v;
};

#define KEY(_inode, _offset, _size)					\
((struct bkey) {							\
	.u64s		= BKEY_U64s,					\
	.format		= KEY_FORMAT_CURRENT,				\
	.p		= POS(_inode, _offset),				\
	.size		= _size,					\
})

static inline void bkey_init(struct bkey *k)
{
	*k = KEY(0, 0, 0);
}

#define bkey_bytes(_k)		((_k)->u64s * sizeof(__u64))

#define __BKEY_PADDED(key, pad)					\
	struct bkey_i key; __u64 key ## _pad[pad]
/*
 * - DELETED keys are used internally to mark keys that should be ignored but
 *   override keys in composition order.  Their version number is ignored.
 *
 * - DISCARDED keys indicate that the data is all 0s because it has been
 *   discarded. DISCARDs may have a version; if the version is nonzero the key
 *   will be persistent, otherwise the key will be dropped whenever the btree
 *   node is rewritten (like DELETED keys).
 *
 * - ERROR: any read of the data returns a read error, as the data was lost due
 *   to a failing device. Like DISCARDED keys, they can be removed (overridden)
 *   by new writes or cluster-wide GC. Node repair can also overwrite them with
 *   the same or a more recent version number, but not with an older version
 *   number.
 *
 * - WHITEOUT: for hash table btrees
 */
#define BCH_BKEY_TYPES()				\
	x(hash_whiteout,	4)			\
	x(inode_generation,	9)			\
	x(btree_ptr_v2,		18)			\
	x(indirect_inline_data,	19)			\
	x(snapshot_tree,	31)			\
	x(logged_op_truncate,	32)			\
	x(logged_op_finsert,	33)

enum bch_bkey_type {
#define x(name, nr) KEY_TYPE_##name = nr,
	BCH_BKEY_TYPES()
#undef x
	KEY_TYPE_MAX,
};
struct bch_whiteout {
	struct bch_val		v;
};

struct bch_hash_whiteout {
	struct bch_val		v;
};
/*
 * Extents:
 *
 * In extent bkeys, the value is a list of pointers (bch_extent_ptr), optionally
 * preceded by checksum/compression information (bch_extent_crc32 or
 * bch_extent_crc64).
 *
 * One major determining factor in the format of extents is how we handle and
 * represent extents that have been partially overwritten and thus trimmed:
 *
 * If an extent is not checksummed or compressed, when the extent is trimmed we
 * don't have to remember the extent we originally allocated and wrote: we can
 * merely adjust ptr->offset to point to the start of the data that is currently
 * live. The size field in struct bkey records the current (live) size of the
 * extent, and is also used to mean "size of region on disk that we point to" in
 * this case.
 *
 * Thus an extent that is not checksummed or compressed will consist only of a
 * list of bch_extent_ptrs, with none of the fields in
 * bch_extent_crc32/bch_extent_crc64.
 *
 * When an extent is checksummed or compressed, it's not possible to read only
 * the data that is currently live: we have to read the entire extent that was
 * originally written, and then return only the part of the extent that is
 * currently live.
 *
 * Thus, in addition to the current size of the extent in struct bkey, we need
 * to store the size of the originally allocated space - this is the
 * compressed_size and uncompressed_size fields in bch_extent_crc32/64. Also,
 * when the extent is trimmed, instead of modifying the offset field of the
 * pointer, we keep a second smaller offset field - "offset into the original
 * extent of the currently live region".
 * The other major determining factor is replication and data migration:
 *
 * Each pointer may have its own bch_extent_crc32/64. When doing a replicated
 * write, we will initially write all the replicas in the same format, with the
 * same checksum type and compression format - however, when copygc runs later (or
 * tiering/cache promotion, anything that moves data), it is not in general
 * going to rewrite all the pointers at once - one of the replicas may be in a
 * bucket on one device that has very little fragmentation while another lives
 * in a bucket that has become heavily fragmented, and thus is being rewritten
 * sooner than the rest.
 *
 * Thus it will only move a subset of the pointers (or in the case of
 * tiering/cache promotion perhaps add a single pointer without dropping any
 * current pointers), and if the extent has been partially overwritten it must
 * write only the currently live portion (or copygc would not be able to reduce
 * fragmentation!) - which necessitates a different bch_extent_crc format for
 * the new pointer.
 *
 * But in the interests of space efficiency, we don't want to store one
 * bch_extent_crc for each pointer if we don't have to.
 *
 * Thus, a bch_extent consists of bch_extent_crc32s, bch_extent_crc64s, and
 * bch_extent_ptrs appended arbitrarily one after the other. We determine the
 * type of a given entry with a scheme similar to utf8 (except we're encoding a
 * type, not a size), encoding the type in the position of the first set bit
 * (see the illustrative helper after union bch_extent_entry, below):
 *
 * bch_extent_crc32	- 0b1
 * bch_extent_ptr	- 0b10
 * bch_extent_crc64	- 0b100
 *
 * We do it this way because bch_extent_crc32 is _very_ constrained on bits (and
 * bch_extent_crc64 is the least constrained).
 *
 * Then, each bch_extent_crc32/64 applies to the pointers that follow after it,
 * until the next bch_extent_crc32/64.
 *
 * If there are no bch_extent_crcs preceding a bch_extent_ptr, then that pointer
 * is neither checksummed nor compressed.
 */
/* 128 bits, sufficient for cryptographic MACs: */
struct bch_csum {
	__le64			lo;
	__le64			hi;
} __packed __aligned(8);
#define BCH_EXTENT_ENTRY_TYPES()		\
	x(ptr,			0)		\
	x(crc32,		1)		\
	x(crc64,		2)		\
	x(crc128,		3)		\
	x(stripe_ptr,		4)		\
	x(rebalance,		5)
#define BCH_EXTENT_ENTRY_MAX	6

enum bch_extent_entry_type {
#define x(f, n) BCH_EXTENT_ENTRY_##f = n,
	BCH_EXTENT_ENTRY_TYPES()
#undef x
};
/* Compressed/uncompressed size are stored biased by 1: */
struct bch_extent_crc32 {
#if defined(__LITTLE_ENDIAN_BITFIELD)
	__u32			type:2,
				_compressed_size:7,
				_uncompressed_size:7,
				offset:7,
				_unused:1,
				csum_type:4,
				compression_type:4;
	__u32			csum;
#elif defined (__BIG_ENDIAN_BITFIELD)
	__u32			csum;
	__u32			compression_type:4,
				csum_type:4,
				_unused:1,
				offset:7,
				_uncompressed_size:7,
				_compressed_size:7,
				type:2;
#endif
} __packed __aligned(8);

#define CRC32_SIZE_MAX		(1U << 7)
#define CRC32_NONCE_MAX		0
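/*
 * Illustration (not in the original header): since sizes are stored biased by
 * 1, a 7-bit _compressed_size field encodes 1..128 sectors - hence a
 * CRC32_SIZE_MAX of 1U << 7. E.g. a 128-sector extent stores
 * _compressed_size = 127 and is decoded as _compressed_size + 1.
 */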
struct bch_extent_crc64 {
#if defined(__LITTLE_ENDIAN_BITFIELD)
	__u64			type:3,
				_compressed_size:9,
				_uncompressed_size:9,
				offset:9,
				nonce:10,
				csum_type:4,
				compression_type:4,
				csum_hi:16;
#elif defined (__BIG_ENDIAN_BITFIELD)
	__u64			csum_hi:16,
				compression_type:4,
				csum_type:4,
				nonce:10,
				offset:9,
				_uncompressed_size:9,
				_compressed_size:9,
				type:3;
#endif
	__u64			csum_lo;
} __packed __aligned(8);

#define CRC64_SIZE_MAX		(1U << 9)
#define CRC64_NONCE_MAX		((1U << 10) - 1)
struct bch_extent_crc128 {
#if defined(__LITTLE_ENDIAN_BITFIELD)
	__u64			type:4,
				_compressed_size:13,
				_uncompressed_size:13,
				offset:13,
				nonce:13,
				csum_type:4,
				compression_type:4;
#elif defined (__BIG_ENDIAN_BITFIELD)
	__u64			compression_type:4,
				csum_type:4,
				nonce:13,
				offset:13,
				_uncompressed_size:13,
				_compressed_size:13,
				type:4;
#endif
	struct bch_csum		csum;
} __packed __aligned(8);

#define CRC128_SIZE_MAX		(1U << 13)
#define CRC128_NONCE_MAX	((1U << 13) - 1)
/*
 * @reservation - pointer hasn't been written to, just reserved
 */
struct bch_extent_ptr {
#if defined(__LITTLE_ENDIAN_BITFIELD)
	__u64			type:1,
				cached:1,
				unused:1,
				unwritten:1,
				offset:44, /* 8 petabytes */
				dev:8,
				gen:8;
#elif defined (__BIG_ENDIAN_BITFIELD)
	__u64			gen:8,
				dev:8,
				offset:44,
				unwritten:1,
				unused:1,
				cached:1,
				type:1;
#endif
} __packed __aligned(8);
struct bch_extent_stripe_ptr {
#if defined(__LITTLE_ENDIAN_BITFIELD)
	__u64			type:5,
				block:8,
				redundancy:4,
				idx:47;
#elif defined (__BIG_ENDIAN_BITFIELD)
	__u64			idx:47,
				redundancy:4,
				block:8,
				type:5;
#endif
};
struct bch_extent_rebalance {
#if defined(__LITTLE_ENDIAN_BITFIELD)
	__u64			type:6,
				unused:34,
				compression:8, /* enum bch_compression_opt */
				target:16;
#elif defined (__BIG_ENDIAN_BITFIELD)
	__u64			target:16,
				compression:8,
				unused:34,
				type:6;
#endif
};
union bch_extent_entry {
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ ||  __BITS_PER_LONG == 64
	unsigned long			type;
#elif __BITS_PER_LONG == 32
	struct {
		unsigned long		pad;
		unsigned long		type;
	};
#else
#error edit for your odd byteorder.
#endif

#define x(f, n) struct bch_extent_##f	f;
	BCH_EXTENT_ENTRY_TYPES()
#undef x
};
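/*
 * A sketch (not in the original header) of the "type in the position of the
 * first set bit" scheme described above; the real decoding helpers live in
 * extents.h. This assumes __ffs() (linux/bitops.h) is available here:
 */
static inline unsigned extent_entry_type_sketch(const union bch_extent_entry *e)
{
	/* the index of the first set bit in the low word is the entry type */
	return __ffs(e->type);
}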
struct bch_btree_ptr {
	struct bch_val		v;

	__u64			_data[0];
	struct bch_extent_ptr	start[];
} __packed __aligned(8);
struct bch_btree_ptr_v2 {
	struct bch_val		v;

	__u64			mem_ptr;
	__le64			seq;
	__le16			sectors_written;
	__le16			flags;
	struct bpos		min_key;
	__u64			_data[0];
	struct bch_extent_ptr	start[];
} __packed __aligned(8);

LE16_BITMASK(BTREE_PTR_RANGE_UPDATED,	struct bch_btree_ptr_v2, flags, 0, 1);
struct bch_extent {
	struct bch_val		v;

	__u64			_data[0];
	union bch_extent_entry	start[];
} __packed __aligned(8);

struct bch_reservation {
	struct bch_val		v;

	__le32			generation;
	__u8			nr_replicas;
	__u8			pad[3];
} __packed __aligned(8);
/* Maximum size (in u64s) a single pointer could be: */
#define BKEY_EXTENT_PTR_U64s_MAX\
	((sizeof(struct bch_extent_crc128) +			\
	  sizeof(struct bch_extent_ptr)) / sizeof(__u64))

/* Maximum possible size of an entire extent value: */
#define BKEY_EXTENT_VAL_U64s_MAX				\
	(1 + BKEY_EXTENT_PTR_U64s_MAX * (BCH_REPLICAS_MAX + 1))

/* Maximum possible size of an entire extent, key + value: */
#define BKEY_EXTENT_U64s_MAX		(BKEY_U64s + BKEY_EXTENT_VAL_U64s_MAX)

/* Btree pointers don't carry around checksums: */
#define BKEY_BTREE_PTR_VAL_U64s_MAX				\
	((sizeof(struct bch_btree_ptr_v2) +			\
	  sizeof(struct bch_extent_ptr) * BCH_REPLICAS_MAX) / sizeof(__u64))
#define BKEY_BTREE_PTR_U64s_MAX					\
	(BKEY_U64s + BKEY_BTREE_PTR_VAL_U64s_MAX)
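/*
 * Worked example (illustrative; the exact numbers follow from the struct
 * layouts above): bch_extent_crc128 is 24 bytes and bch_extent_ptr is 8
 * bytes, so BKEY_EXTENT_PTR_U64s_MAX comes to 4 u64s, and with
 * BCH_REPLICAS_MAX of 4 an extent value is at most 1 + 4 * 5 = 21 u64s.
 */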
#define BLOCKDEV_INODE_MAX	4096

#define BCACHEFS_ROOT_INO	4096
struct bch_inode {
	struct bch_val		v;

	__le64			bi_hash_seed;
	__le32			bi_flags;
	__le16			bi_mode;
	__u8			fields[];
} __packed __aligned(8);

struct bch_inode_v2 {
	struct bch_val		v;

	__le64			bi_journal_seq;
	__le64			bi_hash_seed;
	__le64			bi_flags;
	__le16			bi_mode;
	__u8			fields[];
} __packed __aligned(8);

struct bch_inode_v3 {
	struct bch_val		v;

	__le64			bi_journal_seq;
	__le64			bi_hash_seed;
	__le64			bi_flags;
	__le64			bi_sectors;
	__le64			bi_size;
	__le64			bi_version;
	__u8			fields[];
} __packed __aligned(8);

#define INODEv3_FIELDS_START_INITIAL	6
#define INODEv3_FIELDS_START_CUR	(offsetof(struct bch_inode_v3, fields) / sizeof(__u64))

struct bch_inode_generation {
	struct bch_val		v;

	__le32			bi_generation;
	__le32			pad;
} __packed __aligned(8);
/*
 * bi_subvol and bi_parent_subvol are only set for subvolume roots:
 */

#define BCH_INODE_FIELDS_v2()			\
	x(bi_generation,		32)	\
	x(bi_dev,			32)	\
	x(bi_data_checksum,		8)	\
	x(bi_compression,		8)	\
	x(bi_project,			32)	\
	x(bi_background_compression,	8)	\
	x(bi_data_replicas,		8)	\
	x(bi_promote_target,		16)	\
	x(bi_foreground_target,		16)	\
	x(bi_background_target,		16)	\
	x(bi_erasure_code,		16)	\
	x(bi_fields_set,		16)	\
	x(bi_dir,			64)	\
	x(bi_dir_offset,		64)	\
	x(bi_subvol,			32)	\
	x(bi_parent_subvol,		32)
#define BCH_INODE_FIELDS_v3()			\
	x(bi_generation,		32)	\
	x(bi_dev,			32)	\
	x(bi_data_checksum,		8)	\
	x(bi_compression,		8)	\
	x(bi_project,			32)	\
	x(bi_background_compression,	8)	\
	x(bi_data_replicas,		8)	\
	x(bi_promote_target,		16)	\
	x(bi_foreground_target,		16)	\
	x(bi_background_target,		16)	\
	x(bi_erasure_code,		16)	\
	x(bi_fields_set,		16)	\
	x(bi_dir,			64)	\
	x(bi_dir_offset,		64)	\
	x(bi_subvol,			32)	\
	x(bi_parent_subvol,		32)	\
	x(bi_nocow,			8)
/* subset of BCH_INODE_FIELDS */
#define BCH_INODE_OPTS()			\
	x(data_checksum,		8)	\
	x(compression,			8)	\
	x(project,			32)	\
	x(background_compression,	8)	\
	x(data_replicas,		8)	\
	x(promote_target,		16)	\
	x(foreground_target,		16)	\
	x(background_target,		16)	\
	x(erasure_code,			16)
enum inode_opt_id {
#define x(name, ...)					\
	Inode_opt_##name,
	BCH_INODE_OPTS()
#undef x
	Inode_opt_nr,
};
#define BCH_INODE_FLAGS()			\
	x(sync,				0)	\
	x(immutable,			1)	\
	x(append,			2)	\
	x(nodump,			3)	\
	x(noatime,			4)	\
	x(i_size_dirty,			5)	\
	x(i_sectors_dirty,		6)	\
	x(unlinked,			7)	\
	x(backptr_untrusted,		8)
/* bits 20+ reserved for packed fields below: */

enum bch_inode_flags {
#define x(t, n)	BCH_INODE_##t = 1U << n,
	BCH_INODE_FLAGS()
#undef x
};

enum __bch_inode_flags {
#define x(t, n)	__BCH_INODE_##t = n,
	BCH_INODE_FLAGS()
#undef x
};

LE32_BITMASK(INODE_STR_HASH,	struct bch_inode, bi_flags, 20, 24);
LE32_BITMASK(INODE_NR_FIELDS,	struct bch_inode, bi_flags, 24, 31);
LE32_BITMASK(INODE_NEW_VARINT,	struct bch_inode, bi_flags, 31, 32);

LE64_BITMASK(INODEv2_STR_HASH,	struct bch_inode_v2, bi_flags, 20, 24);
LE64_BITMASK(INODEv2_NR_FIELDS,	struct bch_inode_v2, bi_flags, 24, 31);

LE64_BITMASK(INODEv3_STR_HASH,	struct bch_inode_v3, bi_flags, 20, 24);
LE64_BITMASK(INODEv3_NR_FIELDS,	struct bch_inode_v3, bi_flags, 24, 31);

LE64_BITMASK(INODEv3_FIELDS_START,
				struct bch_inode_v3, bi_flags, 31, 36);
LE64_BITMASK(INODEv3_MODE,	struct bch_inode_v3, bi_flags, 36, 52);
/*
 * Dirents (and xattrs) have to implement string lookups; since our b-tree
 * doesn't support arbitrary length strings for the key, we instead index by a
 * 64 bit hash (currently truncated sha1) of the string, stored in the offset
 * field of the key - using linear probing to resolve hash collisions. This also
 * provides us with the readdir cookie POSIX requires.
 *
 * Linear probing requires us to use whiteouts for deletions, in the event of a
 * hash collision:
 */
struct bch_dirent {
	struct bch_val		v;

	/* Target inode number: */
	union {
	__le64			d_inum;
	struct {		/* DT_SUBVOL */
	__le32			d_child_subvol;
	__le32			d_parent_subvol;
	};
	};

	/*
	 * Copy of mode bits 12-15 from the target inode - so userspace can get
	 * the filetype without having to do a stat()
	 */
	__u8			d_type;

	__u8			d_name[];
} __packed __aligned(8);
#define DT_SUBVOL	16
#define BCH_DT_MAX	17

#define BCH_NAME_MAX	512
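/*
 * Illustration (a sketch, not in the original header): a dirent lookup key
 * lives at (directory inode, 64-bit hash of the name). bch2_dirent_hash() is
 * hypothetical here - the real hashing depends on the inode's hash seed and
 * type (see str_hash.h):
 *
 *	struct bpos pos = POS(dir_inum, bch2_dirent_hash(name, len));
 *
 * On a collision, linear probing means the dirent may instead live at
 * POS(dir_inum, hash + 1), hash + 2, ... - whiteouts keep such probe chains
 * intact across deletions.
 */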
/* Xattrs */

#define KEY_TYPE_XATTR_INDEX_USER		0
#define KEY_TYPE_XATTR_INDEX_POSIX_ACL_ACCESS	1
#define KEY_TYPE_XATTR_INDEX_POSIX_ACL_DEFAULT	2
#define KEY_TYPE_XATTR_INDEX_TRUSTED		3
#define KEY_TYPE_XATTR_INDEX_SECURITY		4

struct bch_xattr {
	struct bch_val		v;
	__u8			x_type;
	__u8			x_name_len;
	__le16			x_val_len;
	__u8			x_name[];
} __packed __aligned(8);
/* Bucket/allocation information: */

struct bch_alloc {
	struct bch_val		v;
	__u8			fields;
	__u8			gen;
	__u8			data[];
} __packed __aligned(8);

#define BCH_ALLOC_FIELDS_V1()			\
	x(read_time,		16)		\
	x(write_time,		16)		\
	x(data_type,		8)		\
	x(dirty_sectors,	16)		\
	x(cached_sectors,	16)		\
	x(oldest_gen,		8)		\
	x(stripe,		32)		\
	x(stripe_redundancy,	8)

enum {
#define x(name, _bits) BCH_ALLOC_FIELD_V1_##name,
	BCH_ALLOC_FIELDS_V1()
#undef x
};
struct bch_alloc_v2 {
	struct bch_val		v;
	__u8			nr_fields;
	__u8			gen;
	__u8			oldest_gen;
	__u8			data_type;
	__u8			data[];
} __packed __aligned(8);

#define BCH_ALLOC_FIELDS_V2()			\
	x(read_time,		64)		\
	x(write_time,		64)		\
	x(dirty_sectors,	32)		\
	x(cached_sectors,	32)		\
	x(stripe,		32)		\
	x(stripe_redundancy,	8)
struct bch_alloc_v3 {
	struct bch_val		v;
	__le64			journal_seq;
	__le32			flags;
	__u8			nr_fields;
	__u8			gen;
	__u8			oldest_gen;
	__u8			data_type;
	__u8			data[];
} __packed __aligned(8);

LE32_BITMASK(BCH_ALLOC_V3_NEED_DISCARD,struct bch_alloc_v3, flags,  0,  1)
LE32_BITMASK(BCH_ALLOC_V3_NEED_INC_GEN,struct bch_alloc_v3, flags,  1,  2)
struct bch_alloc_v4 {
	struct bch_val		v;
	__u64			journal_seq;
	__u32			flags;
	__u8			gen;
	__u8			oldest_gen;
	__u8			data_type;
	__u8			stripe_redundancy;
	__u32			dirty_sectors;
	__u32			cached_sectors;
	__u64			io_time[2];
	__u32			stripe;
	__u32			nr_external_backpointers;
	__u64			fragmentation_lru;
} __packed __aligned(8);

#define BCH_ALLOC_V4_U64s_V0	6
#define BCH_ALLOC_V4_U64s	(sizeof(struct bch_alloc_v4) / sizeof(__u64))

BITMASK(BCH_ALLOC_V4_NEED_DISCARD,	struct bch_alloc_v4, flags,  0,  1)
BITMASK(BCH_ALLOC_V4_NEED_INC_GEN,	struct bch_alloc_v4, flags,  1,  2)
BITMASK(BCH_ALLOC_V4_BACKPOINTERS_START,struct bch_alloc_v4, flags,  2,  8)
BITMASK(BCH_ALLOC_V4_NR_BACKPOINTERS,	struct bch_alloc_v4, flags,  8, 14)

#define BCH_ALLOC_V4_NR_BACKPOINTERS_MAX	40
struct bch_backpointer {
	struct bch_val		v;
	__u8			btree_id;
	__u8			level;
	__u8			data_type;
	__u64			bucket_offset:40;
	__u32			bucket_len;
	struct bpos		pos;
} __packed __aligned(8);
#define KEY_TYPE_BUCKET_GENS_BITS	8
#define KEY_TYPE_BUCKET_GENS_NR		(1U << KEY_TYPE_BUCKET_GENS_BITS)
#define KEY_TYPE_BUCKET_GENS_MASK	(KEY_TYPE_BUCKET_GENS_NR - 1)

struct bch_bucket_gens {
	struct bch_val		v;
	u8			gens[KEY_TYPE_BUCKET_GENS_NR];
} __packed __aligned(8);
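/*
 * Illustration (a sketch, not in the original header): each bch_bucket_gens
 * key covers KEY_TYPE_BUCKET_GENS_NR consecutive buckets - so, assuming the
 * key's offset field holds bucket >> KEY_TYPE_BUCKET_GENS_BITS, bucket b's
 * generation would be read as:
 *
 *	gens[b & KEY_TYPE_BUCKET_GENS_MASK]
 */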
/* Quotas: */

enum quota_types {
	QTYP_USR		= 0,
	QTYP_GRP		= 1,
	QTYP_PRJ		= 2,
	QTYP_NR			= 3,
};

enum quota_counters {
	Q_SPC			= 0,
	Q_INO			= 1,
	Q_COUNTERS		= 2,
};

struct bch_quota_counter {
	__le64			hardlimit;
	__le64			softlimit;
};

struct bch_quota {
	struct bch_val		v;
	struct bch_quota_counter c[Q_COUNTERS];
} __packed __aligned(8);
/* Erasure coding */

struct bch_stripe {
	struct bch_val		v;
	__le16			sectors;
	__u8			algorithm;
	__u8			nr_blocks;
	__u8			nr_redundant;

	__u8			csum_granularity_bits;
	__u8			csum_type;
	__u8			pad;

	struct bch_extent_ptr	ptrs[];
} __packed __aligned(8);
/* Reflink: */

struct bch_reflink_p {
	struct bch_val		v;
	__le64			idx;
	/*
	 * A reflink pointer might point to an indirect extent which is then
	 * later split (by copygc or rebalance). If we only pointed to part of
	 * the original indirect extent, and then one of the fragments is
	 * outside the range we point to, we'd leak a refcount: so when creating
	 * reflink pointers, we need to store pad values to remember the full
	 * range we were taking a reference on.
	 */
	__le32			front_pad;
	__le32			back_pad;
} __packed __aligned(8);

struct bch_reflink_v {
	struct bch_val		v;
	__le64			refcount;
	union bch_extent_entry	start[0];
	__u64			_data[];
} __packed __aligned(8);
struct bch_indirect_inline_data {
	struct bch_val		v;
	__le64			refcount;
	u8			data[];
};

struct bch_inline_data {
	struct bch_val		v;
	u8			data[];
};
/* Subvolumes: */

#define SUBVOL_POS_MIN		POS(0, 1)
#define SUBVOL_POS_MAX		POS(0, S32_MAX)
#define BCACHEFS_ROOT_SUBVOL	1

struct bch_subvolume {
	struct bch_val		v;
	__le32			flags;
	__le32			snapshot;
	__le64			inode;
	/*
	 * Snapshot subvolumes form a tree, separate from the snapshot nodes
	 * tree - if this subvolume is a snapshot, this is the ID of the
	 * subvolume it was created from:
	 */
	__le32			parent;
};

LE32_BITMASK(BCH_SUBVOLUME_RO,		struct bch_subvolume, flags,  0,  1)
/*
 * We need to know whether a subvolume is a snapshot so we can know whether we
 * can delete it (or whether it should just be rm -rf'd)
 */
LE32_BITMASK(BCH_SUBVOLUME_SNAP,	struct bch_subvolume, flags,  1,  2)
LE32_BITMASK(BCH_SUBVOLUME_UNLINKED,	struct bch_subvolume, flags,  2,  3)
struct bch_snapshot {
	struct bch_val		v;
	__le32			flags;
	__le32			parent;
	__le32			children[2];
	__le32			subvol;
	/* corresponds to a bch_snapshot_tree in BTREE_ID_snapshot_trees */
	__le32			tree;
};

LE32_BITMASK(BCH_SNAPSHOT_DELETED,	struct bch_snapshot, flags,  0,  1)

/* True if a subvolume points to this snapshot node: */
LE32_BITMASK(BCH_SNAPSHOT_SUBVOL,	struct bch_snapshot, flags,  1,  2)
/*
 * Snapshot trees:
 *
 * The snapshot_trees btree gives us a persistent identifier for each tree of
 * bch_snapshot nodes, and allows us to record and easily find the root/master
 * subvolume that other snapshots were created from:
 */
struct bch_snapshot_tree {
	struct bch_val		v;
	__le32			master_subvol;
	__le32			root_snapshot;
};

/* LRU btree: */

struct bch_lru {
	struct bch_val		v;
	__le64			idx;
} __packed __aligned(8);
#define LRU_ID_STRIPES		(1U << 16)

/* Logged operations btree: */

struct bch_logged_op_truncate {
	struct bch_val		v;
	__le32			subvol;
	__le32			pad;
	__le64			inum;
	__le64			new_i_size;
};

enum logged_op_finsert_state {
	LOGGED_OP_FINSERT_start,
	LOGGED_OP_FINSERT_shift_extents,
	LOGGED_OP_FINSERT_finish,
};

struct bch_logged_op_finsert {
	struct bch_val		v;
	__u8			state;
	__u8			pad[3];
	__le32			subvol;
	__le64			inum;
	__le64			dst_offset;
	__le64			src_offset;
	__le64			pos;
};
/* Optional/variable size superblock sections: */

struct bch_sb_field {
	__u64			_data[0];
	__le32			u64s;
	__le32			type;
};

#define BCH_SB_FIELDS()				\
	x(journal,			0)	\
	x(members_v1,			1)	\
	x(crypt,			2)	\
	x(replicas_v0,			3)	\
	x(quota,			4)	\
	x(disk_groups,			5)	\
	x(clean,			6)	\
	x(replicas,			7)	\
	x(journal_seq_blacklist,	8)	\
	x(journal_v2,			9)	\
	x(counters,			10)	\
	x(members_v2,			11)	\
	x(errors,			12)

enum bch_sb_field_type {
#define x(f, nr)	BCH_SB_FIELD_##f = nr,
	BCH_SB_FIELDS()
#undef x
	BCH_SB_FIELD_NR
};

/*
 * Most superblock fields are replicated in all devices' superblocks - a few are
 * not:
 */
#define BCH_SINGLE_DEVICE_SB_FIELDS		\
	((1U << BCH_SB_FIELD_journal)|		\
	 (1U << BCH_SB_FIELD_journal_v2))
/* BCH_SB_FIELD_journal: */

struct bch_sb_field_journal {
	struct bch_sb_field	field;
	__le64			buckets[];
};

struct bch_sb_field_journal_v2 {
	struct bch_sb_field	field;

	struct bch_sb_field_journal_v2_entry {
		__le64		start;
		__le64		nr;
	}			d[];
};
/* BCH_SB_FIELD_members_v1: */

#define BCH_MIN_NR_NBUCKETS	(1 << 6)

#define BCH_IOPS_MEASUREMENTS()			\
	x(seqread,	0)			\
	x(seqwrite,	1)			\
	x(randread,	2)			\
	x(randwrite,	3)

enum bch_iops_measurement {
#define x(t, n) BCH_IOPS_##t = n,
	BCH_IOPS_MEASUREMENTS()
#undef x
	BCH_IOPS_NR
};

#define BCH_MEMBER_ERROR_TYPES()		\
	x(read,		0)			\
	x(write,	1)			\
	x(checksum,	2)

enum bch_member_error_type {
#define x(t, n) BCH_MEMBER_ERROR_##t = n,
	BCH_MEMBER_ERROR_TYPES()
#undef x
	BCH_MEMBER_ERROR_NR
};
struct bch_member {
	__uuid_t		uuid;

	__le64			nbuckets;	/* device size */
	__le16			first_bucket;	/* index of first bucket used */
	__le16			bucket_size;	/* sectors */
	__le32			pad;
	__le64			last_mount;	/* time_t */

	__le64			flags;
	__le32			iops[4];
	__le64			errors[BCH_MEMBER_ERROR_NR];
	__le64			errors_at_reset[BCH_MEMBER_ERROR_NR];
	__le64			errors_reset_time;
};

#define BCH_MEMBER_V1_BYTES	56

LE64_BITMASK(BCH_MEMBER_STATE,		struct bch_member, flags,  0,  4)
/* 4-14 unused, was TIER, HAS_(META)DATA, REPLACEMENT */
LE64_BITMASK(BCH_MEMBER_DISCARD,	struct bch_member, flags, 14, 15)
LE64_BITMASK(BCH_MEMBER_DATA_ALLOWED,	struct bch_member, flags, 15, 20)
LE64_BITMASK(BCH_MEMBER_GROUP,		struct bch_member, flags, 20, 28)
LE64_BITMASK(BCH_MEMBER_DURABILITY,	struct bch_member, flags, 28, 30)
LE64_BITMASK(BCH_MEMBER_FREESPACE_INITIALIZED,
					struct bch_member, flags, 30, 31)

LE64_BITMASK(BCH_MEMBER_NR_READ_ERRORS,	struct bch_member, flags[1], 0, 20);
LE64_BITMASK(BCH_MEMBER_NR_WRITE_ERRORS,struct bch_member, flags[1], 20, 40);
#define BCH_MEMBER_STATES()			\
	x(rw,		0)			\
	x(ro,		1)			\
	x(failed,	2)			\
	x(spare,	3)

enum bch_member_state {
#define x(t, n) BCH_MEMBER_STATE_##t = n,
	BCH_MEMBER_STATES()
#undef x
	BCH_MEMBER_STATE_NR
};

struct bch_sb_field_members_v1 {
	struct bch_sb_field	field;
	struct bch_member	_members[]; // Members are now variable size
};

struct bch_sb_field_members_v2 {
	struct bch_sb_field	field;
	__le16			member_bytes; // size of single member entry
	__u8			pad[6];
	struct bch_member	_members[];
};
/* BCH_SB_FIELD_crypt: */

#define BCH_KEY_MAGIC					\
	(((__u64) 'b' <<  0)|((__u64) 'c' <<  8)|	\
	 ((__u64) 'h' << 16)|((__u64) '*' << 24)|	\
	 ((__u64) '*' << 32)|((__u64) 'k' << 40)|	\
	 ((__u64) 'e' << 48)|((__u64) 'y' << 56))

struct bch_encrypted_key {
	__le64			magic;
	__u64			key[4];
};

/*
 * If this field is present in the superblock, it stores an encryption key which
 * is used to encrypt all other data/metadata. The key will normally be encrypted
 * with the key userspace provides, but if encryption has been turned off we'll
 * just store the master key unencrypted in the superblock so we can access the
 * previously encrypted data.
 */
struct bch_sb_field_crypt {
	struct bch_sb_field	field;

	__le64			flags;
	__le64			kdf_flags;
	struct bch_encrypted_key key;
};
LE64_BITMASK(BCH_CRYPT_KDF_TYPE,	struct bch_sb_field_crypt, flags, 0, 4);

enum bch_kdf_types {
	BCH_KDF_SCRYPT		= 0,
	BCH_KDF_NR		= 1,
};

/* stored as base 2 log of scrypt params: */
LE64_BITMASK(BCH_KDF_SCRYPT_N,	struct bch_sb_field_crypt, kdf_flags,  0, 16);
LE64_BITMASK(BCH_KDF_SCRYPT_R,	struct bch_sb_field_crypt, kdf_flags, 16, 32);
LE64_BITMASK(BCH_KDF_SCRYPT_P,	struct bch_sb_field_crypt, kdf_flags, 32, 48);
/* BCH_SB_FIELD_replicas: */

#define BCH_DATA_TYPES()		\
	x(free,		0)		\
	x(sb,		1)		\
	x(journal,	2)		\
	x(btree,	3)		\
	x(user,		4)		\
	x(cached,	5)		\
	x(parity,	6)		\
	x(stripe,	7)		\
	x(need_gc_gens,	8)		\
	x(need_discard,	9)

enum bch_data_type {
#define x(t, n) BCH_DATA_##t,
	BCH_DATA_TYPES()
#undef x
	BCH_DATA_NR
};

static inline bool data_type_is_empty(enum bch_data_type type)
{
	switch (type) {
	case BCH_DATA_free:
	case BCH_DATA_need_gc_gens:
	case BCH_DATA_need_discard:
		return true;
	default:
		return false;
	}
}

static inline bool data_type_is_hidden(enum bch_data_type type)
{
	switch (type) {
	case BCH_DATA_sb:
	case BCH_DATA_journal:
		return true;
	default:
		return false;
	}
}
struct bch_replicas_entry_v0 {
	__u8			data_type;
	__u8			nr_devs;
	__u8			devs[];
} __packed;

struct bch_sb_field_replicas_v0 {
	struct bch_sb_field	field;
	struct bch_replicas_entry_v0 entries[];
} __packed __aligned(8);

struct bch_replicas_entry {
	__u8			data_type;
	__u8			nr_devs;
	__u8			nr_required;
	__u8			devs[];
} __packed;

#define replicas_entry_bytes(_i)			\
	(offsetof(typeof(*(_i)), devs) + (_i)->nr_devs)

struct bch_sb_field_replicas {
	struct bch_sb_field	field;
	struct bch_replicas_entry entries[];
} __packed __aligned(8);
/* BCH_SB_FIELD_quota: */

struct bch_sb_quota_counter {
	__le32				timelimit;
	__le32				warnlimit;
};

struct bch_sb_quota_type {
	__le64				flags;
	struct bch_sb_quota_counter	c[Q_COUNTERS];
};

struct bch_sb_field_quota {
	struct bch_sb_field		field;
	struct bch_sb_quota_type	q[QTYP_NR];
} __packed __aligned(8);
/* BCH_SB_FIELD_disk_groups: */

#define BCH_SB_LABEL_SIZE		32

struct bch_disk_group {
	__u8			label[BCH_SB_LABEL_SIZE];
	__le64			flags[2];
} __packed __aligned(8);

LE64_BITMASK(BCH_GROUP_DELETED,		struct bch_disk_group, flags[0], 0,  1)
LE64_BITMASK(BCH_GROUP_DATA_ALLOWED,	struct bch_disk_group, flags[0], 1,  6)
LE64_BITMASK(BCH_GROUP_PARENT,		struct bch_disk_group, flags[0], 6, 24)

struct bch_sb_field_disk_groups {
	struct bch_sb_field	field;
	struct bch_disk_group	entries[];
} __packed __aligned(8);
/* BCH_SB_FIELD_counters */

#define BCH_PERSISTENT_COUNTERS()				\
	x(io_read,					0)	\
	x(io_write,					1)	\
	x(io_move,					2)	\
	x(bucket_invalidate,				3)	\
	x(bucket_discard,				4)	\
	x(bucket_alloc,					5)	\
	x(bucket_alloc_fail,				6)	\
	x(btree_cache_scan,				7)	\
	x(btree_cache_reap,				8)	\
	x(btree_cache_cannibalize,			9)	\
	x(btree_cache_cannibalize_lock,			10)	\
	x(btree_cache_cannibalize_lock_fail,		11)	\
	x(btree_cache_cannibalize_unlock,		12)	\
	x(btree_node_write,				13)	\
	x(btree_node_read,				14)	\
	x(btree_node_compact,				15)	\
	x(btree_node_merge,				16)	\
	x(btree_node_split,				17)	\
	x(btree_node_rewrite,				18)	\
	x(btree_node_alloc,				19)	\
	x(btree_node_free,				20)	\
	x(btree_node_set_root,				21)	\
	x(btree_path_relock_fail,			22)	\
	x(btree_path_upgrade_fail,			23)	\
	x(btree_reserve_get_fail,			24)	\
	x(journal_entry_full,				25)	\
	x(journal_full,					26)	\
	x(journal_reclaim_finish,			27)	\
	x(journal_reclaim_start,			28)	\
	x(journal_write,				29)	\
	x(read_promote,					30)	\
	x(read_bounce,					31)	\
	x(read_reuse_race,				34)	\
	x(move_extent_read,				35)	\
	x(move_extent_write,				36)	\
	x(move_extent_finish,				37)	\
	x(move_extent_fail,				38)	\
	x(move_extent_start_fail,			39)	\
	x(copygc,					40)	\
	x(copygc_wait,					41)	\
	x(gc_gens_end,					42)	\
	x(gc_gens_start,				43)	\
	x(trans_blocked_journal_reclaim,		44)	\
	x(trans_restart_btree_node_reused,		45)	\
	x(trans_restart_btree_node_split,		46)	\
	x(trans_restart_fault_inject,			47)	\
	x(trans_restart_iter_upgrade,			48)	\
	x(trans_restart_journal_preres_get,		49)	\
	x(trans_restart_journal_reclaim,		50)	\
	x(trans_restart_journal_res_get,		51)	\
	x(trans_restart_key_cache_key_realloced,	52)	\
	x(trans_restart_key_cache_raced,		53)	\
	x(trans_restart_mark_replicas,			54)	\
	x(trans_restart_mem_realloced,			55)	\
	x(trans_restart_memory_allocation_failure,	56)	\
	x(trans_restart_relock,				57)	\
	x(trans_restart_relock_after_fill,		58)	\
	x(trans_restart_relock_key_cache_fill,		59)	\
	x(trans_restart_relock_next_node,		60)	\
	x(trans_restart_relock_parent_for_fill,		61)	\
	x(trans_restart_relock_path,			62)	\
	x(trans_restart_relock_path_intent,		63)	\
	x(trans_restart_too_many_iters,			64)	\
	x(trans_restart_traverse,			65)	\
	x(trans_restart_upgrade,			66)	\
	x(trans_restart_would_deadlock,			67)	\
	x(trans_restart_would_deadlock_write,		68)	\
	x(trans_restart_injected,			69)	\
	x(trans_restart_key_cache_upgrade,		70)	\
	x(trans_traverse_all,				71)	\
	x(transaction_commit,				72)	\
	x(write_super,					73)	\
	x(trans_restart_would_deadlock_recursion_limit,	74)	\
	x(trans_restart_write_buffer_flush,		75)	\
	x(trans_restart_split_race,			76)

enum bch_persistent_counters {
#define x(t, n, ...) BCH_COUNTER_##t,
	BCH_PERSISTENT_COUNTERS()
#undef x
	BCH_COUNTER_NR
};

struct bch_sb_field_counters {
	struct bch_sb_field	field;
	__le64			d[];
};
/*
 * On clean shutdown, store btree roots and current journal sequence number in
 * the superblock:
 */
struct jset_entry {
	__le16			u64s;
	__u8			btree_id;
	__u8			level;
	__u8			type;	/* designates what this jset holds */
	__u8			pad[3];

	struct bkey_i		start[0];
	__u64			_data[];
};

struct bch_sb_field_clean {
	struct bch_sb_field	field;

	__le32			flags;
	__le16			_read_clock;	/* no longer used */
	__le16			_write_clock;
	__le64			journal_seq;

	struct jset_entry	start[0];
	__u64			_data[];
};
struct journal_seq_blacklist_entry {
	__le64			start;
	__le64			end;
};

struct bch_sb_field_journal_seq_blacklist {
	struct bch_sb_field	field;
	struct journal_seq_blacklist_entry start[];
};
struct bch_sb_field_errors {
	struct bch_sb_field	field;
	struct bch_sb_field_error_entry {
		__le64		v;
		__le64		last_error_time;
	}			entries[];
};

LE64_BITMASK(BCH_SB_ERROR_ENTRY_ID,	struct bch_sb_field_error_entry, v,  0, 16);
LE64_BITMASK(BCH_SB_ERROR_ENTRY_NR,	struct bch_sb_field_error_entry, v, 16, 64);
/*
 * New versioning scheme:
 * One common version number for all on disk data structures - superblock, btree
 * nodes, journal entries
 */
#define BCH_VERSION_MAJOR(_v)		((__u16) ((_v) >> 10))
#define BCH_VERSION_MINOR(_v)		((__u16) ((_v) & ~(~0U << 10)))
#define BCH_VERSION(_major, _minor)	(((_major) << 10)|(_minor) << 0)
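/*
 * Worked example (illustrative): BCH_VERSION(1, 3) == (1 << 10) | 3 == 1027,
 * and conversely BCH_VERSION_MAJOR(1027) == 1, BCH_VERSION_MINOR(1027) == 3.
 */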
#define RECOVERY_PASS_ALL_FSCK		(1ULL << 63)
#define BCH_METADATA_VERSIONS()						\
	x(bkey_renumber,		BCH_VERSION(0, 10),		\
	  RECOVERY_PASS_ALL_FSCK)					\
	x(inode_btree_change,		BCH_VERSION(0, 11),		\
	  RECOVERY_PASS_ALL_FSCK)					\
	x(snapshot,			BCH_VERSION(0, 12),		\
	  RECOVERY_PASS_ALL_FSCK)					\
	x(inode_backpointers,		BCH_VERSION(0, 13),		\
	  RECOVERY_PASS_ALL_FSCK)					\
	x(btree_ptr_sectors_written,	BCH_VERSION(0, 14),		\
	  RECOVERY_PASS_ALL_FSCK)					\
	x(snapshot_2,			BCH_VERSION(0, 15),		\
	  BIT_ULL(BCH_RECOVERY_PASS_fs_upgrade_for_subvolumes)|	\
	  BIT_ULL(BCH_RECOVERY_PASS_initialize_subvolumes)|		\
	  RECOVERY_PASS_ALL_FSCK)					\
	x(reflink_p_fix,		BCH_VERSION(0, 16),		\
	  BIT_ULL(BCH_RECOVERY_PASS_fix_reflink_p))			\
	x(subvol_dirent,		BCH_VERSION(0, 17),		\
	  RECOVERY_PASS_ALL_FSCK)					\
	x(inode_v2,			BCH_VERSION(0, 18),		\
	  RECOVERY_PASS_ALL_FSCK)					\
	x(freespace,			BCH_VERSION(0, 19),		\
	  RECOVERY_PASS_ALL_FSCK)					\
	x(alloc_v4,			BCH_VERSION(0, 20),		\
	  RECOVERY_PASS_ALL_FSCK)					\
	x(new_data_types,		BCH_VERSION(0, 21),		\
	  RECOVERY_PASS_ALL_FSCK)					\
	x(backpointers,			BCH_VERSION(0, 22),		\
	  RECOVERY_PASS_ALL_FSCK)					\
	x(inode_v3,			BCH_VERSION(0, 23),		\
	  RECOVERY_PASS_ALL_FSCK)					\
	x(unwritten_extents,		BCH_VERSION(0, 24),		\
	  RECOVERY_PASS_ALL_FSCK)					\
	x(bucket_gens,			BCH_VERSION(0, 25),		\
	  BIT_ULL(BCH_RECOVERY_PASS_bucket_gens_init)|			\
	  RECOVERY_PASS_ALL_FSCK)					\
	x(lru_v2,			BCH_VERSION(0, 26),		\
	  RECOVERY_PASS_ALL_FSCK)					\
	x(fragmentation_lru,		BCH_VERSION(0, 27),		\
	  RECOVERY_PASS_ALL_FSCK)					\
	x(no_bps_in_alloc_keys,		BCH_VERSION(0, 28),		\
	  RECOVERY_PASS_ALL_FSCK)					\
	x(snapshot_trees,		BCH_VERSION(0, 29),		\
	  RECOVERY_PASS_ALL_FSCK)					\
	x(major_minor,			BCH_VERSION(1,  0),		\
	  0)								\
	x(snapshot_skiplists,		BCH_VERSION(1,  1),		\
	  BIT_ULL(BCH_RECOVERY_PASS_check_snapshots))			\
	x(deleted_inodes,		BCH_VERSION(1,  2),		\
	  BIT_ULL(BCH_RECOVERY_PASS_check_inodes))			\
	x(rebalance_work,		BCH_VERSION(1,  3),		\
	  BIT_ULL(BCH_RECOVERY_PASS_set_fs_needs_rebalance))
enum bcachefs_metadata_version {
	bcachefs_metadata_version_min = 9,
#define x(t, n, upgrade_passes) bcachefs_metadata_version_##t = n,
	BCH_METADATA_VERSIONS()
#undef x
	bcachefs_metadata_version_max
};

static const __maybe_unused
unsigned bcachefs_metadata_required_upgrade_below = bcachefs_metadata_version_rebalance_work;

#define bcachefs_metadata_version_current	(bcachefs_metadata_version_max - 1)
#define BCH_SB_SECTOR			8
#define BCH_SB_MEMBERS_MAX		64 /* XXX kill */

struct bch_sb_layout {
	__uuid_t		magic;	/* bcachefs superblock UUID */
	__u8			layout_type;
	__u8			sb_max_size_bits; /* base 2 of 512 byte sectors */
	__u8			nr_superblocks;
	__u8			pad[5];
	__le64			sb_offset[61];
} __packed __aligned(8);

#define BCH_SB_LAYOUT_SECTOR	7
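/*
 * Illustration (a sketch, not in the original header): the layout itself
 * lives at BCH_SB_LAYOUT_SECTOR (sector 7, byte offset 3584), and each
 * sb_offset[] entry gives a superblock location in 512-byte sectors - the
 * first is normally BCH_SB_SECTOR. Probing for backup superblocks might look
 * like:
 *
 *	struct bch_sb_layout l;
 *	// read 512 bytes at byte offset BCH_SB_LAYOUT_SECTOR << 9 into &l
 *	for (unsigned i = 0; i < l.nr_superblocks; i++) {
 *		__u64 byte_offset = __le64_to_cpu(l.sb_offset[i]) << 9;
 *		// read a struct bch_sb at byte_offset and verify its magic
 *	}
 */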
/*
 * @offset	- sector where this sb was written
 * @version	- on disk format version
 * @version_min	- Oldest metadata version this filesystem contains; so we can
 *		  safely drop compatibility code and refuse to mount filesystems
 *		  we'd need it for
 * @magic	- identifies as a bcachefs superblock (BCHFS_MAGIC)
 * @uuid	- used for generating various magic numbers and identifying
 *		  member devices, never changes
 * @user_uuid	- user visible UUID, may be changed
 * @label	- filesystem label
 * @seq		- identifies most recent superblock, incremented each time
 *		  superblock is written
 * @features	- enabled incompatible features
 */
struct bch_sb {
	struct bch_csum		csum;
	__le16			version;
	__le16			version_min;
	__le16			pad[2];
	__uuid_t		magic;
	__uuid_t		uuid;
	__uuid_t		user_uuid;
	__u8			label[BCH_SB_LABEL_SIZE];
	__le64			offset;
	__le64			seq;

	__le16			block_size;
	__u8			dev_idx;
	__u8			nr_devices;
	__le32			u64s;

	__le64			time_base_lo;
	__le32			time_base_hi;
	__le32			time_precision;

	__le64			flags[8];
	__le64			features[2];
	__le64			compat[2];

	struct bch_sb_layout	layout;

	struct bch_sb_field	start[0];
	__le64			_data[];
} __packed __aligned(8);
/*
 * Flags:
 * BCH_SB_INITIALIZED	- set on first mount
 * BCH_SB_CLEAN		- did we shut down cleanly? Just a hint, doesn't affect
 *			  behaviour of mount/recovery path:
 * BCH_SB_INODE_32BIT	- limit inode numbers to 32 bits
 * BCH_SB_128_BIT_MACS	- 128 bit macs instead of 80
 * BCH_SB_ENCRYPTION_TYPE - if nonzero encryption is enabled; overrides
 *			   DATA/META_CSUM_TYPE. Also indicates encryption
 *			   algorithm in use, if/when we get more than one
 */
LE16_BITMASK(BCH_SB_BLOCK_SIZE,		struct bch_sb, block_size, 0, 16);

LE64_BITMASK(BCH_SB_INITIALIZED,	struct bch_sb, flags[0],  0,  1);
LE64_BITMASK(BCH_SB_CLEAN,		struct bch_sb, flags[0],  1,  2);
LE64_BITMASK(BCH_SB_CSUM_TYPE,		struct bch_sb, flags[0],  2,  8);
LE64_BITMASK(BCH_SB_ERROR_ACTION,	struct bch_sb, flags[0],  8, 12);

LE64_BITMASK(BCH_SB_BTREE_NODE_SIZE,	struct bch_sb, flags[0], 12, 28);

LE64_BITMASK(BCH_SB_GC_RESERVE,		struct bch_sb, flags[0], 28, 33);
LE64_BITMASK(BCH_SB_ROOT_RESERVE,	struct bch_sb, flags[0], 33, 40);

LE64_BITMASK(BCH_SB_META_CSUM_TYPE,	struct bch_sb, flags[0], 40, 44);
LE64_BITMASK(BCH_SB_DATA_CSUM_TYPE,	struct bch_sb, flags[0], 44, 48);

LE64_BITMASK(BCH_SB_META_REPLICAS_WANT,	struct bch_sb, flags[0], 48, 52);
LE64_BITMASK(BCH_SB_DATA_REPLICAS_WANT,	struct bch_sb, flags[0], 52, 56);

LE64_BITMASK(BCH_SB_POSIX_ACL,		struct bch_sb, flags[0], 56, 57);
LE64_BITMASK(BCH_SB_USRQUOTA,		struct bch_sb, flags[0], 57, 58);
LE64_BITMASK(BCH_SB_GRPQUOTA,		struct bch_sb, flags[0], 58, 59);
LE64_BITMASK(BCH_SB_PRJQUOTA,		struct bch_sb, flags[0], 59, 60);

LE64_BITMASK(BCH_SB_HAS_ERRORS,		struct bch_sb, flags[0], 60, 61);
LE64_BITMASK(BCH_SB_HAS_TOPOLOGY_ERRORS,struct bch_sb, flags[0], 61, 62);

LE64_BITMASK(BCH_SB_BIG_ENDIAN,		struct bch_sb, flags[0], 62, 63);

LE64_BITMASK(BCH_SB_STR_HASH_TYPE,	struct bch_sb, flags[1],  0,  4);
LE64_BITMASK(BCH_SB_COMPRESSION_TYPE_LO,struct bch_sb, flags[1],  4,  8);
LE64_BITMASK(BCH_SB_INODE_32BIT,	struct bch_sb, flags[1],  8,  9);

LE64_BITMASK(BCH_SB_128_BIT_MACS,	struct bch_sb, flags[1],  9, 10);
LE64_BITMASK(BCH_SB_ENCRYPTION_TYPE,	struct bch_sb, flags[1], 10, 14);
/*
 * Max size of an extent that may require bouncing to read or write
 * (checksummed, compressed): 64k
 */
LE64_BITMASK(BCH_SB_ENCODED_EXTENT_MAX_BITS,
					struct bch_sb, flags[1], 14, 20);

LE64_BITMASK(BCH_SB_META_REPLICAS_REQ,	struct bch_sb, flags[1], 20, 24);
LE64_BITMASK(BCH_SB_DATA_REPLICAS_REQ,	struct bch_sb, flags[1], 24, 28);

LE64_BITMASK(BCH_SB_PROMOTE_TARGET,	struct bch_sb, flags[1], 28, 40);
LE64_BITMASK(BCH_SB_FOREGROUND_TARGET,	struct bch_sb, flags[1], 40, 52);
LE64_BITMASK(BCH_SB_BACKGROUND_TARGET,	struct bch_sb, flags[1], 52, 64);

LE64_BITMASK(BCH_SB_BACKGROUND_COMPRESSION_TYPE_LO,
					struct bch_sb, flags[2],  0,  4);
LE64_BITMASK(BCH_SB_GC_RESERVE_BYTES,	struct bch_sb, flags[2],  4, 64);

LE64_BITMASK(BCH_SB_ERASURE_CODE,	struct bch_sb, flags[3],  0, 16);
LE64_BITMASK(BCH_SB_METADATA_TARGET,	struct bch_sb, flags[3], 16, 28);
LE64_BITMASK(BCH_SB_SHARD_INUMS,	struct bch_sb, flags[3], 28, 29);
LE64_BITMASK(BCH_SB_INODES_USE_KEY_CACHE,struct bch_sb, flags[3], 29, 30);
LE64_BITMASK(BCH_SB_JOURNAL_FLUSH_DELAY,struct bch_sb, flags[3], 30, 62);
LE64_BITMASK(BCH_SB_JOURNAL_FLUSH_DISABLED,struct bch_sb, flags[3], 62, 63);
LE64_BITMASK(BCH_SB_JOURNAL_RECLAIM_DELAY,struct bch_sb, flags[4],  0, 32);
LE64_BITMASK(BCH_SB_JOURNAL_TRANSACTION_NAMES,struct bch_sb, flags[4], 32, 33);
LE64_BITMASK(BCH_SB_NOCOW,		struct bch_sb, flags[4], 33, 34);
LE64_BITMASK(BCH_SB_WRITE_BUFFER_SIZE,	struct bch_sb, flags[4], 34, 54);
LE64_BITMASK(BCH_SB_VERSION_UPGRADE,	struct bch_sb, flags[4], 54, 56);

LE64_BITMASK(BCH_SB_COMPRESSION_TYPE_HI,struct bch_sb, flags[4], 56, 60);
LE64_BITMASK(BCH_SB_BACKGROUND_COMPRESSION_TYPE_HI,
					struct bch_sb, flags[4], 60, 64);

LE64_BITMASK(BCH_SB_VERSION_UPGRADE_COMPLETE,
					struct bch_sb, flags[5],  0, 16);
static inline __u64 BCH_SB_COMPRESSION_TYPE(const struct bch_sb *sb)
{
	return BCH_SB_COMPRESSION_TYPE_LO(sb) | (BCH_SB_COMPRESSION_TYPE_HI(sb) << 4);
}

static inline void SET_BCH_SB_COMPRESSION_TYPE(struct bch_sb *sb, __u64 v)
{
	SET_BCH_SB_COMPRESSION_TYPE_LO(sb, v);
	SET_BCH_SB_COMPRESSION_TYPE_HI(sb, v >> 4);
}

static inline __u64 BCH_SB_BACKGROUND_COMPRESSION_TYPE(const struct bch_sb *sb)
{
	return BCH_SB_BACKGROUND_COMPRESSION_TYPE_LO(sb) |
		(BCH_SB_BACKGROUND_COMPRESSION_TYPE_HI(sb) << 4);
}

static inline void SET_BCH_SB_BACKGROUND_COMPRESSION_TYPE(struct bch_sb *sb, __u64 v)
{
	SET_BCH_SB_BACKGROUND_COMPRESSION_TYPE_LO(sb, v);
	SET_BCH_SB_BACKGROUND_COMPRESSION_TYPE_HI(sb, v >> 4);
}
/*
 * Features:
 *
 * journal_seq_blacklist_v3:	gates BCH_SB_FIELD_journal_seq_blacklist
 * reflink:			gates KEY_TYPE_reflink
 * inline_data:			gates KEY_TYPE_inline_data
 * new_siphash:			gates BCH_STR_HASH_siphash
 * new_extent_overwrite:	gates BTREE_NODE_NEW_EXTENT_OVERWRITE
 */
#define BCH_SB_FEATURES()			\
	x(lz4,				0)	\
	x(gzip,				1)	\
	x(zstd,				2)	\
	x(atomic_nlink,			3)	\
	x(ec,				4)	\
	x(journal_seq_blacklist_v3,	5)	\
	x(reflink,			6)	\
	x(new_siphash,			7)	\
	x(inline_data,			8)	\
	x(new_extent_overwrite,		9)	\
	x(incompressible,		10)	\
	x(btree_ptr_v2,			11)	\
	x(extents_above_btree_updates,	12)	\
	x(btree_updates_journalled,	13)	\
	x(reflink_inline_data,		14)	\
	x(new_varint,			15)	\
	x(journal_no_flush,		16)	\
	x(alloc_v2,			17)	\
	x(extents_across_btree_nodes,	18)
#define BCH_SB_FEATURES_ALWAYS				\
	((1ULL << BCH_FEATURE_new_extent_overwrite)|	\
	 (1ULL << BCH_FEATURE_extents_above_btree_updates)|\
	 (1ULL << BCH_FEATURE_btree_updates_journalled)|\
	 (1ULL << BCH_FEATURE_alloc_v2)|\
	 (1ULL << BCH_FEATURE_extents_across_btree_nodes))

#define BCH_SB_FEATURES_ALL				\
	(BCH_SB_FEATURES_ALWAYS|			\
	 (1ULL << BCH_FEATURE_new_siphash)|		\
	 (1ULL << BCH_FEATURE_btree_ptr_v2)|		\
	 (1ULL << BCH_FEATURE_new_varint)|		\
	 (1ULL << BCH_FEATURE_journal_no_flush))

enum bch_sb_feature {
#define x(f, n) BCH_FEATURE_##f,
	BCH_SB_FEATURES()
#undef x
	BCH_FEATURE_NR,
};

#define BCH_SB_COMPAT()					\
	x(alloc_info,				0)	\
	x(alloc_metadata,			1)	\
	x(extents_above_btree_updates_done,	2)	\
	x(bformat_overflow_done,		3)

enum bch_sb_compat {
#define x(f, n) BCH_COMPAT_##f,
	BCH_SB_COMPAT()
#undef x
	BCH_COMPAT_NR,
};
/* options: */

#define BCH_VERSION_UPGRADE_OPTS()	\
	x(compatible,		0)	\
	x(incompatible,		1)	\
	x(none,			2)

enum bch_version_upgrade_opts {
#define x(t, n) BCH_VERSION_UPGRADE_##t = n,
	BCH_VERSION_UPGRADE_OPTS()
#undef x
};
#define BCH_REPLICAS_MAX		4U

#define BCH_BKEY_PTRS_MAX		16U

#define BCH_ERROR_ACTIONS()		\
	x(continue,		0)	\
	x(ro,			1)	\
	x(panic,		2)

enum bch_error_actions {
#define x(t, n) BCH_ON_ERROR_##t = n,
	BCH_ERROR_ACTIONS()
#undef x
	BCH_ON_ERROR_NR
};

#define BCH_STR_HASH_TYPES()		\
	x(crc32c,		0)	\
	x(crc64,		1)	\
	x(siphash_old,		2)	\
	x(siphash,		3)

enum bch_str_hash_type {
#define x(t, n) BCH_STR_HASH_##t = n,
	BCH_STR_HASH_TYPES()
#undef x
	BCH_STR_HASH_NR
};

#define BCH_STR_HASH_OPTS()		\
	x(crc32c,		0)	\
	x(crc64,		1)	\
	x(siphash,		2)

enum bch_str_hash_opts {
#define x(t, n) BCH_STR_HASH_OPT_##t = n,
	BCH_STR_HASH_OPTS()
#undef x
	BCH_STR_HASH_OPT_NR
};
#define BCH_CSUM_TYPES()			\
	x(none,				0)	\
	x(crc32c_nonzero,		1)	\
	x(crc64_nonzero,		2)	\
	x(chacha20_poly1305_80,		3)	\
	x(chacha20_poly1305_128,	4)	\
	x(crc32c,			5)	\
	x(crc64,			6)	\
	x(xxhash,			7)

enum bch_csum_type {
#define x(t, n) BCH_CSUM_##t = n,
	BCH_CSUM_TYPES()
#undef x
	BCH_CSUM_NR
};

static const __maybe_unused unsigned bch_crc_bytes[] = {
	[BCH_CSUM_none]				= 0,
	[BCH_CSUM_crc32c_nonzero]		= 4,
	[BCH_CSUM_crc32c]			= 4,
	[BCH_CSUM_crc64_nonzero]		= 8,
	[BCH_CSUM_crc64]			= 8,
	[BCH_CSUM_xxhash]			= 8,
	[BCH_CSUM_chacha20_poly1305_80]		= 10,
	[BCH_CSUM_chacha20_poly1305_128]	= 16,
};

static inline _Bool bch2_csum_type_is_encryption(enum bch_csum_type type)
{
	switch (type) {
	case BCH_CSUM_chacha20_poly1305_80:
	case BCH_CSUM_chacha20_poly1305_128:
		return true;
	default:
		return false;
	}
}
#define BCH_CSUM_OPTS()			\
	x(none,			0)	\
	x(crc32c,		1)	\
	x(crc64,		2)	\
	x(xxhash,		3)

enum bch_csum_opts {
#define x(t, n) BCH_CSUM_OPT_##t = n,
	BCH_CSUM_OPTS()
#undef x
	BCH_CSUM_OPT_NR
};

#define BCH_COMPRESSION_TYPES()		\
	x(none,			0)	\
	x(lz4_old,		1)	\
	x(gzip,			2)	\
	x(lz4,			3)	\
	x(zstd,			4)	\
	x(incompressible,	5)

enum bch_compression_type {
#define x(t, n) BCH_COMPRESSION_TYPE_##t = n,
	BCH_COMPRESSION_TYPES()
#undef x
	BCH_COMPRESSION_TYPE_NR
};

#define BCH_COMPRESSION_OPTS()		\
	x(none,		0)		\
	x(lz4,		1)		\
	x(gzip,		2)		\
	x(zstd,		3)

enum bch_compression_opts {
#define x(t, n) BCH_COMPRESSION_OPT_##t = n,
	BCH_COMPRESSION_OPTS()
#undef x
	BCH_COMPRESSION_OPT_NR
};
/*
 * Magic numbers
 *
 * The various other data structures have their own magic numbers, which are
 * xored with the first part of the cache set's UUID
 */

#define BCACHE_MAGIC							\
	UUID_INIT(0xc68573f6, 0x4e1a, 0x45ca,				\
		  0x82, 0x65, 0xf5, 0x7f, 0x48, 0xba, 0x6d, 0x81)
#define BCHFS_MAGIC							\
	UUID_INIT(0xc68573f6, 0x66ce, 0x90a9,				\
		  0xd9, 0x6a, 0x60, 0xcf, 0x80, 0x3d, 0xf7, 0xef)

#define BCACHEFS_STATFS_MAGIC		0xca451a4e

#define JSET_MAGIC		__cpu_to_le64(0x245235c1a3625032ULL)
#define BSET_MAGIC		__cpu_to_le64(0x90135c78b99e07f5ULL)

static inline __le64 __bch2_sb_magic(struct bch_sb *sb)
{
	__le64 ret;

	memcpy(&ret, &sb->uuid, sizeof(ret));
	return ret;
}

static inline __u64 __jset_magic(struct bch_sb *sb)
{
	return __le64_to_cpu(__bch2_sb_magic(sb) ^ JSET_MAGIC);
}

static inline __u64 __bset_magic(struct bch_sb *sb)
{
	return __le64_to_cpu(__bch2_sb_magic(sb) ^ BSET_MAGIC);
}
/* Journal */

#define JSET_KEYS_U64s	(sizeof(struct jset_entry) / sizeof(__u64))

#define BCH_JSET_ENTRY_TYPES()			\
	x(btree_keys,		0)		\
	x(btree_root,		1)		\
	x(prio_ptrs,		2)		\
	x(blacklist,		3)		\
	x(blacklist_v2,		4)		\
	x(usage,		5)		\
	x(data_usage,		6)		\
	x(clock,		7)		\
	x(dev_usage,		8)		\
	x(log,			9)		\
	x(overwrite,		10)

enum bch_jset_entry_type {
#define x(f, nr)	BCH_JSET_ENTRY_##f = nr,
	BCH_JSET_ENTRY_TYPES()
#undef x
	BCH_JSET_ENTRY_NR
};
/*
 * Journal sequence numbers can be blacklisted: bsets record the max sequence
 * number of all the journal entries they contain updates for, so that on
 * recovery we can ignore those bsets that contain index updates newer than what
 * made it into the journal.
 *
 * This means that we can't reuse that journal_seq - we have to skip it, and
 * then record that we skipped it so that the next time we crash and recover we
 * don't think there was a missing journal entry.
 */
struct jset_entry_blacklist {
	struct jset_entry	entry;
	__le64			seq;
};

struct jset_entry_blacklist_v2 {
	struct jset_entry	entry;
	__le64			start;
	__le64			end;
};
#define BCH_FS_USAGE_TYPES()			\
	x(reserved,		0)		\
	x(inodes,		1)		\
	x(key_version,		2)

enum bch_fs_usage_type {
#define x(f, nr)	BCH_FS_USAGE_##f = nr,
	BCH_FS_USAGE_TYPES()
#undef x
	BCH_FS_USAGE_NR
};

struct jset_entry_usage {
	struct jset_entry	entry;
	__le64			v;
} __packed;

struct jset_entry_data_usage {
	struct jset_entry	entry;
	__le64			v;
	struct bch_replicas_entry r;
} __packed;

struct jset_entry_clock {
	struct jset_entry	entry;
	__u8			rw;
	__u8			pad[7];
	__le64			time;
} __packed;

struct jset_entry_dev_usage_type {
	__le64			buckets;
	__le64			sectors;
	__le64			fragmented;
} __packed;

struct jset_entry_dev_usage {
	struct jset_entry	entry;
	__le32			dev;
	__u32			pad;

	__le64			buckets_ec;
	__le64			_buckets_unavailable;	/* No longer used */

	struct jset_entry_dev_usage_type d[];
};

static inline unsigned jset_entry_dev_usage_nr_types(struct jset_entry_dev_usage *u)
{
	return (vstruct_bytes(&u->entry) - sizeof(struct jset_entry_dev_usage)) /
		sizeof(struct jset_entry_dev_usage_type);
}

struct jset_entry_log {
	struct jset_entry	entry;
	u8			d[];
} __packed;
/*
 * On disk format for a journal entry:
 * seq is monotonically increasing; every journal entry has its own unique
 * sequence number.
 *
 * last_seq is the oldest journal entry that still has keys the btree hasn't
 * flushed to disk yet.
 *
 * version is for on disk format changes.
 */
struct jset {
	struct bch_csum		csum;

	__le64			magic;
	__le64			seq;
	__le32			version;
	__le32			flags;

	__le32			u64s; /* size of d[] in u64s */

	__u8			encrypted_start[0];

	__le16			_read_clock; /* no longer used */
	__le16			_write_clock;

	/* Sequence number of oldest dirty journal entry */
	__le64			last_seq;

	struct jset_entry	start[0];
	__u64			_data[];
} __packed __aligned(8);
LE32_BITMASK(JSET_CSUM_TYPE,	struct jset, flags, 0, 4);
LE32_BITMASK(JSET_BIG_ENDIAN,	struct jset, flags, 4, 5);
LE32_BITMASK(JSET_NO_FLUSH,	struct jset, flags, 5, 6);

#define BCH_JOURNAL_BUCKETS_MIN		8
/* Btree: */

enum btree_id_flags {
	BTREE_ID_EXTENTS	= BIT(0),
	BTREE_ID_SNAPSHOTS	= BIT(1),
	BTREE_ID_SNAPSHOT_FIELD	= BIT(2),
	BTREE_ID_DATA		= BIT(3),
};
#define BCH_BTREE_IDS()						\
	x(extents,	0,	BTREE_ID_EXTENTS|BTREE_ID_SNAPSHOTS|BTREE_ID_DATA,\
	  BIT_ULL(KEY_TYPE_whiteout)|				\
	  BIT_ULL(KEY_TYPE_error)|				\
	  BIT_ULL(KEY_TYPE_cookie)|				\
	  BIT_ULL(KEY_TYPE_extent)|				\
	  BIT_ULL(KEY_TYPE_reservation)|			\
	  BIT_ULL(KEY_TYPE_reflink_p)|				\
	  BIT_ULL(KEY_TYPE_inline_data))			\
	x(inodes,	1,	BTREE_ID_SNAPSHOTS,		\
	  BIT_ULL(KEY_TYPE_whiteout)|				\
	  BIT_ULL(KEY_TYPE_inode)|				\
	  BIT_ULL(KEY_TYPE_inode_v2)|				\
	  BIT_ULL(KEY_TYPE_inode_v3)|				\
	  BIT_ULL(KEY_TYPE_inode_generation))			\
	x(dirents,	2,	BTREE_ID_SNAPSHOTS,		\
	  BIT_ULL(KEY_TYPE_whiteout)|				\
	  BIT_ULL(KEY_TYPE_hash_whiteout)|			\
	  BIT_ULL(KEY_TYPE_dirent))				\
	x(xattrs,	3,	BTREE_ID_SNAPSHOTS,		\
	  BIT_ULL(KEY_TYPE_whiteout)|				\
	  BIT_ULL(KEY_TYPE_cookie)|				\
	  BIT_ULL(KEY_TYPE_hash_whiteout)|			\
	  BIT_ULL(KEY_TYPE_xattr))				\
	x(alloc,	4,	0,				\
	  BIT_ULL(KEY_TYPE_alloc)|				\
	  BIT_ULL(KEY_TYPE_alloc_v2)|				\
	  BIT_ULL(KEY_TYPE_alloc_v3)|				\
	  BIT_ULL(KEY_TYPE_alloc_v4))				\
	x(quotas,	5,	0,				\
	  BIT_ULL(KEY_TYPE_quota))				\
	x(stripes,	6,	0,				\
	  BIT_ULL(KEY_TYPE_stripe))				\
	x(reflink,	7,	BTREE_ID_EXTENTS|BTREE_ID_DATA,	\
	  BIT_ULL(KEY_TYPE_reflink_v)|				\
	  BIT_ULL(KEY_TYPE_indirect_inline_data))		\
	x(subvolumes,	8,	0,				\
	  BIT_ULL(KEY_TYPE_subvolume))				\
	x(snapshots,	9,	0,				\
	  BIT_ULL(KEY_TYPE_snapshot))				\
	x(lru,		10,	0,				\
	  BIT_ULL(KEY_TYPE_set))				\
	x(freespace,	11,	BTREE_ID_EXTENTS,		\
	  BIT_ULL(KEY_TYPE_set))				\
	x(need_discard,	12,	0,				\
	  BIT_ULL(KEY_TYPE_set))				\
	x(backpointers,	13,	0,				\
	  BIT_ULL(KEY_TYPE_backpointer))			\
	x(bucket_gens,	14,	0,				\
	  BIT_ULL(KEY_TYPE_bucket_gens))			\
	x(snapshot_trees, 15,	0,				\
	  BIT_ULL(KEY_TYPE_snapshot_tree))			\
	x(deleted_inodes, 16,	BTREE_ID_SNAPSHOT_FIELD,	\
	  BIT_ULL(KEY_TYPE_set))				\
	x(logged_ops,	17,	0,				\
	  BIT_ULL(KEY_TYPE_logged_op_truncate)|			\
	  BIT_ULL(KEY_TYPE_logged_op_finsert))			\
	x(rebalance_work, 18,	BTREE_ID_SNAPSHOT_FIELD,	\
	  BIT_ULL(KEY_TYPE_set)|BIT_ULL(KEY_TYPE_cookie))
enum btree_id {
#define x(name, nr, ...) BTREE_ID_##name = nr,
	BCH_BTREE_IDS()
#undef x
	BTREE_ID_NR
};

#define BTREE_MAX_DEPTH		4U
/*
 * Btree nodes
 *
 * On disk a btree node is a list/log of these; within each set the keys are
 * sorted
 */
struct bset {
	__le64			seq;

	/*
	 * Highest journal entry this bset contains keys for.
	 * If on recovery we don't see that journal entry, this bset is ignored:
	 * this allows us to preserve the order of all index updates after a
	 * crash, since the journal records a total order of all index updates
	 * and anything that didn't make it to the journal doesn't get used.
	 */
	__le64			journal_seq;

	__le32			flags;
	__le16			version;
	__le16			u64s; /* count of d[] in u64s */

	struct bkey_packed	start[0];
	__u64			_data[];
} __packed __aligned(8);
LE32_BITMASK(BSET_CSUM_TYPE,	struct bset, flags, 0, 4);

LE32_BITMASK(BSET_BIG_ENDIAN,	struct bset, flags, 4, 5);
LE32_BITMASK(BSET_SEPARATE_WHITEOUTS,
				struct bset, flags, 5, 6);

/* Sector offset within the btree node: */
LE32_BITMASK(BSET_OFFSET,	struct bset, flags, 16, 32);
struct btree_node {
	struct bch_csum		csum;
	__le64			magic;

	/* this flags field is encrypted, unlike bset->flags: */
	__le64			flags;

	/* Closed interval: */
	struct bpos		min_key;
	struct bpos		max_key;
	struct bch_extent_ptr	_ptr; /* not used anymore */
	struct bkey_format	format;

	union {
	struct bset		keys;
	struct {
		__u8		pad[22];
		__le16		u64s;
		__u64		_data[0];
	};
	};
} __packed __aligned(8);
LE64_BITMASK(BTREE_NODE_ID_LO,	struct btree_node, flags,  0,  4);
LE64_BITMASK(BTREE_NODE_LEVEL,	struct btree_node, flags,  4,  8);
LE64_BITMASK(BTREE_NODE_NEW_EXTENT_OVERWRITE,
				struct btree_node, flags,  8,  9);
LE64_BITMASK(BTREE_NODE_ID_HI,	struct btree_node, flags,  9, 25);
LE64_BITMASK(BTREE_NODE_SEQ,	struct btree_node, flags, 32, 64);

static inline __u64 BTREE_NODE_ID(struct btree_node *n)
{
	return BTREE_NODE_ID_LO(n) | (BTREE_NODE_ID_HI(n) << 4);
}

static inline void SET_BTREE_NODE_ID(struct btree_node *n, __u64 v)
{
	SET_BTREE_NODE_ID_LO(n, v);
	SET_BTREE_NODE_ID_HI(n, v >> 4);
}
struct btree_node_entry {
	struct bch_csum		csum;

	union {
	struct bset		keys;
	struct {
		__u8		pad[22];
		__le16		u64s;
		__u64		_data[0];
	};
	};
} __packed __aligned(8);
#endif /* _BCACHEFS_FORMAT_H */