/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_FORMAT_H
#define _BCACHEFS_FORMAT_H

/*
 * bcachefs on disk data structures
 *
 * OVERVIEW:
 *
 * There are three main types of on disk data structures in bcachefs (this is
 * reduced from 5 in bcache)
 *
 *  - superblock
 *  - journal
 *  - btree
 *
 * The btree is the primary structure; most metadata exists as keys in the
 * various btrees. There are only a small number of btrees, they're not
 * sharded - we have one btree for extents, another for inodes, et cetera.
 *
 * SUPERBLOCK:
 *
 * The superblock contains the location of the journal, the list of devices in
 * the filesystem, and in general any metadata we need in order to decide
 * whether we can start a filesystem, or that we need prior to reading the
 * journal/btree roots.
 *
 * The superblock is extensible, and most of the contents of the superblock are
 * in variable length, type tagged fields; see struct bch_sb_field.
 *
 * Backup superblocks do not reside in a fixed location; also, superblocks do
 * not have a fixed size. To locate backup superblocks we have struct
 * bch_sb_layout; we store a copy of this inside every superblock, and also
 * before the first superblock.
 *
 * JOURNAL:
 *
 * The journal primarily records btree updates in the order they occurred;
 * journal replay consists of just iterating over all the keys in the open
 * journal entries and re-inserting them into the btrees.
 *
 * The journal also contains entry types for the btree roots, and blacklisted
 * journal sequence numbers (see journal_seq_blacklist.c).
 *
 * BTREE:
 *
 * bcachefs btrees are copy on write b+ trees, where nodes are big (typically
 * 128k-256k) and log structured. We use struct btree_node for writing the first
 * entry in a given node (offset 0), and struct btree_node_entry for all
 * subsequent writes.
 *
 * After the header, btree node entries contain a list of keys in sorted order.
 * Values are stored inline with the keys; since values are variable length (and
 * keys effectively are variable length too, due to packing) we can't do random
 * access without building up additional in memory tables in the btree node read
 * path.
 *
 * BTREE KEYS (struct bkey):
 *
 * The various btrees share a common format for the key - so as to avoid
 * switching in fastpath lookup/comparison code - but define their own
 * structures for the key values.
 *
 * The size of a key/value pair is stored as a u8 in units of u64s, so the max
 * size is just under 2k. The common part also contains a type tag for the
 * value, and a format field indicating whether the key is packed or not (and
 * also meant to allow adding new key fields in the future, if desired).
 *
 * bkeys, when stored within a btree node, may also be packed. In that case, the
 * bkey_format in that node is used to unpack it. Packed bkeys mean that we can
 * be generous with field sizes in the common part of the key format (64 bit
 * inode number, 64 bit offset, 96 bit version field, etc.) for negligible cost.
 */

#include <asm/types.h>
#include <asm/byteorder.h>
#include <linux/kernel.h>
#include <linux/uuid.h>
#include "vstructs.h"

#ifdef __KERNEL__
typedef uuid_t __uuid_t;
#endif

#define BITMASK(name, type, field, offset, end)                         \
static const __maybe_unused unsigned    name##_OFFSET = offset;         \
static const __maybe_unused unsigned    name##_BITS = (end - offset);   \
                                                                        \
static inline __u64 name(const type *k)                                 \
{                                                                       \
        return (k->field >> offset) & ~(~0ULL << (end - offset));       \
}                                                                       \
                                                                        \
static inline void SET_##name(type *k, __u64 v)                         \
{                                                                       \
        k->field &= ~(~(~0ULL << (end - offset)) << offset);            \
        k->field |= (v & ~(~0ULL << (end - offset))) << offset;         \
}

#define LE_BITMASK(_bits, name, type, field, offset, end)               \
static const __maybe_unused unsigned    name##_OFFSET = offset;         \
static const __maybe_unused unsigned    name##_BITS = (end - offset);   \
static const __maybe_unused __u##_bits  name##_MAX = (1ULL << (end - offset)) - 1;\
                                                                        \
static inline __u64 name(const type *k)                                 \
{                                                                       \
        return (__le##_bits##_to_cpu(k->field) >> offset) &             \
                ~(~0ULL << (end - offset));                             \
}                                                                       \
                                                                        \
static inline void SET_##name(type *k, __u64 v)                         \
{                                                                       \
        __u##_bits new = __le##_bits##_to_cpu(k->field);                \
                                                                        \
        new &= ~(~(~0ULL << (end - offset)) << offset);                 \
        new |= (v & ~(~0ULL << (end - offset))) << offset;              \
        k->field = __cpu_to_le##_bits(new);                             \
}

#define LE16_BITMASK(n, t, f, o, e)     LE_BITMASK(16, n, t, f, o, e)
#define LE32_BITMASK(n, t, f, o, e)     LE_BITMASK(32, n, t, f, o, e)
#define LE64_BITMASK(n, t, f, o, e)     LE_BITMASK(64, n, t, f, o, e)
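
/*
 * A minimal sketch of what these macros generate - e.g.
 * LE64_BITMASK(BCH_MEMBER_DISCARD, struct bch_member, flags, 14, 15)
 * (defined below) expands to roughly:
 *
 *	static inline __u64 BCH_MEMBER_DISCARD(const struct bch_member *k)
 *	{
 *		return (__le64_to_cpu(k->flags) >> 14) & 1;
 *	}
 *
 *	static inline void SET_BCH_MEMBER_DISCARD(struct bch_member *k, __u64 v)
 *	{
 *		// read-modify-write of bit 14 of k->flags, in native endianness
 *	}
 *
 * i.e. a typed getter/setter pair for a bitfield stored in a little endian
 * on disk integer.
 */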
123
124 struct bkey_format {
125         __u8            key_u64s;
126         __u8            nr_fields;
127         /* One unused slot for now: */
128         __u8            bits_per_field[6];
129         __le64          field_offset[6];
130 };
131
132 /* Btree keys - all units are in sectors */
133
134 struct bpos {
135         /*
136          * Word order matches machine byte order - btree code treats a bpos as a
137          * single large integer, for search/comparison purposes
138          *
139          * Note that wherever a bpos is embedded in another on disk data
140          * structure, it has to be byte swabbed when reading in metadata that
141          * wasn't written in native endian order:
142          */
143 #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
144         __u32           snapshot;
145         __u64           offset;
146         __u64           inode;
147 #elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
148         __u64           inode;
149         __u64           offset;         /* Points to end of extent - sectors */
150         __u32           snapshot;
151 #else
152 #error edit for your odd byteorder.
153 #endif
154 } __packed
155 #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
156 __aligned(4)
157 #endif
158 ;
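
/*
 * A sketch of the comparison order this layout implies (not the in-tree
 * helper, which compares the raw words): inode is the most significant
 * field, then offset, then snapshot:
 *
 *	static inline int bpos_cmp_sketch(struct bpos l, struct bpos r)
 *	{
 *		if (l.inode != r.inode)
 *			return l.inode < r.inode ? -1 : 1;
 *		if (l.offset != r.offset)
 *			return l.offset < r.offset ? -1 : 1;
 *		return l.snapshot < r.snapshot ? -1
 *		     : l.snapshot > r.snapshot ? 1 : 0;
 *	}
 */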

#define KEY_INODE_MAX                   ((__u64)~0ULL)
#define KEY_OFFSET_MAX                  ((__u64)~0ULL)
#define KEY_SNAPSHOT_MAX                ((__u32)~0U)
#define KEY_SIZE_MAX                    ((__u32)~0U)

static inline struct bpos SPOS(__u64 inode, __u64 offset, __u32 snapshot)
{
        return (struct bpos) {
                .inode          = inode,
                .offset         = offset,
                .snapshot       = snapshot,
        };
}

#define POS_MIN                         SPOS(0, 0, 0)
#define POS_MAX                         SPOS(KEY_INODE_MAX, KEY_OFFSET_MAX, 0)
#define SPOS_MAX                        SPOS(KEY_INODE_MAX, KEY_OFFSET_MAX, KEY_SNAPSHOT_MAX)
#define POS(_inode, _offset)            SPOS(_inode, _offset, 0)

/* Empty placeholder struct, for container_of() */
struct bch_val {
        __u64           __nothing[0];
};

struct bversion {
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
        __u64           lo;
        __u32           hi;
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
        __u32           hi;
        __u64           lo;
#endif
} __packed __aligned(4);

struct bkey {
        /* Size of combined key and value, in u64s */
        __u8            u64s;

        /* Format of key (0 for format local to btree node) */
#if defined(__LITTLE_ENDIAN_BITFIELD)
        __u8            format:7,
                        needs_whiteout:1;
#elif defined (__BIG_ENDIAN_BITFIELD)
        __u8            needs_whiteout:1,
                        format:7;
#else
#error edit for your odd byteorder.
#endif

        /* Type of the value */
        __u8            type;

#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
        __u8            pad[1];

        struct bversion version;
        __u32           size;           /* extent size, in sectors */
        struct bpos     p;
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
        struct bpos     p;
        __u32           size;           /* extent size, in sectors */
        struct bversion version;

        __u8            pad[1];
#endif
} __packed __aligned(8);

struct bkey_packed {
        __u64           _data[0];

        /* Size of combined key and value, in u64s */
        __u8            u64s;

        /* Format of key (0 for format local to btree node) */

        /*
         * XXX: next incompat on disk format change, switch format and
         * needs_whiteout - bkey_packed() will be cheaper if format is the high
         * bits of the bitfield
         */
#if defined(__LITTLE_ENDIAN_BITFIELD)
        __u8            format:7,
                        needs_whiteout:1;
#elif defined (__BIG_ENDIAN_BITFIELD)
        __u8            needs_whiteout:1,
                        format:7;
#endif

        /* Type of the value */
        __u8            type;
        __u8            key_start[0];

        /*
         * We copy bkeys with struct assignment in various places, and while
         * that shouldn't be done with packed bkeys we can't disallow it in C,
         * and it's legal to cast a bkey to a bkey_packed - so padding it out
         * to the same size as struct bkey should hopefully be safest.
         */
        __u8            pad[sizeof(struct bkey) - 3];
} __packed __aligned(8);

typedef struct {
        __le64                  lo;
        __le64                  hi;
} bch_le128;

#define BKEY_U64s                       (sizeof(struct bkey) / sizeof(__u64))
#define BKEY_U64s_MAX                   U8_MAX
#define BKEY_VAL_U64s_MAX               (BKEY_U64s_MAX - BKEY_U64s)
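
/*
 * Worked out from the packed layouts above: u64s/format/type/pad are 4
 * bytes, bversion is 12, size is 4 and bpos is 20, so sizeof(struct bkey)
 * is 40 bytes and BKEY_U64s evaluates to 5, leaving up to 250 u64s for an
 * inline value.
 */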

#define KEY_PACKED_BITS_START           24

#define KEY_FORMAT_LOCAL_BTREE          0
#define KEY_FORMAT_CURRENT              1

enum bch_bkey_fields {
        BKEY_FIELD_INODE,
        BKEY_FIELD_OFFSET,
        BKEY_FIELD_SNAPSHOT,
        BKEY_FIELD_SIZE,
        BKEY_FIELD_VERSION_HI,
        BKEY_FIELD_VERSION_LO,
        BKEY_NR_FIELDS,
};

#define bkey_format_field(name, field)                                  \
        [BKEY_FIELD_##name] = (sizeof(((struct bkey *) NULL)->field) * 8)

#define BKEY_FORMAT_CURRENT                                             \
((struct bkey_format) {                                                 \
        .key_u64s       = BKEY_U64s,                                    \
        .nr_fields      = BKEY_NR_FIELDS,                               \
        .bits_per_field = {                                             \
                bkey_format_field(INODE,        p.inode),               \
                bkey_format_field(OFFSET,       p.offset),              \
                bkey_format_field(SNAPSHOT,     p.snapshot),            \
                bkey_format_field(SIZE,         size),                  \
                bkey_format_field(VERSION_HI,   version.hi),            \
                bkey_format_field(VERSION_LO,   version.lo),            \
        },                                                              \
})
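
/*
 * With the field sizes above, BKEY_FORMAT_CURRENT works out to
 * .key_u64s = 5, .nr_fields = 6 and
 * .bits_per_field = { 64, 64, 32, 32, 32, 64 } - i.e. the "unpacked"
 * format in which no field is narrowed and all field_offsets are 0.
 */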

/* bkey with inline value */
struct bkey_i {
        __u64                   _data[0];

        struct bkey     k;
        struct bch_val  v;
};

#define KEY(_inode, _offset, _size)                                     \
((struct bkey) {                                                        \
        .u64s           = BKEY_U64s,                                    \
        .format         = KEY_FORMAT_CURRENT,                           \
        .p              = POS(_inode, _offset),                         \
        .size           = _size,                                        \
})

static inline void bkey_init(struct bkey *k)
{
        *k = KEY(0, 0, 0);
}

#define bkey_bytes(_k)          ((_k)->u64s * sizeof(__u64))

#define __BKEY_PADDED(key, pad)                                 \
        struct bkey_i key; __u64 key ## _pad[pad]

/*
 * - DELETED keys are used internally to mark keys that should be ignored but
 *   override keys in composition order.  Their version number is ignored.
 *
 * - DISCARDED keys indicate that the data is all 0s because it has been
 *   discarded. DISCARDs may have a version; if the version is nonzero the key
 *   will be persistent, otherwise the key will be dropped whenever the btree
 *   node is rewritten (like DELETED keys).
 *
 * - ERROR: any read of the data returns a read error, as the data was lost due
 *   to a failing device. Like DISCARDED keys, they can be removed (overridden)
 *   by new writes or cluster-wide GC. Node repair can also overwrite them with
 *   the same or a more recent version number, but not with an older version
 *   number.
 *
 * - WHITEOUT: for hash table btrees
 */
#define BCH_BKEY_TYPES()                                \
        x(deleted,              0)                      \
        x(whiteout,             1)                      \
        x(error,                2)                      \
        x(cookie,               3)                      \
        x(hash_whiteout,        4)                      \
        x(btree_ptr,            5)                      \
        x(extent,               6)                      \
        x(reservation,          7)                      \
        x(inode,                8)                      \
        x(inode_generation,     9)                      \
        x(dirent,               10)                     \
        x(xattr,                11)                     \
        x(alloc,                12)                     \
        x(quota,                13)                     \
        x(stripe,               14)                     \
        x(reflink_p,            15)                     \
        x(reflink_v,            16)                     \
        x(inline_data,          17)                     \
        x(btree_ptr_v2,         18)                     \
        x(indirect_inline_data, 19)                     \
        x(alloc_v2,             20)                     \
        x(subvolume,            21)                     \
        x(snapshot,             22)                     \
        x(inode_v2,             23)                     \
        x(alloc_v3,             24)                     \
        x(set,                  25)                     \
        x(lru,                  26)                     \
        x(alloc_v4,             27)                     \
        x(backpointer,          28)                     \
        x(inode_v3,             29)                     \
        x(bucket_gens,          30)                     \
        x(snapshot_tree,        31)                     \
        x(logged_op_truncate,   32)                     \
        x(logged_op_finsert,    33)

enum bch_bkey_type {
#define x(name, nr) KEY_TYPE_##name     = nr,
        BCH_BKEY_TYPES()
#undef x
        KEY_TYPE_MAX,
};

struct bch_deleted {
        struct bch_val          v;
};

struct bch_whiteout {
        struct bch_val          v;
};

struct bch_error {
        struct bch_val          v;
};

struct bch_cookie {
        struct bch_val          v;
        __le64                  cookie;
};

struct bch_hash_whiteout {
        struct bch_val          v;
};

struct bch_set {
        struct bch_val          v;
};

/* Extents */

/*
 * In extent bkeys, the value is a list of pointers (bch_extent_ptr), optionally
 * preceded by checksum/compression information (bch_extent_crc32 or
 * bch_extent_crc64).
 *
 * One major determining factor in the format of extents is how we handle and
 * represent extents that have been partially overwritten and thus trimmed:
 *
 * If an extent is not checksummed or compressed, when the extent is trimmed we
 * don't have to remember the extent we originally allocated and wrote: we can
 * merely adjust ptr->offset to point to the start of the data that is currently
 * live. The size field in struct bkey records the current (live) size of the
 * extent, and is also used to mean "size of region on disk that we point to" in
 * this case.
 *
 * Thus an extent that is not checksummed or compressed will consist only of a
 * list of bch_extent_ptrs, with none of the fields in
 * bch_extent_crc32/bch_extent_crc64.
 *
 * When an extent is checksummed or compressed, it's not possible to read only
 * the data that is currently live: we have to read the entire extent that was
 * originally written, and then return only the part of the extent that is
 * currently live.
 *
 * Thus, in addition to the current size of the extent in struct bkey, we need
 * to store the size of the originally allocated space - this is the
 * compressed_size and uncompressed_size fields in bch_extent_crc32/64. Also,
 * when the extent is trimmed, instead of modifying the offset field of the
 * pointer, we keep a second smaller offset field - "offset into the original
 * extent of the currently live region".
 *
 * The other major determining factor is replication and data migration:
 *
 * Each pointer may have its own bch_extent_crc32/64. When doing a replicated
 * write, we will initially write all the replicas in the same format, with the
 * same checksum type and compression format - however, when copygc runs later (or
 * tiering/cache promotion, anything that moves data), it is not in general
 * going to rewrite all the pointers at once - one of the replicas may be in a
 * bucket on one device that has very little fragmentation while another lives
 * in a bucket that has become heavily fragmented, and thus is being rewritten
 * sooner than the rest.
 *
 * Thus it will only move a subset of the pointers (or in the case of
 * tiering/cache promotion perhaps add a single pointer without dropping any
 * current pointers), and if the extent has been partially overwritten it must
 * write only the currently live portion (or copygc would not be able to reduce
 * fragmentation!) - which necessitates a different bch_extent_crc format for
 * the new pointer.
 *
 * But in the interests of space efficiency, we don't want to store one
 * bch_extent_crc for each pointer if we don't have to.
 *
 * Thus, a bch_extent consists of bch_extent_crc32s, bch_extent_crc64s, and
 * bch_extent_ptrs appended arbitrarily one after the other. We determine the
 * type of a given entry with a scheme similar to utf8 (except we're encoding a
 * type, not a size), encoding the type in the position of the first set bit:
 * bch_extent_ptr       - 0b1
 * bch_extent_crc32     - 0b10
 * bch_extent_crc64     - 0b100
 *
 * We do it this way because bch_extent_crc32 is _very_ constrained on bits (and
 * bch_extent_crc64 is the least constrained).
 *
 * Then, each bch_extent_crc32/64 applies to the pointers that follow after it,
 * until the next bch_extent_crc32/64.
 *
 * If there are no bch_extent_crcs preceding a bch_extent_ptr, then that pointer
 * is neither checksummed nor compressed.
 */

/* 128 bits, sufficient for cryptographic MACs: */
struct bch_csum {
        __le64                  lo;
        __le64                  hi;
} __packed __aligned(8);

#define BCH_EXTENT_ENTRY_TYPES()                \
        x(ptr,                  0)              \
        x(crc32,                1)              \
        x(crc64,                2)              \
        x(crc128,               3)              \
        x(stripe_ptr,           4)              \
        x(rebalance,            5)
#define BCH_EXTENT_ENTRY_MAX    6

enum bch_extent_entry_type {
#define x(f, n) BCH_EXTENT_ENTRY_##f = n,
        BCH_EXTENT_ENTRY_TYPES()
#undef x
};
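
/*
 * A sketch of how the utf8-style tag described above is decoded - the
 * entry type is the index of the first set bit (hypothetical helper, not
 * the in-tree one):
 *
 *	static inline enum bch_extent_entry_type
 *	extent_entry_type_sketch(const union bch_extent_entry *e)
 *	{
 *		return __ffs(e->type);	// 0b1 -> ptr, 0b10 -> crc32, ...
 *	}
 */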

/* Compressed/uncompressed size are stored biased by 1: */
struct bch_extent_crc32 {
#if defined(__LITTLE_ENDIAN_BITFIELD)
        __u32                   type:2,
                                _compressed_size:7,
                                _uncompressed_size:7,
                                offset:7,
                                _unused:1,
                                csum_type:4,
                                compression_type:4;
        __u32                   csum;
#elif defined (__BIG_ENDIAN_BITFIELD)
        __u32                   csum;
        __u32                   compression_type:4,
                                csum_type:4,
                                _unused:1,
                                offset:7,
                                _uncompressed_size:7,
                                _compressed_size:7,
                                type:2;
#endif
} __packed __aligned(8);

#define CRC32_SIZE_MAX          (1U << 7)
#define CRC32_NONCE_MAX         0
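
/*
 * "Biased by 1" means a stored _compressed_size of 0 represents 1 sector,
 * so the 7 bit field covers 1..128 sectors (CRC32_SIZE_MAX). A sketch of
 * the decode (hypothetical helper name):
 *
 *	static inline unsigned crc32_compressed_size(const struct bch_extent_crc32 *crc)
 *	{
 *		return crc->_compressed_size + 1;
 *	}
 */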

struct bch_extent_crc64 {
#if defined(__LITTLE_ENDIAN_BITFIELD)
        __u64                   type:3,
                                _compressed_size:9,
                                _uncompressed_size:9,
                                offset:9,
                                nonce:10,
                                csum_type:4,
                                compression_type:4,
                                csum_hi:16;
#elif defined (__BIG_ENDIAN_BITFIELD)
        __u64                   csum_hi:16,
                                compression_type:4,
                                csum_type:4,
                                nonce:10,
                                offset:9,
                                _uncompressed_size:9,
                                _compressed_size:9,
                                type:3;
#endif
        __u64                   csum_lo;
} __packed __aligned(8);

#define CRC64_SIZE_MAX          (1U << 9)
#define CRC64_NONCE_MAX         ((1U << 10) - 1)

struct bch_extent_crc128 {
#if defined(__LITTLE_ENDIAN_BITFIELD)
        __u64                   type:4,
                                _compressed_size:13,
                                _uncompressed_size:13,
                                offset:13,
                                nonce:13,
                                csum_type:4,
                                compression_type:4;
#elif defined (__BIG_ENDIAN_BITFIELD)
        __u64                   compression_type:4,
                                csum_type:4,
                                nonce:13,
                                offset:13,
                                _uncompressed_size:13,
                                _compressed_size:13,
                                type:4;
#endif
        struct bch_csum         csum;
} __packed __aligned(8);

#define CRC128_SIZE_MAX         (1U << 13)
#define CRC128_NONCE_MAX        ((1U << 13) - 1)

/*
 * @reservation - pointer hasn't been written to, just reserved
 */
struct bch_extent_ptr {
#if defined(__LITTLE_ENDIAN_BITFIELD)
        __u64                   type:1,
                                cached:1,
                                unused:1,
                                unwritten:1,
                                offset:44, /* 8 petabytes */
                                dev:8,
                                gen:8;
#elif defined (__BIG_ENDIAN_BITFIELD)
        __u64                   gen:8,
                                dev:8,
                                offset:44,
                                unwritten:1,
                                unused:1,
                                cached:1,
                                type:1;
#endif
} __packed __aligned(8);

struct bch_extent_stripe_ptr {
#if defined(__LITTLE_ENDIAN_BITFIELD)
        __u64                   type:5,
                                block:8,
                                redundancy:4,
                                idx:47;
#elif defined (__BIG_ENDIAN_BITFIELD)
        __u64                   idx:47,
                                redundancy:4,
                                block:8,
                                type:5;
#endif
};

struct bch_extent_rebalance {
#if defined(__LITTLE_ENDIAN_BITFIELD)
        __u64                   type:6,
                                unused:34,
                                compression:8, /* enum bch_compression_opt */
                                target:16;
#elif defined (__BIG_ENDIAN_BITFIELD)
        __u64                   target:16,
                                compression:8,
                                unused:34,
                                type:6;
#endif
};

union bch_extent_entry {
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ ||  __BITS_PER_LONG == 64
        unsigned long                   type;
#elif __BITS_PER_LONG == 32
        struct {
                unsigned long           pad;
                unsigned long           type;
        };
#else
#error edit for your odd byteorder.
#endif

#define x(f, n) struct bch_extent_##f   f;
        BCH_EXTENT_ENTRY_TYPES()
#undef x
};

struct bch_btree_ptr {
        struct bch_val          v;

        __u64                   _data[0];
        struct bch_extent_ptr   start[];
} __packed __aligned(8);

struct bch_btree_ptr_v2 {
        struct bch_val          v;

        __u64                   mem_ptr;
        __le64                  seq;
        __le16                  sectors_written;
        __le16                  flags;
        struct bpos             min_key;
        __u64                   _data[0];
        struct bch_extent_ptr   start[];
} __packed __aligned(8);

LE16_BITMASK(BTREE_PTR_RANGE_UPDATED,   struct bch_btree_ptr_v2, flags, 0, 1);

struct bch_extent {
        struct bch_val          v;

        __u64                   _data[0];
        union bch_extent_entry  start[];
} __packed __aligned(8);

struct bch_reservation {
        struct bch_val          v;

        __le32                  generation;
        __u8                    nr_replicas;
        __u8                    pad[3];
} __packed __aligned(8);

/* Maximum size (in u64s) a single pointer could be: */
#define BKEY_EXTENT_PTR_U64s_MAX\
        ((sizeof(struct bch_extent_crc128) +                    \
          sizeof(struct bch_extent_ptr)) / sizeof(__u64))

/* Maximum possible size of an entire extent value: */
#define BKEY_EXTENT_VAL_U64s_MAX                                \
        (1 + BKEY_EXTENT_PTR_U64s_MAX * (BCH_REPLICAS_MAX + 1))
/* Maximum possible size of an entire extent, key + value: */
#define BKEY_EXTENT_U64s_MAX            (BKEY_U64s + BKEY_EXTENT_VAL_U64s_MAX)

/* Btree pointers don't carry around checksums: */
#define BKEY_BTREE_PTR_VAL_U64s_MAX                             \
        ((sizeof(struct bch_btree_ptr_v2) +                     \
          sizeof(struct bch_extent_ptr) * BCH_REPLICAS_MAX) / sizeof(__u64))
#define BKEY_BTREE_PTR_U64s_MAX                                 \
        (BKEY_U64s + BKEY_BTREE_PTR_VAL_U64s_MAX)

/* Inodes */

#define BLOCKDEV_INODE_MAX      4096

#define BCACHEFS_ROOT_INO       4096

struct bch_inode {
        struct bch_val          v;

        __le64                  bi_hash_seed;
        __le32                  bi_flags;
        __le16                  bi_mode;
        __u8                    fields[];
} __packed __aligned(8);

struct bch_inode_v2 {
        struct bch_val          v;

        __le64                  bi_journal_seq;
        __le64                  bi_hash_seed;
        __le64                  bi_flags;
        __le16                  bi_mode;
        __u8                    fields[];
} __packed __aligned(8);

struct bch_inode_v3 {
        struct bch_val          v;

        __le64                  bi_journal_seq;
        __le64                  bi_hash_seed;
        __le64                  bi_flags;
        __le64                  bi_sectors;
        __le64                  bi_size;
        __le64                  bi_version;
        __u8                    fields[];
} __packed __aligned(8);

#define INODEv3_FIELDS_START_INITIAL    6
#define INODEv3_FIELDS_START_CUR        (offsetof(struct bch_inode_v3, fields) / sizeof(__u64))
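
/*
 * With the layout above, offsetof(struct bch_inode_v3, fields) is 48
 * (six __le64s, struct bch_val being empty), so INODEv3_FIELDS_START_CUR
 * currently evaluates to 6 - the same value as
 * INODEv3_FIELDS_START_INITIAL.
 */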

struct bch_inode_generation {
        struct bch_val          v;

        __le32                  bi_generation;
        __le32                  pad;
} __packed __aligned(8);

/*
 * bi_subvol and bi_parent_subvol are only set for subvolume roots:
 */

#define BCH_INODE_FIELDS_v2()                   \
        x(bi_atime,                     96)     \
        x(bi_ctime,                     96)     \
        x(bi_mtime,                     96)     \
        x(bi_otime,                     96)     \
        x(bi_size,                      64)     \
        x(bi_sectors,                   64)     \
        x(bi_uid,                       32)     \
        x(bi_gid,                       32)     \
        x(bi_nlink,                     32)     \
        x(bi_generation,                32)     \
        x(bi_dev,                       32)     \
        x(bi_data_checksum,             8)      \
        x(bi_compression,               8)      \
        x(bi_project,                   32)     \
        x(bi_background_compression,    8)      \
        x(bi_data_replicas,             8)      \
        x(bi_promote_target,            16)     \
        x(bi_foreground_target,         16)     \
        x(bi_background_target,         16)     \
        x(bi_erasure_code,              16)     \
        x(bi_fields_set,                16)     \
        x(bi_dir,                       64)     \
        x(bi_dir_offset,                64)     \
        x(bi_subvol,                    32)     \
        x(bi_parent_subvol,             32)

#define BCH_INODE_FIELDS_v3()                   \
        x(bi_atime,                     96)     \
        x(bi_ctime,                     96)     \
        x(bi_mtime,                     96)     \
        x(bi_otime,                     96)     \
        x(bi_uid,                       32)     \
        x(bi_gid,                       32)     \
        x(bi_nlink,                     32)     \
        x(bi_generation,                32)     \
        x(bi_dev,                       32)     \
        x(bi_data_checksum,             8)      \
        x(bi_compression,               8)      \
        x(bi_project,                   32)     \
        x(bi_background_compression,    8)      \
        x(bi_data_replicas,             8)      \
        x(bi_promote_target,            16)     \
        x(bi_foreground_target,         16)     \
        x(bi_background_target,         16)     \
        x(bi_erasure_code,              16)     \
        x(bi_fields_set,                16)     \
        x(bi_dir,                       64)     \
        x(bi_dir_offset,                64)     \
        x(bi_subvol,                    32)     \
        x(bi_parent_subvol,             32)     \
        x(bi_nocow,                     8)

/* subset of BCH_INODE_FIELDS */
#define BCH_INODE_OPTS()                        \
        x(data_checksum,                8)      \
        x(compression,                  8)      \
        x(project,                      32)     \
        x(background_compression,       8)      \
        x(data_replicas,                8)      \
        x(promote_target,               16)     \
        x(foreground_target,            16)     \
        x(background_target,            16)     \
        x(erasure_code,                 16)     \
        x(nocow,                        8)

enum inode_opt_id {
#define x(name, ...)                            \
        Inode_opt_##name,
        BCH_INODE_OPTS()
#undef  x
        Inode_opt_nr,
};

#define BCH_INODE_FLAGS()                       \
        x(sync,                         0)      \
        x(immutable,                    1)      \
        x(append,                       2)      \
        x(nodump,                       3)      \
        x(noatime,                      4)      \
        x(i_size_dirty,                 5)      \
        x(i_sectors_dirty,              6)      \
        x(unlinked,                     7)      \
        x(backptr_untrusted,            8)

/* bits 20+ reserved for packed fields below: */

enum bch_inode_flags {
#define x(t, n) BCH_INODE_##t = 1U << n,
        BCH_INODE_FLAGS()
#undef x
};

enum __bch_inode_flags {
#define x(t, n) __BCH_INODE_##t = n,
        BCH_INODE_FLAGS()
#undef x
};

LE32_BITMASK(INODE_STR_HASH,    struct bch_inode, bi_flags, 20, 24);
LE32_BITMASK(INODE_NR_FIELDS,   struct bch_inode, bi_flags, 24, 31);
LE32_BITMASK(INODE_NEW_VARINT,  struct bch_inode, bi_flags, 31, 32);

LE64_BITMASK(INODEv2_STR_HASH,  struct bch_inode_v2, bi_flags, 20, 24);
LE64_BITMASK(INODEv2_NR_FIELDS, struct bch_inode_v2, bi_flags, 24, 31);

LE64_BITMASK(INODEv3_STR_HASH,  struct bch_inode_v3, bi_flags, 20, 24);
LE64_BITMASK(INODEv3_NR_FIELDS, struct bch_inode_v3, bi_flags, 24, 31);

LE64_BITMASK(INODEv3_FIELDS_START,
                                struct bch_inode_v3, bi_flags, 31, 36);
LE64_BITMASK(INODEv3_MODE,      struct bch_inode_v3, bi_flags, 36, 52);

/* Dirents */

/*
 * Dirents (and xattrs) have to implement string lookups; since our b-tree
 * doesn't support arbitrary length strings for the key, we instead index by a
 * 64 bit hash (currently truncated sha1) of the string, stored in the offset
 * field of the key - using linear probing to resolve hash collisions. This also
 * provides us with the readdir cookie POSIX requires.
 *
 * Linear probing requires us to use whiteouts for deletions, in the event of a
 * collision:
 */
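
/*
 * A rough sketch of the lookup flow this implies (hypothetical helper
 * names, not the in-tree code):
 *
 *	struct bpos pos = POS(dir_inum, hash_of(name));
 *
 *	for_each_key_from(pos) {		// walk forward from the hash
 *		if (key type is KEY_TYPE_hash_whiteout)
 *			continue;		// deleted slot, keep probing
 *		if (dirent's d_name matches)
 *			return found;
 *		if (key's offset no longer matches the hash's probe run)
 *			return not_found;	// left the collision run
 *	}
 */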

struct bch_dirent {
        struct bch_val          v;

        /* Target inode number: */
        union {
        __le64                  d_inum;
        struct {                /* DT_SUBVOL */
        __le32                  d_child_subvol;
        __le32                  d_parent_subvol;
        };
        };

        /*
         * Copy of mode bits 12-15 from the target inode - so userspace can get
         * the filetype without having to do a stat()
         */
        __u8                    d_type;

        __u8                    d_name[];
} __packed __aligned(8);

#define DT_SUBVOL       16
#define BCH_DT_MAX      17

#define BCH_NAME_MAX    512

/* Xattrs */

#define KEY_TYPE_XATTR_INDEX_USER               0
#define KEY_TYPE_XATTR_INDEX_POSIX_ACL_ACCESS   1
#define KEY_TYPE_XATTR_INDEX_POSIX_ACL_DEFAULT  2
#define KEY_TYPE_XATTR_INDEX_TRUSTED            3
#define KEY_TYPE_XATTR_INDEX_SECURITY           4

struct bch_xattr {
        struct bch_val          v;
        __u8                    x_type;
        __u8                    x_name_len;
        __le16                  x_val_len;
        __u8                    x_name[];
} __packed __aligned(8);

/* Bucket/allocation information: */

struct bch_alloc {
        struct bch_val          v;
        __u8                    fields;
        __u8                    gen;
        __u8                    data[];
} __packed __aligned(8);

#define BCH_ALLOC_FIELDS_V1()                   \
        x(read_time,            16)             \
        x(write_time,           16)             \
        x(data_type,            8)              \
        x(dirty_sectors,        16)             \
        x(cached_sectors,       16)             \
        x(oldest_gen,           8)              \
        x(stripe,               32)             \
        x(stripe_redundancy,    8)

enum {
#define x(name, _bits) BCH_ALLOC_FIELD_V1_##name,
        BCH_ALLOC_FIELDS_V1()
#undef x
};

struct bch_alloc_v2 {
        struct bch_val          v;
        __u8                    nr_fields;
        __u8                    gen;
        __u8                    oldest_gen;
        __u8                    data_type;
        __u8                    data[];
} __packed __aligned(8);

#define BCH_ALLOC_FIELDS_V2()                   \
        x(read_time,            64)             \
        x(write_time,           64)             \
        x(dirty_sectors,        32)             \
        x(cached_sectors,       32)             \
        x(stripe,               32)             \
        x(stripe_redundancy,    8)

struct bch_alloc_v3 {
        struct bch_val          v;
        __le64                  journal_seq;
        __le32                  flags;
        __u8                    nr_fields;
        __u8                    gen;
        __u8                    oldest_gen;
        __u8                    data_type;
        __u8                    data[];
} __packed __aligned(8);

LE32_BITMASK(BCH_ALLOC_V3_NEED_DISCARD,struct bch_alloc_v3, flags,  0,  1)
LE32_BITMASK(BCH_ALLOC_V3_NEED_INC_GEN,struct bch_alloc_v3, flags,  1,  2)

struct bch_alloc_v4 {
        struct bch_val          v;
        __u64                   journal_seq;
        __u32                   flags;
        __u8                    gen;
        __u8                    oldest_gen;
        __u8                    data_type;
        __u8                    stripe_redundancy;
        __u32                   dirty_sectors;
        __u32                   cached_sectors;
        __u64                   io_time[2];
        __u32                   stripe;
        __u32                   nr_external_backpointers;
        __u64                   fragmentation_lru;
} __packed __aligned(8);

#define BCH_ALLOC_V4_U64s_V0    6
#define BCH_ALLOC_V4_U64s       (sizeof(struct bch_alloc_v4) / sizeof(__u64))
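
/*
 * Summing the packed fields above gives sizeof(struct bch_alloc_v4) == 56,
 * so BCH_ALLOC_V4_U64s evaluates to 7 (versus the 6 u64s of the original
 * layout, BCH_ALLOC_V4_U64s_V0).
 */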

BITMASK(BCH_ALLOC_V4_NEED_DISCARD,      struct bch_alloc_v4, flags,  0,  1)
BITMASK(BCH_ALLOC_V4_NEED_INC_GEN,      struct bch_alloc_v4, flags,  1,  2)
BITMASK(BCH_ALLOC_V4_BACKPOINTERS_START,struct bch_alloc_v4, flags,  2,  8)
BITMASK(BCH_ALLOC_V4_NR_BACKPOINTERS,   struct bch_alloc_v4, flags,  8,  14)

#define BCH_ALLOC_V4_NR_BACKPOINTERS_MAX        40

struct bch_backpointer {
        struct bch_val          v;
        __u8                    btree_id;
        __u8                    level;
        __u8                    data_type;
        __u64                   bucket_offset:40;
        __u32                   bucket_len;
        struct bpos             pos;
} __packed __aligned(8);

#define KEY_TYPE_BUCKET_GENS_BITS       8
#define KEY_TYPE_BUCKET_GENS_NR         (1U << KEY_TYPE_BUCKET_GENS_BITS)
#define KEY_TYPE_BUCKET_GENS_MASK       (KEY_TYPE_BUCKET_GENS_NR - 1)

struct bch_bucket_gens {
        struct bch_val          v;
        u8                      gens[KEY_TYPE_BUCKET_GENS_NR];
} __packed __aligned(8);

/* Quotas: */

enum quota_types {
        QTYP_USR                = 0,
        QTYP_GRP                = 1,
        QTYP_PRJ                = 2,
        QTYP_NR                 = 3,
};

enum quota_counters {
        Q_SPC                   = 0,
        Q_INO                   = 1,
        Q_COUNTERS              = 2,
};

struct bch_quota_counter {
        __le64                  hardlimit;
        __le64                  softlimit;
};

struct bch_quota {
        struct bch_val          v;
        struct bch_quota_counter c[Q_COUNTERS];
} __packed __aligned(8);

/* Erasure coding */

struct bch_stripe {
        struct bch_val          v;
        __le16                  sectors;
        __u8                    algorithm;
        __u8                    nr_blocks;
        __u8                    nr_redundant;

        __u8                    csum_granularity_bits;
        __u8                    csum_type;
        __u8                    pad;

        struct bch_extent_ptr   ptrs[];
} __packed __aligned(8);

/* Reflink: */

struct bch_reflink_p {
        struct bch_val          v;
        __le64                  idx;
        /*
         * A reflink pointer might point to an indirect extent which is then
         * later split (by copygc or rebalance). If we only pointed to part of
         * the original indirect extent, and then one of the fragments is
         * outside the range we point to, we'd leak a refcount: so when creating
         * reflink pointers, we need to store pad values to remember the full
         * range we were taking a reference on.
         */
        __le32                  front_pad;
        __le32                  back_pad;
} __packed __aligned(8);
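
/*
 * One reading of these fields, as a worked example (an assumption, not
 * normative): a reflink pointer whose live range is indirect extent
 * sectors [idx, idx + size) but which took its reference on the whole
 * original extent [idx - front_pad, idx + size + back_pad) records that
 * larger range here, so refcounts on later fragments of the indirect
 * extent can still be dropped correctly when the pointer is deleted.
 */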

struct bch_reflink_v {
        struct bch_val          v;
        __le64                  refcount;
        union bch_extent_entry  start[0];
        __u64                   _data[];
} __packed __aligned(8);

struct bch_indirect_inline_data {
        struct bch_val          v;
        __le64                  refcount;
        u8                      data[];
};

/* Inline data */

struct bch_inline_data {
        struct bch_val          v;
        u8                      data[];
};

/* Subvolumes: */

#define SUBVOL_POS_MIN          POS(0, 1)
#define SUBVOL_POS_MAX          POS(0, S32_MAX)
#define BCACHEFS_ROOT_SUBVOL    1

struct bch_subvolume {
        struct bch_val          v;
        __le32                  flags;
        __le32                  snapshot;
        __le64                  inode;
        /*
         * Snapshot subvolumes form a tree, separate from the snapshot nodes
         * tree - if this subvolume is a snapshot, this is the ID of the
         * subvolume it was created from:
         */
        __le32                  parent;
        __le32                  pad;
        bch_le128               otime;
};

LE32_BITMASK(BCH_SUBVOLUME_RO,          struct bch_subvolume, flags,  0,  1)
/*
 * We need to know whether a subvolume is a snapshot so we can know whether we
 * can delete it (or whether it should just be rm -rf'd)
 */
LE32_BITMASK(BCH_SUBVOLUME_SNAP,        struct bch_subvolume, flags,  1,  2)
LE32_BITMASK(BCH_SUBVOLUME_UNLINKED,    struct bch_subvolume, flags,  2,  3)

/* Snapshots */

struct bch_snapshot {
        struct bch_val          v;
        __le32                  flags;
        __le32                  parent;
        __le32                  children[2];
        __le32                  subvol;
        /* corresponds to a bch_snapshot_tree in BTREE_ID_snapshot_trees */
        __le32                  tree;
        __le32                  depth;
        __le32                  skip[3];
};

LE32_BITMASK(BCH_SNAPSHOT_DELETED,      struct bch_snapshot, flags,  0,  1)

/* True if a subvolume points to this snapshot node: */
LE32_BITMASK(BCH_SNAPSHOT_SUBVOL,       struct bch_snapshot, flags,  1,  2)

/*
 * Snapshot trees:
 *
 * The snapshot_trees btree gives us a persistent identifier for each tree of
 * bch_snapshot nodes, and allows us to record and easily find the root/master
 * subvolume that other snapshots were created from:
 */
struct bch_snapshot_tree {
        struct bch_val          v;
        __le32                  master_subvol;
        __le32                  root_snapshot;
};

/* LRU btree: */

struct bch_lru {
        struct bch_val          v;
        __le64                  idx;
} __packed __aligned(8);

#define LRU_ID_STRIPES          (1U << 16)

/* Logged operations btree: */

struct bch_logged_op_truncate {
        struct bch_val          v;
        __le32                  subvol;
        __le32                  pad;
        __le64                  inum;
        __le64                  new_i_size;
};

enum logged_op_finsert_state {
        LOGGED_OP_FINSERT_start,
        LOGGED_OP_FINSERT_shift_extents,
        LOGGED_OP_FINSERT_finish,
};

struct bch_logged_op_finsert {
        struct bch_val          v;
        __u8                    state;
        __u8                    pad[3];
        __le32                  subvol;
        __le64                  inum;
        __le64                  dst_offset;
        __le64                  src_offset;
        __le64                  pos;
};

/* Optional/variable size superblock sections: */

struct bch_sb_field {
        __u64                   _data[0];
        __le32                  u64s;
        __le32                  type;
};
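
/*
 * Each field records its own length in u64s, so the fields can be scanned
 * without knowing their types - a sketch of the walk, assuming the
 * vstruct helpers from "vstructs.h" (vstruct_next() advances by the
 * struct's own u64s count; "first_field"/"end" are hypothetical):
 *
 *	struct bch_sb_field *f;
 *
 *	for (f = first_field; f < end; f = vstruct_next(f))
 *		if (le32_to_cpu(f->type) == BCH_SB_FIELD_members_v2)
 *			return f;
 */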

#define BCH_SB_FIELDS()                         \
        x(journal,      0)                      \
        x(members_v1,   1)                      \
        x(crypt,        2)                      \
        x(replicas_v0,  3)                      \
        x(quota,        4)                      \
        x(disk_groups,  5)                      \
        x(clean,        6)                      \
        x(replicas,     7)                      \
        x(journal_seq_blacklist, 8)             \
        x(journal_v2,   9)                      \
        x(counters,     10)                     \
        x(members_v2,   11)                     \
        x(errors,       12)

enum bch_sb_field_type {
#define x(f, nr)        BCH_SB_FIELD_##f = nr,
        BCH_SB_FIELDS()
#undef x
        BCH_SB_FIELD_NR
};

/*
 * Most superblock fields are replicated in all devices' superblocks - a few are
 * not:
 */
#define BCH_SINGLE_DEVICE_SB_FIELDS             \
        ((1U << BCH_SB_FIELD_journal)|          \
         (1U << BCH_SB_FIELD_journal_v2))

/* BCH_SB_FIELD_journal: */

struct bch_sb_field_journal {
        struct bch_sb_field     field;
        __le64                  buckets[];
};

struct bch_sb_field_journal_v2 {
        struct bch_sb_field     field;

        struct bch_sb_field_journal_v2_entry {
                __le64          start;
                __le64          nr;
        }                       d[];
};

/* BCH_SB_FIELD_members_v1: */

#define BCH_MIN_NR_NBUCKETS     (1 << 6)

#define BCH_IOPS_MEASUREMENTS()                 \
        x(seqread,      0)                      \
        x(seqwrite,     1)                      \
        x(randread,     2)                      \
        x(randwrite,    3)

enum bch_iops_measurement {
#define x(t, n) BCH_IOPS_##t = n,
        BCH_IOPS_MEASUREMENTS()
#undef x
        BCH_IOPS_NR
};

#define BCH_MEMBER_ERROR_TYPES()                \
        x(read,         0)                      \
        x(write,        1)                      \
        x(checksum,     2)

enum bch_member_error_type {
#define x(t, n) BCH_MEMBER_ERROR_##t = n,
        BCH_MEMBER_ERROR_TYPES()
#undef x
        BCH_MEMBER_ERROR_NR
};

struct bch_member {
        __uuid_t                uuid;
        __le64                  nbuckets;       /* device size */
        __le16                  first_bucket;   /* index of first bucket used */
        __le16                  bucket_size;    /* sectors */
        __le32                  pad;
        __le64                  last_mount;     /* time_t */

        __le64                  flags;
        __le32                  iops[4];
        __le64                  errors[BCH_MEMBER_ERROR_NR];
        __le64                  errors_at_reset[BCH_MEMBER_ERROR_NR];
        __le64                  errors_reset_time;
};

#define BCH_MEMBER_V1_BYTES     56

LE64_BITMASK(BCH_MEMBER_STATE,          struct bch_member, flags,  0,  4)
/* 4-14 unused, was TIER, HAS_(META)DATA, REPLACEMENT */
LE64_BITMASK(BCH_MEMBER_DISCARD,        struct bch_member, flags, 14, 15)
LE64_BITMASK(BCH_MEMBER_DATA_ALLOWED,   struct bch_member, flags, 15, 20)
LE64_BITMASK(BCH_MEMBER_GROUP,          struct bch_member, flags, 20, 28)
LE64_BITMASK(BCH_MEMBER_DURABILITY,     struct bch_member, flags, 28, 30)
LE64_BITMASK(BCH_MEMBER_FREESPACE_INITIALIZED,
                                        struct bch_member, flags, 30, 31)

#if 0
LE64_BITMASK(BCH_MEMBER_NR_READ_ERRORS, struct bch_member, flags[1], 0,  20);
LE64_BITMASK(BCH_MEMBER_NR_WRITE_ERRORS,struct bch_member, flags[1], 20, 40);
#endif

#define BCH_MEMBER_STATES()                     \
        x(rw,           0)                      \
        x(ro,           1)                      \
        x(failed,       2)                      \
        x(spare,        3)

enum bch_member_state {
#define x(t, n) BCH_MEMBER_STATE_##t = n,
        BCH_MEMBER_STATES()
#undef x
        BCH_MEMBER_STATE_NR
};

struct bch_sb_field_members_v1 {
        struct bch_sb_field     field;
        struct bch_member       _members[]; /* members are now variable size */
};

struct bch_sb_field_members_v2 {
        struct bch_sb_field     field;
        __le16                  member_bytes; /* size of single member entry */
        u8                      pad[6];
        struct bch_member       _members[];
};

/* BCH_SB_FIELD_crypt: */

struct nonce {
        __le32                  d[4];
};

struct bch_key {
        __le64                  key[4];
};

#define BCH_KEY_MAGIC                                   \
        (((__u64) 'b' <<  0)|((__u64) 'c' <<  8)|               \
         ((__u64) 'h' << 16)|((__u64) '*' << 24)|               \
         ((__u64) '*' << 32)|((__u64) 'k' << 40)|               \
         ((__u64) 'e' << 48)|((__u64) 'y' << 56))
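/* i.e. the bytes "bch**key", read as a little endian __u64 */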

struct bch_encrypted_key {
        __le64                  magic;
        struct bch_key          key;
};

/*
 * If this field is present in the superblock, it stores an encryption key which
 * is used to encrypt all other data/metadata. The key will normally be encrypted
1364  * with the key userspace provides, but if encryption has been turned off we'll
1365  * just store the master key unencrypted in the superblock so we can access the
1366  * previously encrypted data.
1367  */
1368 struct bch_sb_field_crypt {
1369         struct bch_sb_field     field;
1370
1371         __le64                  flags;
1372         __le64                  kdf_flags;
1373         struct bch_encrypted_key key;
1374 };
1375
1376 LE64_BITMASK(BCH_CRYPT_KDF_TYPE,        struct bch_sb_field_crypt, flags, 0, 4);
1377
1378 enum bch_kdf_types {
1379         BCH_KDF_SCRYPT          = 0,
1380         BCH_KDF_NR              = 1,
1381 };
1382
1383 /* stored as base 2 log of scrypt params: */
1384 LE64_BITMASK(BCH_KDF_SCRYPT_N,  struct bch_sb_field_crypt, kdf_flags,  0, 16);
1385 LE64_BITMASK(BCH_KDF_SCRYPT_R,  struct bch_sb_field_crypt, kdf_flags, 16, 32);
1386 LE64_BITMASK(BCH_KDF_SCRYPT_P,  struct bch_sb_field_crypt, kdf_flags, 32, 48);
1387
1388 /* BCH_SB_FIELD_replicas: */
1389
1390 #define BCH_DATA_TYPES()                \
1391         x(free,         0)              \
1392         x(sb,           1)              \
1393         x(journal,      2)              \
1394         x(btree,        3)              \
1395         x(user,         4)              \
1396         x(cached,       5)              \
1397         x(parity,       6)              \
1398         x(stripe,       7)              \
1399         x(need_gc_gens, 8)              \
1400         x(need_discard, 9)
1401
1402 enum bch_data_type {
1403 #define x(t, n) BCH_DATA_##t,
1404         BCH_DATA_TYPES()
1405 #undef x
1406         BCH_DATA_NR
1407 };
1408
1409 static inline bool data_type_is_empty(enum bch_data_type type)
1410 {
1411         switch (type) {
1412         case BCH_DATA_free:
1413         case BCH_DATA_need_gc_gens:
1414         case BCH_DATA_need_discard:
1415                 return true;
1416         default:
1417                 return false;
1418         }
1419 }
1420
1421 static inline bool data_type_is_hidden(enum bch_data_type type)
1422 {
1423         switch (type) {
1424         case BCH_DATA_sb:
1425         case BCH_DATA_journal:
1426                 return true;
1427         default:
1428                 return false;
1429         }
1430 }
1431
1432 struct bch_replicas_entry_v0 {
1433         __u8                    data_type;
1434         __u8                    nr_devs;
1435         __u8                    devs[];
1436 } __packed;
1437
1438 struct bch_sb_field_replicas_v0 {
1439         struct bch_sb_field     field;
1440         struct bch_replicas_entry_v0 entries[];
1441 } __packed __aligned(8);
1442
1443 struct bch_replicas_entry {
1444         __u8                    data_type;
1445         __u8                    nr_devs;
1446         __u8                    nr_required;
1447         __u8                    devs[];
1448 } __packed;
1449
1450 #define replicas_entry_bytes(_i)                                        \
1451         (offsetof(typeof(*(_i)), devs) + (_i)->nr_devs)
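
/*
 * Entries are variable length and packed end to end; a sketch of stepping to
 * the next entry (ignoring any alignment padding; the real iterators live in
 * replicas.c):
 */
static inline struct bch_replicas_entry *
replicas_entry_next_example(struct bch_replicas_entry *i)
{
        return (void *) i + replicas_entry_bytes(i);
}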
1452
1453 struct bch_sb_field_replicas {
1454         struct bch_sb_field     field;
1455         struct bch_replicas_entry entries[];
1456 } __packed __aligned(8);
1457
1458 /* BCH_SB_FIELD_quota: */
1459
1460 struct bch_sb_quota_counter {
1461         __le32                          timelimit;
1462         __le32                          warnlimit;
1463 };
1464
1465 struct bch_sb_quota_type {
1466         __le64                          flags;
1467         struct bch_sb_quota_counter     c[Q_COUNTERS];
1468 };
1469
1470 struct bch_sb_field_quota {
1471         struct bch_sb_field             field;
1472         struct bch_sb_quota_type        q[QTYP_NR];
1473 } __packed __aligned(8);
1474
1475 /* BCH_SB_FIELD_disk_groups: */
1476
1477 #define BCH_SB_LABEL_SIZE               32
1478
1479 struct bch_disk_group {
1480         __u8                    label[BCH_SB_LABEL_SIZE];
1481         __le64                  flags[2];
1482 } __packed __aligned(8);
1483
1484 LE64_BITMASK(BCH_GROUP_DELETED,         struct bch_disk_group, flags[0], 0,  1)
1485 LE64_BITMASK(BCH_GROUP_DATA_ALLOWED,    struct bch_disk_group, flags[0], 1,  6)
1486 LE64_BITMASK(BCH_GROUP_PARENT,          struct bch_disk_group, flags[0], 6, 24)
1487
1488 struct bch_sb_field_disk_groups {
1489         struct bch_sb_field     field;
1490         struct bch_disk_group   entries[];
1491 } __packed __aligned(8);
1492
1493 /* BCH_SB_FIELD_counters */
1494
1495 #define BCH_PERSISTENT_COUNTERS()                               \
1496         x(io_read,                                      0)      \
1497         x(io_write,                                     1)      \
1498         x(io_move,                                      2)      \
1499         x(bucket_invalidate,                            3)      \
1500         x(bucket_discard,                               4)      \
1501         x(bucket_alloc,                                 5)      \
1502         x(bucket_alloc_fail,                            6)      \
1503         x(btree_cache_scan,                             7)      \
1504         x(btree_cache_reap,                             8)      \
1505         x(btree_cache_cannibalize,                      9)      \
1506         x(btree_cache_cannibalize_lock,                 10)     \
1507         x(btree_cache_cannibalize_lock_fail,            11)     \
1508         x(btree_cache_cannibalize_unlock,               12)     \
1509         x(btree_node_write,                             13)     \
1510         x(btree_node_read,                              14)     \
1511         x(btree_node_compact,                           15)     \
1512         x(btree_node_merge,                             16)     \
1513         x(btree_node_split,                             17)     \
1514         x(btree_node_rewrite,                           18)     \
1515         x(btree_node_alloc,                             19)     \
1516         x(btree_node_free,                              20)     \
1517         x(btree_node_set_root,                          21)     \
1518         x(btree_path_relock_fail,                       22)     \
1519         x(btree_path_upgrade_fail,                      23)     \
1520         x(btree_reserve_get_fail,                       24)     \
1521         x(journal_entry_full,                           25)     \
1522         x(journal_full,                                 26)     \
1523         x(journal_reclaim_finish,                       27)     \
1524         x(journal_reclaim_start,                        28)     \
1525         x(journal_write,                                29)     \
1526         x(read_promote,                                 30)     \
1527         x(read_bounce,                                  31)     \
1528         x(read_retry,                                   32)     \
1529         x(read_split,                                   33)     \
1530         x(read_reuse_race,                              34)     \
1531         x(move_extent_read,                             35)     \
1532         x(move_extent_write,                            36)     \
1533         x(move_extent_finish,                           37)     \
1534         x(move_extent_fail,                             38)     \
1535         x(move_extent_start_fail,                       39)     \
1536         x(copygc,                                       40)     \
1537         x(copygc_wait,                                  41)     \
1538         x(gc_gens_end,                                  42)     \
1539         x(gc_gens_start,                                43)     \
1540         x(trans_blocked_journal_reclaim,                44)     \
1541         x(trans_restart_btree_node_reused,              45)     \
1542         x(trans_restart_btree_node_split,               46)     \
1543         x(trans_restart_fault_inject,                   47)     \
1544         x(trans_restart_iter_upgrade,                   48)     \
1545         x(trans_restart_journal_preres_get,             49)     \
1546         x(trans_restart_journal_reclaim,                50)     \
1547         x(trans_restart_journal_res_get,                51)     \
1548         x(trans_restart_key_cache_key_realloced,        52)     \
1549         x(trans_restart_key_cache_raced,                53)     \
1550         x(trans_restart_mark_replicas,                  54)     \
1551         x(trans_restart_mem_realloced,                  55)     \
1552         x(trans_restart_memory_allocation_failure,      56)     \
1553         x(trans_restart_relock,                         57)     \
1554         x(trans_restart_relock_after_fill,              58)     \
1555         x(trans_restart_relock_key_cache_fill,          59)     \
1556         x(trans_restart_relock_next_node,               60)     \
1557         x(trans_restart_relock_parent_for_fill,         61)     \
1558         x(trans_restart_relock_path,                    62)     \
1559         x(trans_restart_relock_path_intent,             63)     \
1560         x(trans_restart_too_many_iters,                 64)     \
1561         x(trans_restart_traverse,                       65)     \
1562         x(trans_restart_upgrade,                        66)     \
1563         x(trans_restart_would_deadlock,                 67)     \
1564         x(trans_restart_would_deadlock_write,           68)     \
1565         x(trans_restart_injected,                       69)     \
1566         x(trans_restart_key_cache_upgrade,              70)     \
1567         x(trans_traverse_all,                           71)     \
1568         x(transaction_commit,                           72)     \
1569         x(write_super,                                  73)     \
1570         x(trans_restart_would_deadlock_recursion_limit, 74)     \
1571         x(trans_restart_write_buffer_flush,             75)     \
1572         x(trans_restart_split_race,                     76)
1573
1574 enum bch_persistent_counters {
1575 #define x(t, n, ...) BCH_COUNTER_##t,
1576         BCH_PERSISTENT_COUNTERS()
1577 #undef x
1578         BCH_COUNTER_NR
1579 };
1580
1581 struct bch_sb_field_counters {
1582         struct bch_sb_field     field;
1583         __le64                  d[];
1584 };
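
/*
 * Sketch: counters are stored as one __le64 per enum bch_persistent_counters
 * value, in enum order - hypothetical helper; real code bounds checks idx
 * against the size of the field:
 */
static inline __u64
bch2_sb_counter_example(struct bch_sb_field_counters *ctrs, unsigned idx)
{
        return __le64_to_cpu(ctrs->d[idx]);
}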
1585
1586 /*
1587  * On clean shutdown, store btree roots and current journal sequence number in
1588  * the superblock:
1589  */
1590 struct jset_entry {
1591         __le16                  u64s;
1592         __u8                    btree_id;
1593         __u8                    level;
1594         __u8                    type; /* designates what this jset entry holds */
1595         __u8                    pad[3];
1596
1597         struct bkey_i           start[0];
1598         __u64                   _data[];
1599 };
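
/*
 * u64s counts only the _data[] portion; a sketch of how the next entry in a
 * list of jset_entries is found (this is what vstruct_next() in vstructs.h
 * computes):
 */
static inline struct jset_entry *jset_entry_next_example(struct jset_entry *e)
{
        return (void *) e + sizeof(*e) + __le16_to_cpu(e->u64s) * sizeof(__u64);
}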
1600
1601 struct bch_sb_field_clean {
1602         struct bch_sb_field     field;
1603
1604         __le32                  flags;
1605         __le16                  _read_clock; /* no longer used */
1606         __le16                  _write_clock;
1607         __le64                  journal_seq;
1608
1609         struct jset_entry       start[0];
1610         __u64                   _data[];
1611 };
1612
1613 struct journal_seq_blacklist_entry {
1614         __le64                  start;
1615         __le64                  end;
1616 };
1617
1618 struct bch_sb_field_journal_seq_blacklist {
1619         struct bch_sb_field     field;
1620         struct journal_seq_blacklist_entry start[];
1621 };
1622
1623 struct bch_sb_field_errors {
1624         struct bch_sb_field     field;
1625         struct bch_sb_field_error_entry {
1626                 __le64          v;
1627                 __le64          last_error_time;
1628         }                       entries[];
1629 };
1630
1631 LE64_BITMASK(BCH_SB_ERROR_ENTRY_ID,     struct bch_sb_field_error_entry, v,  0, 16);
1632 LE64_BITMASK(BCH_SB_ERROR_ENTRY_NR,     struct bch_sb_field_error_entry, v, 16, 64);
1633
1634 /* Superblock: */
1635
1636 /*
1637  * New versioning scheme:
1638  * One common version number for all on disk data structures - superblock, btree
1639  * nodes, journal entries
1640  */
1641 #define BCH_VERSION_MAJOR(_v)           ((__u16) ((_v) >> 10))
1642 #define BCH_VERSION_MINOR(_v)           ((__u16) ((_v) & ~(~0U << 10)))
1643 #define BCH_VERSION(_major, _minor)     (((_major) << 10)|(_minor) << 0)
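
/*
 * e.g. BCH_VERSION(1, 2) packs to (1 << 10)|2; BCH_VERSION_MAJOR() and
 * BCH_VERSION_MINOR() recover the two halves.
 */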
1644
1645 #define RECOVERY_PASS_ALL_FSCK          (1ULL << 63)
1646
1647 #define BCH_METADATA_VERSIONS()                                         \
1648         x(bkey_renumber,                BCH_VERSION(0, 10),             \
1649           RECOVERY_PASS_ALL_FSCK)                                       \
1650         x(inode_btree_change,           BCH_VERSION(0, 11),             \
1651           RECOVERY_PASS_ALL_FSCK)                                       \
1652         x(snapshot,                     BCH_VERSION(0, 12),             \
1653           RECOVERY_PASS_ALL_FSCK)                                       \
1654         x(inode_backpointers,           BCH_VERSION(0, 13),             \
1655           RECOVERY_PASS_ALL_FSCK)                                       \
1656         x(btree_ptr_sectors_written,    BCH_VERSION(0, 14),             \
1657           RECOVERY_PASS_ALL_FSCK)                                       \
1658         x(snapshot_2,                   BCH_VERSION(0, 15),             \
1659           BIT_ULL(BCH_RECOVERY_PASS_fs_upgrade_for_subvolumes)|         \
1660           BIT_ULL(BCH_RECOVERY_PASS_initialize_subvolumes)|             \
1661           RECOVERY_PASS_ALL_FSCK)                                       \
1662         x(reflink_p_fix,                BCH_VERSION(0, 16),             \
1663           BIT_ULL(BCH_RECOVERY_PASS_fix_reflink_p))                     \
1664         x(subvol_dirent,                BCH_VERSION(0, 17),             \
1665           RECOVERY_PASS_ALL_FSCK)                                       \
1666         x(inode_v2,                     BCH_VERSION(0, 18),             \
1667           RECOVERY_PASS_ALL_FSCK)                                       \
1668         x(freespace,                    BCH_VERSION(0, 19),             \
1669           RECOVERY_PASS_ALL_FSCK)                                       \
1670         x(alloc_v4,                     BCH_VERSION(0, 20),             \
1671           RECOVERY_PASS_ALL_FSCK)                                       \
1672         x(new_data_types,               BCH_VERSION(0, 21),             \
1673           RECOVERY_PASS_ALL_FSCK)                                       \
1674         x(backpointers,                 BCH_VERSION(0, 22),             \
1675           RECOVERY_PASS_ALL_FSCK)                                       \
1676         x(inode_v3,                     BCH_VERSION(0, 23),             \
1677           RECOVERY_PASS_ALL_FSCK)                                       \
1678         x(unwritten_extents,            BCH_VERSION(0, 24),             \
1679           RECOVERY_PASS_ALL_FSCK)                                       \
1680         x(bucket_gens,                  BCH_VERSION(0, 25),             \
1681           BIT_ULL(BCH_RECOVERY_PASS_bucket_gens_init)|                  \
1682           RECOVERY_PASS_ALL_FSCK)                                       \
1683         x(lru_v2,                       BCH_VERSION(0, 26),             \
1684           RECOVERY_PASS_ALL_FSCK)                                       \
1685         x(fragmentation_lru,            BCH_VERSION(0, 27),             \
1686           RECOVERY_PASS_ALL_FSCK)                                       \
1687         x(no_bps_in_alloc_keys,         BCH_VERSION(0, 28),             \
1688           RECOVERY_PASS_ALL_FSCK)                                       \
1689         x(snapshot_trees,               BCH_VERSION(0, 29),             \
1690           RECOVERY_PASS_ALL_FSCK)                                       \
1691         x(major_minor,                  BCH_VERSION(1,  0),             \
1692           0)                                                            \
1693         x(snapshot_skiplists,           BCH_VERSION(1,  1),             \
1694           BIT_ULL(BCH_RECOVERY_PASS_check_snapshots))                   \
1695         x(deleted_inodes,               BCH_VERSION(1,  2),             \
1696           BIT_ULL(BCH_RECOVERY_PASS_check_inodes))                      \
1697         x(rebalance_work,               BCH_VERSION(1,  3),             \
1698           BIT_ULL(BCH_RECOVERY_PASS_set_fs_needs_rebalance))
1699
1700 enum bcachefs_metadata_version {
1701         bcachefs_metadata_version_min = 9,
1702 #define x(t, n, upgrade_passes) bcachefs_metadata_version_##t = n,
1703         BCH_METADATA_VERSIONS()
1704 #undef x
1705         bcachefs_metadata_version_max
1706 };
1707
1708 static const __maybe_unused
1709 unsigned bcachefs_metadata_required_upgrade_below = bcachefs_metadata_version_rebalance_work;
1710
1711 #define bcachefs_metadata_version_current       (bcachefs_metadata_version_max - 1)
1712
1713 #define BCH_SB_SECTOR                   8
1714 #define BCH_SB_MEMBERS_MAX              64 /* XXX kill */
1715
1716 struct bch_sb_layout {
1717         __uuid_t                magic;  /* bcachefs superblock UUID */
1718         __u8                    layout_type;
1719         __u8                    sb_max_size_bits; /* log2 of max sb size, in 512 byte sectors */
1720         __u8                    nr_superblocks;
1721         __u8                    pad[5];
1722         __le64                  sb_offset[61];
1723 } __packed __aligned(8);
1724
1725 #define BCH_SB_LAYOUT_SECTOR    7
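
/*
 * Sketch: the maximum superblock size in bytes follows from
 * sb_max_size_bits - hypothetical helper:
 */
static inline __u64 bch_sb_layout_max_bytes_example(struct bch_sb_layout *l)
{
        return 512ULL << l->sb_max_size_bits;
}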
1726
1727 /*
1728  * @offset      - sector where this sb was written
1729  * @version     - on disk format version
1730  * @version_min - oldest metadata version this filesystem contains, so we can
1731  *                safely drop compatibility code and refuse to mount filesystems
1732  *                that would need it
1733  * @magic       - identifies as a bcachefs superblock (BCHFS_MAGIC)
1735  * @uuid        - used for generating various magic numbers and identifying
1736  *                member devices, never changes
1737  * @user_uuid   - user visible UUID, may be changed
1738  * @label       - filesystem label
1739  * @seq         - identifies most recent superblock, incremented each time
1740  *                superblock is written
1741  * @features    - enabled incompatible features
1742  */
1743 struct bch_sb {
1744         struct bch_csum         csum;
1745         __le16                  version;
1746         __le16                  version_min;
1747         __le16                  pad[2];
1748         __uuid_t                magic;
1749         __uuid_t                uuid;
1750         __uuid_t                user_uuid;
1751         __u8                    label[BCH_SB_LABEL_SIZE];
1752         __le64                  offset;
1753         __le64                  seq;
1754
1755         __le16                  block_size;
1756         __u8                    dev_idx;
1757         __u8                    nr_devices;
1758         __le32                  u64s;
1759
1760         __le64                  time_base_lo;
1761         __le32                  time_base_hi;
1762         __le32                  time_precision;
1763
1764         __le64                  flags[8];
1765         __le64                  features[2];
1766         __le64                  compat[2];
1767
1768         struct bch_sb_layout    layout;
1769
1770         struct bch_sb_field     start[0];
1771         __le64                  _data[];
1772 } __packed __aligned(8);
1773
1774 /*
1775  * Flags:
1776  * BCH_SB_INITIALIZED   - set on first mount
1777  * BCH_SB_CLEAN         - did we shut down cleanly? Just a hint, doesn't affect
1778  *                        behaviour of mount/recovery path
1779  * BCH_SB_INODE_32BIT   - limit inode numbers to 32 bits
1780  * BCH_SB_128_BIT_MACS  - 128 bit macs instead of 80
1781  * BCH_SB_ENCRYPTION_TYPE - if nonzero encryption is enabled; overrides
1782  *                         DATA/META_CSUM_TYPE. Also indicates encryption
1783  *                         algorithm in use, if/when we get more than one
1784  */
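
/*
 * Each LE64_BITMASK() below generates BCH_SB_FOO(sb) and
 * SET_BCH_SB_FOO(sb, v) accessors, e.g. (sketch; run_recovery() is
 * hypothetical):
 *
 *      if (!BCH_SB_CLEAN(sb))
 *              run_recovery();
 *      SET_BCH_SB_CLEAN(sb, false);
 */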
1785
1786 LE16_BITMASK(BCH_SB_BLOCK_SIZE,         struct bch_sb, block_size, 0, 16);
1787
1788 LE64_BITMASK(BCH_SB_INITIALIZED,        struct bch_sb, flags[0],  0,  1);
1789 LE64_BITMASK(BCH_SB_CLEAN,              struct bch_sb, flags[0],  1,  2);
1790 LE64_BITMASK(BCH_SB_CSUM_TYPE,          struct bch_sb, flags[0],  2,  8);
1791 LE64_BITMASK(BCH_SB_ERROR_ACTION,       struct bch_sb, flags[0],  8, 12);
1792
1793 LE64_BITMASK(BCH_SB_BTREE_NODE_SIZE,    struct bch_sb, flags[0], 12, 28);
1794
1795 LE64_BITMASK(BCH_SB_GC_RESERVE,         struct bch_sb, flags[0], 28, 33);
1796 LE64_BITMASK(BCH_SB_ROOT_RESERVE,       struct bch_sb, flags[0], 33, 40);
1797
1798 LE64_BITMASK(BCH_SB_META_CSUM_TYPE,     struct bch_sb, flags[0], 40, 44);
1799 LE64_BITMASK(BCH_SB_DATA_CSUM_TYPE,     struct bch_sb, flags[0], 44, 48);
1800
1801 LE64_BITMASK(BCH_SB_META_REPLICAS_WANT, struct bch_sb, flags[0], 48, 52);
1802 LE64_BITMASK(BCH_SB_DATA_REPLICAS_WANT, struct bch_sb, flags[0], 52, 56);
1803
1804 LE64_BITMASK(BCH_SB_POSIX_ACL,          struct bch_sb, flags[0], 56, 57);
1805 LE64_BITMASK(BCH_SB_USRQUOTA,           struct bch_sb, flags[0], 57, 58);
1806 LE64_BITMASK(BCH_SB_GRPQUOTA,           struct bch_sb, flags[0], 58, 59);
1807 LE64_BITMASK(BCH_SB_PRJQUOTA,           struct bch_sb, flags[0], 59, 60);
1808
1809 LE64_BITMASK(BCH_SB_HAS_ERRORS,         struct bch_sb, flags[0], 60, 61);
1810 LE64_BITMASK(BCH_SB_HAS_TOPOLOGY_ERRORS,struct bch_sb, flags[0], 61, 62);
1811
1812 LE64_BITMASK(BCH_SB_BIG_ENDIAN,         struct bch_sb, flags[0], 62, 63);
1813
1814 LE64_BITMASK(BCH_SB_STR_HASH_TYPE,      struct bch_sb, flags[1],  0,  4);
1815 LE64_BITMASK(BCH_SB_COMPRESSION_TYPE_LO,struct bch_sb, flags[1],  4,  8);
1816 LE64_BITMASK(BCH_SB_INODE_32BIT,        struct bch_sb, flags[1],  8,  9);
1817
1818 LE64_BITMASK(BCH_SB_128_BIT_MACS,       struct bch_sb, flags[1],  9, 10);
1819 LE64_BITMASK(BCH_SB_ENCRYPTION_TYPE,    struct bch_sb, flags[1], 10, 14);
1820
1821 /*
1822  * Max size of an extent that may require bouncing to read or write
1823  * (checksummed, compressed): 64k
1824  */
1825 LE64_BITMASK(BCH_SB_ENCODED_EXTENT_MAX_BITS,
1826                                         struct bch_sb, flags[1], 14, 20);
1827
1828 LE64_BITMASK(BCH_SB_META_REPLICAS_REQ,  struct bch_sb, flags[1], 20, 24);
1829 LE64_BITMASK(BCH_SB_DATA_REPLICAS_REQ,  struct bch_sb, flags[1], 24, 28);
1830
1831 LE64_BITMASK(BCH_SB_PROMOTE_TARGET,     struct bch_sb, flags[1], 28, 40);
1832 LE64_BITMASK(BCH_SB_FOREGROUND_TARGET,  struct bch_sb, flags[1], 40, 52);
1833 LE64_BITMASK(BCH_SB_BACKGROUND_TARGET,  struct bch_sb, flags[1], 52, 64);
1834
1835 LE64_BITMASK(BCH_SB_BACKGROUND_COMPRESSION_TYPE_LO,
1836                                         struct bch_sb, flags[2],  0,  4);
1837 LE64_BITMASK(BCH_SB_GC_RESERVE_BYTES,   struct bch_sb, flags[2],  4, 64);
1838
1839 LE64_BITMASK(BCH_SB_ERASURE_CODE,       struct bch_sb, flags[3],  0, 16);
1840 LE64_BITMASK(BCH_SB_METADATA_TARGET,    struct bch_sb, flags[3], 16, 28);
1841 LE64_BITMASK(BCH_SB_SHARD_INUMS,        struct bch_sb, flags[3], 28, 29);
1842 LE64_BITMASK(BCH_SB_INODES_USE_KEY_CACHE,struct bch_sb, flags[3], 29, 30);
1843 LE64_BITMASK(BCH_SB_JOURNAL_FLUSH_DELAY,struct bch_sb, flags[3], 30, 62);
1844 LE64_BITMASK(BCH_SB_JOURNAL_FLUSH_DISABLED,struct bch_sb, flags[3], 62, 63);
1845 LE64_BITMASK(BCH_SB_JOURNAL_RECLAIM_DELAY,struct bch_sb, flags[4], 0, 32);
1846 LE64_BITMASK(BCH_SB_JOURNAL_TRANSACTION_NAMES,struct bch_sb, flags[4], 32, 33);
1847 LE64_BITMASK(BCH_SB_NOCOW,              struct bch_sb, flags[4], 33, 34);
1848 LE64_BITMASK(BCH_SB_WRITE_BUFFER_SIZE,  struct bch_sb, flags[4], 34, 54);
1849 LE64_BITMASK(BCH_SB_VERSION_UPGRADE,    struct bch_sb, flags[4], 54, 56);
1850
1851 LE64_BITMASK(BCH_SB_COMPRESSION_TYPE_HI,struct bch_sb, flags[4], 56, 60);
1852 LE64_BITMASK(BCH_SB_BACKGROUND_COMPRESSION_TYPE_HI,
1853                                         struct bch_sb, flags[4], 60, 64);
1854
1855 LE64_BITMASK(BCH_SB_VERSION_UPGRADE_COMPLETE,
1856                                         struct bch_sb, flags[5],  0, 16);
1857
1858 static inline __u64 BCH_SB_COMPRESSION_TYPE(const struct bch_sb *sb)
1859 {
1860         return BCH_SB_COMPRESSION_TYPE_LO(sb) | (BCH_SB_COMPRESSION_TYPE_HI(sb) << 4);
1861 }
1862
1863 static inline void SET_BCH_SB_COMPRESSION_TYPE(struct bch_sb *sb, __u64 v)
1864 {
1865         SET_BCH_SB_COMPRESSION_TYPE_LO(sb, v);
1866         SET_BCH_SB_COMPRESSION_TYPE_HI(sb, v >> 4);
1867 }
1868
1869 static inline __u64 BCH_SB_BACKGROUND_COMPRESSION_TYPE(const struct bch_sb *sb)
1870 {
1871         return BCH_SB_BACKGROUND_COMPRESSION_TYPE_LO(sb) |
1872                 (BCH_SB_BACKGROUND_COMPRESSION_TYPE_HI(sb) << 4);
1873 }
1874
1875 static inline void SET_BCH_SB_BACKGROUND_COMPRESSION_TYPE(struct bch_sb *sb, __u64 v)
1876 {
1877         SET_BCH_SB_BACKGROUND_COMPRESSION_TYPE_LO(sb, v);
1878         SET_BCH_SB_BACKGROUND_COMPRESSION_TYPE_HI(sb, v >> 4);
1879 }
1880
1881 /*
1882  * Features:
1883  *
1884  * journal_seq_blacklist_v3:    gates BCH_SB_FIELD_journal_seq_blacklist
1885  * reflink:                     gates KEY_TYPE_reflink
1886  * inline_data:                 gates KEY_TYPE_inline_data
1887  * new_siphash:                 gates BCH_STR_HASH_siphash
1888  * new_extent_overwrite:        gates BTREE_NODE_NEW_EXTENT_OVERWRITE
1889  */
1890 #define BCH_SB_FEATURES()                       \
1891         x(lz4,                          0)      \
1892         x(gzip,                         1)      \
1893         x(zstd,                         2)      \
1894         x(atomic_nlink,                 3)      \
1895         x(ec,                           4)      \
1896         x(journal_seq_blacklist_v3,     5)      \
1897         x(reflink,                      6)      \
1898         x(new_siphash,                  7)      \
1899         x(inline_data,                  8)      \
1900         x(new_extent_overwrite,         9)      \
1901         x(incompressible,               10)     \
1902         x(btree_ptr_v2,                 11)     \
1903         x(extents_above_btree_updates,  12)     \
1904         x(btree_updates_journalled,     13)     \
1905         x(reflink_inline_data,          14)     \
1906         x(new_varint,                   15)     \
1907         x(journal_no_flush,             16)     \
1908         x(alloc_v2,                     17)     \
1909         x(extents_across_btree_nodes,   18)
1910
1911 #define BCH_SB_FEATURES_ALWAYS                          \
1912         ((1ULL << BCH_FEATURE_new_extent_overwrite)|    \
1913          (1ULL << BCH_FEATURE_extents_above_btree_updates)|\
1914          (1ULL << BCH_FEATURE_btree_updates_journalled)|\
1915          (1ULL << BCH_FEATURE_alloc_v2)|\
1916          (1ULL << BCH_FEATURE_extents_across_btree_nodes))
1917
1918 #define BCH_SB_FEATURES_ALL                             \
1919         (BCH_SB_FEATURES_ALWAYS|                        \
1920          (1ULL << BCH_FEATURE_new_siphash)|             \
1921          (1ULL << BCH_FEATURE_btree_ptr_v2)|            \
1922          (1ULL << BCH_FEATURE_new_varint)|              \
1923          (1ULL << BCH_FEATURE_journal_no_flush))
1924
1925 enum bch_sb_feature {
1926 #define x(f, n) BCH_FEATURE_##f,
1927         BCH_SB_FEATURES()
1928 #undef x
1929         BCH_FEATURE_NR,
1930 };
1931
1932 #define BCH_SB_COMPAT()                                 \
1933         x(alloc_info,                           0)      \
1934         x(alloc_metadata,                       1)      \
1935         x(extents_above_btree_updates_done,     2)      \
1936         x(bformat_overflow_done,                3)
1937
1938 enum bch_sb_compat {
1939 #define x(f, n) BCH_COMPAT_##f,
1940         BCH_SB_COMPAT()
1941 #undef x
1942         BCH_COMPAT_NR,
1943 };
1944
1945 /* options: */
1946
1947 #define BCH_VERSION_UPGRADE_OPTS()      \
1948         x(compatible,           0)      \
1949         x(incompatible,         1)      \
1950         x(none,                 2)
1951
1952 enum bch_version_upgrade_opts {
1953 #define x(t, n) BCH_VERSION_UPGRADE_##t = n,
1954         BCH_VERSION_UPGRADE_OPTS()
1955 #undef x
1956 };
1957
1958 #define BCH_REPLICAS_MAX                4U
1959
1960 #define BCH_BKEY_PTRS_MAX               16U
1961
1962 #define BCH_ERROR_ACTIONS()             \
1963         x(continue,             0)      \
1964         x(ro,                   1)      \
1965         x(panic,                2)
1966
1967 enum bch_error_actions {
1968 #define x(t, n) BCH_ON_ERROR_##t = n,
1969         BCH_ERROR_ACTIONS()
1970 #undef x
1971         BCH_ON_ERROR_NR
1972 };
1973
1974 #define BCH_STR_HASH_TYPES()            \
1975         x(crc32c,               0)      \
1976         x(crc64,                1)      \
1977         x(siphash_old,          2)      \
1978         x(siphash,              3)
1979
1980 enum bch_str_hash_type {
1981 #define x(t, n) BCH_STR_HASH_##t = n,
1982         BCH_STR_HASH_TYPES()
1983 #undef x
1984         BCH_STR_HASH_NR
1985 };
1986
1987 #define BCH_STR_HASH_OPTS()             \
1988         x(crc32c,               0)      \
1989         x(crc64,                1)      \
1990         x(siphash,              2)
1991
1992 enum bch_str_hash_opts {
1993 #define x(t, n) BCH_STR_HASH_OPT_##t = n,
1994         BCH_STR_HASH_OPTS()
1995 #undef x
1996         BCH_STR_HASH_OPT_NR
1997 };
1998
1999 #define BCH_CSUM_TYPES()                        \
2000         x(none,                         0)      \
2001         x(crc32c_nonzero,               1)      \
2002         x(crc64_nonzero,                2)      \
2003         x(chacha20_poly1305_80,         3)      \
2004         x(chacha20_poly1305_128,        4)      \
2005         x(crc32c,                       5)      \
2006         x(crc64,                        6)      \
2007         x(xxhash,                       7)
2008
2009 enum bch_csum_type {
2010 #define x(t, n) BCH_CSUM_##t = n,
2011         BCH_CSUM_TYPES()
2012 #undef x
2013         BCH_CSUM_NR
2014 };
2015
2016 static const __maybe_unused unsigned bch_crc_bytes[] = {
2017         [BCH_CSUM_none]                         = 0,
2018         [BCH_CSUM_crc32c_nonzero]               = 4,
2019         [BCH_CSUM_crc32c]                       = 4,
2020         [BCH_CSUM_crc64_nonzero]                = 8,
2021         [BCH_CSUM_crc64]                        = 8,
2022         [BCH_CSUM_xxhash]                       = 8,
2023         [BCH_CSUM_chacha20_poly1305_80]         = 10,
2024         [BCH_CSUM_chacha20_poly1305_128]        = 16,
2025 };
2026
2027 static inline _Bool bch2_csum_type_is_encryption(enum bch_csum_type type)
2028 {
2029         switch (type) {
2030         case BCH_CSUM_chacha20_poly1305_80:
2031         case BCH_CSUM_chacha20_poly1305_128:
2032                 return true;
2033         default:
2034                 return false;
2035         }
2036 }
2037
2038 #define BCH_CSUM_OPTS()                 \
2039         x(none,                 0)      \
2040         x(crc32c,               1)      \
2041         x(crc64,                2)      \
2042         x(xxhash,               3)
2043
2044 enum bch_csum_opts {
2045 #define x(t, n) BCH_CSUM_OPT_##t = n,
2046         BCH_CSUM_OPTS()
2047 #undef x
2048         BCH_CSUM_OPT_NR
2049 };
2050
2051 #define BCH_COMPRESSION_TYPES()         \
2052         x(none,                 0)      \
2053         x(lz4_old,              1)      \
2054         x(gzip,                 2)      \
2055         x(lz4,                  3)      \
2056         x(zstd,                 4)      \
2057         x(incompressible,       5)
2058
2059 enum bch_compression_type {
2060 #define x(t, n) BCH_COMPRESSION_TYPE_##t = n,
2061         BCH_COMPRESSION_TYPES()
2062 #undef x
2063         BCH_COMPRESSION_TYPE_NR
2064 };
2065
2066 #define BCH_COMPRESSION_OPTS()          \
2067         x(none,         0)              \
2068         x(lz4,          1)              \
2069         x(gzip,         2)              \
2070         x(zstd,         3)
2071
2072 enum bch_compression_opts {
2073 #define x(t, n) BCH_COMPRESSION_OPT_##t = n,
2074         BCH_COMPRESSION_OPTS()
2075 #undef x
2076         BCH_COMPRESSION_OPT_NR
2077 };
2078
2079 /*
2080  * Magic numbers
2081  *
2082  * The various other data structures have their own magic numbers, which are
2083  * xored with the first 64 bits of the filesystem's UUID
2084  */
2085
2086 #define BCACHE_MAGIC                                                    \
2087         UUID_INIT(0xc68573f6, 0x4e1a, 0x45ca,                           \
2088                   0x82, 0x65, 0xf5, 0x7f, 0x48, 0xba, 0x6d, 0x81)
2089 #define BCHFS_MAGIC                                                     \
2090         UUID_INIT(0xc68573f6, 0x66ce, 0x90a9,                           \
2091                   0xd9, 0x6a, 0x60, 0xcf, 0x80, 0x3d, 0xf7, 0xef)
2092
2093 #define BCACHEFS_STATFS_MAGIC           0xca451a4e
2094
2095 #define JSET_MAGIC              __cpu_to_le64(0x245235c1a3625032ULL)
2096 #define BSET_MAGIC              __cpu_to_le64(0x90135c78b99e07f5ULL)
2097
2098 static inline __le64 __bch2_sb_magic(struct bch_sb *sb)
2099 {
2100         __le64 ret;
2101
2102         memcpy(&ret, &sb->uuid, sizeof(ret));
2103         return ret;
2104 }
2105
2106 static inline __u64 __jset_magic(struct bch_sb *sb)
2107 {
2108         return __le64_to_cpu(__bch2_sb_magic(sb) ^ JSET_MAGIC);
2109 }
2110
2111 static inline __u64 __bset_magic(struct bch_sb *sb)
2112 {
2113         return __le64_to_cpu(__bch2_sb_magic(sb) ^ BSET_MAGIC);
2114 }
2115
2116 /* Journal */
2117
2118 #define JSET_KEYS_U64s  (sizeof(struct jset_entry) / sizeof(__u64))
2119
2120 #define BCH_JSET_ENTRY_TYPES()                  \
2121         x(btree_keys,           0)              \
2122         x(btree_root,           1)              \
2123         x(prio_ptrs,            2)              \
2124         x(blacklist,            3)              \
2125         x(blacklist_v2,         4)              \
2126         x(usage,                5)              \
2127         x(data_usage,           6)              \
2128         x(clock,                7)              \
2129         x(dev_usage,            8)              \
2130         x(log,                  9)              \
2131         x(overwrite,            10)
2132
2133 enum {
2134 #define x(f, nr)        BCH_JSET_ENTRY_##f      = nr,
2135         BCH_JSET_ENTRY_TYPES()
2136 #undef x
2137         BCH_JSET_ENTRY_NR
2138 };
2139
2140 /*
2141  * Journal sequence numbers can be blacklisted: bsets record the max sequence
2142  * number of all the journal entries they contain updates for, so that on
2143  * recovery we can ignore those bsets that contain index updates newer than what
2144  * made it into the journal.
2145  *
2146  * This means that we can't reuse that journal_seq - we have to skip it, and
2147  * then record that we skipped it so that the next time we crash and recover we
2148  * don't think there was a missing journal entry.
2149  */
2150 struct jset_entry_blacklist {
2151         struct jset_entry       entry;
2152         __le64                  seq;
2153 };
2154
2155 struct jset_entry_blacklist_v2 {
2156         struct jset_entry       entry;
2157         __le64                  start;
2158         __le64                  end;
2159 };
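
/*
 * Sketch of the membership test, assuming an inclusive [start, end] range
 * here; the authoritative lookup lives in journal_seq_blacklist.c:
 */
static inline bool
journal_seq_blacklisted_example(struct jset_entry_blacklist_v2 *e, __u64 seq)
{
        return seq >= __le64_to_cpu(e->start) && seq <= __le64_to_cpu(e->end);
}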
2160
2161 #define BCH_FS_USAGE_TYPES()                    \
2162         x(reserved,             0)              \
2163         x(inodes,               1)              \
2164         x(key_version,          2)
2165
2166 enum {
2167 #define x(f, nr)        BCH_FS_USAGE_##f        = nr,
2168         BCH_FS_USAGE_TYPES()
2169 #undef x
2170         BCH_FS_USAGE_NR
2171 };
2172
2173 struct jset_entry_usage {
2174         struct jset_entry       entry;
2175         __le64                  v;
2176 } __packed;
2177
2178 struct jset_entry_data_usage {
2179         struct jset_entry       entry;
2180         __le64                  v;
2181         struct bch_replicas_entry r;
2182 } __packed;
2183
2184 struct jset_entry_clock {
2185         struct jset_entry       entry;
2186         __u8                    rw;
2187         __u8                    pad[7];
2188         __le64                  time;
2189 } __packed;
2190
2191 struct jset_entry_dev_usage_type {
2192         __le64                  buckets;
2193         __le64                  sectors;
2194         __le64                  fragmented;
2195 } __packed;
2196
2197 struct jset_entry_dev_usage {
2198         struct jset_entry       entry;
2199         __le32                  dev;
2200         __u32                   pad;
2201
2202         __le64                  buckets_ec;
2203         __le64                  _buckets_unavailable; /* No longer used */
2204
2205         struct jset_entry_dev_usage_type d[];
2206 };
2207
2208 static inline unsigned jset_entry_dev_usage_nr_types(struct jset_entry_dev_usage *u)
2209 {
2210         return (vstruct_bytes(&u->entry) - sizeof(struct jset_entry_dev_usage)) /
2211                 sizeof(struct jset_entry_dev_usage_type);
2212 }
2213
2214 struct jset_entry_log {
2215         struct jset_entry       entry;
2216         __u8                    d[];
2217 } __packed;
2218
2219 /*
2220  * On disk format for a journal entry:
2221  * seq is monotonically increasing; every journal entry has its own unique
2222  * sequence number.
2223  *
2224  * last_seq is the oldest journal entry that still has keys the btree hasn't
2225  * flushed to disk yet.
2226  *
2227  * version is for on disk format changes.
2228  */
2229 struct jset {
2230         struct bch_csum         csum;
2231
2232         __le64                  magic;
2233         __le64                  seq;
2234         __le32                  version;
2235         __le32                  flags;
2236
2237         __le32                  u64s; /* size of _data[] in u64s */
2238
2239         __u8                    encrypted_start[0];
2240
2241         __le16                  _read_clock; /* no longer used */
2242         __le16                  _write_clock;
2243
2244         /* Sequence number of oldest dirty journal entry */
2245         __le64                  last_seq;
2246
2248         struct jset_entry       start[0];
2249         __u64                   _data[];
2250 } __packed __aligned(8);
2251
2252 LE32_BITMASK(JSET_CSUM_TYPE,    struct jset, flags, 0, 4);
2253 LE32_BITMASK(JSET_BIG_ENDIAN,   struct jset, flags, 4, 5);
2254 LE32_BITMASK(JSET_NO_FLUSH,     struct jset, flags, 5, 6);
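
/*
 * Sketch: a journal entry belonging to this filesystem is identified by
 * comparing its magic against __jset_magic(); the real validation also
 * checks version, csum, etc.:
 */
static inline bool jset_magic_ok_example(struct bch_sb *sb, struct jset *j)
{
        return __le64_to_cpu(j->magic) == __jset_magic(sb);
}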
2255
2256 #define BCH_JOURNAL_BUCKETS_MIN         8
2257
2258 /* Btree: */
2259
2260 enum btree_id_flags {
2261         BTREE_ID_EXTENTS        = BIT(0),
2262         BTREE_ID_SNAPSHOTS      = BIT(1),
2263         BTREE_ID_SNAPSHOT_FIELD = BIT(2),
2264         BTREE_ID_DATA           = BIT(3),
2265 };
2266
2267 #define BCH_BTREE_IDS()                                                         \
2268         x(extents,              0,      BTREE_ID_EXTENTS|BTREE_ID_SNAPSHOTS|BTREE_ID_DATA,\
2269           BIT_ULL(KEY_TYPE_whiteout)|                                           \
2270           BIT_ULL(KEY_TYPE_error)|                                              \
2271           BIT_ULL(KEY_TYPE_cookie)|                                             \
2272           BIT_ULL(KEY_TYPE_extent)|                                             \
2273           BIT_ULL(KEY_TYPE_reservation)|                                        \
2274           BIT_ULL(KEY_TYPE_reflink_p)|                                          \
2275           BIT_ULL(KEY_TYPE_inline_data))                                        \
2276         x(inodes,               1,      BTREE_ID_SNAPSHOTS,                     \
2277           BIT_ULL(KEY_TYPE_whiteout)|                                           \
2278           BIT_ULL(KEY_TYPE_inode)|                                              \
2279           BIT_ULL(KEY_TYPE_inode_v2)|                                           \
2280           BIT_ULL(KEY_TYPE_inode_v3)|                                           \
2281           BIT_ULL(KEY_TYPE_inode_generation))                                   \
2282         x(dirents,              2,      BTREE_ID_SNAPSHOTS,                     \
2283           BIT_ULL(KEY_TYPE_whiteout)|                                           \
2284           BIT_ULL(KEY_TYPE_hash_whiteout)|                                      \
2285           BIT_ULL(KEY_TYPE_dirent))                                             \
2286         x(xattrs,               3,      BTREE_ID_SNAPSHOTS,                     \
2287           BIT_ULL(KEY_TYPE_whiteout)|                                           \
2288           BIT_ULL(KEY_TYPE_cookie)|                                             \
2289           BIT_ULL(KEY_TYPE_hash_whiteout)|                                      \
2290           BIT_ULL(KEY_TYPE_xattr))                                              \
2291         x(alloc,                4,      0,                                      \
2292           BIT_ULL(KEY_TYPE_alloc)|                                              \
2293           BIT_ULL(KEY_TYPE_alloc_v2)|                                           \
2294           BIT_ULL(KEY_TYPE_alloc_v3)|                                           \
2295           BIT_ULL(KEY_TYPE_alloc_v4))                                           \
2296         x(quotas,               5,      0,                                      \
2297           BIT_ULL(KEY_TYPE_quota))                                              \
2298         x(stripes,              6,      0,                                      \
2299           BIT_ULL(KEY_TYPE_stripe))                                             \
2300         x(reflink,              7,      BTREE_ID_EXTENTS|BTREE_ID_DATA,         \
2301           BIT_ULL(KEY_TYPE_reflink_v)|                                          \
2302           BIT_ULL(KEY_TYPE_indirect_inline_data))                               \
2303         x(subvolumes,           8,      0,                                      \
2304           BIT_ULL(KEY_TYPE_subvolume))                                          \
2305         x(snapshots,            9,      0,                                      \
2306           BIT_ULL(KEY_TYPE_snapshot))                                           \
2307         x(lru,                  10,     0,                                      \
2308           BIT_ULL(KEY_TYPE_set))                                                \
2309         x(freespace,            11,     BTREE_ID_EXTENTS,                       \
2310           BIT_ULL(KEY_TYPE_set))                                                \
2311         x(need_discard,         12,     0,                                      \
2312           BIT_ULL(KEY_TYPE_set))                                                \
2313         x(backpointers,         13,     0,                                      \
2314           BIT_ULL(KEY_TYPE_backpointer))                                        \
2315         x(bucket_gens,          14,     0,                                      \
2316           BIT_ULL(KEY_TYPE_bucket_gens))                                        \
2317         x(snapshot_trees,       15,     0,                                      \
2318           BIT_ULL(KEY_TYPE_snapshot_tree))                                      \
2319         x(deleted_inodes,       16,     BTREE_ID_SNAPSHOT_FIELD,                \
2320           BIT_ULL(KEY_TYPE_set))                                                \
2321         x(logged_ops,           17,     0,                                      \
2322           BIT_ULL(KEY_TYPE_logged_op_truncate)|                                 \
2323           BIT_ULL(KEY_TYPE_logged_op_finsert))                                  \
2324         x(rebalance_work,       18,     BTREE_ID_SNAPSHOT_FIELD,                \
2325           BIT_ULL(KEY_TYPE_set)|BIT_ULL(KEY_TYPE_cookie))
2326
2327 enum btree_id {
2328 #define x(name, nr, ...) BTREE_ID_##name = nr,
2329         BCH_BTREE_IDS()
2330 #undef x
2331         BTREE_ID_NR
2332 };
2333
2334 #define BTREE_MAX_DEPTH         4U
2335
2336 /* Btree nodes */
2337
2338 /*
2339  * Btree nodes
2340  *
2341  * On disk a btree node is a list/log of these; within each set the keys are
2342  * sorted
2343  */
2344 struct bset {
2345         __le64                  seq;
2346
2347         /*
2348          * Highest journal entry this bset contains keys for.
2349          * If on recovery we don't see that journal entry, this bset is ignored:
2350          * this allows us to preserve the order of all index updates after a
2351          * crash, since the journal records a total order of all index updates
2352          * and anything that didn't make it to the journal doesn't get used.
2353          */
2354         __le64                  journal_seq;
2355
2356         __le32                  flags;
2357         __le16                  version;
2358         __le16                  u64s; /* count of _data[] in u64s */
2359
2360         struct bkey_packed      start[0];
2361         __u64                   _data[];
2362 } __packed __aligned(8);
2363
2364 LE32_BITMASK(BSET_CSUM_TYPE,    struct bset, flags, 0, 4);
2365
2366 LE32_BITMASK(BSET_BIG_ENDIAN,   struct bset, flags, 4, 5);
2367 LE32_BITMASK(BSET_SEPARATE_WHITEOUTS,
2368                                 struct bset, flags, 5, 6);
2369
2370 /* Sector offset within the btree node: */
2371 LE32_BITMASK(BSET_OFFSET,       struct bset, flags, 16, 32);
2372
2373 struct btree_node {
2374         struct bch_csum         csum;
2375         __le64                  magic;
2376
2377         /* this flags field is encrypted, unlike bset->flags: */
2378         __le64                  flags;
2379
2380         /* Closed interval: */
2381         struct bpos             min_key;
2382         struct bpos             max_key;
2383         struct bch_extent_ptr   _ptr; /* not used anymore */
2384         struct bkey_format      format;
2385
2386         union {
2387         struct bset             keys;
2388         struct {
2389                 __u8            pad[22];
2390                 __le16          u64s;
2391                 __u64           _data[0];
2393         };
2394         };
2395 } __packed __aligned(8);
2396
2397 LE64_BITMASK(BTREE_NODE_ID_LO,  struct btree_node, flags,  0,  4);
2398 LE64_BITMASK(BTREE_NODE_LEVEL,  struct btree_node, flags,  4,  8);
2399 LE64_BITMASK(BTREE_NODE_NEW_EXTENT_OVERWRITE,
2400                                 struct btree_node, flags,  8,  9);
2401 LE64_BITMASK(BTREE_NODE_ID_HI,  struct btree_node, flags,  9, 25);
2402 /* 25-32 unused */
2403 LE64_BITMASK(BTREE_NODE_SEQ,    struct btree_node, flags, 32, 64);
2404
2405 static inline __u64 BTREE_NODE_ID(struct btree_node *n)
2406 {
2407         return BTREE_NODE_ID_LO(n) | (BTREE_NODE_ID_HI(n) << 4);
2408 }
2409
2410 static inline void SET_BTREE_NODE_ID(struct btree_node *n, __u64 v)
2411 {
2412         SET_BTREE_NODE_ID_LO(n, v);
2413         SET_BTREE_NODE_ID_HI(n, v >> 4);
2414 }
2415
2416 struct btree_node_entry {
2417         struct bch_csum         csum;
2418
2419         union {
2420         struct bset             keys;
2421         struct {
2422                 __u8            pad[22];
2423                 __le16          u64s;
2424                 __u64           _data[0];
2425         };
2426         };
2427 } __packed __aligned(8);
2428
2429 #endif /* _BCACHEFS_FORMAT_H */