1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (C) 2010 Kent Overstreet <kent.overstreet@gmail.com>
5 * Code for managing the extent btree and dynamically updating the writeback dirty sector count.
10 #include "bkey_methods.h"
13 #include "btree_iter.h"
18 #include "disk_groups.h"
29 static unsigned bch2_crc_field_size_max[] = {
30 [BCH_EXTENT_ENTRY_crc32] = CRC32_SIZE_MAX,
31 [BCH_EXTENT_ENTRY_crc64] = CRC64_SIZE_MAX,
32 [BCH_EXTENT_ENTRY_crc128] = CRC128_SIZE_MAX,
35 static void bch2_extent_crc_pack(union bch_extent_crc *,
36 struct bch_extent_crc_unpacked,
37 enum bch_extent_entry_type);
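/* Find the entry tracking IO failures on a given device, if one is recorded: */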
39 static struct bch_dev_io_failures *dev_io_failures(struct bch_io_failures *f,
42 struct bch_dev_io_failures *i;
44 for (i = f->devs; i < f->devs + f->nr; i++)
51 void bch2_mark_io_failure(struct bch_io_failures *failed,
52 struct extent_ptr_decoded *p)
54 struct bch_dev_io_failures *f = dev_io_failures(failed, p->ptr.dev);
57 BUG_ON(failed->nr >= ARRAY_SIZE(failed->devs));
59 f = &failed->devs[failed->nr++];
64 } else if (p->idx != f->idx) {
74 * returns true if p1 is better than p2:
76 static inline bool ptr_better(struct bch_fs *c,
77 const struct extent_ptr_decoded p1,
78 const struct extent_ptr_decoded p2)
80 if (likely(!p1.idx && !p2.idx)) {
81 struct bch_dev *dev1 = bch_dev_bkey_exists(c, p1.ptr.dev);
82 struct bch_dev *dev2 = bch_dev_bkey_exists(c, p2.ptr.dev);
84 u64 l1 = atomic64_read(&dev1->cur_latency[READ]);
85 u64 l2 = atomic64_read(&dev2->cur_latency[READ]);
87 /* Pick at random, biased in favor of the faster device: */
89 return bch2_rand_range(l1 + l2) > l1;
92 if (bch2_force_reconstruct_read)
93 return p1.idx > p2.idx;
95 return p1.idx < p2.idx;
99 * This picks a non-stale pointer to read from, preferring devices that haven't
100 * seen IO failures. @failed (which may be NULL) records previous failures and is
101 * used to deprioritize pointers, or skip them once their retries are exhausted.
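 *
 * An illustrative retry loop on the read path (sketch only; read_one_ptr() is
 * a hypothetical helper returning 0 on success, not something defined here):
 *
 *        struct bch_io_failures failed = { .nr = 0 };
 *        struct extent_ptr_decoded pick;
 *
 *        while (bch2_bkey_pick_read_device(c, k, &failed, &pick) > 0) {
 *                if (!read_one_ptr(c, &pick))
 *                        break;
 *                bch2_mark_io_failure(&failed, &pick);
 *        }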
103 int bch2_bkey_pick_read_device(struct bch_fs *c, struct bkey_s_c k,
104 struct bch_io_failures *failed,
105 struct extent_ptr_decoded *pick)
107 struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
108 const union bch_extent_entry *entry;
109 struct extent_ptr_decoded p;
110 struct bch_dev_io_failures *f;
114 if (k.k->type == KEY_TYPE_error)
117 bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
119 * Unwritten extent: no need to actually read, treat it as a
120 * hole and return 0s:
125 ca = bch_dev_bkey_exists(c, p.ptr.dev);
128 * If there are any dirty pointers it's an error if we can't
131 if (!ret && !p.ptr.cached)
134 if (p.ptr.cached && ptr_stale(ca, &p.ptr))
137 f = failed ? dev_io_failures(failed, p.ptr.dev) : NULL;
139 p.idx = f->nr_failed < f->nr_retries
144 !bch2_dev_is_readable(ca))
147 if (bch2_force_reconstruct_read &&
151 if (p.idx >= (unsigned) p.has_ec + 1)
154 if (ret > 0 && !ptr_better(c, p, *pick))
164 /* KEY_TYPE_btree_ptr: */
166 int bch2_btree_ptr_invalid(struct bch_fs *c, struct bkey_s_c k,
167 enum bkey_invalid_flags flags,
168 struct printbuf *err)
172 bkey_fsck_err_on(bkey_val_u64s(k.k) > BCH_REPLICAS_MAX, c, err,
173 btree_ptr_val_too_big,
174 "value too big (%zu > %u)", bkey_val_u64s(k.k), BCH_REPLICAS_MAX);
176 ret = bch2_bkey_ptrs_invalid(c, k, flags, err);
181 void bch2_btree_ptr_to_text(struct printbuf *out, struct bch_fs *c,
184 bch2_bkey_ptrs_to_text(out, c, k);
187 int bch2_btree_ptr_v2_invalid(struct bch_fs *c, struct bkey_s_c k,
188 enum bkey_invalid_flags flags,
189 struct printbuf *err)
193 bkey_fsck_err_on(bkey_val_u64s(k.k) > BKEY_BTREE_PTR_VAL_U64s_MAX, c, err,
194 btree_ptr_v2_val_too_big,
195 "value too big (%zu > %zu)",
196 bkey_val_u64s(k.k), BKEY_BTREE_PTR_VAL_U64s_MAX);
198 ret = bch2_bkey_ptrs_invalid(c, k, flags, err);
203 void bch2_btree_ptr_v2_to_text(struct printbuf *out, struct bch_fs *c,
206 struct bkey_s_c_btree_ptr_v2 bp = bkey_s_c_to_btree_ptr_v2(k);
208 prt_printf(out, "seq %llx written %u min_key %s",
209 le64_to_cpu(bp.v->seq),
210 le16_to_cpu(bp.v->sectors_written),
211 BTREE_PTR_RANGE_UPDATED(bp.v) ? "R " : "");
213 bch2_bpos_to_text(out, bp.v->min_key);
214 prt_printf(out, " ");
215 bch2_bkey_ptrs_to_text(out, c, k);
218 void bch2_btree_ptr_v2_compat(enum btree_id btree_id, unsigned version,
219 unsigned big_endian, int write,
222 struct bkey_s_btree_ptr_v2 bp = bkey_s_to_btree_ptr_v2(k);
224 compat_bpos(0, btree_id, version, big_endian, write, &bp.v->min_key);
226 if (version < bcachefs_metadata_version_inode_btree_change &&
227 btree_id_is_extents(btree_id) &&
228 !bkey_eq(bp.v->min_key, POS_MIN))
229 bp.v->min_key = write
230 ? bpos_nosnap_predecessor(bp.v->min_key)
231 : bpos_nosnap_successor(bp.v->min_key);
234 /* KEY_TYPE_extent: */
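/*
 * Try to merge @r into @l: returns true on success. Pointers must match entry
 * for entry (same device, generation and erasure coding, with @r starting
 * exactly where @l ends on disk), and the checksum entries must either already
 * cover the combined range or be mergeable into a single entry.
 */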
236 bool bch2_extent_merge(struct bch_fs *c, struct bkey_s l, struct bkey_s_c r)
238 struct bkey_ptrs l_ptrs = bch2_bkey_ptrs(l);
239 struct bkey_ptrs_c r_ptrs = bch2_bkey_ptrs_c(r);
240 union bch_extent_entry *en_l;
241 const union bch_extent_entry *en_r;
242 struct extent_ptr_decoded lp, rp;
248 while (en_l < l_ptrs.end && en_r < r_ptrs.end) {
249 if (extent_entry_type(en_l) != extent_entry_type(en_r))
252 en_l = extent_entry_next(en_l);
253 en_r = extent_entry_next(en_r);
256 if (en_l < l_ptrs.end || en_r < r_ptrs.end)
261 lp.crc = bch2_extent_crc_unpack(l.k, NULL);
262 rp.crc = bch2_extent_crc_unpack(r.k, NULL);
264 while (__bkey_ptr_next_decode(l.k, l_ptrs.end, lp, en_l) &&
265 __bkey_ptr_next_decode(r.k, r_ptrs.end, rp, en_r)) {
266 if (lp.ptr.offset + lp.crc.offset + lp.crc.live_size !=
267 rp.ptr.offset + rp.crc.offset ||
268 lp.ptr.dev != rp.ptr.dev ||
269 lp.ptr.gen != rp.ptr.gen ||
270 lp.ptr.unwritten != rp.ptr.unwritten ||
271 lp.has_ec != rp.has_ec)
274 /* Extents may not straddle buckets: */
275 ca = bch_dev_bkey_exists(c, lp.ptr.dev);
276 if (PTR_BUCKET_NR(ca, &lp.ptr) != PTR_BUCKET_NR(ca, &rp.ptr))
279 if (lp.has_ec != rp.has_ec ||
281 (lp.ec.block != rp.ec.block ||
282 lp.ec.redundancy != rp.ec.redundancy ||
283 lp.ec.idx != rp.ec.idx)))
286 if (lp.crc.compression_type != rp.crc.compression_type ||
287 lp.crc.nonce != rp.crc.nonce)
290 if (lp.crc.offset + lp.crc.live_size + rp.crc.live_size <=
291 lp.crc.uncompressed_size) {
292 /* can use left extent's crc entry */
293 } else if (lp.crc.live_size <= rp.crc.offset) {
294 /* can use right extent's crc entry */
296 /* check if checksums can be merged: */
297 if (lp.crc.csum_type != rp.crc.csum_type ||
298 lp.crc.nonce != rp.crc.nonce ||
299 crc_is_compressed(lp.crc) ||
300 !bch2_checksum_mergeable(lp.crc.csum_type))
303 if (lp.crc.offset + lp.crc.live_size != lp.crc.compressed_size ||
307 if (lp.crc.csum_type &&
308 lp.crc.uncompressed_size +
309 rp.crc.uncompressed_size > (c->opts.encoded_extent_max >> 9))
313 en_l = extent_entry_next(en_l);
314 en_r = extent_entry_next(en_r);
319 while (en_l < l_ptrs.end && en_r < r_ptrs.end) {
320 if (extent_entry_is_crc(en_l)) {
321 struct bch_extent_crc_unpacked crc_l = bch2_extent_crc_unpack(l.k, entry_to_crc(en_l));
322 struct bch_extent_crc_unpacked crc_r = bch2_extent_crc_unpack(r.k, entry_to_crc(en_r));
324 if (crc_l.uncompressed_size + crc_r.uncompressed_size >
325 bch2_crc_field_size_max[extent_entry_type(en_l)])
329 en_l = extent_entry_next(en_l);
330 en_r = extent_entry_next(en_r);
333 use_right_ptr = false;
336 while (en_l < l_ptrs.end) {
337 if (extent_entry_type(en_l) == BCH_EXTENT_ENTRY_ptr &&
339 en_l->ptr = en_r->ptr;
341 if (extent_entry_is_crc(en_l)) {
342 struct bch_extent_crc_unpacked crc_l =
343 bch2_extent_crc_unpack(l.k, entry_to_crc(en_l));
344 struct bch_extent_crc_unpacked crc_r =
345 bch2_extent_crc_unpack(r.k, entry_to_crc(en_r));
347 use_right_ptr = false;
349 if (crc_l.offset + crc_l.live_size + crc_r.live_size <=
350 crc_l.uncompressed_size) {
351 /* can use left extent's crc entry */
352 } else if (crc_l.live_size <= crc_r.offset) {
353 /* can use right extent's crc entry */
354 crc_r.offset -= crc_l.live_size;
355 bch2_extent_crc_pack(entry_to_crc(en_l), crc_r,
356 extent_entry_type(en_l));
357 use_right_ptr = true;
359 crc_l.csum = bch2_checksum_merge(crc_l.csum_type,
362 crc_r.uncompressed_size << 9);
364 crc_l.uncompressed_size += crc_r.uncompressed_size;
365 crc_l.compressed_size += crc_r.compressed_size;
366 bch2_extent_crc_pack(entry_to_crc(en_l), crc_l,
367 extent_entry_type(en_l));
371 en_l = extent_entry_next(en_l);
372 en_r = extent_entry_next(en_r);
375 bch2_key_resize(l.k, l.k->size + r.k->size);
379 /* KEY_TYPE_reservation: */
381 int bch2_reservation_invalid(struct bch_fs *c, struct bkey_s_c k,
382 enum bkey_invalid_flags flags,
383 struct printbuf *err)
385 struct bkey_s_c_reservation r = bkey_s_c_to_reservation(k);
388 bkey_fsck_err_on(!r.v->nr_replicas || r.v->nr_replicas > BCH_REPLICAS_MAX, c, err,
389 reservation_key_nr_replicas_invalid,
390 "invalid nr_replicas (%u)", r.v->nr_replicas);
395 void bch2_reservation_to_text(struct printbuf *out, struct bch_fs *c,
398 struct bkey_s_c_reservation r = bkey_s_c_to_reservation(k);
400 prt_printf(out, "generation %u replicas %u",
401 le32_to_cpu(r.v->generation),
405 bool bch2_reservation_merge(struct bch_fs *c, struct bkey_s _l, struct bkey_s_c _r)
407 struct bkey_s_reservation l = bkey_s_to_reservation(_l);
408 struct bkey_s_c_reservation r = bkey_s_c_to_reservation(_r);
410 if (l.v->generation != r.v->generation ||
411 l.v->nr_replicas != r.v->nr_replicas)
414 bch2_key_resize(l.k, l.k->size + r.k->size);
418 /* Extent checksum entries: */
420 /* returns true if not equal */
421 static inline bool bch2_crc_unpacked_cmp(struct bch_extent_crc_unpacked l,
422 struct bch_extent_crc_unpacked r)
424 return (l.csum_type != r.csum_type ||
425 l.compression_type != r.compression_type ||
426 l.compressed_size != r.compressed_size ||
427 l.uncompressed_size != r.uncompressed_size ||
428 l.offset != r.offset ||
429 l.live_size != r.live_size ||
430 l.nonce != r.nonce ||
431 bch2_crc_cmp(l.csum, r.csum));
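/*
 * A checksum entry can be "narrowed" if it's not compressed, checksums more
 * data than is currently live, and the new checksum has the same encryption
 * class:
 */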
434 static inline bool can_narrow_crc(struct bch_extent_crc_unpacked u,
435 struct bch_extent_crc_unpacked n)
437 return !crc_is_compressed(u) &&
439 u.uncompressed_size > u.live_size &&
440 bch2_csum_type_is_encryption(u.csum_type) ==
441 bch2_csum_type_is_encryption(n.csum_type);
444 bool bch2_can_narrow_extent_crcs(struct bkey_s_c k,
445 struct bch_extent_crc_unpacked n)
447 struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
448 struct bch_extent_crc_unpacked crc;
449 const union bch_extent_entry *i;
454 bkey_for_each_crc(k.k, ptrs, crc, i)
455 if (can_narrow_crc(crc, n))
462 * We're writing another replica for this extent, so while we've got the data in
463 * memory we'll be computing a new checksum for the currently live data.
465 * If there are other replicas we aren't moving, and they are checksummed but
466 * not compressed, we can modify them to point to only the data that is
467 * currently live (so that readers won't have to bounce) while we've got the
470 bool bch2_bkey_narrow_crcs(struct bkey_i *k, struct bch_extent_crc_unpacked n)
472 struct bkey_ptrs ptrs = bch2_bkey_ptrs(bkey_i_to_s(k));
473 struct bch_extent_crc_unpacked u;
474 struct extent_ptr_decoded p;
475 union bch_extent_entry *i;
478 /* Find a checksum entry that covers only live data: */
480 bkey_for_each_crc(&k->k, ptrs, u, i)
481 if (!crc_is_compressed(u) &&
483 u.live_size == u.uncompressed_size) {
490 BUG_ON(crc_is_compressed(n));
492 BUG_ON(n.live_size != k->k.size);
494 restart_narrow_pointers:
495 ptrs = bch2_bkey_ptrs(bkey_i_to_s(k));
497 bkey_for_each_ptr_decode(&k->k, ptrs, p, i)
498 if (can_narrow_crc(p.crc, n)) {
499 bch2_bkey_drop_ptr_noerror(bkey_i_to_s(k), &i->ptr);
500 p.ptr.offset += p.crc.offset;
502 bch2_extent_ptr_decoded_append(k, &p);
504 goto restart_narrow_pointers;
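/* Pack an unpacked crc into the chosen on-disk entry format (crc32/64/128): */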
510 static void bch2_extent_crc_pack(union bch_extent_crc *dst,
511 struct bch_extent_crc_unpacked src,
512 enum bch_extent_entry_type type)
514 #define set_common_fields(_dst, _src) \
515 _dst.type = 1 << type; \
516 _dst.csum_type = _src.csum_type, \
517 _dst.compression_type = _src.compression_type, \
518 _dst._compressed_size = _src.compressed_size - 1, \
519 _dst._uncompressed_size = _src.uncompressed_size - 1, \
520 _dst.offset = _src.offset
523 case BCH_EXTENT_ENTRY_crc32:
524 set_common_fields(dst->crc32, src);
525 dst->crc32.csum = (u32 __force) *((__le32 *) &src.csum.lo);
527 case BCH_EXTENT_ENTRY_crc64:
528 set_common_fields(dst->crc64, src);
529 dst->crc64.nonce = src.nonce;
530 dst->crc64.csum_lo = (u64 __force) src.csum.lo;
531 dst->crc64.csum_hi = (u64 __force) *((__le16 *) &src.csum.hi);
533 case BCH_EXTENT_ENTRY_crc128:
534 set_common_fields(dst->crc128, src);
535 dst->crc128.nonce = src.nonce;
536 dst->crc128.csum = src.csum;
541 #undef set_common_fields
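/*
 * Append a checksum entry to @k, using the smallest on-disk format that can
 * represent the checksum bytes, size and nonce:
 */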
544 void bch2_extent_crc_append(struct bkey_i *k,
545 struct bch_extent_crc_unpacked new)
547 struct bkey_ptrs ptrs = bch2_bkey_ptrs(bkey_i_to_s(k));
548 union bch_extent_crc *crc = (void *) ptrs.end;
549 enum bch_extent_entry_type type;
551 if (bch_crc_bytes[new.csum_type] <= 4 &&
552 new.uncompressed_size <= CRC32_SIZE_MAX &&
553 new.nonce <= CRC32_NONCE_MAX)
554 type = BCH_EXTENT_ENTRY_crc32;
555 else if (bch_crc_bytes[new.csum_type] <= 10 &&
556 new.uncompressed_size <= CRC64_SIZE_MAX &&
557 new.nonce <= CRC64_NONCE_MAX)
558 type = BCH_EXTENT_ENTRY_crc64;
559 else if (bch_crc_bytes[new.csum_type] <= 16 &&
560 new.uncompressed_size <= CRC128_SIZE_MAX &&
561 new.nonce <= CRC128_NONCE_MAX)
562 type = BCH_EXTENT_ENTRY_crc128;
566 bch2_extent_crc_pack(crc, new, type);
568 k->k.u64s += extent_entry_u64s(ptrs.end);
570 EBUG_ON(bkey_val_u64s(&k->k) > BKEY_EXTENT_VAL_U64s_MAX);
573 /* Generic code for keys with pointers: */
575 unsigned bch2_bkey_nr_ptrs(struct bkey_s_c k)
577 return bch2_bkey_devs(k).nr;
580 unsigned bch2_bkey_nr_ptrs_allocated(struct bkey_s_c k)
582 return k.k->type == KEY_TYPE_reservation
583 ? bkey_s_c_to_reservation(k).v->nr_replicas
584 : bch2_bkey_dirty_devs(k).nr;
587 unsigned bch2_bkey_nr_ptrs_fully_allocated(struct bkey_s_c k)
591 if (k.k->type == KEY_TYPE_reservation) {
592 ret = bkey_s_c_to_reservation(k).v->nr_replicas;
594 struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
595 const union bch_extent_entry *entry;
596 struct extent_ptr_decoded p;
598 bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
599 ret += !p.ptr.cached && !crc_is_compressed(p.crc);
605 unsigned bch2_bkey_sectors_compressed(struct bkey_s_c k)
607 struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
608 const union bch_extent_entry *entry;
609 struct extent_ptr_decoded p;
612 bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
613 if (!p.ptr.cached && crc_is_compressed(p.crc))
614 ret += p.crc.compressed_size;
619 bool bch2_bkey_is_incompressible(struct bkey_s_c k)
621 struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
622 const union bch_extent_entry *entry;
623 struct bch_extent_crc_unpacked crc;
625 bkey_for_each_crc(k.k, ptrs, crc, entry)
626 if (crc.compression_type == BCH_COMPRESSION_TYPE_incompressible)
631 unsigned bch2_bkey_replicas(struct bch_fs *c, struct bkey_s_c k)
633 struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
634 const union bch_extent_entry *entry;
635 struct extent_ptr_decoded p = { 0 };
636 unsigned replicas = 0;
638 bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
643 replicas += p.ec.redundancy;
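/*
 * Durability contribution of a single pointer: an erasure coded pointer counts
 * as its stripe's redundancy plus one, otherwise the device's durability is
 * used:
 */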
652 static inline unsigned __extent_ptr_durability(struct bch_dev *ca, struct extent_ptr_decoded *p)
658 ? p->ec.redundancy + 1
662 unsigned bch2_extent_ptr_desired_durability(struct bch_fs *c, struct extent_ptr_decoded *p)
664 struct bch_dev *ca = bch_dev_bkey_exists(c, p->ptr.dev);
666 return __extent_ptr_durability(ca, p);
669 unsigned bch2_extent_ptr_durability(struct bch_fs *c, struct extent_ptr_decoded *p)
671 struct bch_dev *ca = bch_dev_bkey_exists(c, p->ptr.dev);
673 if (ca->mi.state == BCH_MEMBER_STATE_failed)
676 return __extent_ptr_durability(ca, p);
679 unsigned bch2_bkey_durability(struct bch_fs *c, struct bkey_s_c k)
681 struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
682 const union bch_extent_entry *entry;
683 struct extent_ptr_decoded p;
684 unsigned durability = 0;
686 bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
687 durability += bch2_extent_ptr_durability(c, &p);
692 static unsigned bch2_bkey_durability_safe(struct bch_fs *c, struct bkey_s_c k)
694 struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
695 const union bch_extent_entry *entry;
696 struct extent_ptr_decoded p;
697 unsigned durability = 0;
699 bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
700 if (p.ptr.dev < c->sb.nr_devices && c->devs[p.ptr.dev])
701 durability += bch2_extent_ptr_durability(c, &p);
706 void bch2_bkey_extent_entry_drop(struct bkey_i *k, union bch_extent_entry *entry)
708 union bch_extent_entry *end = bkey_val_end(bkey_i_to_s(k));
709 union bch_extent_entry *next = extent_entry_next(entry);
711 memmove_u64s(entry, next, (u64 *) end - (u64 *) next);
712 k->k.u64s -= extent_entry_u64s(entry);
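/*
 * Re-pack a decoded pointer into @k: reuse an existing matching crc entry if
 * there is one, otherwise append a new one, then insert the pointer (and its
 * stripe pointer, if any) after it:
 */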
715 void bch2_extent_ptr_decoded_append(struct bkey_i *k,
716 struct extent_ptr_decoded *p)
718 struct bkey_ptrs ptrs = bch2_bkey_ptrs(bkey_i_to_s(k));
719 struct bch_extent_crc_unpacked crc =
720 bch2_extent_crc_unpack(&k->k, NULL);
721 union bch_extent_entry *pos;
723 if (!bch2_crc_unpacked_cmp(crc, p->crc)) {
728 bkey_for_each_crc(&k->k, ptrs, crc, pos)
729 if (!bch2_crc_unpacked_cmp(crc, p->crc)) {
730 pos = extent_entry_next(pos);
734 bch2_extent_crc_append(k, p->crc);
735 pos = bkey_val_end(bkey_i_to_s(k));
737 p->ptr.type = 1 << BCH_EXTENT_ENTRY_ptr;
738 __extent_entry_insert(k, pos, to_entry(&p->ptr));
741 p->ec.type = 1 << BCH_EXTENT_ENTRY_stripe_ptr;
742 __extent_entry_insert(k, pos, to_entry(&p->ec));
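/*
 * Extent entries are variable length and carry no back links, so find the
 * previous entry by walking forward from the start:
 */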
746 static union bch_extent_entry *extent_entry_prev(struct bkey_ptrs ptrs,
747 union bch_extent_entry *entry)
749 union bch_extent_entry *i = ptrs.start;
754 while (extent_entry_next(i) != entry)
755 i = extent_entry_next(i);
760 * Returns pointer to the next entry after the one being dropped:
762 union bch_extent_entry *bch2_bkey_drop_ptr_noerror(struct bkey_s k,
763 struct bch_extent_ptr *ptr)
765 struct bkey_ptrs ptrs = bch2_bkey_ptrs(k);
766 union bch_extent_entry *entry = to_entry(ptr), *next;
767 union bch_extent_entry *ret = entry;
768 bool drop_crc = true;
770 EBUG_ON(ptr < &ptrs.start->ptr ||
771 ptr >= &ptrs.end->ptr);
772 EBUG_ON(ptr->type != 1 << BCH_EXTENT_ENTRY_ptr);
774 for (next = extent_entry_next(entry);
776 next = extent_entry_next(next)) {
777 if (extent_entry_is_crc(next)) {
779 } else if (extent_entry_is_ptr(next)) {
785 extent_entry_drop(k, entry);
787 while ((entry = extent_entry_prev(ptrs, entry))) {
788 if (extent_entry_is_ptr(entry))
791 if ((extent_entry_is_crc(entry) && drop_crc) ||
792 extent_entry_is_stripe_ptr(entry)) {
793 ret = (void *) ret - extent_entry_bytes(entry);
794 extent_entry_drop(k, entry);
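/*
 * Like bch2_bkey_drop_ptr_noerror(), but also updates the key as a whole: if
 * the last dirty pointer was just dropped the key becomes KEY_TYPE_error, and
 * if no pointers remain at all it becomes KEY_TYPE_deleted:
 */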
801 union bch_extent_entry *bch2_bkey_drop_ptr(struct bkey_s k,
802 struct bch_extent_ptr *ptr)
804 bool have_dirty = bch2_bkey_dirty_devs(k.s_c).nr;
805 union bch_extent_entry *ret =
806 bch2_bkey_drop_ptr_noerror(k, ptr);
809 * If we deleted all the dirty pointers and there's still cached
810 * pointers, we could set the cached pointers to dirty if they're not
811 * stale - but to do that correctly we'd need to grab an open_bucket
812 * reference so that we don't race with bucket reuse:
815 !bch2_bkey_dirty_devs(k.s_c).nr) {
816 k.k->type = KEY_TYPE_error;
817 set_bkey_val_u64s(k.k, 0);
819 } else if (!bch2_bkey_nr_ptrs(k.s_c)) {
820 k.k->type = KEY_TYPE_deleted;
821 set_bkey_val_u64s(k.k, 0);
828 void bch2_bkey_drop_device(struct bkey_s k, unsigned dev)
830 struct bch_extent_ptr *ptr;
832 bch2_bkey_drop_ptrs(k, ptr, ptr->dev == dev);
835 void bch2_bkey_drop_device_noerror(struct bkey_s k, unsigned dev)
837 struct bch_extent_ptr *ptr = bch2_bkey_has_device(k, dev);
840 bch2_bkey_drop_ptr_noerror(k, ptr);
843 const struct bch_extent_ptr *bch2_bkey_has_device_c(struct bkey_s_c k, unsigned dev)
845 struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
846 const struct bch_extent_ptr *ptr;
848 bkey_for_each_ptr(ptrs, ptr)
855 bool bch2_bkey_has_target(struct bch_fs *c, struct bkey_s_c k, unsigned target)
857 struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
858 const struct bch_extent_ptr *ptr;
860 bkey_for_each_ptr(ptrs, ptr)
861 if (bch2_dev_in_target(c, ptr->dev, target) &&
863 !ptr_stale(bch_dev_bkey_exists(c, ptr->dev), ptr)))
869 bool bch2_bkey_matches_ptr(struct bch_fs *c, struct bkey_s_c k,
870 struct bch_extent_ptr m, u64 offset)
872 struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
873 const union bch_extent_entry *entry;
874 struct extent_ptr_decoded p;
876 bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
877 if (p.ptr.dev == m.dev &&
878 p.ptr.gen == m.gen &&
879 (s64) p.ptr.offset + p.crc.offset - bkey_start_offset(k.k) ==
880 (s64) m.offset - offset)
887 * Returns true if two extents refer to the same data:
889 bool bch2_extents_match(struct bkey_s_c k1, struct bkey_s_c k2)
891 if (k1.k->type != k2.k->type)
894 if (bkey_extent_is_direct_data(k1.k)) {
895 struct bkey_ptrs_c ptrs1 = bch2_bkey_ptrs_c(k1);
896 struct bkey_ptrs_c ptrs2 = bch2_bkey_ptrs_c(k2);
897 const union bch_extent_entry *entry1, *entry2;
898 struct extent_ptr_decoded p1, p2;
900 if (bkey_extent_is_unwritten(k1) != bkey_extent_is_unwritten(k2))
903 bkey_for_each_ptr_decode(k1.k, ptrs1, p1, entry1)
904 bkey_for_each_ptr_decode(k2.k, ptrs2, p2, entry2)
905 if (p1.ptr.dev == p2.ptr.dev &&
906 p1.ptr.gen == p2.ptr.gen &&
907 (s64) p1.ptr.offset + p1.crc.offset - bkey_start_offset(k1.k) ==
908 (s64) p2.ptr.offset + p2.crc.offset - bkey_start_offset(k2.k))
913 /* KEY_TYPE_deleted, etc. */
918 struct bch_extent_ptr *
919 bch2_extent_has_ptr(struct bkey_s_c k1, struct extent_ptr_decoded p1, struct bkey_s k2)
921 struct bkey_ptrs ptrs2 = bch2_bkey_ptrs(k2);
922 union bch_extent_entry *entry2;
923 struct extent_ptr_decoded p2;
925 bkey_for_each_ptr_decode(k2.k, ptrs2, p2, entry2)
926 if (p1.ptr.dev == p2.ptr.dev &&
927 p1.ptr.gen == p2.ptr.gen &&
928 (s64) p1.ptr.offset + p1.crc.offset - bkey_start_offset(k1.k) ==
929 (s64) p2.ptr.offset + p2.crc.offset - bkey_start_offset(k2.k))
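/*
 * Mark @ptr as cached, dropping any stripe pointer that belongs to it, since
 * cached pointers are never erasure coded:
 */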
935 void bch2_extent_ptr_set_cached(struct bkey_s k, struct bch_extent_ptr *ptr)
937 struct bkey_ptrs ptrs = bch2_bkey_ptrs(k);
938 union bch_extent_entry *entry;
939 union bch_extent_entry *ec = NULL;
941 bkey_extent_entry_for_each(ptrs, entry) {
942 if (&entry->ptr == ptr) {
945 extent_entry_drop(k, ec);
949 if (extent_entry_is_stripe_ptr(entry))
951 else if (extent_entry_is_ptr(entry))
959 * bch2_extent_normalize - clean up an extent, dropping stale pointers etc.
961 * Returns true if @k should be dropped entirely
963 * For existing keys, only called when btree nodes are being rewritten, not when
964 * they're merely being compacted/resorted in memory.
966 bool bch2_extent_normalize(struct bch_fs *c, struct bkey_s k)
968 struct bch_extent_ptr *ptr;
970 bch2_bkey_drop_ptrs(k, ptr,
972 ptr_stale(bch_dev_bkey_exists(c, ptr->dev), ptr));
974 return bkey_deleted(k.k);
977 void bch2_bkey_ptrs_to_text(struct printbuf *out, struct bch_fs *c,
980 struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
981 const union bch_extent_entry *entry;
985 prt_printf(out, "durability: %u ", bch2_bkey_durability_safe(c, k));
987 bkey_extent_entry_for_each(ptrs, entry) {
989 prt_printf(out, " ");
991 switch (__extent_entry_type(entry)) {
992 case BCH_EXTENT_ENTRY_ptr: {
993 const struct bch_extent_ptr *ptr = entry_to_ptr(entry);
994 struct bch_dev *ca = c && ptr->dev < c->sb.nr_devices && c->devs[ptr->dev]
995 ? bch_dev_bkey_exists(c, ptr->dev)
999 prt_printf(out, "ptr: %u:%llu gen %u%s", ptr->dev,
1000 (u64) ptr->offset, ptr->gen,
1001 ptr->cached ? " cached" : "");
1004 u64 b = sector_to_bucket_and_offset(ca, ptr->offset, &offset);
1006 prt_printf(out, "ptr: %u:%llu:%u gen %u",
1007 ptr->dev, b, offset, ptr->gen);
1009 prt_str(out, " cached");
1011 prt_str(out, " unwritten");
1012 if (ca && ptr_stale(ca, ptr))
1013 prt_printf(out, " stale");
1017 case BCH_EXTENT_ENTRY_crc32:
1018 case BCH_EXTENT_ENTRY_crc64:
1019 case BCH_EXTENT_ENTRY_crc128: {
1020 struct bch_extent_crc_unpacked crc =
1021 bch2_extent_crc_unpack(k.k, entry_to_crc(entry));
1023 prt_printf(out, "crc: c_size %u size %u offset %u nonce %u csum %s compress %s",
1024 crc.compressed_size,
1025 crc.uncompressed_size,
1026 crc.offset, crc.nonce,
1027 bch2_csum_types[crc.csum_type],
1028 bch2_compression_types[crc.compression_type]);
1031 case BCH_EXTENT_ENTRY_stripe_ptr: {
1032 const struct bch_extent_stripe_ptr *ec = &entry->stripe_ptr;
1034 prt_printf(out, "ec: idx %llu block %u",
1035 (u64) ec->idx, ec->block);
1038 case BCH_EXTENT_ENTRY_rebalance: {
1039 const struct bch_extent_rebalance *r = &entry->rebalance;
1041 prt_str(out, "rebalance: target ");
1043 bch2_target_to_text(out, c, r->target);
1045 prt_printf(out, "%u", r->target);
1046 prt_str(out, " compression ");
1047 bch2_compression_opt_to_text(out, r->compression);
1051 prt_printf(out, "(invalid extent entry %.16llx)", *((u64 *) entry));
1059 static int extent_ptr_invalid(struct bch_fs *c,
1061 enum bkey_invalid_flags flags,
1062 const struct bch_extent_ptr *ptr,
1063 unsigned size_ondisk,
1065 struct printbuf *err)
1067 struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
1068 const struct bch_extent_ptr *ptr2;
1074 if (!bch2_dev_exists2(c, ptr->dev)) {
1076 * If we're in the write path this key might have already been
1077 * overwritten, and we could be seeing a device that doesn't
1078 * exist anymore due to racing with device removal:
1080 if (flags & BKEY_INVALID_WRITE)
1083 bkey_fsck_err(c, err, ptr_to_invalid_device,
1084 "pointer to invalid device (%u)", ptr->dev);
1087 ca = bch_dev_bkey_exists(c, ptr->dev);
1088 bkey_for_each_ptr(ptrs, ptr2)
1089 bkey_fsck_err_on(ptr != ptr2 && ptr->dev == ptr2->dev, c, err,
1090 ptr_to_duplicate_device,
1091 "multiple pointers to same device (%u)", ptr->dev);
1093 bucket = sector_to_bucket_and_offset(ca, ptr->offset, &bucket_offset);
1095 bkey_fsck_err_on(bucket >= ca->mi.nbuckets, c, err,
1096 ptr_after_last_bucket,
1097 "pointer past last bucket (%llu > %llu)", bucket, ca->mi.nbuckets);
1098 bkey_fsck_err_on(ptr->offset < bucket_to_sector(ca, ca->mi.first_bucket), c, err,
1099 ptr_before_first_bucket,
1100 "pointer before first bucket (%llu < %u)", bucket, ca->mi.first_bucket);
1101 bkey_fsck_err_on(bucket_offset + size_ondisk > ca->mi.bucket_size, c, err,
1102 ptr_spans_multiple_buckets,
1103 "pointer spans multiple buckets (%u + %u > %u)",
1104 bucket_offset, size_ondisk, ca->mi.bucket_size);
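/*
 * Validate the full list of extent entries: entry types, per-pointer checks,
 * checksum/compression sanity, and whole-key constraints (pointer count,
 * mixed written/unwritten pointers, redundant crc or stripe entries):
 */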
1109 int bch2_bkey_ptrs_invalid(struct bch_fs *c, struct bkey_s_c k,
1110 enum bkey_invalid_flags flags,
1111 struct printbuf *err)
1113 struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
1114 const union bch_extent_entry *entry;
1115 struct bch_extent_crc_unpacked crc;
1116 unsigned size_ondisk = k.k->size;
1117 unsigned nonce = UINT_MAX;
1118 unsigned nr_ptrs = 0;
1119 bool have_written = false, have_unwritten = false, have_ec = false, crc_since_last_ptr = false;
1122 if (bkey_is_btree_ptr(k.k))
1123 size_ondisk = btree_sectors(c);
1125 bkey_extent_entry_for_each(ptrs, entry) {
1126 bkey_fsck_err_on(__extent_entry_type(entry) >= BCH_EXTENT_ENTRY_MAX, c, err,
1127 extent_ptrs_invalid_entry,
1128 "invalid extent entry type (got %u, max %u)",
1129 __extent_entry_type(entry), BCH_EXTENT_ENTRY_MAX);
1131 bkey_fsck_err_on(bkey_is_btree_ptr(k.k) &&
1132 !extent_entry_is_ptr(entry), c, err,
1133 btree_ptr_has_non_ptr,
1134 "has non ptr field");
1136 switch (extent_entry_type(entry)) {
1137 case BCH_EXTENT_ENTRY_ptr:
1138 ret = extent_ptr_invalid(c, k, flags, &entry->ptr,
1139 size_ondisk, false, err);
1143 bkey_fsck_err_on(entry->ptr.cached && have_ec, c, err,
1144 ptr_cached_and_erasure_coded,
1145 "cached, erasure coded ptr");
1147 if (!entry->ptr.unwritten)
1148 have_written = true;
1150 have_unwritten = true;
1153 crc_since_last_ptr = false;
1156 case BCH_EXTENT_ENTRY_crc32:
1157 case BCH_EXTENT_ENTRY_crc64:
1158 case BCH_EXTENT_ENTRY_crc128:
1159 crc = bch2_extent_crc_unpack(k.k, entry_to_crc(entry));
1161 bkey_fsck_err_on(crc.offset + crc.live_size > crc.uncompressed_size, c, err,
1162 ptr_crc_uncompressed_size_too_small,
1163 "checksum offset + key size > uncompressed size");
1164 bkey_fsck_err_on(!bch2_checksum_type_valid(c, crc.csum_type), c, err,
1165 ptr_crc_csum_type_unknown,
1166 "invalid checksum type");
1167 bkey_fsck_err_on(crc.compression_type >= BCH_COMPRESSION_TYPE_NR, c, err,
1168 ptr_crc_compression_type_unknown,
1169 "invalid compression type");
1171 if (bch2_csum_type_is_encryption(crc.csum_type)) {
1172 if (nonce == UINT_MAX)
1173 nonce = crc.offset + crc.nonce;
1174 else if (nonce != crc.offset + crc.nonce)
1175 bkey_fsck_err(c, err, ptr_crc_nonce_mismatch,
1179 bkey_fsck_err_on(crc_since_last_ptr, c, err,
1181 "redundant crc entry");
1182 crc_since_last_ptr = true;
1184 bkey_fsck_err_on(crc_is_encoded(crc) &&
1185 (crc.uncompressed_size > c->opts.encoded_extent_max >> 9) &&
1186 (flags & (BKEY_INVALID_WRITE|BKEY_INVALID_COMMIT)), c, err,
1187 ptr_crc_uncompressed_size_too_big,
1188 "too large encoded extent");
1190 size_ondisk = crc.compressed_size;
1192 case BCH_EXTENT_ENTRY_stripe_ptr:
1193 bkey_fsck_err_on(have_ec, c, err,
1194 ptr_stripe_redundant,
1195 "redundant stripe entry");
1198 case BCH_EXTENT_ENTRY_rebalance: {
1199 const struct bch_extent_rebalance *r = &entry->rebalance;
1201 if (!bch2_compression_opt_valid(r->compression)) {
1202 struct bch_compression_opt opt = __bch2_compression_decode(r->compression);
1203 prt_printf(err, "invalid compression opt %u:%u",
1204 opt.type, opt.level);
1205 return -BCH_ERR_invalid_bkey;
1212 bkey_fsck_err_on(!nr_ptrs, c, err,
1213 extent_ptrs_no_ptrs,
1215 bkey_fsck_err_on(nr_ptrs > BCH_BKEY_PTRS_MAX, c, err,
1216 extent_ptrs_too_many_ptrs,
1217 "too many ptrs: %u > %u", nr_ptrs, BCH_BKEY_PTRS_MAX);
1218 bkey_fsck_err_on(have_written && have_unwritten, c, err,
1219 extent_ptrs_written_and_unwritten,
1220 "extent with unwritten and written ptrs");
1221 bkey_fsck_err_on(k.k->type != KEY_TYPE_extent && have_unwritten, c, err,
1222 extent_ptrs_unwritten,
1223 "has unwritten ptrs");
1224 bkey_fsck_err_on(crc_since_last_ptr, c, err,
1225 extent_ptrs_redundant_crc,
1226 "redundant crc entry");
1227 bkey_fsck_err_on(have_ec, c, err,
1228 extent_ptrs_redundant_stripe,
1229 "redundant stripe entry");
1234 void bch2_ptr_swab(struct bkey_s k)
1236 struct bkey_ptrs ptrs = bch2_bkey_ptrs(k);
1237 union bch_extent_entry *entry;
1240 for (d = (u64 *) ptrs.start;
1241 d != (u64 *) ptrs.end;
1245 for (entry = ptrs.start;
1247 entry = extent_entry_next(entry)) {
1248 switch (extent_entry_type(entry)) {
1249 case BCH_EXTENT_ENTRY_ptr:
1251 case BCH_EXTENT_ENTRY_crc32:
1252 entry->crc32.csum = swab32(entry->crc32.csum);
1254 case BCH_EXTENT_ENTRY_crc64:
1255 entry->crc64.csum_hi = swab16(entry->crc64.csum_hi);
1256 entry->crc64.csum_lo = swab64(entry->crc64.csum_lo);
1258 case BCH_EXTENT_ENTRY_crc128:
1259 entry->crc128.csum.hi = (__force __le64)
1260 swab64((__force u64) entry->crc128.csum.hi);
1261 entry->crc128.csum.lo = (__force __le64)
1262 swab64((__force u64) entry->crc128.csum.lo);
1264 case BCH_EXTENT_ENTRY_stripe_ptr:
1266 case BCH_EXTENT_ENTRY_rebalance:
1272 const struct bch_extent_rebalance *bch2_bkey_rebalance_opts(struct bkey_s_c k)
1274 struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
1275 const union bch_extent_entry *entry;
1277 bkey_extent_entry_for_each(ptrs, entry)
1278 if (__extent_entry_type(entry) == BCH_EXTENT_ENTRY_rebalance)
1279 return &entry->rebalance;
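/*
 * Returns a bitmask of the pointers that would need rewriting to satisfy the
 * given background target and compression options; incompressible extents are
 * exempt from the compression check:
 */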
1284 unsigned bch2_bkey_ptrs_need_rebalance(struct bch_fs *c, struct bkey_s_c k,
1285 unsigned target, unsigned compression)
1287 struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
1288 unsigned rewrite_ptrs = 0;
1291 unsigned compression_type = bch2_compression_opt_to_type(compression);
1292 const union bch_extent_entry *entry;
1293 struct extent_ptr_decoded p;
1296 bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
1297 if (p.crc.compression_type == BCH_COMPRESSION_TYPE_incompressible) {
1299 goto incompressible;
1302 if (!p.ptr.cached && p.crc.compression_type != compression_type)
1303 rewrite_ptrs |= 1U << i;
1308 if (target && bch2_target_accepts_data(c, BCH_DATA_user, target)) {
1309 const struct bch_extent_ptr *ptr;
1312 bkey_for_each_ptr(ptrs, ptr) {
1313 if (!ptr->cached && !bch2_dev_in_target(c, ptr->dev, target))
1314 rewrite_ptrs |= 1U << i;
1319 return rewrite_ptrs;
1322 bool bch2_bkey_needs_rebalance(struct bch_fs *c, struct bkey_s_c k)
1324 const struct bch_extent_rebalance *r = bch2_bkey_rebalance_opts(k);
1327 * If it's an indirect extent, we don't delete the rebalance entry when
1328 * done so that we know what options were applied - check if it still
1332 k.k->type == KEY_TYPE_reflink_v &&
1333 !bch2_bkey_ptrs_need_rebalance(c, k, r->target, r->compression))
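/*
 * Add, update or drop the bch_extent_rebalance entry on @_k so that it matches
 * the wanted target/compression options; for indirect extents any existing
 * options take precedence and the entry is kept once set:
 */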
1339 int bch2_bkey_set_needs_rebalance(struct bch_fs *c, struct bkey_i *_k,
1340 unsigned target, unsigned compression)
1342 struct bkey_s k = bkey_i_to_s(_k);
1343 struct bch_extent_rebalance *r;
1344 bool needs_rebalance;
1346 if (!bkey_extent_is_direct_data(k.k))
1349 /* get existing rebalance entry: */
1350 r = (struct bch_extent_rebalance *) bch2_bkey_rebalance_opts(k.s_c);
1352 if (k.k->type == KEY_TYPE_reflink_v) {
1354 * indirect extents: existing options take precedence,
1355 * so that we don't move extents back and forth if
1356 * they're referenced by different inodes with different
1362 compression = r->compression;
1366 r->compression = compression;
1369 needs_rebalance = bch2_bkey_ptrs_need_rebalance(c, k.s_c, target, compression);
1371 if (needs_rebalance && !r) {
1372 union bch_extent_entry *new = bkey_val_end(k);
1374 new->rebalance.type = 1U << BCH_EXTENT_ENTRY_rebalance;
1375 new->rebalance.compression = compression;
1376 new->rebalance.target = target;
1377 new->rebalance.unused = 0;
1378 k.k->u64s += extent_entry_u64s(new);
1379 } else if (!needs_rebalance && r && k.k->type != KEY_TYPE_reflink_v) {
1381 * For indirect extents, don't delete the rebalance entry when
1382 * we're finished so that we know we specifically moved it or
1383 * compressed it to its current location/compression type
1385 extent_entry_drop(k, (union bch_extent_entry *) r);
1391 /* Generic extent code: */
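/*
 * Trim the front of @k so that it starts at @where: pointer offsets (or crc
 * offsets, for pointers covered by a checksum entry) and inline data are
 * advanced accordingly. E.g. cutting an extent spanning sectors [0, 16) at
 * offset 4 leaves [4, 16). Returns the change in value size, in u64s (<= 0):
 */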
1393 int bch2_cut_front_s(struct bpos where, struct bkey_s k)
1395 unsigned new_val_u64s = bkey_val_u64s(k.k);
1399 if (bkey_le(where, bkey_start_pos(k.k)))
1402 EBUG_ON(bkey_gt(where, k.k->p));
1404 sub = where.offset - bkey_start_offset(k.k);
1409 k.k->type = KEY_TYPE_deleted;
1413 switch (k.k->type) {
1414 case KEY_TYPE_extent:
1415 case KEY_TYPE_reflink_v: {
1416 struct bkey_ptrs ptrs = bch2_bkey_ptrs(k);
1417 union bch_extent_entry *entry;
1418 bool seen_crc = false;
1420 bkey_extent_entry_for_each(ptrs, entry) {
1421 switch (extent_entry_type(entry)) {
1422 case BCH_EXTENT_ENTRY_ptr:
1424 entry->ptr.offset += sub;
1426 case BCH_EXTENT_ENTRY_crc32:
1427 entry->crc32.offset += sub;
1429 case BCH_EXTENT_ENTRY_crc64:
1430 entry->crc64.offset += sub;
1432 case BCH_EXTENT_ENTRY_crc128:
1433 entry->crc128.offset += sub;
1435 case BCH_EXTENT_ENTRY_stripe_ptr:
1437 case BCH_EXTENT_ENTRY_rebalance:
1441 if (extent_entry_is_crc(entry))
1447 case KEY_TYPE_reflink_p: {
1448 struct bkey_s_reflink_p p = bkey_s_to_reflink_p(k);
1450 le64_add_cpu(&p.v->idx, sub);
1453 case KEY_TYPE_inline_data:
1454 case KEY_TYPE_indirect_inline_data: {
1455 void *p = bkey_inline_data_p(k);
1456 unsigned bytes = bkey_inline_data_bytes(k.k);
1458 sub = min_t(u64, sub << 9, bytes);
1460 memmove(p, p + sub, bytes - sub);
1462 new_val_u64s -= sub >> 3;
1467 val_u64s_delta = bkey_val_u64s(k.k) - new_val_u64s;
1468 BUG_ON(val_u64s_delta < 0);
1470 set_bkey_val_u64s(k.k, new_val_u64s);
1471 memset(bkey_val_end(k), 0, val_u64s_delta * sizeof(u64));
1472 return -val_u64s_delta;
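/*
 * Counterpart to bch2_cut_front_s(): truncate @k so that it ends at @where.
 * Only inline data keys need their value shrunk; returns the change in value
 * size, in u64s (<= 0):
 */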
1475 int bch2_cut_back_s(struct bpos where, struct bkey_s k)
1477 unsigned new_val_u64s = bkey_val_u64s(k.k);
1481 if (bkey_ge(where, k.k->p))
1484 EBUG_ON(bkey_lt(where, bkey_start_pos(k.k)));
1486 len = where.offset - bkey_start_offset(k.k);
1488 k.k->p.offset = where.offset;
1492 k.k->type = KEY_TYPE_deleted;
1496 switch (k.k->type) {
1497 case KEY_TYPE_inline_data:
1498 case KEY_TYPE_indirect_inline_data:
1499 new_val_u64s = (bkey_inline_data_offset(k.k) +
1500 min(bkey_inline_data_bytes(k.k), k.k->size << 9)) >> 3;
1504 val_u64s_delta = bkey_val_u64s(k.k) - new_val_u64s;
1505 BUG_ON(val_u64s_delta < 0);
1507 set_bkey_val_u64s(k.k, new_val_u64s);
1508 memset(bkey_val_end(k), 0, val_u64s_delta * sizeof(u64));
1509 return -val_u64s_delta;