1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
4 * Copyright (c) 2013 Red Hat, Inc.
9 #include "xfs_shared.h"
10 #include "xfs_format.h"
11 #include "xfs_log_format.h"
12 #include "xfs_trans_resv.h"
14 #include "xfs_mount.h"
16 #include "xfs_dir2_priv.h"
17 #include "xfs_inode.h"
18 #include "xfs_trans.h"
20 #include "xfs_attr_leaf.h"
21 #include "xfs_error.h"
22 #include "xfs_trace.h"
23 #include "xfs_buf_item.h"
29 * Routines to implement directories as Btrees of hashed names.
32 /*========================================================================
33 * Function prototypes for the kernel.
34 *========================================================================*/
37 * Routines used for growing the Btree.
39 STATIC int xfs_da3_root_split(xfs_da_state_t *state,
40 xfs_da_state_blk_t *existing_root,
41 xfs_da_state_blk_t *new_child);
42 STATIC int xfs_da3_node_split(xfs_da_state_t *state,
43 xfs_da_state_blk_t *existing_blk,
44 xfs_da_state_blk_t *split_blk,
45 xfs_da_state_blk_t *blk_to_add,
48 STATIC void xfs_da3_node_rebalance(xfs_da_state_t *state,
49 xfs_da_state_blk_t *node_blk_1,
50 xfs_da_state_blk_t *node_blk_2);
51 STATIC void xfs_da3_node_add(xfs_da_state_t *state,
52 xfs_da_state_blk_t *old_node_blk,
53 xfs_da_state_blk_t *new_node_blk);
56 * Routines used for shrinking the Btree.
58 STATIC int xfs_da3_root_join(xfs_da_state_t *state,
59 xfs_da_state_blk_t *root_blk);
60 STATIC int xfs_da3_node_toosmall(xfs_da_state_t *state, int *retval);
61 STATIC void xfs_da3_node_remove(xfs_da_state_t *state,
62 xfs_da_state_blk_t *drop_blk);
63 STATIC void xfs_da3_node_unbalance(xfs_da_state_t *state,
64 xfs_da_state_blk_t *src_node_blk,
65 xfs_da_state_blk_t *dst_node_blk);
70 STATIC int xfs_da3_blk_unlink(xfs_da_state_t *state,
71 xfs_da_state_blk_t *drop_blk,
72 xfs_da_state_blk_t *save_blk);
75 kmem_zone_t *xfs_da_state_zone; /* anchor for state struct zone */
78 * Allocate a dir-state structure.
79 * We don't put them on the stack since they're large.
82 xfs_da_state_alloc(void)
84 return kmem_zone_zalloc(xfs_da_state_zone, KM_NOFS);
88 * Kill the altpath contents of a da-state structure.
91 xfs_da_state_kill_altpath(xfs_da_state_t *state)
95 for (i = 0; i < state->altpath.active; i++)
96 state->altpath.blk[i].bp = NULL;
97 state->altpath.active = 0;
101 * Free a da-state structure.
104 xfs_da_state_free(xfs_da_state_t *state)
106 xfs_da_state_kill_altpath(state);
108 memset((char *)state, 0, sizeof(*state));
110 kmem_zone_free(xfs_da_state_zone, state);
114 xfs_da3_node_hdr_from_disk(
115 struct xfs_mount *mp,
116 struct xfs_da3_icnode_hdr *to,
117 struct xfs_da_intnode *from)
119 if (xfs_sb_version_hascrc(&mp->m_sb)) {
120 struct xfs_da3_intnode *from3 = (struct xfs_da3_intnode *)from;
122 to->forw = be32_to_cpu(from3->hdr.info.hdr.forw);
123 to->back = be32_to_cpu(from3->hdr.info.hdr.back);
124 to->magic = be16_to_cpu(from3->hdr.info.hdr.magic);
125 to->count = be16_to_cpu(from3->hdr.__count);
126 to->level = be16_to_cpu(from3->hdr.__level);
127 to->btree = from3->__btree;
128 ASSERT(to->magic == XFS_DA3_NODE_MAGIC);
130 to->forw = be32_to_cpu(from->hdr.info.forw);
131 to->back = be32_to_cpu(from->hdr.info.back);
132 to->magic = be16_to_cpu(from->hdr.info.magic);
133 to->count = be16_to_cpu(from->hdr.__count);
134 to->level = be16_to_cpu(from->hdr.__level);
135 to->btree = from->__btree;
136 ASSERT(to->magic == XFS_DA_NODE_MAGIC);
141 xfs_da3_node_hdr_to_disk(
142 struct xfs_mount *mp,
143 struct xfs_da_intnode *to,
144 struct xfs_da3_icnode_hdr *from)
146 if (xfs_sb_version_hascrc(&mp->m_sb)) {
147 struct xfs_da3_intnode *to3 = (struct xfs_da3_intnode *)to;
149 ASSERT(from->magic == XFS_DA3_NODE_MAGIC);
150 to3->hdr.info.hdr.forw = cpu_to_be32(from->forw);
151 to3->hdr.info.hdr.back = cpu_to_be32(from->back);
152 to3->hdr.info.hdr.magic = cpu_to_be16(from->magic);
153 to3->hdr.__count = cpu_to_be16(from->count);
154 to3->hdr.__level = cpu_to_be16(from->level);
156 ASSERT(from->magic == XFS_DA_NODE_MAGIC);
157 to->hdr.info.forw = cpu_to_be32(from->forw);
158 to->hdr.info.back = cpu_to_be32(from->back);
159 to->hdr.info.magic = cpu_to_be16(from->magic);
160 to->hdr.__count = cpu_to_be16(from->count);
161 to->hdr.__level = cpu_to_be16(from->level);
166 * Verify an xfs_da3_blkinfo structure. Note that the da3 fields are only
167 * accessible on v5 filesystems. This header format is common across da node,
168 * attr leaf and dir leaf blocks.
171 xfs_da3_blkinfo_verify(
173 struct xfs_da3_blkinfo *hdr3)
175 struct xfs_mount *mp = bp->b_mount;
176 struct xfs_da_blkinfo *hdr = &hdr3->hdr;
178 if (!xfs_verify_magic16(bp, hdr->magic))
179 return __this_address;
181 if (xfs_sb_version_hascrc(&mp->m_sb)) {
182 if (!uuid_equal(&hdr3->uuid, &mp->m_sb.sb_meta_uuid))
183 return __this_address;
184 if (be64_to_cpu(hdr3->blkno) != bp->b_bn)
185 return __this_address;
186 if (!xfs_log_check_lsn(mp, be64_to_cpu(hdr3->lsn)))
187 return __this_address;
193 static xfs_failaddr_t
197 struct xfs_mount *mp = bp->b_mount;
198 struct xfs_da_intnode *hdr = bp->b_addr;
199 struct xfs_da3_icnode_hdr ichdr;
202 xfs_da3_node_hdr_from_disk(mp, &ichdr, hdr);
204 fa = xfs_da3_blkinfo_verify(bp, bp->b_addr);
208 if (ichdr.level == 0)
209 return __this_address;
210 if (ichdr.level > XFS_DA_NODE_MAXDEPTH)
211 return __this_address;
212 if (ichdr.count == 0)
213 return __this_address;
216 * we don't know if the node is for an attribute or directory tree,
217 * so only fail if the count is outside both bounds
219 if (ichdr.count > mp->m_dir_geo->node_ents &&
220 ichdr.count > mp->m_attr_geo->node_ents)
221 return __this_address;
223 /* XXX: hash order check? */
229 xfs_da3_node_write_verify(
232 struct xfs_mount *mp = bp->b_mount;
233 struct xfs_buf_log_item *bip = bp->b_log_item;
234 struct xfs_da3_node_hdr *hdr3 = bp->b_addr;
237 fa = xfs_da3_node_verify(bp);
239 xfs_verifier_error(bp, -EFSCORRUPTED, fa);
243 if (!xfs_sb_version_hascrc(&mp->m_sb))
247 hdr3->info.lsn = cpu_to_be64(bip->bli_item.li_lsn);
249 xfs_buf_update_cksum(bp, XFS_DA3_NODE_CRC_OFF);
253 * leaf/node format detection on trees is sketchy, so a node read can be done on
254 * leaf level blocks when detection incorrectly identifies the tree as a node
255 * format tree. In this case, we need to swap the verifier to match the correct
256 * format of the block being read.
259 xfs_da3_node_read_verify(
262 struct xfs_da_blkinfo *info = bp->b_addr;
265 switch (be16_to_cpu(info->magic)) {
266 case XFS_DA3_NODE_MAGIC:
267 if (!xfs_buf_verify_cksum(bp, XFS_DA3_NODE_CRC_OFF)) {
268 xfs_verifier_error(bp, -EFSBADCRC,
273 case XFS_DA_NODE_MAGIC:
274 fa = xfs_da3_node_verify(bp);
276 xfs_verifier_error(bp, -EFSCORRUPTED, fa);
278 case XFS_ATTR_LEAF_MAGIC:
279 case XFS_ATTR3_LEAF_MAGIC:
280 bp->b_ops = &xfs_attr3_leaf_buf_ops;
281 bp->b_ops->verify_read(bp);
283 case XFS_DIR2_LEAFN_MAGIC:
284 case XFS_DIR3_LEAFN_MAGIC:
285 bp->b_ops = &xfs_dir3_leafn_buf_ops;
286 bp->b_ops->verify_read(bp);
289 xfs_verifier_error(bp, -EFSCORRUPTED, __this_address);
294 /* Verify the structure of a da3 block. */
295 static xfs_failaddr_t
296 xfs_da3_node_verify_struct(
299 struct xfs_da_blkinfo *info = bp->b_addr;
301 switch (be16_to_cpu(info->magic)) {
302 case XFS_DA3_NODE_MAGIC:
303 case XFS_DA_NODE_MAGIC:
304 return xfs_da3_node_verify(bp);
305 case XFS_ATTR_LEAF_MAGIC:
306 case XFS_ATTR3_LEAF_MAGIC:
307 bp->b_ops = &xfs_attr3_leaf_buf_ops;
308 return bp->b_ops->verify_struct(bp);
309 case XFS_DIR2_LEAFN_MAGIC:
310 case XFS_DIR3_LEAFN_MAGIC:
311 bp->b_ops = &xfs_dir3_leafn_buf_ops;
312 return bp->b_ops->verify_struct(bp);
314 return __this_address;
318 const struct xfs_buf_ops xfs_da3_node_buf_ops = {
319 .name = "xfs_da3_node",
320 .magic16 = { cpu_to_be16(XFS_DA_NODE_MAGIC),
321 cpu_to_be16(XFS_DA3_NODE_MAGIC) },
322 .verify_read = xfs_da3_node_read_verify,
323 .verify_write = xfs_da3_node_write_verify,
324 .verify_struct = xfs_da3_node_verify_struct,
329 struct xfs_trans *tp,
330 struct xfs_inode *dp,
332 xfs_daddr_t mappedbno,
333 struct xfs_buf **bpp,
338 err = xfs_da_read_buf(tp, dp, bno, mappedbno, bpp,
339 which_fork, &xfs_da3_node_buf_ops);
340 if (!err && tp && *bpp) {
341 struct xfs_da_blkinfo *info = (*bpp)->b_addr;
344 switch (be16_to_cpu(info->magic)) {
345 case XFS_DA_NODE_MAGIC:
346 case XFS_DA3_NODE_MAGIC:
347 type = XFS_BLFT_DA_NODE_BUF;
349 case XFS_ATTR_LEAF_MAGIC:
350 case XFS_ATTR3_LEAF_MAGIC:
351 type = XFS_BLFT_ATTR_LEAF_BUF;
353 case XFS_DIR2_LEAFN_MAGIC:
354 case XFS_DIR3_LEAFN_MAGIC:
355 type = XFS_BLFT_DIR_LEAFN_BUF;
358 XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW,
359 tp->t_mountp, info, sizeof(*info));
360 xfs_trans_brelse(tp, *bpp);
362 return -EFSCORRUPTED;
364 xfs_trans_buf_set_type(tp, *bpp, type);
369 /*========================================================================
370 * Routines used for growing the Btree.
371 *========================================================================*/
374 * Create the initial contents of an intermediate node.
378 struct xfs_da_args *args,
381 struct xfs_buf **bpp,
384 struct xfs_da_intnode *node;
385 struct xfs_trans *tp = args->trans;
386 struct xfs_mount *mp = tp->t_mountp;
387 struct xfs_da3_icnode_hdr ichdr = {0};
390 struct xfs_inode *dp = args->dp;
392 trace_xfs_da_node_create(args);
393 ASSERT(level <= XFS_DA_NODE_MAXDEPTH);
395 error = xfs_da_get_buf(tp, dp, blkno, -1, &bp, whichfork);
398 bp->b_ops = &xfs_da3_node_buf_ops;
399 xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DA_NODE_BUF);
402 if (xfs_sb_version_hascrc(&mp->m_sb)) {
403 struct xfs_da3_node_hdr *hdr3 = bp->b_addr;
405 memset(hdr3, 0, sizeof(struct xfs_da3_node_hdr));
406 ichdr.magic = XFS_DA3_NODE_MAGIC;
407 hdr3->info.blkno = cpu_to_be64(bp->b_bn);
408 hdr3->info.owner = cpu_to_be64(args->dp->i_ino);
409 uuid_copy(&hdr3->info.uuid, &mp->m_sb.sb_meta_uuid);
411 ichdr.magic = XFS_DA_NODE_MAGIC;
415 xfs_da3_node_hdr_to_disk(dp->i_mount, node, &ichdr);
416 xfs_trans_log_buf(tp, bp,
417 XFS_DA_LOGRANGE(node, &node->hdr, args->geo->node_hdr_size));
424 * Split a leaf node, rebalance, then possibly split
425 * intermediate nodes, rebalance, etc.
429 struct xfs_da_state *state)
431 struct xfs_da_state_blk *oldblk;
432 struct xfs_da_state_blk *newblk;
433 struct xfs_da_state_blk *addblk;
434 struct xfs_da_intnode *node;
440 trace_xfs_da_split(state->args);
443 * Walk back up the tree splitting/inserting/adjusting as necessary.
444 * If we need to insert and there isn't room, split the node, then
445 * decide into which fragment to insert the new block from below.
446 * Note that we may split the root this way, but we need more fixup.
448 max = state->path.active - 1;
449 ASSERT((max >= 0) && (max < XFS_DA_NODE_MAXDEPTH));
450 ASSERT(state->path.blk[max].magic == XFS_ATTR_LEAF_MAGIC ||
451 state->path.blk[max].magic == XFS_DIR2_LEAFN_MAGIC);
453 addblk = &state->path.blk[max]; /* initial dummy value */
454 for (i = max; (i >= 0) && addblk; state->path.active--, i--) {
455 oldblk = &state->path.blk[i];
456 newblk = &state->altpath.blk[i];
459 * If a leaf node then
460 * Allocate a new leaf node, then rebalance across them.
461 * else if an intermediate node then
462 * We split on the last layer, must we split the node?
464 switch (oldblk->magic) {
465 case XFS_ATTR_LEAF_MAGIC:
466 error = xfs_attr3_leaf_split(state, oldblk, newblk);
467 if ((error != 0) && (error != -ENOSPC)) {
468 return error; /* GROT: attr is inconsistent */
475 * Entry wouldn't fit, split the leaf again. The new
476 * extrablk will be consumed by xfs_da3_node_split if
479 state->extravalid = 1;
481 state->extraafter = 0; /* before newblk */
482 trace_xfs_attr_leaf_split_before(state->args);
483 error = xfs_attr3_leaf_split(state, oldblk,
486 state->extraafter = 1; /* after newblk */
487 trace_xfs_attr_leaf_split_after(state->args);
488 error = xfs_attr3_leaf_split(state, newblk,
492 return error; /* GROT: attr inconsistent */
495 case XFS_DIR2_LEAFN_MAGIC:
496 error = xfs_dir2_leafn_split(state, oldblk, newblk);
501 case XFS_DA_NODE_MAGIC:
502 error = xfs_da3_node_split(state, oldblk, newblk, addblk,
506 return error; /* GROT: dir is inconsistent */
508 * Record the newly split block for the next time thru?
518 * Update the btree to show the new hashval for this child.
520 xfs_da3_fixhashpath(state, &state->path);
526 * xfs_da3_node_split() should have consumed any extra blocks we added
527 * during a double leaf split in the attr fork. This is guaranteed as
528 * we can't be here if the attr fork only has a single leaf block.
530 ASSERT(state->extravalid == 0 ||
531 state->path.blk[max].magic == XFS_DIR2_LEAFN_MAGIC);
534 * Split the root node.
536 ASSERT(state->path.active == 0);
537 oldblk = &state->path.blk[0];
538 error = xfs_da3_root_split(state, oldblk, addblk);
543 * Update pointers to the node which used to be block 0 and just got
544 * bumped because of the addition of a new root node. Note that the
545 * original block 0 could be at any position in the list of blocks in
548 * Note: the magic numbers and sibling pointers are in the same physical
549 * place for both v2 and v3 headers (by design). Hence it doesn't matter
550 * which version of the xfs_da_intnode structure we use here as the
551 * result will be the same using either structure.
553 node = oldblk->bp->b_addr;
554 if (node->hdr.info.forw) {
555 if (be32_to_cpu(node->hdr.info.forw) != addblk->blkno) {
556 xfs_buf_corruption_error(oldblk->bp);
557 error = -EFSCORRUPTED;
560 node = addblk->bp->b_addr;
561 node->hdr.info.back = cpu_to_be32(oldblk->blkno);
562 xfs_trans_log_buf(state->args->trans, addblk->bp,
563 XFS_DA_LOGRANGE(node, &node->hdr.info,
564 sizeof(node->hdr.info)));
566 node = oldblk->bp->b_addr;
567 if (node->hdr.info.back) {
568 if (be32_to_cpu(node->hdr.info.back) != addblk->blkno) {
569 xfs_buf_corruption_error(oldblk->bp);
570 error = -EFSCORRUPTED;
573 node = addblk->bp->b_addr;
574 node->hdr.info.forw = cpu_to_be32(oldblk->blkno);
575 xfs_trans_log_buf(state->args->trans, addblk->bp,
576 XFS_DA_LOGRANGE(node, &node->hdr.info,
577 sizeof(node->hdr.info)));
585 * Split the root. We have to create a new root and point to the two
586 * parts (the split old root) that we just created. Copy block zero to
587 * the EOF, extending the inode in process.
589 STATIC int /* error */
591 struct xfs_da_state *state,
592 struct xfs_da_state_blk *blk1,
593 struct xfs_da_state_blk *blk2)
595 struct xfs_da_intnode *node;
596 struct xfs_da_intnode *oldroot;
597 struct xfs_da_node_entry *btree;
598 struct xfs_da3_icnode_hdr nodehdr;
599 struct xfs_da_args *args;
601 struct xfs_inode *dp;
602 struct xfs_trans *tp;
603 struct xfs_dir2_leaf *leaf;
609 trace_xfs_da_root_split(state->args);
612 * Copy the existing (incorrect) block from the root node position
613 * to a free space somewhere.
616 error = xfs_da_grow_inode(args, &blkno);
622 error = xfs_da_get_buf(tp, dp, blkno, -1, &bp, args->whichfork);
626 oldroot = blk1->bp->b_addr;
627 if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC) ||
628 oldroot->hdr.info.magic == cpu_to_be16(XFS_DA3_NODE_MAGIC)) {
629 struct xfs_da3_icnode_hdr icnodehdr;
631 xfs_da3_node_hdr_from_disk(dp->i_mount, &icnodehdr, oldroot);
632 btree = icnodehdr.btree;
633 size = (int)((char *)&btree[icnodehdr.count] - (char *)oldroot);
634 level = icnodehdr.level;
637 * we are about to copy oldroot to bp, so set up the type
638 * of bp while we know exactly what it will be.
640 xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DA_NODE_BUF);
642 struct xfs_dir3_icleaf_hdr leafhdr;
644 leaf = (xfs_dir2_leaf_t *)oldroot;
645 xfs_dir2_leaf_hdr_from_disk(dp->i_mount, &leafhdr, leaf);
647 ASSERT(leafhdr.magic == XFS_DIR2_LEAFN_MAGIC ||
648 leafhdr.magic == XFS_DIR3_LEAFN_MAGIC);
649 size = (int)((char *)&leafhdr.ents[leafhdr.count] -
654 * we are about to copy oldroot to bp, so set up the type
655 * of bp while we know exactly what it will be.
657 xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DIR_LEAFN_BUF);
661 * we can copy most of the information in the node from one block to
662 * another, but for CRC enabled headers we have to make sure that the
663 * block specific identifiers are kept intact. We update the buffer
666 memcpy(node, oldroot, size);
667 if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DA3_NODE_MAGIC) ||
668 oldroot->hdr.info.magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC)) {
669 struct xfs_da3_intnode *node3 = (struct xfs_da3_intnode *)node;
671 node3->hdr.info.blkno = cpu_to_be64(bp->b_bn);
673 xfs_trans_log_buf(tp, bp, 0, size - 1);
675 bp->b_ops = blk1->bp->b_ops;
676 xfs_trans_buf_copy_type(bp, blk1->bp);
681 * Set up the new root node.
683 error = xfs_da3_node_create(args,
684 (args->whichfork == XFS_DATA_FORK) ? args->geo->leafblk : 0,
685 level + 1, &bp, args->whichfork);
690 xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr, node);
691 btree = nodehdr.btree;
692 btree[0].hashval = cpu_to_be32(blk1->hashval);
693 btree[0].before = cpu_to_be32(blk1->blkno);
694 btree[1].hashval = cpu_to_be32(blk2->hashval);
695 btree[1].before = cpu_to_be32(blk2->blkno);
697 xfs_da3_node_hdr_to_disk(dp->i_mount, node, &nodehdr);
700 if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
701 oldroot->hdr.info.magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC)) {
702 ASSERT(blk1->blkno >= args->geo->leafblk &&
703 blk1->blkno < args->geo->freeblk);
704 ASSERT(blk2->blkno >= args->geo->leafblk &&
705 blk2->blkno < args->geo->freeblk);
709 /* Header is already logged by xfs_da_node_create */
710 xfs_trans_log_buf(tp, bp,
711 XFS_DA_LOGRANGE(node, btree, sizeof(xfs_da_node_entry_t) * 2));
717 * Split the node, rebalance, then add the new entry.
719 STATIC int /* error */
721 struct xfs_da_state *state,
722 struct xfs_da_state_blk *oldblk,
723 struct xfs_da_state_blk *newblk,
724 struct xfs_da_state_blk *addblk,
728 struct xfs_da_intnode *node;
729 struct xfs_da3_icnode_hdr nodehdr;
734 struct xfs_inode *dp = state->args->dp;
736 trace_xfs_da_node_split(state->args);
738 node = oldblk->bp->b_addr;
739 xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr, node);
742 * With V2 dirs the extra block is data or freespace.
744 useextra = state->extravalid && state->args->whichfork == XFS_ATTR_FORK;
745 newcount = 1 + useextra;
747 * Do we have to split the node?
749 if (nodehdr.count + newcount > state->args->geo->node_ents) {
751 * Allocate a new node, add to the doubly linked chain of
752 * nodes, then move some of our excess entries into it.
754 error = xfs_da_grow_inode(state->args, &blkno);
756 return error; /* GROT: dir is inconsistent */
758 error = xfs_da3_node_create(state->args, blkno, treelevel,
759 &newblk->bp, state->args->whichfork);
761 return error; /* GROT: dir is inconsistent */
762 newblk->blkno = blkno;
763 newblk->magic = XFS_DA_NODE_MAGIC;
764 xfs_da3_node_rebalance(state, oldblk, newblk);
765 error = xfs_da3_blk_link(state, oldblk, newblk);
774 * Insert the new entry(s) into the correct block
775 * (updating last hashval in the process).
777 * xfs_da3_node_add() inserts BEFORE the given index,
778 * and as a result of using node_lookup_int() we always
779 * point to a valid entry (not after one), but a split
780 * operation always results in a new block whose hashvals
781 * FOLLOW the current block.
783 * If we had double-split op below us, then add the extra block too.
785 node = oldblk->bp->b_addr;
786 xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr, node);
787 if (oldblk->index <= nodehdr.count) {
789 xfs_da3_node_add(state, oldblk, addblk);
791 if (state->extraafter)
793 xfs_da3_node_add(state, oldblk, &state->extrablk);
794 state->extravalid = 0;
798 xfs_da3_node_add(state, newblk, addblk);
800 if (state->extraafter)
802 xfs_da3_node_add(state, newblk, &state->extrablk);
803 state->extravalid = 0;
811 * Balance the btree elements between two intermediate nodes,
812 * usually one full and one empty.
814 * NOTE: if blk2 is empty, then it will get the upper half of blk1.
817 xfs_da3_node_rebalance(
818 struct xfs_da_state *state,
819 struct xfs_da_state_blk *blk1,
820 struct xfs_da_state_blk *blk2)
822 struct xfs_da_intnode *node1;
823 struct xfs_da_intnode *node2;
824 struct xfs_da_intnode *tmpnode;
825 struct xfs_da_node_entry *btree1;
826 struct xfs_da_node_entry *btree2;
827 struct xfs_da_node_entry *btree_s;
828 struct xfs_da_node_entry *btree_d;
829 struct xfs_da3_icnode_hdr nodehdr1;
830 struct xfs_da3_icnode_hdr nodehdr2;
831 struct xfs_trans *tp;
835 struct xfs_inode *dp = state->args->dp;
837 trace_xfs_da_node_rebalance(state->args);
839 node1 = blk1->bp->b_addr;
840 node2 = blk2->bp->b_addr;
841 xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr1, node1);
842 xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr2, node2);
843 btree1 = nodehdr1.btree;
844 btree2 = nodehdr2.btree;
847 * Figure out how many entries need to move, and in which direction.
848 * Swap the nodes around if that makes it simpler.
850 if (nodehdr1.count > 0 && nodehdr2.count > 0 &&
851 ((be32_to_cpu(btree2[0].hashval) < be32_to_cpu(btree1[0].hashval)) ||
852 (be32_to_cpu(btree2[nodehdr2.count - 1].hashval) <
853 be32_to_cpu(btree1[nodehdr1.count - 1].hashval)))) {
857 xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr1, node1);
858 xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr2, node2);
859 btree1 = nodehdr1.btree;
860 btree2 = nodehdr2.btree;
864 count = (nodehdr1.count - nodehdr2.count) / 2;
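	/*
	 * Illustrative example, not in the original source: if node1 holds
	 * 100 entries and node2 is empty, count = (100 - 0) / 2 = 50, so the
	 * upper 50 entries of node1 move into node2; a negative count moves
	 * entries the other way (from low in node2 to high in node1).
	 */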
867 tp = state->args->trans;
869 * Two cases: high-to-low and low-to-high.
873 * Move elements in node2 up to make a hole.
875 tmp = nodehdr2.count;
877 tmp *= (uint)sizeof(xfs_da_node_entry_t);
878 btree_s = &btree2[0];
879 btree_d = &btree2[count];
880 memmove(btree_d, btree_s, tmp);
884 * Move the req'd B-tree elements from high in node1 to
887 nodehdr2.count += count;
888 tmp = count * (uint)sizeof(xfs_da_node_entry_t);
889 btree_s = &btree1[nodehdr1.count - count];
890 btree_d = &btree2[0];
891 memcpy(btree_d, btree_s, tmp);
892 nodehdr1.count -= count;
895 * Move the req'd B-tree elements from low in node2 to
899 tmp = count * (uint)sizeof(xfs_da_node_entry_t);
900 btree_s = &btree2[0];
901 btree_d = &btree1[nodehdr1.count];
902 memcpy(btree_d, btree_s, tmp);
903 nodehdr1.count += count;
905 xfs_trans_log_buf(tp, blk1->bp,
906 XFS_DA_LOGRANGE(node1, btree_d, tmp));
909 * Move elements in node2 down to fill the hole.
911 tmp = nodehdr2.count - count;
912 tmp *= (uint)sizeof(xfs_da_node_entry_t);
913 btree_s = &btree2[count];
914 btree_d = &btree2[0];
915 memmove(btree_d, btree_s, tmp);
916 nodehdr2.count -= count;
920 * Log header of node 1 and all current bits of node 2.
922 xfs_da3_node_hdr_to_disk(dp->i_mount, node1, &nodehdr1);
923 xfs_trans_log_buf(tp, blk1->bp,
924 XFS_DA_LOGRANGE(node1, &node1->hdr,
925 state->args->geo->node_hdr_size));
927 xfs_da3_node_hdr_to_disk(dp->i_mount, node2, &nodehdr2);
928 xfs_trans_log_buf(tp, blk2->bp,
929 XFS_DA_LOGRANGE(node2, &node2->hdr,
930 state->args->geo->node_hdr_size +
931 (sizeof(btree2[0]) * nodehdr2.count)));
934 * Record the last hashval from each block for upward propagation.
935 * (note: don't use the swapped node pointers)
938 node1 = blk1->bp->b_addr;
939 node2 = blk2->bp->b_addr;
940 xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr1, node1);
941 xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr2, node2);
942 btree1 = nodehdr1.btree;
943 btree2 = nodehdr2.btree;
945 blk1->hashval = be32_to_cpu(btree1[nodehdr1.count - 1].hashval);
946 blk2->hashval = be32_to_cpu(btree2[nodehdr2.count - 1].hashval);
949 * Adjust the expected index for insertion.
951 if (blk1->index >= nodehdr1.count) {
952 blk2->index = blk1->index - nodehdr1.count;
953 blk1->index = nodehdr1.count + 1; /* make it invalid */
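	/*
	 * Illustrative example, not in the original source: if blk1 ends up
	 * with 6 entries after the rebalance and the original insertion index
	 * was 8, the new entry belongs in blk2 at index 8 - 6 = 2, and
	 * blk1->index is set to 7 to mark it as unused.
	 */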
958 * Add a new entry to an intermediate node.
962 struct xfs_da_state *state,
963 struct xfs_da_state_blk *oldblk,
964 struct xfs_da_state_blk *newblk)
966 struct xfs_da_intnode *node;
967 struct xfs_da3_icnode_hdr nodehdr;
968 struct xfs_da_node_entry *btree;
970 struct xfs_inode *dp = state->args->dp;
972 trace_xfs_da_node_add(state->args);
974 node = oldblk->bp->b_addr;
975 xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr, node);
976 btree = nodehdr.btree;
978 ASSERT(oldblk->index >= 0 && oldblk->index <= nodehdr.count);
979 ASSERT(newblk->blkno != 0);
980 if (state->args->whichfork == XFS_DATA_FORK)
981 ASSERT(newblk->blkno >= state->args->geo->leafblk &&
982 newblk->blkno < state->args->geo->freeblk);
985 * We may need to make some room before we insert the new node.
988 if (oldblk->index < nodehdr.count) {
989 tmp = (nodehdr.count - oldblk->index) * (uint)sizeof(*btree);
990 memmove(&btree[oldblk->index + 1], &btree[oldblk->index], tmp);
992 btree[oldblk->index].hashval = cpu_to_be32(newblk->hashval);
993 btree[oldblk->index].before = cpu_to_be32(newblk->blkno);
994 xfs_trans_log_buf(state->args->trans, oldblk->bp,
995 XFS_DA_LOGRANGE(node, &btree[oldblk->index],
996 tmp + sizeof(*btree)));
999 xfs_da3_node_hdr_to_disk(dp->i_mount, node, &nodehdr);
1000 xfs_trans_log_buf(state->args->trans, oldblk->bp,
1001 XFS_DA_LOGRANGE(node, &node->hdr,
1002 state->args->geo->node_hdr_size));
1005 * Copy the last hash value from the oldblk to propagate upwards.
1007 oldblk->hashval = be32_to_cpu(btree[nodehdr.count - 1].hashval);
1010 /*========================================================================
1011 * Routines used for shrinking the Btree.
1012 *========================================================================*/
1015 * Deallocate an empty leaf node, remove it from its parent,
1016 * possibly deallocating that block, etc...
1020 struct xfs_da_state *state)
1022 struct xfs_da_state_blk *drop_blk;
1023 struct xfs_da_state_blk *save_blk;
1027 trace_xfs_da_join(state->args);
1029 drop_blk = &state->path.blk[ state->path.active-1 ];
1030 save_blk = &state->altpath.blk[ state->path.active-1 ];
1031 ASSERT(state->path.blk[0].magic == XFS_DA_NODE_MAGIC);
1032 ASSERT(drop_blk->magic == XFS_ATTR_LEAF_MAGIC ||
1033 drop_blk->magic == XFS_DIR2_LEAFN_MAGIC);
1036 * Walk back up the tree joining/deallocating as necessary.
1037 * When we stop dropping blocks, break out.
1039 for ( ; state->path.active >= 2; drop_blk--, save_blk--,
1040 state->path.active--) {
1042 * See if we can combine the block with a neighbor.
1043 * (action == 0) => no options, just leave
1044 * (action == 1) => coalesce, then unlink
1045 * (action == 2) => block empty, unlink it
1047 switch (drop_blk->magic) {
1048 case XFS_ATTR_LEAF_MAGIC:
1049 error = xfs_attr3_leaf_toosmall(state, &action);
1054 xfs_attr3_leaf_unbalance(state, drop_blk, save_blk);
1056 case XFS_DIR2_LEAFN_MAGIC:
1057 error = xfs_dir2_leafn_toosmall(state, &action);
1062 xfs_dir2_leafn_unbalance(state, drop_blk, save_blk);
1064 case XFS_DA_NODE_MAGIC:
1066 * Remove the offending node, fixup hashvals,
1067 * check for a toosmall neighbor.
1069 xfs_da3_node_remove(state, drop_blk);
1070 xfs_da3_fixhashpath(state, &state->path);
1071 error = xfs_da3_node_toosmall(state, &action);
1076 xfs_da3_node_unbalance(state, drop_blk, save_blk);
1079 xfs_da3_fixhashpath(state, &state->altpath);
1080 error = xfs_da3_blk_unlink(state, drop_blk, save_blk);
1081 xfs_da_state_kill_altpath(state);
1084 error = xfs_da_shrink_inode(state->args, drop_blk->blkno,
1086 drop_blk->bp = NULL;
1091 * We joined all the way to the top. If it turns out that
1092 * we only have one entry in the root, make the child block
1095 xfs_da3_node_remove(state, drop_blk);
1096 xfs_da3_fixhashpath(state, &state->path);
1097 error = xfs_da3_root_join(state, &state->path.blk[0]);
1103 xfs_da_blkinfo_onlychild_validate(struct xfs_da_blkinfo *blkinfo, __u16 level)
1105 __be16 magic = blkinfo->magic;
1108 ASSERT(magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
1109 magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC) ||
1110 magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC) ||
1111 magic == cpu_to_be16(XFS_ATTR3_LEAF_MAGIC));
1113 ASSERT(magic == cpu_to_be16(XFS_DA_NODE_MAGIC) ||
1114 magic == cpu_to_be16(XFS_DA3_NODE_MAGIC));
1116 ASSERT(!blkinfo->forw);
1117 ASSERT(!blkinfo->back);
1120 #define xfs_da_blkinfo_onlychild_validate(blkinfo, level)
1124 * We have only one entry in the root. Copy the only remaining child of
1125 * the old root to block 0 as the new root node.
1129 struct xfs_da_state *state,
1130 struct xfs_da_state_blk *root_blk)
1132 struct xfs_da_intnode *oldroot;
1133 struct xfs_da_args *args;
1136 struct xfs_da3_icnode_hdr oldroothdr;
1138 struct xfs_inode *dp = state->args->dp;
1140 trace_xfs_da_root_join(state->args);
1142 ASSERT(root_blk->magic == XFS_DA_NODE_MAGIC);
1145 oldroot = root_blk->bp->b_addr;
1146 xfs_da3_node_hdr_from_disk(dp->i_mount, &oldroothdr, oldroot);
1147 ASSERT(oldroothdr.forw == 0);
1148 ASSERT(oldroothdr.back == 0);
1151 * If the root has more than one child, then don't do anything.
1153 if (oldroothdr.count > 1)
1157 * Read in the (only) child block, then copy those bytes into
1158 * the root block's buffer and free the original child block.
1160 child = be32_to_cpu(oldroothdr.btree[0].before);
1162 error = xfs_da3_node_read(args->trans, dp, child, -1, &bp,
1166 xfs_da_blkinfo_onlychild_validate(bp->b_addr, oldroothdr.level);
1169 * This could be copying a leaf back into the root block in the case of
1170 * there only being a single leaf block left in the tree. Hence we have
1171 * to update the b_ops pointer as well to match the buffer type change
1172 * that could occur. For dir3 blocks we also need to update the block
1173 * number in the buffer header.
1175 memcpy(root_blk->bp->b_addr, bp->b_addr, args->geo->blksize);
1176 root_blk->bp->b_ops = bp->b_ops;
1177 xfs_trans_buf_copy_type(root_blk->bp, bp);
1178 if (oldroothdr.magic == XFS_DA3_NODE_MAGIC) {
1179 struct xfs_da3_blkinfo *da3 = root_blk->bp->b_addr;
1180 da3->blkno = cpu_to_be64(root_blk->bp->b_bn);
1182 xfs_trans_log_buf(args->trans, root_blk->bp, 0,
1183 args->geo->blksize - 1);
1184 error = xfs_da_shrink_inode(args, child, bp);
1189 * Check a node block and its neighbors to see if the block should be
1190 * collapsed into one or the other neighbor. Always keep the block
1191 * with the smaller block number.
1192 * If the current block is over 50% full, don't try to join it, return 0.
1193 * If the block is empty, fill in the state structure and return 2.
1194 * If it can be collapsed, fill in the state structure and return 1.
1195 * If nothing can be done, return 0.
1198 xfs_da3_node_toosmall(
1199 struct xfs_da_state *state,
1202 struct xfs_da_intnode *node;
1203 struct xfs_da_state_blk *blk;
1204 struct xfs_da_blkinfo *info;
1207 struct xfs_da3_icnode_hdr nodehdr;
1213 struct xfs_inode *dp = state->args->dp;
1215 trace_xfs_da_node_toosmall(state->args);
1218 * Check for the degenerate case of the block being over 50% full.
1219 * If so, it's not worth even looking to see if we might be able
1220 * to coalesce with a sibling.
1222 blk = &state->path.blk[ state->path.active-1 ];
1223 info = blk->bp->b_addr;
1224 node = (xfs_da_intnode_t *)info;
1225 xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr, node);
1226 if (nodehdr.count > (state->args->geo->node_ents >> 1)) {
1227 *action = 0; /* blk over 50%, don't try to join */
1228 return 0; /* blk over 50%, don't try to join */
1232 * Check for the degenerate case of the block being empty.
1233 * If the block is empty, we'll simply delete it, no need to
1234 * coalesce it with a sibling block. We choose (arbitrarily)
1235 * to merge with the forward block unless it is NULL.
1237 if (nodehdr.count == 0) {
1239 * Make altpath point to the block we want to keep and
1240 * path point to the block we want to drop (this one).
1242 forward = (info->forw != 0);
1243 memcpy(&state->altpath, &state->path, sizeof(state->path));
1244 error = xfs_da3_path_shift(state, &state->altpath, forward,
1257 * Examine each sibling block to see if we can coalesce with
1258 * at least 25% free space to spare. We need to figure out
1259 * whether to merge with the forward or the backward block.
1260 * We prefer coalescing with the lower numbered sibling so as
1261 * to shrink a directory over time.
1263 count = state->args->geo->node_ents;
1264 count -= state->args->geo->node_ents >> 2;
1265 count -= nodehdr.count;
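	/*
	 * Illustrative example, not in the original source: with
	 * node_ents = 64 and 20 entries in this block, count becomes
	 * 64 - 16 - 20 = 28, so a sibling holding at most 28 entries can be
	 * absorbed while still leaving roughly 25% of the merged block free.
	 */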
1267 /* start with smaller blk num */
1268 forward = nodehdr.forw < nodehdr.back;
1269 for (i = 0; i < 2; forward = !forward, i++) {
1270 struct xfs_da3_icnode_hdr thdr;
1272 blkno = nodehdr.forw;
1274 blkno = nodehdr.back;
1277 error = xfs_da3_node_read(state->args->trans, dp,
1278 blkno, -1, &bp, state->args->whichfork);
1283 xfs_da3_node_hdr_from_disk(dp->i_mount, &thdr, node);
1284 xfs_trans_brelse(state->args->trans, bp);
1286 if (count - thdr.count >= 0)
1287 break; /* fits with at least 25% to spare */
1295 * Make altpath point to the block we want to keep (the lower
1296 * numbered block) and path point to the block we want to drop.
1298 memcpy(&state->altpath, &state->path, sizeof(state->path));
1299 if (blkno < blk->blkno) {
1300 error = xfs_da3_path_shift(state, &state->altpath, forward,
1303 error = xfs_da3_path_shift(state, &state->path, forward,
1317 * Pick up the last hashvalue from an intermediate node.
1320 xfs_da3_node_lasthash(
1321 struct xfs_inode *dp,
1325 struct xfs_da3_icnode_hdr nodehdr;
1327 xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr, bp->b_addr);
1329 *count = nodehdr.count;
1332 return be32_to_cpu(nodehdr.btree[nodehdr.count - 1].hashval);
1336 * Walk back up the tree adjusting hash values as necessary;
1337 * when we stop making changes, return.
1340 xfs_da3_fixhashpath(
1341 struct xfs_da_state *state,
1342 struct xfs_da_state_path *path)
1344 struct xfs_da_state_blk *blk;
1345 struct xfs_da_intnode *node;
1346 struct xfs_da_node_entry *btree;
1347 xfs_dahash_t lasthash=0;
1350 struct xfs_inode *dp = state->args->dp;
1352 trace_xfs_da_fixhashpath(state->args);
1354 level = path->active-1;
1355 blk = &path->blk[ level ];
1356 switch (blk->magic) {
1357 case XFS_ATTR_LEAF_MAGIC:
1358 lasthash = xfs_attr_leaf_lasthash(blk->bp, &count);
1362 case XFS_DIR2_LEAFN_MAGIC:
1363 lasthash = xfs_dir2_leaf_lasthash(dp, blk->bp, &count);
1367 case XFS_DA_NODE_MAGIC:
1368 lasthash = xfs_da3_node_lasthash(dp, blk->bp, &count);
1373 for (blk--, level--; level >= 0; blk--, level--) {
1374 struct xfs_da3_icnode_hdr nodehdr;
1376 node = blk->bp->b_addr;
1377 xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr, node);
1378 btree = nodehdr.btree;
1379 if (be32_to_cpu(btree[blk->index].hashval) == lasthash)
1381 blk->hashval = lasthash;
1382 btree[blk->index].hashval = cpu_to_be32(lasthash);
1383 xfs_trans_log_buf(state->args->trans, blk->bp,
1384 XFS_DA_LOGRANGE(node, &btree[blk->index],
1387 lasthash = be32_to_cpu(btree[nodehdr.count - 1].hashval);
1392 * Remove an entry from an intermediate node.
1395 xfs_da3_node_remove(
1396 struct xfs_da_state *state,
1397 struct xfs_da_state_blk *drop_blk)
1399 struct xfs_da_intnode *node;
1400 struct xfs_da3_icnode_hdr nodehdr;
1401 struct xfs_da_node_entry *btree;
1404 struct xfs_inode *dp = state->args->dp;
1406 trace_xfs_da_node_remove(state->args);
1408 node = drop_blk->bp->b_addr;
1409 xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr, node);
1410 ASSERT(drop_blk->index < nodehdr.count);
1411 ASSERT(drop_blk->index >= 0);
1414 * Copy over the offending entry, or just zero it out.
1416 index = drop_blk->index;
1417 btree = nodehdr.btree;
1418 if (index < nodehdr.count - 1) {
1419 tmp = nodehdr.count - index - 1;
1420 tmp *= (uint)sizeof(xfs_da_node_entry_t);
1421 memmove(&btree[index], &btree[index + 1], tmp);
1422 xfs_trans_log_buf(state->args->trans, drop_blk->bp,
1423 XFS_DA_LOGRANGE(node, &btree[index], tmp));
1424 index = nodehdr.count - 1;
1426 memset(&btree[index], 0, sizeof(xfs_da_node_entry_t));
1427 xfs_trans_log_buf(state->args->trans, drop_blk->bp,
1428 XFS_DA_LOGRANGE(node, &btree[index], sizeof(btree[index])));
1430 xfs_da3_node_hdr_to_disk(dp->i_mount, node, &nodehdr);
1431 xfs_trans_log_buf(state->args->trans, drop_blk->bp,
1432 XFS_DA_LOGRANGE(node, &node->hdr, state->args->geo->node_hdr_size));
1435 * Copy the last hash value from the block to propagate upwards.
1437 drop_blk->hashval = be32_to_cpu(btree[index - 1].hashval);
1441 * Unbalance the elements between two intermediate nodes by
1442 * moving all Btree elements from one node into the other.
1445 xfs_da3_node_unbalance(
1446 struct xfs_da_state *state,
1447 struct xfs_da_state_blk *drop_blk,
1448 struct xfs_da_state_blk *save_blk)
1450 struct xfs_da_intnode *drop_node;
1451 struct xfs_da_intnode *save_node;
1452 struct xfs_da_node_entry *drop_btree;
1453 struct xfs_da_node_entry *save_btree;
1454 struct xfs_da3_icnode_hdr drop_hdr;
1455 struct xfs_da3_icnode_hdr save_hdr;
1456 struct xfs_trans *tp;
1459 struct xfs_inode *dp = state->args->dp;
1461 trace_xfs_da_node_unbalance(state->args);
1463 drop_node = drop_blk->bp->b_addr;
1464 save_node = save_blk->bp->b_addr;
1465 xfs_da3_node_hdr_from_disk(dp->i_mount, &drop_hdr, drop_node);
1466 xfs_da3_node_hdr_from_disk(dp->i_mount, &save_hdr, save_node);
1467 drop_btree = drop_hdr.btree;
1468 save_btree = save_hdr.btree;
1469 tp = state->args->trans;
1472 * If the dying block has lower hashvals, then move all the
1473 * elements in the remaining block up to make a hole.
1475 if ((be32_to_cpu(drop_btree[0].hashval) <
1476 be32_to_cpu(save_btree[0].hashval)) ||
1477 (be32_to_cpu(drop_btree[drop_hdr.count - 1].hashval) <
1478 be32_to_cpu(save_btree[save_hdr.count - 1].hashval))) {
1479 /* XXX: check this - is memmove dst correct? */
1480 tmp = save_hdr.count * sizeof(xfs_da_node_entry_t);
1481 memmove(&save_btree[drop_hdr.count], &save_btree[0], tmp);
1484 xfs_trans_log_buf(tp, save_blk->bp,
1485 XFS_DA_LOGRANGE(save_node, &save_btree[0],
1486 (save_hdr.count + drop_hdr.count) *
1487 sizeof(xfs_da_node_entry_t)));
1489 sindex = save_hdr.count;
1490 xfs_trans_log_buf(tp, save_blk->bp,
1491 XFS_DA_LOGRANGE(save_node, &save_btree[sindex],
1492 drop_hdr.count * sizeof(xfs_da_node_entry_t)));
1496 * Move all the B-tree elements from drop_blk to save_blk.
1498 tmp = drop_hdr.count * (uint)sizeof(xfs_da_node_entry_t);
1499 memcpy(&save_btree[sindex], &drop_btree[0], tmp);
1500 save_hdr.count += drop_hdr.count;
1502 xfs_da3_node_hdr_to_disk(dp->i_mount, save_node, &save_hdr);
1503 xfs_trans_log_buf(tp, save_blk->bp,
1504 XFS_DA_LOGRANGE(save_node, &save_node->hdr,
1505 state->args->geo->node_hdr_size));
1508 * Save the last hashval in the remaining block for upward propagation.
1510 save_blk->hashval = be32_to_cpu(save_btree[save_hdr.count - 1].hashval);
1513 /*========================================================================
1514 * Routines used for finding things in the Btree.
1515 *========================================================================*/
1518 * Walk down the Btree looking for a particular filename, filling
1519 * in the state structure as we go.
1521 * We will set the state structure to point to each of the elements
1522 * in each of the nodes where either the hashval is or should be.
1524 * We support duplicate hashvals, so for each entry in the current
1525 * node that could contain the desired hashval, descend. This is a
1526 * pruned depth-first tree search.
1529 xfs_da3_node_lookup_int(
1530 struct xfs_da_state *state,
1533 struct xfs_da_state_blk *blk;
1534 struct xfs_da_blkinfo *curr;
1535 struct xfs_da_intnode *node;
1536 struct xfs_da_node_entry *btree;
1537 struct xfs_da3_icnode_hdr nodehdr;
1538 struct xfs_da_args *args;
1540 xfs_dahash_t hashval;
1541 xfs_dahash_t btreehashval;
1547 unsigned int expected_level = 0;
1549 struct xfs_inode *dp = state->args->dp;
1554 * Descend thru the B-tree searching each level for the right
1555 * node to use, until the right hashval is found.
1557 blkno = args->geo->leafblk;
1558 for (blk = &state->path.blk[0], state->path.active = 1;
1559 state->path.active <= XFS_DA_NODE_MAXDEPTH;
1560 blk++, state->path.active++) {
1562 * Read the next node down in the tree.
1565 error = xfs_da3_node_read(args->trans, args->dp, blkno,
1566 -1, &blk->bp, args->whichfork);
1569 state->path.active--;
1572 curr = blk->bp->b_addr;
1573 magic = be16_to_cpu(curr->magic);
1575 if (magic == XFS_ATTR_LEAF_MAGIC ||
1576 magic == XFS_ATTR3_LEAF_MAGIC) {
1577 blk->magic = XFS_ATTR_LEAF_MAGIC;
1578 blk->hashval = xfs_attr_leaf_lasthash(blk->bp, NULL);
1582 if (magic == XFS_DIR2_LEAFN_MAGIC ||
1583 magic == XFS_DIR3_LEAFN_MAGIC) {
1584 blk->magic = XFS_DIR2_LEAFN_MAGIC;
1585 blk->hashval = xfs_dir2_leaf_lasthash(args->dp,
1590 if (magic != XFS_DA_NODE_MAGIC && magic != XFS_DA3_NODE_MAGIC) {
1591 xfs_buf_corruption_error(blk->bp);
1592 return -EFSCORRUPTED;
1595 blk->magic = XFS_DA_NODE_MAGIC;
1598 * Search an intermediate node for a match.
1600 node = blk->bp->b_addr;
1601 xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr, node);
1602 btree = nodehdr.btree;
1604 /* Tree taller than we can handle; bail out! */
1605 if (nodehdr.level >= XFS_DA_NODE_MAXDEPTH) {
1606 xfs_buf_corruption_error(blk->bp);
1607 return -EFSCORRUPTED;
1610 /* Check the level from the root. */
1611 if (blkno == args->geo->leafblk)
1612 expected_level = nodehdr.level - 1;
1613 else if (expected_level != nodehdr.level) {
1614 xfs_buf_corruption_error(blk->bp);
1615 return -EFSCORRUPTED;
1619 max = nodehdr.count;
1620 blk->hashval = be32_to_cpu(btree[max - 1].hashval);
1623 * Binary search. (note: small blocks will skip loop)
1625 probe = span = max / 2;
1626 hashval = args->hashval;
1629 btreehashval = be32_to_cpu(btree[probe].hashval);
1630 if (btreehashval < hashval)
1632 else if (btreehashval > hashval)
1637 ASSERT((probe >= 0) && (probe < max));
1638 ASSERT((span <= 4) ||
1639 (be32_to_cpu(btree[probe].hashval) == hashval));
1642 * Since we may have duplicate hashvals, find the first
1643 * matching hashval in the node.
1646 be32_to_cpu(btree[probe].hashval) >= hashval) {
1649 while (probe < max &&
1650 be32_to_cpu(btree[probe].hashval) < hashval) {
1655 * Pick the right block to descend on.
1658 blk->index = max - 1;
1659 blkno = be32_to_cpu(btree[max - 1].before);
1662 blkno = be32_to_cpu(btree[probe].before);
1665 /* We can't point back to the root. */
1666 if (XFS_IS_CORRUPT(dp->i_mount, blkno == args->geo->leafblk))
1667 return -EFSCORRUPTED;
1670 if (XFS_IS_CORRUPT(dp->i_mount, expected_level != 0))
1671 return -EFSCORRUPTED;
1674 * A leaf block that ends in the hashval that we are interested in
1675 * (final hashval == search hashval) means that the next block may
1676 * contain more entries with the same hashval; shift upward to the
1677 * next leaf and keep searching.
1680 if (blk->magic == XFS_DIR2_LEAFN_MAGIC) {
1681 retval = xfs_dir2_leafn_lookup_int(blk->bp, args,
1682 &blk->index, state);
1683 } else if (blk->magic == XFS_ATTR_LEAF_MAGIC) {
1684 retval = xfs_attr3_leaf_lookup_int(blk->bp, args);
1685 blk->index = args->index;
1686 args->blkno = blk->blkno;
1689 return -EFSCORRUPTED;
1691 if (((retval == -ENOENT) || (retval == -ENOATTR)) &&
1692 (blk->hashval == args->hashval)) {
1693 error = xfs_da3_path_shift(state, &state->path, 1, 1,
1699 } else if (blk->magic == XFS_ATTR_LEAF_MAGIC) {
1700 /* path_shift() gives ENOENT */
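/*
 * Illustrative sketch, not part of the original file: the duplicate-aware
 * binary search that xfs_da3_node_lookup_int() performs above, written as
 * a standalone helper over a plain sorted array of hashvals (the helper
 * name and signature are made up for illustration). It returns the index
 * of the first entry whose hashval is >= the search value, or count - 1
 * when every entry is smaller, mirroring the "find the first matching
 * hashval, then pick the block to descend on" logic. Assumes count > 0.
 */
static int
xfs_da_hash_probe_sketch(
	const xfs_dahash_t	*hashvals,	/* sorted ascending */
	int			count,
	xfs_dahash_t		hashval)
{
	int	probe, span;

	probe = span = count / 2;
	while (span > 4) {
		span /= 2;
		if (hashvals[probe] < hashval)
			probe += span;
		else if (hashvals[probe] > hashval)
			probe -= span;
		else
			break;
	}
	/* back up over duplicates, then skip entries that hash lower */
	while (probe > 0 && hashvals[probe] >= hashval)
		probe--;
	while (probe < count && hashvals[probe] < hashval)
		probe++;
	return probe < count ? probe : count - 1;
}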
1710 /*========================================================================
1712 *========================================================================*/
1715 * Compare two intermediate nodes for "order".
1719 struct xfs_inode *dp,
1720 struct xfs_buf *node1_bp,
1721 struct xfs_buf *node2_bp)
1723 struct xfs_da_intnode *node1;
1724 struct xfs_da_intnode *node2;
1725 struct xfs_da_node_entry *btree1;
1726 struct xfs_da_node_entry *btree2;
1727 struct xfs_da3_icnode_hdr node1hdr;
1728 struct xfs_da3_icnode_hdr node2hdr;
1730 node1 = node1_bp->b_addr;
1731 node2 = node2_bp->b_addr;
1732 xfs_da3_node_hdr_from_disk(dp->i_mount, &node1hdr, node1);
1733 xfs_da3_node_hdr_from_disk(dp->i_mount, &node2hdr, node2);
1734 btree1 = node1hdr.btree;
1735 btree2 = node2hdr.btree;
1737 if (node1hdr.count > 0 && node2hdr.count > 0 &&
1738 ((be32_to_cpu(btree2[0].hashval) < be32_to_cpu(btree1[0].hashval)) ||
1739 (be32_to_cpu(btree2[node2hdr.count - 1].hashval) <
1740 be32_to_cpu(btree1[node1hdr.count - 1].hashval)))) {
1747 * Link a new block into a doubly linked list of blocks (of whatever type).
1751 struct xfs_da_state *state,
1752 struct xfs_da_state_blk *old_blk,
1753 struct xfs_da_state_blk *new_blk)
1755 struct xfs_da_blkinfo *old_info;
1756 struct xfs_da_blkinfo *new_info;
1757 struct xfs_da_blkinfo *tmp_info;
1758 struct xfs_da_args *args;
1762 struct xfs_inode *dp = state->args->dp;
1765 * Set up environment.
1768 ASSERT(args != NULL);
1769 old_info = old_blk->bp->b_addr;
1770 new_info = new_blk->bp->b_addr;
1771 ASSERT(old_blk->magic == XFS_DA_NODE_MAGIC ||
1772 old_blk->magic == XFS_DIR2_LEAFN_MAGIC ||
1773 old_blk->magic == XFS_ATTR_LEAF_MAGIC);
1775 switch (old_blk->magic) {
1776 case XFS_ATTR_LEAF_MAGIC:
1777 before = xfs_attr_leaf_order(old_blk->bp, new_blk->bp);
1779 case XFS_DIR2_LEAFN_MAGIC:
1780 before = xfs_dir2_leafn_order(dp, old_blk->bp, new_blk->bp);
1782 case XFS_DA_NODE_MAGIC:
1783 before = xfs_da3_node_order(dp, old_blk->bp, new_blk->bp);
1788 * Link blocks in appropriate order.
1792 * Link new block in before existing block.
1794 trace_xfs_da_link_before(args);
1795 new_info->forw = cpu_to_be32(old_blk->blkno);
1796 new_info->back = old_info->back;
1797 if (old_info->back) {
1798 error = xfs_da3_node_read(args->trans, dp,
1799 be32_to_cpu(old_info->back),
1800 -1, &bp, args->whichfork);
1804 tmp_info = bp->b_addr;
1805 ASSERT(tmp_info->magic == old_info->magic);
1806 ASSERT(be32_to_cpu(tmp_info->forw) == old_blk->blkno);
1807 tmp_info->forw = cpu_to_be32(new_blk->blkno);
1808 xfs_trans_log_buf(args->trans, bp, 0, sizeof(*tmp_info)-1);
1810 old_info->back = cpu_to_be32(new_blk->blkno);
1813 * Link new block in after existing block.
1815 trace_xfs_da_link_after(args);
1816 new_info->forw = old_info->forw;
1817 new_info->back = cpu_to_be32(old_blk->blkno);
1818 if (old_info->forw) {
1819 error = xfs_da3_node_read(args->trans, dp,
1820 be32_to_cpu(old_info->forw),
1821 -1, &bp, args->whichfork);
1825 tmp_info = bp->b_addr;
1826 ASSERT(tmp_info->magic == old_info->magic);
1827 ASSERT(be32_to_cpu(tmp_info->back) == old_blk->blkno);
1828 tmp_info->back = cpu_to_be32(new_blk->blkno);
1829 xfs_trans_log_buf(args->trans, bp, 0, sizeof(*tmp_info)-1);
1831 old_info->forw = cpu_to_be32(new_blk->blkno);
1834 xfs_trans_log_buf(args->trans, old_blk->bp, 0, sizeof(*tmp_info) - 1);
1835 xfs_trans_log_buf(args->trans, new_blk->bp, 0, sizeof(*tmp_info) - 1);
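/*
 * Illustrative note, not part of the original file: xfs_da3_blk_link()
 * above splices new_blk into the sibling chain on one side of old_blk.
 * "Link before" turns  ... <-> back <-> old <-> ...  into
 * ... <-> back <-> new <-> old <-> ..., and "link after" turns
 * ... <-> old <-> forw <-> ...  into  ... <-> old <-> new <-> forw <-> ...,
 * logging the forw/back pointers of every block that was touched.
 */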
1840 * Unlink a block from a doubly linked list of blocks.
1842 STATIC int /* error */
1844 struct xfs_da_state *state,
1845 struct xfs_da_state_blk *drop_blk,
1846 struct xfs_da_state_blk *save_blk)
1848 struct xfs_da_blkinfo *drop_info;
1849 struct xfs_da_blkinfo *save_info;
1850 struct xfs_da_blkinfo *tmp_info;
1851 struct xfs_da_args *args;
1856 * Set up environment.
1859 ASSERT(args != NULL);
1860 save_info = save_blk->bp->b_addr;
1861 drop_info = drop_blk->bp->b_addr;
1862 ASSERT(save_blk->magic == XFS_DA_NODE_MAGIC ||
1863 save_blk->magic == XFS_DIR2_LEAFN_MAGIC ||
1864 save_blk->magic == XFS_ATTR_LEAF_MAGIC);
1865 ASSERT(save_blk->magic == drop_blk->magic);
1866 ASSERT((be32_to_cpu(save_info->forw) == drop_blk->blkno) ||
1867 (be32_to_cpu(save_info->back) == drop_blk->blkno));
1868 ASSERT((be32_to_cpu(drop_info->forw) == save_blk->blkno) ||
1869 (be32_to_cpu(drop_info->back) == save_blk->blkno));
1872 * Unlink the leaf block from the doubly linked chain of leaves.
1874 if (be32_to_cpu(save_info->back) == drop_blk->blkno) {
1875 trace_xfs_da_unlink_back(args);
1876 save_info->back = drop_info->back;
1877 if (drop_info->back) {
1878 error = xfs_da3_node_read(args->trans, args->dp,
1879 be32_to_cpu(drop_info->back),
1880 -1, &bp, args->whichfork);
1884 tmp_info = bp->b_addr;
1885 ASSERT(tmp_info->magic == save_info->magic);
1886 ASSERT(be32_to_cpu(tmp_info->forw) == drop_blk->blkno);
1887 tmp_info->forw = cpu_to_be32(save_blk->blkno);
1888 xfs_trans_log_buf(args->trans, bp, 0,
1889 sizeof(*tmp_info) - 1);
1892 trace_xfs_da_unlink_forward(args);
1893 save_info->forw = drop_info->forw;
1894 if (drop_info->forw) {
1895 error = xfs_da3_node_read(args->trans, args->dp,
1896 be32_to_cpu(drop_info->forw),
1897 -1, &bp, args->whichfork);
1901 tmp_info = bp->b_addr;
1902 ASSERT(tmp_info->magic == save_info->magic);
1903 ASSERT(be32_to_cpu(tmp_info->back) == drop_blk->blkno);
1904 tmp_info->back = cpu_to_be32(save_blk->blkno);
1905 xfs_trans_log_buf(args->trans, bp, 0,
1906 sizeof(*tmp_info) - 1);
1910 xfs_trans_log_buf(args->trans, save_blk->bp, 0, sizeof(*save_info) - 1);
1915 * Move a path "forward" or "!forward" one block at the current level.
1917 * This routine will adjust a "path" to point to the next block
1918 * "forward" (higher hashvalues) or "!forward" (lower hashvals) in the
1919 * Btree, including updating pointers to the intermediate nodes between
1920 * the new bottom and the root.
1924 struct xfs_da_state *state,
1925 struct xfs_da_state_path *path,
1930 struct xfs_da_state_blk *blk;
1931 struct xfs_da_blkinfo *info;
1932 struct xfs_da_args *args;
1933 struct xfs_da_node_entry *btree;
1934 struct xfs_da3_icnode_hdr nodehdr;
1936 xfs_dablk_t blkno = 0;
1939 struct xfs_inode *dp = state->args->dp;
1941 trace_xfs_da_path_shift(state->args);
1944 * Roll up the Btree looking for the first block where our
1945 * current index is not at the edge of the block. Note that
1946 * we skip the bottom layer because we want the sibling block.
1949 ASSERT(args != NULL);
1950 ASSERT(path != NULL);
1951 ASSERT((path->active > 0) && (path->active < XFS_DA_NODE_MAXDEPTH));
1952 level = (path->active-1) - 1; /* skip bottom layer in path */
1953 for (blk = &path->blk[level]; level >= 0; blk--, level--) {
1954 xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr,
1957 if (forward && (blk->index < nodehdr.count - 1)) {
1959 blkno = be32_to_cpu(nodehdr.btree[blk->index].before);
1961 } else if (!forward && (blk->index > 0)) {
1963 blkno = be32_to_cpu(nodehdr.btree[blk->index].before);
1968 *result = -ENOENT; /* we're out of our tree */
1969 ASSERT(args->op_flags & XFS_DA_OP_OKNOENT);
1974 * Roll down the edge of the subtree until we reach the
1975 * same depth we were at originally.
1977 for (blk++, level++; level < path->active; blk++, level++) {
1979 * Read the next child block into a local buffer.
1981 error = xfs_da3_node_read(args->trans, dp, blkno, -1, &bp,
1987 * Release the old block (if it's dirty, the trans doesn't
1988 * actually let go) and swap the local buffer into the path
1989 * structure. This ensures failure of the above read doesn't set
1990 * a NULL buffer in an active slot in the path.
1993 xfs_trans_brelse(args->trans, blk->bp);
1997 info = blk->bp->b_addr;
1998 ASSERT(info->magic == cpu_to_be16(XFS_DA_NODE_MAGIC) ||
1999 info->magic == cpu_to_be16(XFS_DA3_NODE_MAGIC) ||
2000 info->magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
2001 info->magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC) ||
2002 info->magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC) ||
2003 info->magic == cpu_to_be16(XFS_ATTR3_LEAF_MAGIC));
2007 * Note: we flatten the magic number to a single type so we
2008 * don't have to compare against crc/non-crc types elsewhere.
2010 switch (be16_to_cpu(info->magic)) {
2011 case XFS_DA_NODE_MAGIC:
2012 case XFS_DA3_NODE_MAGIC:
2013 blk->magic = XFS_DA_NODE_MAGIC;
2014 xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr,
2016 btree = nodehdr.btree;
2017 blk->hashval = be32_to_cpu(btree[nodehdr.count - 1].hashval);
2021 blk->index = nodehdr.count - 1;
2022 blkno = be32_to_cpu(btree[blk->index].before);
2024 case XFS_ATTR_LEAF_MAGIC:
2025 case XFS_ATTR3_LEAF_MAGIC:
2026 blk->magic = XFS_ATTR_LEAF_MAGIC;
2027 ASSERT(level == path->active-1);
2029 blk->hashval = xfs_attr_leaf_lasthash(blk->bp, NULL);
2031 case XFS_DIR2_LEAFN_MAGIC:
2032 case XFS_DIR3_LEAFN_MAGIC:
2033 blk->magic = XFS_DIR2_LEAFN_MAGIC;
2034 ASSERT(level == path->active-1);
2036 blk->hashval = xfs_dir2_leaf_lasthash(args->dp,
2049 /*========================================================================
2051 *========================================================================*/
2054 * Implement a simple hash on a character string.
2055 * Rotate the hash value by 7 bits, then XOR each character in.
2056 * This is implemented with some source-level loop unrolling.
2059 xfs_da_hashname(const uint8_t *name, int namelen)
2064 * Do four characters at a time as long as we can.
2066 for (hash = 0; namelen >= 4; namelen -= 4, name += 4)
2067 hash = (name[0] << 21) ^ (name[1] << 14) ^ (name[2] << 7) ^
2068 (name[3] << 0) ^ rol32(hash, 7 * 4);
2071 * Now do the rest of the characters.
2075 return (name[0] << 14) ^ (name[1] << 7) ^ (name[2] << 0) ^
2078 return (name[0] << 7) ^ (name[1] << 0) ^ rol32(hash, 7 * 2);
2080 return (name[0] << 0) ^ rol32(hash, 7 * 1);
2081 default: /* case 0: */
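/*
 * Illustrative sketch, not part of the original file: the unrolled hash
 * above is equivalent to applying the per-character step
 * hash = c ^ rol32(hash, 7) once for every byte of the name. A minimal,
 * non-unrolled version (the helper name is made up; rol32() is the same
 * rotate-left helper used above) would be:
 */
static inline xfs_dahash_t
xfs_da_hashname_simple_sketch(const uint8_t *name, int namelen)
{
	xfs_dahash_t	hash = 0;

	while (namelen-- > 0)
		hash = *name++ ^ rol32(hash, 7);
	return hash;
}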
2088 struct xfs_da_args *args,
2089 const unsigned char *name,
2092 return (args->namelen == len && memcmp(args->name, name, len) == 0) ?
2093 XFS_CMP_EXACT : XFS_CMP_DIFFERENT;
2097 xfs_default_hashname(
2098 struct xfs_name *name)
2100 return xfs_da_hashname(name->name, name->len);
2103 const struct xfs_nameops xfs_default_nameops = {
2104 .hashname = xfs_default_hashname,
2105 .compname = xfs_da_compname
2109 xfs_da_grow_inode_int(
2110 struct xfs_da_args *args,
2114 struct xfs_trans *tp = args->trans;
2115 struct xfs_inode *dp = args->dp;
2116 int w = args->whichfork;
2117 xfs_rfsblock_t nblks = dp->i_d.di_nblocks;
2118 struct xfs_bmbt_irec map, *mapp;
2119 int nmap, error, got, i, mapi;
2122 * Find a spot in the file space to put the new block.
2124 error = xfs_bmap_first_unused(tp, dp, count, bno, w);
2129 * Try mapping it in one filesystem block.
2132 error = xfs_bmapi_write(tp, dp, *bno, count,
2133 xfs_bmapi_aflag(w)|XFS_BMAPI_METADATA|XFS_BMAPI_CONTIG,
2134 args->total, &map, &nmap);
2142 } else if (nmap == 0 && count > 1) {
2147 * If we didn't get it and the block might work if fragmented,
2148 * try without the CONTIG flag. Loop until we get it all.
2150 mapp = kmem_alloc(sizeof(*mapp) * count, 0);
2151 for (b = *bno, mapi = 0; b < *bno + count; ) {
2152 nmap = min(XFS_BMAP_MAX_NMAP, count);
2153 c = (int)(*bno + count - b);
2154 error = xfs_bmapi_write(tp, dp, b, c,
2155 xfs_bmapi_aflag(w)|XFS_BMAPI_METADATA,
2156 args->total, &mapp[mapi], &nmap);
2162 b = mapp[mapi - 1].br_startoff +
2163 mapp[mapi - 1].br_blockcount;
2171 * Count the blocks we got, make sure it matches the total.
2173 for (i = 0, got = 0; i < mapi; i++)
2174 got += mapp[i].br_blockcount;
2175 if (got != count || mapp[0].br_startoff != *bno ||
2176 mapp[mapi - 1].br_startoff + mapp[mapi - 1].br_blockcount !=
2182 /* account for newly allocated blocks in reserved blocks total */
2183 args->total -= dp->i_d.di_nblocks - nblks;
2192 * Add a block to the btree ahead of the file.
2193 * Return the new block number to the caller.
2197 struct xfs_da_args *args,
2198 xfs_dablk_t *new_blkno)
2203 trace_xfs_da_grow_inode(args);
2205 bno = args->geo->leafblk;
2206 error = xfs_da_grow_inode_int(args, &bno, args->geo->fsbcount);
2208 *new_blkno = (xfs_dablk_t)bno;
2213 * Ick. We need to always be able to remove a btree block, even
2214 * if there's no space reservation because the filesystem is full.
2215 * This is called if xfs_bunmapi on a btree block fails due to ENOSPC.
2216 * It swaps the target block with the last block in the file. The
2217 * last block in the file can always be removed, since removing it
2218 * cannot cause a bmap btree split.
2221 xfs_da3_swap_lastblock(
2222 struct xfs_da_args *args,
2223 xfs_dablk_t *dead_blknop,
2224 struct xfs_buf **dead_bufp)
2226 struct xfs_da_blkinfo *dead_info;
2227 struct xfs_da_blkinfo *sib_info;
2228 struct xfs_da_intnode *par_node;
2229 struct xfs_da_intnode *dead_node;
2230 struct xfs_dir2_leaf *dead_leaf2;
2231 struct xfs_da_node_entry *btree;
2232 struct xfs_da3_icnode_hdr par_hdr;
2233 struct xfs_inode *dp;
2234 struct xfs_trans *tp;
2235 struct xfs_mount *mp;
2236 struct xfs_buf *dead_buf;
2237 struct xfs_buf *last_buf;
2238 struct xfs_buf *sib_buf;
2239 struct xfs_buf *par_buf;
2240 xfs_dahash_t dead_hash;
2241 xfs_fileoff_t lastoff;
2242 xfs_dablk_t dead_blkno;
2243 xfs_dablk_t last_blkno;
2244 xfs_dablk_t sib_blkno;
2245 xfs_dablk_t par_blkno;
2252 trace_xfs_da_swap_lastblock(args);
2254 dead_buf = *dead_bufp;
2255 dead_blkno = *dead_blknop;
2258 w = args->whichfork;
2259 ASSERT(w == XFS_DATA_FORK);
2261 lastoff = args->geo->freeblk;
2262 error = xfs_bmap_last_before(tp, dp, &lastoff, w);
2265 if (XFS_IS_CORRUPT(mp, lastoff == 0))
2266 return -EFSCORRUPTED;
2268 * Read the last block in the btree space.
2270 last_blkno = (xfs_dablk_t)lastoff - args->geo->fsbcount;
2271 error = xfs_da3_node_read(tp, dp, last_blkno, -1, &last_buf, w);
2275 * Copy the last block into the dead buffer and log it.
2277 memcpy(dead_buf->b_addr, last_buf->b_addr, args->geo->blksize);
2278 xfs_trans_log_buf(tp, dead_buf, 0, args->geo->blksize - 1);
2279 dead_info = dead_buf->b_addr;
2281 * Get values from the moved block.
2283 if (dead_info->magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
2284 dead_info->magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC)) {
2285 struct xfs_dir3_icleaf_hdr leafhdr;
2286 struct xfs_dir2_leaf_entry *ents;
2288 dead_leaf2 = (xfs_dir2_leaf_t *)dead_info;
2289 xfs_dir2_leaf_hdr_from_disk(dp->i_mount, &leafhdr,
2291 ents = leafhdr.ents;
2293 dead_hash = be32_to_cpu(ents[leafhdr.count - 1].hashval);
2295 struct xfs_da3_icnode_hdr deadhdr;
2297 dead_node = (xfs_da_intnode_t *)dead_info;
2298 xfs_da3_node_hdr_from_disk(dp->i_mount, &deadhdr, dead_node);
2299 btree = deadhdr.btree;
2300 dead_level = deadhdr.level;
2301 dead_hash = be32_to_cpu(btree[deadhdr.count - 1].hashval);
2303 sib_buf = par_buf = NULL;
2305 * If the moved block has a left sibling, fix up the pointers.
2307 if ((sib_blkno = be32_to_cpu(dead_info->back))) {
2308 error = xfs_da3_node_read(tp, dp, sib_blkno, -1, &sib_buf, w);
2311 sib_info = sib_buf->b_addr;
2312 if (XFS_IS_CORRUPT(mp,
2313 be32_to_cpu(sib_info->forw) != last_blkno ||
2314 sib_info->magic != dead_info->magic)) {
2315 error = -EFSCORRUPTED;
2318 sib_info->forw = cpu_to_be32(dead_blkno);
2319 xfs_trans_log_buf(tp, sib_buf,
2320 XFS_DA_LOGRANGE(sib_info, &sib_info->forw,
2321 sizeof(sib_info->forw)));
2325 * If the moved block has a right sibling, fix up the pointers.
2327 if ((sib_blkno = be32_to_cpu(dead_info->forw))) {
2328 error = xfs_da3_node_read(tp, dp, sib_blkno, -1, &sib_buf, w);
2331 sib_info = sib_buf->b_addr;
2332 if (XFS_IS_CORRUPT(mp,
2333 be32_to_cpu(sib_info->back) != last_blkno ||
2334 sib_info->magic != dead_info->magic)) {
2335 error = -EFSCORRUPTED;
2338 sib_info->back = cpu_to_be32(dead_blkno);
2339 xfs_trans_log_buf(tp, sib_buf,
2340 XFS_DA_LOGRANGE(sib_info, &sib_info->back,
2341 sizeof(sib_info->back)));
2344 par_blkno = args->geo->leafblk;
2347 * Walk down the tree looking for the parent of the moved block.
2350 error = xfs_da3_node_read(tp, dp, par_blkno, -1, &par_buf, w);
2353 par_node = par_buf->b_addr;
2354 xfs_da3_node_hdr_from_disk(dp->i_mount, &par_hdr, par_node);
2355 if (XFS_IS_CORRUPT(mp,
2356 level >= 0 && level != par_hdr.level + 1)) {
2357 error = -EFSCORRUPTED;
2360 level = par_hdr.level;
2361 btree = par_hdr.btree;
2363 entno < par_hdr.count &&
2364 be32_to_cpu(btree[entno].hashval) < dead_hash;
2367 if (XFS_IS_CORRUPT(mp, entno == par_hdr.count)) {
2368 error = -EFSCORRUPTED;
2371 par_blkno = be32_to_cpu(btree[entno].before);
2372 if (level == dead_level + 1)
2374 xfs_trans_brelse(tp, par_buf);
2378 * We're in the right parent block.
2379 * Look for the right entry.
2383 entno < par_hdr.count &&
2384 be32_to_cpu(btree[entno].before) != last_blkno;
2387 if (entno < par_hdr.count)
2389 par_blkno = par_hdr.forw;
2390 xfs_trans_brelse(tp, par_buf);
2392 if (XFS_IS_CORRUPT(mp, par_blkno == 0)) {
2393 error = -EFSCORRUPTED;
2396 error = xfs_da3_node_read(tp, dp, par_blkno, -1, &par_buf, w);
2399 par_node = par_buf->b_addr;
2400 xfs_da3_node_hdr_from_disk(dp->i_mount, &par_hdr, par_node);
2401 if (XFS_IS_CORRUPT(mp, par_hdr.level != level)) {
2402 error = -EFSCORRUPTED;
2405 btree = par_hdr.btree;
2409 * Update the parent entry pointing to the moved block.
2411 btree[entno].before = cpu_to_be32(dead_blkno);
2412 xfs_trans_log_buf(tp, par_buf,
2413 XFS_DA_LOGRANGE(par_node, &btree[entno].before,
2414 sizeof(btree[entno].before)));
2415 *dead_blknop = last_blkno;
2416 *dead_bufp = last_buf;
2420 xfs_trans_brelse(tp, par_buf);
2422 xfs_trans_brelse(tp, sib_buf);
2423 xfs_trans_brelse(tp, last_buf);
2428 * Remove a btree block from a directory or attribute.
2431 xfs_da_shrink_inode(
2432 struct xfs_da_args *args,
2433 xfs_dablk_t dead_blkno,
2434 struct xfs_buf *dead_buf)
2436 struct xfs_inode *dp;
2437 int done, error, w, count;
2438 struct xfs_trans *tp;
2440 trace_xfs_da_shrink_inode(args);
2443 w = args->whichfork;
2445 count = args->geo->fsbcount;
2448	 * Remove extents. If we get ENOSPC for a directory, we have to move
2449	 * the last block to the place we want to kill.
2451 error = xfs_bunmapi(tp, dp, dead_blkno, count,
2452 xfs_bmapi_aflag(w), 0, &done);
2453 if (error == -ENOSPC) {
2454 if (w != XFS_DATA_FORK)
2456 error = xfs_da3_swap_lastblock(args, &dead_blkno,
2464 xfs_trans_binval(tp, dead_buf);
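/*
 * Illustrative sketch (not part of the original file): a caller that has
 * emptied a da btree block frees it by passing the block number and its
 * transaction-joined buffer.  The ENOSPC last-block swap above is handled
 * internally, so the caller does not retry.  Variable names are assumptions.
 */
#if 0	/* example only, never compiled */
	error = xfs_da_shrink_inode(args, dead_blkno, dead_buf);
	if (error)
		return error;
#endif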
2469 * See if the mapping(s) for this btree block are valid, i.e.
2470 * don't contain holes, are logically contiguous, and cover the whole range.
2473 xfs_da_map_covers_blocks(
2475 xfs_bmbt_irec_t *mapp,
2482 for (i = 0, off = bno; i < nmap; i++) {
2483 if (mapp[i].br_startblock == HOLESTARTBLOCK ||
2484 mapp[i].br_startblock == DELAYSTARTBLOCK) {
2487 if (off != mapp[i].br_startoff) {
2490 off += mapp[i].br_blockcount;
2492 return off == bno + count;
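/*
 * Worked example (added for illustration, not from the original source):
 * checking coverage of bno = 64, count = 3.
 *
 *   mappings {br_startoff, br_blockcount} = {64, 2}, {66, 1}:
 *	off walks 64 -> 66 -> 67; each mapping starts where off left off and
 *	the final off equals bno + count, so the range is fully covered.
 *
 *   mappings {br_startoff, br_blockcount} = {64, 2}, {67, 1}:
 *	the second mapping starts at 67 while off is 66, so block 66 is
 *	unmapped and the function reports the range as not covered.
 */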
2496	 * Convert struct xfs_bmbt_irec mappings to an array of struct xfs_buf_map.
2498 * For the single map case, it is assumed that the caller has provided a pointer
2499 * to a valid xfs_buf_map. For the multiple map case, this function will
2500 * allocate the xfs_buf_map to hold all the maps and replace the caller's single
2501 * map pointer with the allocated map.
2504 xfs_buf_map_from_irec(
2505 struct xfs_mount *mp,
2506 struct xfs_buf_map **mapp,
2508 struct xfs_bmbt_irec *irecs,
2511 struct xfs_buf_map *map;
2514 ASSERT(*nmaps == 1);
2515 ASSERT(nirecs >= 1);
2518 map = kmem_zalloc(nirecs * sizeof(struct xfs_buf_map),
2527 for (i = 0; i < *nmaps; i++) {
2528 ASSERT(irecs[i].br_startblock != DELAYSTARTBLOCK &&
2529 irecs[i].br_startblock != HOLESTARTBLOCK);
2530 map[i].bm_bn = XFS_FSB_TO_DADDR(mp, irecs[i].br_startblock);
2531 map[i].bm_len = XFS_FSB_TO_BB(mp, irecs[i].br_blockcount);
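/*
 * Worked example (added for illustration, not from the original source):
 * on a filesystem with 4096-byte blocks, an irec with br_startblock = 100
 * and br_blockcount = 2 becomes
 *
 *	bm_bn  = XFS_FSB_TO_DADDR(mp, 100)	(disk address of fsblock 100)
 *	bm_len = XFS_FSB_TO_BB(mp, 2) = 16	(2 blocks * 8 basic blocks)
 *
 * since each 4096-byte filesystem block spans eight 512-byte basic blocks.
 */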
2537	 * Map the block we are given ready for reading. There are three possible return values:
2539	 *	-1 - will be returned if we land in a hole and mappedbno == -2 so the
2540	 *	     caller knows not to execute a subsequent read.
2541	 *	 0 - if we mapped the block successfully.
2542	 *	<0 - a negative errno (e.g. -EFSCORRUPTED) if there was an error.
2546 struct xfs_inode *dp,
2548 xfs_daddr_t mappedbno,
2550 struct xfs_buf_map **map,
2553 struct xfs_mount *mp = dp->i_mount;
2556 struct xfs_bmbt_irec irec;
2557 struct xfs_bmbt_irec *irecs = &irec;
2560 ASSERT(map && *map);
2561 ASSERT(*nmaps == 1);
2563 if (whichfork == XFS_DATA_FORK)
2564 nfsb = mp->m_dir_geo->fsbcount;
2566 nfsb = mp->m_attr_geo->fsbcount;
2569 * Caller doesn't have a mapping. -2 means don't complain
2570 * if we land in a hole.
2572 if (mappedbno == -1 || mappedbno == -2) {
2574 * Optimize the one-block case.
2577 irecs = kmem_zalloc(sizeof(irec) * nfsb,
2581 error = xfs_bmapi_read(dp, (xfs_fileoff_t)bno, nfsb, irecs,
2582 &nirecs, xfs_bmapi_aflag(whichfork));
2586 irecs->br_startblock = XFS_DADDR_TO_FSB(mp, mappedbno);
2587 irecs->br_startoff = (xfs_fileoff_t)bno;
2588 irecs->br_blockcount = nfsb;
2589 irecs->br_state = 0;
2593 if (!xfs_da_map_covers_blocks(nirecs, irecs, bno, nfsb)) {
2594 /* Caller ok with no mapping. */
2595 if (!XFS_IS_CORRUPT(mp, mappedbno != -2)) {
2600 /* Caller expected a mapping, so abort. */
2601 if (xfs_error_level >= XFS_ERRLEVEL_LOW) {
2604 xfs_alert(mp, "%s: bno %lld dir: inode %lld", __func__,
2605 (long long)bno, (long long)dp->i_ino);
2606 for (i = 0; i < *nmaps; i++) {
2608 "[%02d] br_startoff %lld br_startblock %lld br_blockcount %lld br_state %d",
2610 (long long)irecs[i].br_startoff,
2611 (long long)irecs[i].br_startblock,
2612 (long long)irecs[i].br_blockcount,
2616 error = -EFSCORRUPTED;
2619 error = xfs_buf_map_from_irec(mp, map, nmaps, irecs, nirecs);
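/*
 * Illustrative sketch (not part of the original file): how the callers
 * below consume the three-way return convention.  With mappedbno == -2 a
 * hole comes back as -1 and is not treated as a failure; any other
 * negative value is a real error.  Variable names are assumptions.
 */
#if 0	/* example only, never compiled */
	struct xfs_buf_map	map;
	struct xfs_buf_map	*mapp = &map;
	int			nmap = 1;
	int			error;

	error = xfs_dabuf_map(dp, bno, mappedbno, whichfork, &mapp, &nmap);
	if (error) {
		/* landing in a hole with mappedbno == -2 is not an error */
		if (error == -1)
			error = 0;
		goto out_free;
	}
	/* ... get/read/readahead the buffer using mapp and nmap ... */
out_free:
	if (mapp != &map)
		kmem_free(mapp);	/* multi-map case allocated the array */
#endif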
2627 * Get a buffer for the dir/attr block.
2631 struct xfs_trans *trans,
2632 struct xfs_inode *dp,
2634 xfs_daddr_t mappedbno,
2635 struct xfs_buf **bpp,
2639 struct xfs_buf_map map;
2640 struct xfs_buf_map *mapp;
2647 error = xfs_dabuf_map(dp, bno, mappedbno, whichfork,
2650 /* mapping a hole is not an error, but we don't continue */
2656 bp = xfs_trans_get_buf_map(trans, dp->i_mount->m_ddev_targp,
2658 error = bp ? bp->b_error : -EIO;
2661 xfs_trans_brelse(trans, bp);
2675 * Get a buffer for the dir/attr block, fill in the contents.
2679 struct xfs_trans *trans,
2680 struct xfs_inode *dp,
2682 xfs_daddr_t mappedbno,
2683 struct xfs_buf **bpp,
2685 const struct xfs_buf_ops *ops)
2688 struct xfs_buf_map map;
2689 struct xfs_buf_map *mapp;
2696 error = xfs_dabuf_map(dp, bno, mappedbno, whichfork,
2699 /* mapping a hole is not an error, but we don't continue */
2705 error = xfs_trans_read_buf_map(dp->i_mount, trans,
2706 dp->i_mount->m_ddev_targp,
2707 mapp, nmap, 0, &bp, ops);
2711 if (whichfork == XFS_ATTR_FORK)
2712 xfs_buf_set_ref(bp, XFS_ATTR_BTREE_REF);
2714 xfs_buf_set_ref(bp, XFS_DIR_BTREE_REF);
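/*
 * Illustrative sketch (not part of the original file): reading a da btree
 * node block through this helper.  xfs_da3_node_buf_ops is the verifier
 * defined earlier in this file; tp, dp and blkno are assumptions made for
 * the example.
 */
#if 0	/* example only, never compiled */
	struct xfs_buf	*bp;
	int		error;

	error = xfs_da_read_buf(tp, dp, blkno, -1, &bp, XFS_DATA_FORK,
				&xfs_da3_node_buf_ops);
	if (error)
		return error;
	/* bp holds a verified block; release it with xfs_trans_brelse() when done */
#endif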
2724 * Readahead the dir/attr block.
2728 struct xfs_inode *dp,
2730 xfs_daddr_t mappedbno,
2732 const struct xfs_buf_ops *ops)
2734 struct xfs_buf_map map;
2735 struct xfs_buf_map *mapp;
2741 error = xfs_dabuf_map(dp, bno, mappedbno, whichfork,
2744 /* mapping a hole is not an error, but we don't continue */
2750 mappedbno = mapp[0].bm_bn;
2751 xfs_buf_readahead_map(dp->i_mount->m_ddev_targp, mapp, nmap, ops);
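/*
 * Illustrative sketch (not part of the original file): issuing readahead
 * for a directory data block we expect to need soon.  The return value
 * only reflects mapping problems; the I/O itself is asynchronous.  The
 * block number and the use of xfs_dir3_data_buf_ops are assumptions.
 */
#if 0	/* example only, never compiled */
	(void)xfs_da_reada_buf(dp, next_blkno, -1, XFS_DATA_FORK,
			       &xfs_dir3_data_buf_ops);
#endif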