/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * Copyright (c) 2018 Red Hat, Inc.
 * All rights reserved.
 */

#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_rmap_btree.h"
#include "xfs_alloc.h"
#include "xfs_ialloc.h"
#include "xfs_rmap.h"
#include "xfs_ag.h"
#include "xfs_ag_resv.h"
#include "xfs_health.h"

static struct xfs_buf *
xfs_get_aghdr_buf(
	struct xfs_mount	*mp,
	xfs_daddr_t		blkno,
	size_t			numblks,
	int			flags,
	const struct xfs_buf_ops *ops)
{
	struct xfs_buf		*bp;

	bp = xfs_buf_get_uncached(mp->m_ddev_targp, numblks, flags);
	if (!bp)
		return NULL;

	/* Zero the header and point the buffer at its disk address. */
	xfs_buf_zero(bp, 0, BBTOB(bp->b_length));
	bp->b_bn = blkno;
	bp->b_maps[0].bm_bn = blkno;
	bp->b_ops = ops;

	return bp;
}
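
/*
 * The buffer returned above is uncached and unlogged: the caller is
 * expected to run an init function on it and queue it on a delayed write
 * list (see xfs_ag_init_hdr() below), so nothing here touches the buffer
 * cache or the log.
 */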

/*
 * Generic btree root block init function
 */
static void
xfs_btroot_init(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	struct aghdr_init_data	*id)
{
	xfs_btree_init_block(mp, bp, id->type, 0, 0, id->agno, 0);
}
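
/*
 * xfs_btroot_init() above writes an empty root block (level 0, zero
 * records) of whatever btree type the caller put in id->type; the inobt,
 * finobt and refcount btree roots in the table below are all created
 * this way.
 */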

/*
 * Alloc btree root block init functions
 */
static void
xfs_bnoroot_init(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	struct aghdr_init_data	*id)
{
	struct xfs_alloc_rec	*arec;

	xfs_btree_init_block(mp, bp, XFS_BTNUM_BNO, 0, 1, id->agno, 0);
	arec = XFS_ALLOC_REC_ADDR(mp, XFS_BUF_TO_BLOCK(bp), 1);
	arec->ar_startblock = cpu_to_be32(mp->m_ag_prealloc_blocks);
	arec->ar_blockcount = cpu_to_be32(id->agsize -
					  be32_to_cpu(arec->ar_startblock));
}

static void
xfs_cntroot_init(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	struct aghdr_init_data	*id)
{
	struct xfs_alloc_rec	*arec;

	xfs_btree_init_block(mp, bp, XFS_BTNUM_CNT, 0, 1, id->agno, 0);
	arec = XFS_ALLOC_REC_ADDR(mp, XFS_BUF_TO_BLOCK(bp), 1);
	arec->ar_startblock = cpu_to_be32(mp->m_ag_prealloc_blocks);
	arec->ar_blockcount = cpu_to_be32(id->agsize -
					  be32_to_cpu(arec->ar_startblock));
}
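
/*
 * Both free space btree roots above start life with a single record
 * covering the one free extent in a new AG: everything from the end of
 * the preallocated header blocks (mp->m_ag_prealloc_blocks) out to
 * id->agsize.
 */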

/*
 * Reverse map root block init
 */
static void
xfs_rmaproot_init(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	struct aghdr_init_data	*id)
{
	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);
	struct xfs_rmap_rec	*rrec;

	xfs_btree_init_block(mp, bp, XFS_BTNUM_RMAP, 0, 4, id->agno, 0);

	/*
	 * Mark the AG header regions as static metadata. The BNO
	 * btree block is the first block after the headers, so its
	 * location defines the size of the region the static
	 * metadata consumes.
	 *
	 * Note: unlike mkfs, we never have to account for log
	 * space when growing the data regions.
	 */
	rrec = XFS_RMAP_REC_ADDR(block, 1);
	rrec->rm_startblock = 0;
	rrec->rm_blockcount = cpu_to_be32(XFS_BNO_BLOCK(mp));
	rrec->rm_owner = cpu_to_be64(XFS_RMAP_OWN_FS);
	rrec->rm_offset = 0;

	/* account freespace btree root blocks */
	rrec = XFS_RMAP_REC_ADDR(block, 2);
	rrec->rm_startblock = cpu_to_be32(XFS_BNO_BLOCK(mp));
	rrec->rm_blockcount = cpu_to_be32(2);
	rrec->rm_owner = cpu_to_be64(XFS_RMAP_OWN_AG);
	rrec->rm_offset = 0;

	/* account inode btree root blocks */
	rrec = XFS_RMAP_REC_ADDR(block, 3);
	rrec->rm_startblock = cpu_to_be32(XFS_IBT_BLOCK(mp));
	rrec->rm_blockcount = cpu_to_be32(XFS_RMAP_BLOCK(mp) -
					  XFS_IBT_BLOCK(mp));
	rrec->rm_owner = cpu_to_be64(XFS_RMAP_OWN_INOBT);
	rrec->rm_offset = 0;

	/* account for rmap btree root */
	rrec = XFS_RMAP_REC_ADDR(block, 4);
	rrec->rm_startblock = cpu_to_be32(XFS_RMAP_BLOCK(mp));
	rrec->rm_blockcount = cpu_to_be32(1);
	rrec->rm_owner = cpu_to_be64(XFS_RMAP_OWN_AG);
	rrec->rm_offset = 0;

	/* account for refc btree root */
	if (xfs_sb_version_hasreflink(&mp->m_sb)) {
		rrec = XFS_RMAP_REC_ADDR(block, 5);
		rrec->rm_startblock = cpu_to_be32(xfs_refc_block(mp));
		rrec->rm_blockcount = cpu_to_be32(1);
		rrec->rm_owner = cpu_to_be64(XFS_RMAP_OWN_REFC);
		rrec->rm_offset = 0;
		be16_add_cpu(&block->bb_numrecs, 1);
	}
}
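
/*
 * A sketch of the initial rmap records written above (record 5 only on
 * reflink filesystems):
 *
 *  rec  startblock           blockcount             owner
 *   1   0                    XFS_BNO_BLOCK(mp)      OWN_FS    (AG headers)
 *   2   XFS_BNO_BLOCK(mp)    2                      OWN_AG    (bnobt/cntbt roots)
 *   3   XFS_IBT_BLOCK(mp)    rmapbt - ibt blocks    OWN_INOBT (inobt/finobt roots)
 *   4   XFS_RMAP_BLOCK(mp)   1                      OWN_AG    (rmapbt root)
 *   5   xfs_refc_block(mp)   1                      OWN_REFC  (refcountbt root)
 */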

/*
 * Initialise new secondary superblocks with the pre-grow geometry, but mark
 * them as "in progress" so we know they haven't yet been activated. This will
 * get cleared when the update with the new geometry information is done after
 * changes to the primary are committed. This isn't strictly necessary, but we
 * get it for free with the delayed buffer write lists and it means we can tell
 * if a grow operation didn't complete properly after the fact.
 */
static void
xfs_sbblock_init(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	struct aghdr_init_data	*id)
{
	struct xfs_dsb		*dsb = XFS_BUF_TO_SBP(bp);

	xfs_sb_to_disk(dsb, &mp->m_sb);
	dsb->sb_inprogress = 1;
}

static void
xfs_agfblock_init(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	struct aghdr_init_data	*id)
{
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(bp);
	xfs_extlen_t		tmpsize;

	agf->agf_magicnum = cpu_to_be32(XFS_AGF_MAGIC);
	agf->agf_versionnum = cpu_to_be32(XFS_AGF_VERSION);
	agf->agf_seqno = cpu_to_be32(id->agno);
	agf->agf_length = cpu_to_be32(id->agsize);
	agf->agf_roots[XFS_BTNUM_BNOi] = cpu_to_be32(XFS_BNO_BLOCK(mp));
	agf->agf_roots[XFS_BTNUM_CNTi] = cpu_to_be32(XFS_CNT_BLOCK(mp));
	agf->agf_levels[XFS_BTNUM_BNOi] = cpu_to_be32(1);
	agf->agf_levels[XFS_BTNUM_CNTi] = cpu_to_be32(1);
	if (xfs_sb_version_hasrmapbt(&mp->m_sb)) {
		agf->agf_roots[XFS_BTNUM_RMAPi] =
					cpu_to_be32(XFS_RMAP_BLOCK(mp));
		agf->agf_levels[XFS_BTNUM_RMAPi] = cpu_to_be32(1);
		agf->agf_rmap_blocks = cpu_to_be32(1);
	}

	agf->agf_flfirst = cpu_to_be32(1);
	agf->agf_fllast = 0;
	agf->agf_flcount = 0;
	tmpsize = id->agsize - mp->m_ag_prealloc_blocks;
	agf->agf_freeblks = cpu_to_be32(tmpsize);
	agf->agf_longest = cpu_to_be32(tmpsize);
	if (xfs_sb_version_hascrc(&mp->m_sb))
		uuid_copy(&agf->agf_uuid, &mp->m_sb.sb_meta_uuid);
	if (xfs_sb_version_hasreflink(&mp->m_sb)) {
		agf->agf_refcount_root = cpu_to_be32(
				xfs_refc_block(mp));
		agf->agf_refcount_level = cpu_to_be32(1);
		agf->agf_refcount_blocks = cpu_to_be32(1);
	}
}
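
/*
 * Note that the free list itself starts out empty (agf_flcount is zero
 * and flfirst/fllast describe an empty ring); the allocator is expected
 * to populate the AGFL the first time this AG is used for allocation.
 */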

static void
xfs_agflblock_init(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	struct aghdr_init_data	*id)
{
	struct xfs_agfl		*agfl = XFS_BUF_TO_AGFL(bp);
	__be32			*agfl_bno;
	int			bucket;

	if (xfs_sb_version_hascrc(&mp->m_sb)) {
		agfl->agfl_magicnum = cpu_to_be32(XFS_AGFL_MAGIC);
		agfl->agfl_seqno = cpu_to_be32(id->agno);
		uuid_copy(&agfl->agfl_uuid, &mp->m_sb.sb_meta_uuid);
	}

	/* Mark every slot in the free list block as empty. */
	agfl_bno = XFS_BUF_TO_AGFL_BNO(mp, bp);
	for (bucket = 0; bucket < xfs_agfl_size(mp); bucket++)
		agfl_bno[bucket] = cpu_to_be32(NULLAGBLOCK);
}

static void
xfs_agiblock_init(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	struct aghdr_init_data	*id)
{
	struct xfs_agi		*agi = XFS_BUF_TO_AGI(bp);
	int			bucket;

	agi->agi_magicnum = cpu_to_be32(XFS_AGI_MAGIC);
	agi->agi_versionnum = cpu_to_be32(XFS_AGI_VERSION);
	agi->agi_seqno = cpu_to_be32(id->agno);
	agi->agi_length = cpu_to_be32(id->agsize);
	agi->agi_count = 0;
	agi->agi_root = cpu_to_be32(XFS_IBT_BLOCK(mp));
	agi->agi_level = cpu_to_be32(1);
	agi->agi_freecount = 0;
	agi->agi_newino = cpu_to_be32(NULLAGINO);
	agi->agi_dirino = cpu_to_be32(NULLAGINO);
	if (xfs_sb_version_hascrc(&mp->m_sb))
		uuid_copy(&agi->agi_uuid, &mp->m_sb.sb_meta_uuid);
	if (xfs_sb_version_hasfinobt(&mp->m_sb)) {
		agi->agi_free_root = cpu_to_be32(XFS_FIBT_BLOCK(mp));
		agi->agi_free_level = cpu_to_be32(1);
	}
	for (bucket = 0; bucket < XFS_AGI_UNLINKED_BUCKETS; bucket++)
		agi->agi_unlinked[bucket] = cpu_to_be32(NULLAGINO);
}
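
/*
 * The agi_unlinked[] buckets initialised above are the on-disk heads of
 * the per-AG unlinked inode lists; NULLAGINO marks a list as empty.
 */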

typedef void (*aghdr_init_work_f)(struct xfs_mount *mp, struct xfs_buf *bp,
				  struct aghdr_init_data *id);

static int
xfs_ag_init_hdr(
	struct xfs_mount	*mp,
	struct aghdr_init_data	*id,
	aghdr_init_work_f	work,
	const struct xfs_buf_ops *ops)
{
	struct xfs_buf		*bp;

	bp = xfs_get_aghdr_buf(mp, id->daddr, id->numblks, 0, ops);
	if (!bp)
		return -ENOMEM;

	(*work)(mp, bp, id);

	xfs_buf_delwri_queue(bp, &id->buffer_list);
	xfs_buf_relse(bp);
	return 0;
}
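
/*
 * Every AG header follows the same pattern: get an uncached buffer at
 * the header's disk address, run the type-specific init callback on it,
 * then queue it on the caller's delayed write list. Nothing reaches the
 * disk until the caller submits that list.
 */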

struct xfs_aghdr_grow_data {
	xfs_daddr_t		daddr;		/* disk address of the header */
	size_t			numblks;	/* header size in basic blocks */
	const struct xfs_buf_ops *ops;		/* buffer verifier ops */
	aghdr_init_work_f	work;		/* init callback for this header */
	xfs_btnum_t		type;		/* btree type, for xfs_btroot_init */
	bool			need_init;	/* false to skip this header */
};

/*
 * Prepare new AG headers to be written to disk. We use uncached buffers here,
 * as it is assumed these new AG headers lie beyond the currently valid
 * filesystem address space. Using cached buffers would trip over EOFS
 * corruption detection algorithms in the buffer cache lookup routines.
 *
 * This is a non-transactional function, but the prepared buffers are added to
 * a delayed write buffer list supplied by the caller so the caller can submit
 * them to disk and wait on them as required.
 */
int
xfs_ag_init_headers(
	struct xfs_mount	*mp,
	struct aghdr_init_data	*id)
{
	struct xfs_aghdr_grow_data aghdr_data[] = {
	{ /* SB */
		.daddr = XFS_AG_DADDR(mp, id->agno, XFS_SB_DADDR),
		.numblks = XFS_FSS_TO_BB(mp, 1),
		.ops = &xfs_sb_buf_ops,
		.work = &xfs_sbblock_init,
		.need_init = true
	},
	{ /* AGF */
		.daddr = XFS_AG_DADDR(mp, id->agno, XFS_AGF_DADDR(mp)),
		.numblks = XFS_FSS_TO_BB(mp, 1),
		.ops = &xfs_agf_buf_ops,
		.work = &xfs_agfblock_init,
		.need_init = true
	},
	{ /* AGFL */
		.daddr = XFS_AG_DADDR(mp, id->agno, XFS_AGFL_DADDR(mp)),
		.numblks = XFS_FSS_TO_BB(mp, 1),
		.ops = &xfs_agfl_buf_ops,
		.work = &xfs_agflblock_init,
		.need_init = true
	},
	{ /* AGI */
		.daddr = XFS_AG_DADDR(mp, id->agno, XFS_AGI_DADDR(mp)),
		.numblks = XFS_FSS_TO_BB(mp, 1),
		.ops = &xfs_agi_buf_ops,
		.work = &xfs_agiblock_init,
		.need_init = true
	},
	{ /* BNO root block */
		.daddr = XFS_AGB_TO_DADDR(mp, id->agno, XFS_BNO_BLOCK(mp)),
		.numblks = BTOBB(mp->m_sb.sb_blocksize),
		.ops = &xfs_bnobt_buf_ops,
		.work = &xfs_bnoroot_init,
		.need_init = true
	},
	{ /* CNT root block */
		.daddr = XFS_AGB_TO_DADDR(mp, id->agno, XFS_CNT_BLOCK(mp)),
		.numblks = BTOBB(mp->m_sb.sb_blocksize),
		.ops = &xfs_cntbt_buf_ops,
		.work = &xfs_cntroot_init,
		.need_init = true
	},
	{ /* INO root block */
		.daddr = XFS_AGB_TO_DADDR(mp, id->agno, XFS_IBT_BLOCK(mp)),
		.numblks = BTOBB(mp->m_sb.sb_blocksize),
		.ops = &xfs_inobt_buf_ops,
		.work = &xfs_btroot_init,
		.type = XFS_BTNUM_INO,
		.need_init = true
	},
	{ /* FINO root block */
		.daddr = XFS_AGB_TO_DADDR(mp, id->agno, XFS_FIBT_BLOCK(mp)),
		.numblks = BTOBB(mp->m_sb.sb_blocksize),
		.ops = &xfs_finobt_buf_ops,
		.work = &xfs_btroot_init,
		.type = XFS_BTNUM_FINO,
		.need_init = xfs_sb_version_hasfinobt(&mp->m_sb)
	},
	{ /* RMAP root block */
		.daddr = XFS_AGB_TO_DADDR(mp, id->agno, XFS_RMAP_BLOCK(mp)),
		.numblks = BTOBB(mp->m_sb.sb_blocksize),
		.ops = &xfs_rmapbt_buf_ops,
		.work = &xfs_rmaproot_init,
		.need_init = xfs_sb_version_hasrmapbt(&mp->m_sb)
	},
	{ /* REFC root block */
		.daddr = XFS_AGB_TO_DADDR(mp, id->agno, xfs_refc_block(mp)),
		.numblks = BTOBB(mp->m_sb.sb_blocksize),
		.ops = &xfs_refcountbt_buf_ops,
		.work = &xfs_btroot_init,
		.type = XFS_BTNUM_REFC,
		.need_init = xfs_sb_version_hasreflink(&mp->m_sb)
	},
	{ /* NULL terminating block */
		.daddr = XFS_BUF_DADDR_NULL,
	}
	};
	struct xfs_aghdr_grow_data *dp;
	int			error = 0;

	/* Account for AG free space in new AG */
	id->nfree += id->agsize - mp->m_ag_prealloc_blocks;
	for (dp = &aghdr_data[0]; dp->daddr != XFS_BUF_DADDR_NULL; dp++) {
		if (!dp->need_init)
			continue;

		id->daddr = dp->daddr;
		id->numblks = dp->numblks;
		id->type = dp->type;
		error = xfs_ag_init_hdr(mp, id, dp->work, dp->ops);
		if (error)
			break;
	}
	return error;
}
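
/*
 * A minimal usage sketch for the function above, modelled on the growfs
 * path: the caller owns the buffer list, so it decides when the headers
 * actually hit the disk. ("agno" and "agsize" stand in for the new AG's
 * geometry; error handling is trimmed.)
 *
 *	struct aghdr_init_data	id = {};
 *	int			error;
 *
 *	INIT_LIST_HEAD(&id.buffer_list);
 *	id.agno = agno;
 *	id.agsize = agsize;
 *	error = xfs_ag_init_headers(mp, &id);
 *	if (!error)
 *		error = xfs_buf_delwri_submit(&id.buffer_list);
 */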

/*
 * Extend the AG indicated by @id by the length passed in.
 */
int
xfs_ag_extend_space(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct aghdr_init_data	*id,
	xfs_extlen_t		len)
{
	struct xfs_buf		*bp;
	struct xfs_agi		*agi;
	struct xfs_agf		*agf;
	int			error;

	/*
	 * Change the agi length.
	 */
	error = xfs_ialloc_read_agi(mp, tp, id->agno, &bp);
	if (error)
		return error;

	agi = XFS_BUF_TO_AGI(bp);
	be32_add_cpu(&agi->agi_length, len);
	ASSERT(id->agno == mp->m_sb.sb_agcount - 1 ||
	       be32_to_cpu(agi->agi_length) == mp->m_sb.sb_agblocks);
	xfs_ialloc_log_agi(tp, bp, XFS_AGI_LENGTH);

	/*
	 * Change agf length.
	 */
	error = xfs_alloc_read_agf(mp, tp, id->agno, 0, &bp);
	if (error)
		return error;

	agf = XFS_BUF_TO_AGF(bp);
	be32_add_cpu(&agf->agf_length, len);
	ASSERT(agf->agf_length == agi->agi_length);
	xfs_alloc_log_agf(tp, bp, XFS_AGF_LENGTH);

	/*
	 * Free the new space.
	 *
	 * XFS_RMAP_OINFO_SKIP_UPDATE is used here to tell the rmap code that
	 * this extent doesn't yet exist in the rmap btree.
	 */
	error = xfs_rmap_free(tp, bp, id->agno,
			      be32_to_cpu(agf->agf_length) - len,
			      len, &XFS_RMAP_OINFO_SKIP_UPDATE);
	if (error)
		return error;

	return xfs_free_extent(tp, XFS_AGB_TO_FSB(mp, id->agno,
					be32_to_cpu(agf->agf_length) - len),
			       len, &XFS_RMAP_OINFO_SKIP_UPDATE,
			       XFS_AG_RESV_NONE);
}
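
/*
 * The split above is worth noting: xfs_rmap_free() with
 * XFS_RMAP_OINFO_SKIP_UPDATE leaves the rmap btree untouched, and it is
 * the xfs_free_extent() call that inserts the grown region into the free
 * space btrees and makes it visible to the allocator.
 */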

/* Retrieve AG geometry. */
int
xfs_ag_get_geometry(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno,
	struct xfs_ag_geometry	*ageo)
{
	struct xfs_buf		*agi_bp;
	struct xfs_buf		*agf_bp;
	struct xfs_agi		*agi;
	struct xfs_agf		*agf;
	struct xfs_perag	*pag;
	unsigned int		freeblks;
	int			error;

	if (agno >= mp->m_sb.sb_agcount)
		return -EINVAL;

	/* Lock the AG headers. */
	error = xfs_ialloc_read_agi(mp, NULL, agno, &agi_bp);
	if (error)
		return error;
	error = xfs_alloc_read_agf(mp, NULL, agno, 0, &agf_bp);
	if (error)
		goto out_agi;
	pag = xfs_perag_get(mp, agno);

	/* Fill out form. */
	memset(ageo, 0, sizeof(*ageo));
	ageo->ag_number = agno;

	agi = XFS_BUF_TO_AGI(agi_bp);
	ageo->ag_icount = be32_to_cpu(agi->agi_count);
	ageo->ag_ifree = be32_to_cpu(agi->agi_freecount);

	agf = XFS_BUF_TO_AGF(agf_bp);
	ageo->ag_length = be32_to_cpu(agf->agf_length);
	freeblks = pag->pagf_freeblks +
		   pag->pagf_flcount +
		   pag->pagf_btreeblks -
		   xfs_ag_resv_needed(pag, XFS_AG_RESV_NONE);
	ageo->ag_freeblks = freeblks;
	xfs_ag_geom_health(pag, ageo);
	xfs_perag_put(pag);

	/* Release resources. */
	xfs_buf_relse(agf_bp);
out_agi:
	xfs_buf_relse(agi_bp);
	return error;
}
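
/*
 * xfs_ag_get_geometry() backs the XFS_IOC_AG_GEOMETRY ioctl. Note that
 * the free block count reported above is the in-core count including the
 * AGFL and free space btree blocks, less any blocks held back by the AG
 * reservation machinery, so it can differ from a raw agf_freeblks read.
 */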