/*
 * Copyright (C) 2017 Oracle.  All Rights Reserved.
 *
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301, USA.
 */
22 #include "xfs_shared.h"
23 #include "xfs_format.h"
24 #include "xfs_trans_resv.h"
25 #include "xfs_mount.h"
26 #include "xfs_defer.h"
27 #include "xfs_btree.h"
29 #include "xfs_log_format.h"
30 #include "xfs_trans.h"
32 #include "xfs_inode.h"
33 #include "xfs_inode_fork.h"
34 #include "xfs_alloc.h"
36 #include "xfs_quota.h"
38 #include "xfs_dquot.h"
39 #include "xfs_dquot_item.h"
40 #include "scrub/xfs_scrub.h"
41 #include "scrub/scrub.h"
42 #include "scrub/common.h"
43 #include "scrub/trace.h"
45 /* Convert a scrub type code to a DQ flag, or return 0 if error. */
47 xfs_scrub_quota_to_dqtype(
48 struct xfs_scrub_context *sc)
50 switch (sc->sm->sm_type) {
51 case XFS_SCRUB_TYPE_UQUOTA:
53 case XFS_SCRUB_TYPE_GQUOTA:
55 case XFS_SCRUB_TYPE_PQUOTA:
62 /* Set us up to scrub a quota. */
64 xfs_scrub_setup_quota(
65 struct xfs_scrub_context *sc,
71 * If userspace gave us an AG number or inode data, they don't
72 * know what they're doing. Get out.
74 if (sc->sm->sm_agno || sc->sm->sm_ino || sc->sm->sm_gen)
77 dqtype = xfs_scrub_quota_to_dqtype(sc);
80 if (!xfs_this_quota_on(sc->mp, dqtype))
87 /* Scrub the fields in an individual quota item. */
90 struct xfs_scrub_context *sc,
95 struct xfs_mount *mp = sc->mp;
96 struct xfs_disk_dquot *d = &dq->q_core;
97 struct xfs_quotainfo *qi = mp->m_quotainfo;
99 unsigned long long bsoft;
100 unsigned long long isoft;
101 unsigned long long rsoft;
102 unsigned long long bhard;
103 unsigned long long ihard;
104 unsigned long long rhard;
105 unsigned long long bcount;
106 unsigned long long icount;
107 unsigned long long rcount;
110 offset = id * qi->qi_dqperchunk;
113 * We fed $id and DQNEXT into the xfs_qm_dqget call, which means
114 * that the actual dquot we got must either have the same id or
115 * the next higher id.
117 if (id > be32_to_cpu(d->d_id))
118 xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);
120 /* Did we get the dquot type we wanted? */
121 if (dqtype != (d->d_flags & XFS_DQ_ALLTYPES))
122 xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);
124 if (d->d_pad0 != cpu_to_be32(0) || d->d_pad != cpu_to_be16(0))
125 xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);
127 /* Check the limits. */
128 bhard = be64_to_cpu(d->d_blk_hardlimit);
129 ihard = be64_to_cpu(d->d_ino_hardlimit);
130 rhard = be64_to_cpu(d->d_rtb_hardlimit);
132 bsoft = be64_to_cpu(d->d_blk_softlimit);
133 isoft = be64_to_cpu(d->d_ino_softlimit);
134 rsoft = be64_to_cpu(d->d_rtb_softlimit);
137 * Warn if the hard limits are larger than the fs.
138 * Administrators can do this, though in production this seems
139 * suspect, which is why we flag it for review.
141 * Complain about corruption if the soft limit is greater than
144 if (bhard > mp->m_sb.sb_dblocks)
145 xfs_scrub_fblock_set_warning(sc, XFS_DATA_FORK, offset);
147 xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);
149 if (ihard > mp->m_maxicount)
150 xfs_scrub_fblock_set_warning(sc, XFS_DATA_FORK, offset);
152 xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);
154 if (rhard > mp->m_sb.sb_rblocks)
155 xfs_scrub_fblock_set_warning(sc, XFS_DATA_FORK, offset);
157 xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);
159 /* Check the resource counts. */
160 bcount = be64_to_cpu(d->d_bcount);
161 icount = be64_to_cpu(d->d_icount);
162 rcount = be64_to_cpu(d->d_rtbcount);
163 fs_icount = percpu_counter_sum(&mp->m_icount);
166 * Check that usage doesn't exceed physical limits. However, on
167 * a reflink filesystem we're allowed to exceed physical space
168 * if there are no quota limits.
170 if (xfs_sb_version_hasreflink(&mp->m_sb)) {
171 if (mp->m_sb.sb_dblocks < bcount)
172 xfs_scrub_fblock_set_warning(sc, XFS_DATA_FORK,
175 if (mp->m_sb.sb_dblocks < bcount)
176 xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK,
179 if (icount > fs_icount || rcount > mp->m_sb.sb_rblocks)
180 xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);
183 * We can violate the hard limits if the admin suddenly sets a
184 * lower limit than the actual usage. However, we flag it for
187 if (id != 0 && bhard != 0 && bcount > bhard)
188 xfs_scrub_fblock_set_warning(sc, XFS_DATA_FORK, offset);
189 if (id != 0 && ihard != 0 && icount > ihard)
190 xfs_scrub_fblock_set_warning(sc, XFS_DATA_FORK, offset);
191 if (id != 0 && rhard != 0 && rcount > rhard)
192 xfs_scrub_fblock_set_warning(sc, XFS_DATA_FORK, offset);
195 /* Scrub all of a quota type's items. */
198 struct xfs_scrub_context *sc)
200 struct xfs_bmbt_irec irec = { 0 };
201 struct xfs_mount *mp = sc->mp;
202 struct xfs_inode *ip;
203 struct xfs_quotainfo *qi = mp->m_quotainfo;
204 struct xfs_dquot *dq;
205 xfs_fileoff_t max_dqid_off;
206 xfs_fileoff_t off = 0;
212 if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
215 mutex_lock(&qi->qi_quotaofflock);
216 dqtype = xfs_scrub_quota_to_dqtype(sc);
217 if (!xfs_this_quota_on(sc->mp, dqtype)) {
219 goto out_unlock_quota;
222 /* Attach to the quota inode and set sc->ip so that reporting works. */
223 ip = xfs_quota_inode(sc->mp, dqtype);
226 /* Look for problem extents. */
227 xfs_ilock(ip, XFS_ILOCK_EXCL);
228 if (ip->i_d.di_flags & XFS_DIFLAG_REALTIME) {
229 xfs_scrub_ino_set_corrupt(sc, sc->ip->i_ino, NULL);
230 goto out_unlock_inode;
232 max_dqid_off = ((xfs_dqid_t)-1) / qi->qi_dqperchunk;
234 if (xfs_scrub_should_terminate(sc, &error))
237 off = irec.br_startoff + irec.br_blockcount;
239 error = xfs_bmapi_read(ip, off, -1, &irec, &nimaps,
241 if (!xfs_scrub_fblock_process_error(sc, XFS_DATA_FORK, off,
243 goto out_unlock_inode;
246 if (irec.br_startblock == HOLESTARTBLOCK)
249 /* Check the extent record doesn't point to crap. */
250 if (irec.br_startblock + irec.br_blockcount <=
252 xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK,
254 if (!xfs_verify_fsbno(mp, irec.br_startblock) ||
255 !xfs_verify_fsbno(mp, irec.br_startblock +
256 irec.br_blockcount - 1))
257 xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK,
261 * Unwritten extents or blocks mapped above the highest
262 * quota id shouldn't happen.
264 if (isnullstartblock(irec.br_startblock) ||
265 irec.br_startoff > max_dqid_off ||
266 irec.br_startoff + irec.br_blockcount > max_dqid_off + 1)
267 xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, off);
269 xfs_iunlock(ip, XFS_ILOCK_EXCL);
270 if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
273 /* Check all the quota items. */
274 while (id < ((xfs_dqid_t)-1ULL)) {
275 if (xfs_scrub_should_terminate(sc, &error))
278 error = xfs_qm_dqget(mp, NULL, id, dqtype, XFS_QMOPT_DQNEXT,
280 if (error == -ENOENT)
282 if (!xfs_scrub_fblock_process_error(sc, XFS_DATA_FORK,
283 id * qi->qi_dqperchunk, &error))
286 xfs_scrub_quota_item(sc, dqtype, dq, id);
288 id = be32_to_cpu(dq->q_core.d_id) + 1;
295 /* We set sc->ip earlier, so make sure we clear it now. */
298 mutex_unlock(&qi->qi_quotaofflock);
302 xfs_iunlock(ip, XFS_ILOCK_EXCL);