1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (c) 2000-2005 Silicon Graphics, Inc.
4  * All Rights Reserved.
5  */
6 #include "xfs.h"
7 #include "xfs_fs.h"
8 #include "xfs_shared.h"
9 #include "xfs_format.h"
10 #include "xfs_log_format.h"
11 #include "xfs_trans_resv.h"
12 #include "xfs_bit.h"
13 #include "xfs_sb.h"
14 #include "xfs_mount.h"
15 #include "xfs_inode.h"
16 #include "xfs_iwalk.h"
17 #include "xfs_quota.h"
18 #include "xfs_bmap.h"
19 #include "xfs_bmap_util.h"
20 #include "xfs_trans.h"
21 #include "xfs_trans_space.h"
22 #include "xfs_qm.h"
23 #include "xfs_trace.h"
24 #include "xfs_icache.h"
25 #include "xfs_error.h"
26 #include "xfs_ag.h"
27 #include "xfs_ialloc.h"
28 #include "xfs_log_priv.h"
30 /*
31 * The global quota manager. There is only one of these for the entire
32 * system, _not_ one per file system. XQM keeps track of the overall
33 * quota functionality, including maintaining the freelist and hash
34 * tables of dquots.
35 */
36 STATIC int xfs_qm_init_quotainos(struct xfs_mount *mp);
37 STATIC int xfs_qm_init_quotainfo(struct xfs_mount *mp);
39 STATIC void xfs_qm_destroy_quotainos(struct xfs_quotainfo *qi);
40 STATIC void xfs_qm_dqfree_one(struct xfs_dquot *dqp);
41 /*
42 * We use the batch lookup interface to iterate over the dquots as it
43 * currently is the only interface into the radix tree code that allows
44 * fuzzy lookups instead of exact matches. Holding the lock over multiple
45 * operations is fine as all callers are used either during mount/umount
46 * or quotaoff.
47 */
48 #define XFS_DQ_LOOKUP_BATCH 32
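/*
 * Walk all dquots of the given quota type and call @execute on each one.
 * Lookups are done in batches of XFS_DQ_LOOKUP_BATCH under qi_tree_lock,
 * and the walk bails out early if the filesystem is found to be corrupted.
 */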
50 STATIC int
51 xfs_qm_dquot_walk(
52 struct xfs_mount *mp,
53 xfs_dqtype_t type,
54 int (*execute)(struct xfs_dquot *dqp, void *data),
55 void *data)
56 {
57 struct xfs_quotainfo *qi = mp->m_quotainfo;
58 struct radix_tree_root *tree = xfs_dquot_tree(qi, type);
70 struct xfs_dquot *batch[XFS_DQ_LOOKUP_BATCH];
74 mutex_lock(&qi->qi_tree_lock);
75 nr_found = radix_tree_gang_lookup(tree, (void **)batch,
76 next_index, XFS_DQ_LOOKUP_BATCH);
78 mutex_unlock(&qi->qi_tree_lock);
82 for (i = 0; i < nr_found; i++) {
83 struct xfs_dquot *dqp = batch[i];
85 next_index = dqp->q_id + 1;
87 error = execute(batch[i], data);
88 if (error == -EAGAIN) {
92 if (error && last_error != -EFSCORRUPTED)
96 mutex_unlock(&qi->qi_tree_lock);
98 /* bail out if the filesystem is corrupted. */
99 if (last_error == -EFSCORRUPTED) {
103 /* we're done if id overflows back to zero */
117 /*
118 * Purge a dquot from all tracking data structures and free it.
119 */
120 STATIC int
121 xfs_qm_dqpurge(
122 struct xfs_dquot *dqp,
123 void *data)
124 {
125 struct xfs_quotainfo *qi = dqp->q_mount->m_quotainfo;
129 if ((dqp->q_flags & XFS_DQFLAG_FREEING) || dqp->q_nrefs != 0)
132 dqp->q_flags |= XFS_DQFLAG_FREEING;
136 /*
137 * If we are turning this type of quota off, we don't care
138 * about the dirty metadata sitting in this dquot. OTOH, if
139 * we're unmounting, we do care, so we flush it and wait.
140 */
141 if (XFS_DQ_IS_DIRTY(dqp)) {
142 struct xfs_buf *bp = NULL;
145 * We don't care about getting disk errors here. We need
146 * to purge this dquot anyway, so we go ahead regardless.
148 error = xfs_qm_dqflush(dqp, &bp);
149 if (!error) {
150 error = xfs_bwrite(bp);
151 xfs_buf_relse(bp);
152 } else if (error == -EAGAIN) {
153 dqp->q_flags &= ~XFS_DQFLAG_FREEING;
154 goto out_unlock;
155 }
156 xfs_dqflock(dqp);
157 }
159 ASSERT(atomic_read(&dqp->q_pincount) == 0);
160 ASSERT(xlog_is_shutdown(dqp->q_logitem.qli_item.li_log) ||
161 !test_bit(XFS_LI_IN_AIL, &dqp->q_logitem.qli_item.li_flags));
166 radix_tree_delete(xfs_dquot_tree(qi, xfs_dquot_type(dqp)), dqp->q_id);
170 * We move dquots to the freelist as soon as their reference count
171 * hits zero, so it really should be on the freelist here.
173 ASSERT(!list_empty(&dqp->q_lru));
174 list_lru_del(&qi->qi_lru, &dqp->q_lru);
175 XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot_unused);
177 xfs_qm_dqdestroy(dqp);
186 * Purge the dquot cache.
190 struct xfs_mount *mp)
192 xfs_qm_dquot_walk(mp, XFS_DQTYPE_USER, xfs_qm_dqpurge, NULL);
193 xfs_qm_dquot_walk(mp, XFS_DQTYPE_GROUP, xfs_qm_dqpurge, NULL);
194 xfs_qm_dquot_walk(mp, XFS_DQTYPE_PROJ, xfs_qm_dqpurge, NULL);
198 * Just destroy the quotainfo structure.
202 struct xfs_mount *mp)
204 if (mp->m_quotainfo) {
205 xfs_qm_dqpurge_all(mp);
206 xfs_qm_destroy_quotainfo(mp);
211 * Called from the vfsops layer.
214 xfs_qm_unmount_quotas(
218 * Release the dquots that root inode, et al might be holding,
219 * before we flush quotas and blow away the quotainfo structure.
221 ASSERT(mp->m_rootip);
222 xfs_qm_dqdetach(mp->m_rootip);
223 if (mp->m_rbmip)
224 xfs_qm_dqdetach(mp->m_rbmip);
225 if (mp->m_rsumip)
226 xfs_qm_dqdetach(mp->m_rsumip);
228 /*
229 * Release the quota inodes.
230 */
231 if (mp->m_quotainfo) {
232 if (mp->m_quotainfo->qi_uquotaip) {
233 xfs_irele(mp->m_quotainfo->qi_uquotaip);
234 mp->m_quotainfo->qi_uquotaip = NULL;
235 }
236 if (mp->m_quotainfo->qi_gquotaip) {
237 xfs_irele(mp->m_quotainfo->qi_gquotaip);
238 mp->m_quotainfo->qi_gquotaip = NULL;
239 }
240 if (mp->m_quotainfo->qi_pquotaip) {
241 xfs_irele(mp->m_quotainfo->qi_pquotaip);
242 mp->m_quotainfo->qi_pquotaip = NULL;
243 }
244 }
245 }
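/*
 * Attach a dquot of the given type to the inode, looking it up (and
 * optionally allocating it on disk when @doalloc is set) if it is not
 * already hanging off the inode.
 */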
247 STATIC int
248 xfs_qm_dqattach_one(
249 struct xfs_inode *ip,
250 xfs_dqtype_t type,
251 bool doalloc,
252 struct xfs_dquot **IO_idqpp)
254 struct xfs_dquot *dqp;
257 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
260 /*
261 * See if we already have it in the inode itself. IO_idqpp is &i_udquot
262 * or &i_gdquot. This made the code look weird, but made the logic a lot
263 * simpler.
264 */
267 trace_xfs_dqattach_found(dqp);
271 /*
272 * Find the dquot from somewhere. This bumps the reference count of
273 * the dquot and returns it locked. This can return ENOENT if the dquot
274 * didn't exist on disk and we didn't ask it to allocate; ESRCH if quotas
275 * got turned off suddenly.
276 */
277 error = xfs_qm_dqget_inode(ip, type, doalloc, &dqp);
281 trace_xfs_dqattach_get(dqp);
284 * dqget may have dropped and re-acquired the ilock, but it guarantees
285 * that the dquot returned is the one that should go in the inode.
292 static bool
293 xfs_qm_need_dqattach(
294 struct xfs_inode *ip)
295 {
296 struct xfs_mount *mp = ip->i_mount;
298 if (!XFS_IS_QUOTA_ON(mp))
299 return false;
300 if (!XFS_NOT_DQATTACHED(mp, ip))
301 return false;
302 if (xfs_is_quota_inode(&mp->m_sb, ip->i_ino))
303 return false;
304 return true;
305 }
307 /*
308 * Given a locked inode, attach dquot(s) to it, taking U/G/P-QUOTAON
309 * into account.
310 * If @doalloc is true, the dquot(s) will be allocated if needed.
311 * Inode may get unlocked and relocked in here, and the caller must deal with
312 * the consequences.
313 */
315 xfs_qm_dqattach_locked(
319 xfs_mount_t *mp = ip->i_mount;
322 if (!xfs_qm_need_dqattach(ip))
325 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
327 if (XFS_IS_UQUOTA_ON(mp) && !ip->i_udquot) {
328 error = xfs_qm_dqattach_one(ip, XFS_DQTYPE_USER,
329 doalloc, &ip->i_udquot);
332 ASSERT(ip->i_udquot);
335 if (XFS_IS_GQUOTA_ON(mp) && !ip->i_gdquot) {
336 error = xfs_qm_dqattach_one(ip, XFS_DQTYPE_GROUP,
337 doalloc, &ip->i_gdquot);
340 ASSERT(ip->i_gdquot);
343 if (XFS_IS_PQUOTA_ON(mp) && !ip->i_pdquot) {
344 error = xfs_qm_dqattach_one(ip, XFS_DQTYPE_PROJ,
345 doalloc, &ip->i_pdquot);
348 ASSERT(ip->i_pdquot);
353 * Don't worry about the dquots that we may have attached before any
354 * error - they'll get detached later if it has not already been done.
356 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
357 return error;
358 }
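/*
 * Convenience wrapper around xfs_qm_dqattach_locked() that takes and drops
 * the inode's ILOCK around the attach and never allocates new dquots.
 */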
360 int
361 xfs_qm_dqattach(
362 struct xfs_inode *ip)
363 {
364 int error;
366 if (!xfs_qm_need_dqattach(ip))
367 return 0;
369 xfs_ilock(ip, XFS_ILOCK_EXCL);
370 error = xfs_qm_dqattach_locked(ip, false);
371 xfs_iunlock(ip, XFS_ILOCK_EXCL);
373 return error;
374 }
376 /*
377 * Release dquots (and their references) if any.
378 * The inode should be locked EXCL except when this is called during
379 * dquot purge.
380 */
381 void
382 xfs_qm_dqdetach(
383 struct xfs_inode *ip)
384 {
385 if (!(ip->i_udquot || ip->i_gdquot || ip->i_pdquot))
386 return;
388 trace_xfs_dquot_dqdetach(ip);
390 ASSERT(!xfs_is_quota_inode(&ip->i_mount->m_sb, ip->i_ino));
391 if (ip->i_udquot) {
392 xfs_qm_dqrele(ip->i_udquot);
393 ip->i_udquot = NULL;
394 }
395 if (ip->i_gdquot) {
396 xfs_qm_dqrele(ip->i_gdquot);
397 ip->i_gdquot = NULL;
398 }
399 if (ip->i_pdquot) {
400 xfs_qm_dqrele(ip->i_pdquot);
401 ip->i_pdquot = NULL;
402 }
403 }
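/*
 * State shared by the dquot shrinker callbacks below: buffers queued for
 * delayed write and dquots isolated from the LRU for disposal.
 */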
405 struct xfs_qm_isolate {
406 struct list_head buffers;
407 struct list_head dispose;
408 };
410 static enum lru_status
411 xfs_qm_dquot_isolate(
412 struct list_head *item,
413 struct list_lru_one *lru,
414 spinlock_t *lru_lock,
415 void *arg)
416 __releases(lru_lock) __acquires(lru_lock)
417 {
418 struct xfs_dquot *dqp = container_of(item,
419 struct xfs_dquot, q_lru);
420 struct xfs_qm_isolate *isol = arg;
422 if (!xfs_dqlock_nowait(dqp))
425 /*
426 * This dquot has acquired a reference in the meantime; remove it from
427 * the freelist and try again.
428 */
429 if (dqp->q_nrefs) {
430 xfs_dqunlock(dqp);
431 XFS_STATS_INC(dqp->q_mount, xs_qm_dqwants);
433 trace_xfs_dqreclaim_want(dqp);
434 list_lru_isolate(lru, &dqp->q_lru);
435 XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot_unused);
436 return LRU_REMOVED;
437 }
439 /*
440 * If the dquot is dirty, flush it. If it's already being flushed, just
441 * skip it so there is time for the IO to complete before we try to
442 * reclaim it again on the next LRU pass.
443 */
444 if (!xfs_dqflock_nowait(dqp)) {
445 xfs_dqunlock(dqp);
446 goto out_miss_busy;
447 }
449 if (XFS_DQ_IS_DIRTY(dqp)) {
450 struct xfs_buf *bp = NULL;
453 trace_xfs_dqreclaim_dirty(dqp);
455 /* we have to drop the LRU lock to flush the dquot */
456 spin_unlock(lru_lock);
458 error = xfs_qm_dqflush(dqp, &bp);
459 if (error)
460 goto out_unlock_dirty;
462 xfs_buf_delwri_queue(bp, &isol->buffers);
463 xfs_buf_relse(bp);
464 goto out_unlock_dirty;
465 }
466 xfs_dqfunlock(dqp);
468 /*
469 * Prevent lookups now that we are past the point of no return.
470 */
471 dqp->q_flags |= XFS_DQFLAG_FREEING;
472 xfs_dqunlock(dqp);
474 ASSERT(dqp->q_nrefs == 0);
475 list_lru_isolate_move(lru, &dqp->q_lru, &isol->dispose);
476 XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot_unused);
477 trace_xfs_dqreclaim_done(dqp);
478 XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaims);
479 return LRU_REMOVED;
481 out_miss_busy:
482 trace_xfs_dqreclaim_busy(dqp);
483 XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaim_misses);
484 return LRU_SKIP;
486 out_unlock_dirty:
487 trace_xfs_dqreclaim_busy(dqp);
488 XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaim_misses);
489 xfs_dqunlock(dqp);
490 return LRU_RETRY;
491 }
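/*
 * Shrinker callbacks for the dquot LRU.  xfs_qm_shrink_scan() isolates
 * unreferenced dquots (flushing dirty ones first) and then frees them;
 * xfs_qm_shrink_count() simply reports the number of dquots on the LRU.
 */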
494 static unsigned long
495 xfs_qm_shrink_scan(
496 struct shrinker *shrink,
497 struct shrink_control *sc)
498 {
499 struct xfs_quotainfo *qi = container_of(shrink,
500 struct xfs_quotainfo, qi_shrinker);
501 struct xfs_qm_isolate isol;
502 unsigned long freed;
503 int error;
505 if ((sc->gfp_mask & (__GFP_FS|__GFP_DIRECT_RECLAIM)) != (__GFP_FS|__GFP_DIRECT_RECLAIM))
506 return 0;
508 INIT_LIST_HEAD(&isol.buffers);
509 INIT_LIST_HEAD(&isol.dispose);
511 freed = list_lru_shrink_walk(&qi->qi_lru, sc,
512 xfs_qm_dquot_isolate, &isol);
514 error = xfs_buf_delwri_submit(&isol.buffers);
515 if (error)
516 xfs_warn(NULL, "%s: dquot reclaim failed", __func__);
518 while (!list_empty(&isol.dispose)) {
519 struct xfs_dquot *dqp;
521 dqp = list_first_entry(&isol.dispose, struct xfs_dquot, q_lru);
522 list_del_init(&dqp->q_lru);
523 xfs_qm_dqfree_one(dqp);
524 }
526 return freed;
527 }
529 static unsigned long
530 xfs_qm_shrink_count(
531 struct shrinker *shrink,
532 struct shrink_control *sc)
534 struct xfs_quotainfo *qi = container_of(shrink,
535 struct xfs_quotainfo, qi_shrinker);
537 return list_lru_shrink_count(&qi->qi_lru, sc);
538 }
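/*
 * Read the default quota limits for one quota type out of the ondisk
 * id-zero dquot and cache them in the quotainfo structure.
 */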
540 STATIC void
541 xfs_qm_set_defquota(
542 struct xfs_mount *mp,
543 xfs_dqtype_t type,
544 struct xfs_quotainfo *qinf)
546 struct xfs_dquot *dqp;
547 struct xfs_def_quota *defq;
550 error = xfs_qm_dqget_uncached(mp, 0, type, &dqp);
554 defq = xfs_get_defquota(qinf, xfs_dquot_type(dqp));
556 /*
557 * Timers and warnings have already been set, let's just set the
558 * default limits for this quota type.
559 */
560 defq->blk.hard = dqp->q_blk.hardlimit;
561 defq->blk.soft = dqp->q_blk.softlimit;
562 defq->ino.hard = dqp->q_ino.hardlimit;
563 defq->ino.soft = dqp->q_ino.softlimit;
564 defq->rtb.hard = dqp->q_rtb.hardlimit;
565 defq->rtb.soft = dqp->q_rtb.softlimit;
566 xfs_qm_dqdestroy(dqp);
569 /* Initialize quota time limits from the root dquot. */
570 STATIC void
571 xfs_qm_init_timelimits(
572 struct xfs_mount *mp,
573 xfs_dqtype_t type)
574 {
575 struct xfs_quotainfo *qinf = mp->m_quotainfo;
576 struct xfs_def_quota *defq;
577 struct xfs_dquot *dqp;
580 defq = xfs_get_defquota(qinf, type);
582 defq->blk.time = XFS_QM_BTIMELIMIT;
583 defq->ino.time = XFS_QM_ITIMELIMIT;
584 defq->rtb.time = XFS_QM_RTBTIMELIMIT;
587 * We try to get the limits from the superuser's limits fields.
588 * This is quite hacky, but it is standard quota practice.
590 * Since we may not have done a quotacheck by this point, just read
591 * the dquot without attaching it to any hashtables or lists.
593 error = xfs_qm_dqget_uncached(mp, 0, type, &dqp);
597 /*
598 * The warnings and timers set the grace period given to a user
599 * or group before he or she can no longer write. If it is zero,
600 * a default is used.
601 */
602 if (dqp->q_blk.timer)
603 defq->blk.time = dqp->q_blk.timer;
604 if (dqp->q_ino.timer)
605 defq->ino.time = dqp->q_ino.timer;
606 if (dqp->q_rtb.timer)
607 defq->rtb.time = dqp->q_rtb.timer;
609 xfs_qm_dqdestroy(dqp);
610 }
612 /*
613 * This initializes all the quota information that's kept in the
614 * mount structure.
615 */
616 STATIC int
617 xfs_qm_init_quotainfo(
618 struct xfs_mount *mp)
619 {
620 struct xfs_quotainfo *qinf;
621 int error;
623 ASSERT(XFS_IS_QUOTA_ON(mp));
625 qinf = mp->m_quotainfo = kmem_zalloc(sizeof(struct xfs_quotainfo), 0);
627 error = list_lru_init(&qinf->qi_lru);
632 * See if quotainodes are setup, and if not, allocate them,
633 * and change the superblock accordingly.
635 error = xfs_qm_init_quotainos(mp);
639 INIT_RADIX_TREE(&qinf->qi_uquota_tree, GFP_NOFS);
640 INIT_RADIX_TREE(&qinf->qi_gquota_tree, GFP_NOFS);
641 INIT_RADIX_TREE(&qinf->qi_pquota_tree, GFP_NOFS);
642 mutex_init(&qinf->qi_tree_lock);
644 /* mutex used to serialize quotaoffs */
645 mutex_init(&qinf->qi_quotaofflock);
647 /* Precalc some constants */
648 qinf->qi_dqchunklen = XFS_FSB_TO_BB(mp, XFS_DQUOT_CLUSTER_SIZE_FSB);
649 qinf->qi_dqperchunk = xfs_calc_dquots_per_chunk(qinf->qi_dqchunklen);
650 if (xfs_has_bigtime(mp)) {
651 qinf->qi_expiry_min =
652 xfs_dq_bigtime_to_unix(XFS_DQ_BIGTIME_EXPIRY_MIN);
653 qinf->qi_expiry_max =
654 xfs_dq_bigtime_to_unix(XFS_DQ_BIGTIME_EXPIRY_MAX);
655 } else {
656 qinf->qi_expiry_min = XFS_DQ_LEGACY_EXPIRY_MIN;
657 qinf->qi_expiry_max = XFS_DQ_LEGACY_EXPIRY_MAX;
658 }
659 trace_xfs_quota_expiry_range(mp, qinf->qi_expiry_min,
660 qinf->qi_expiry_max);
662 mp->m_qflags |= (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_CHKD);
664 xfs_qm_init_timelimits(mp, XFS_DQTYPE_USER);
665 xfs_qm_init_timelimits(mp, XFS_DQTYPE_GROUP);
666 xfs_qm_init_timelimits(mp, XFS_DQTYPE_PROJ);
668 if (XFS_IS_UQUOTA_ON(mp))
669 xfs_qm_set_defquota(mp, XFS_DQTYPE_USER, qinf);
670 if (XFS_IS_GQUOTA_ON(mp))
671 xfs_qm_set_defquota(mp, XFS_DQTYPE_GROUP, qinf);
672 if (XFS_IS_PQUOTA_ON(mp))
673 xfs_qm_set_defquota(mp, XFS_DQTYPE_PROJ, qinf);
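/* Register the shrinker that reclaims unused dquots under memory pressure. */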
675 qinf->qi_shrinker.count_objects = xfs_qm_shrink_count;
676 qinf->qi_shrinker.scan_objects = xfs_qm_shrink_scan;
677 qinf->qi_shrinker.seeks = DEFAULT_SEEKS;
678 qinf->qi_shrinker.flags = SHRINKER_NUMA_AWARE;
680 error = register_shrinker(&qinf->qi_shrinker);
681 if (error)
682 goto out_free_inos;
684 return 0;
686 out_free_inos:
687 mutex_destroy(&qinf->qi_quotaofflock);
688 mutex_destroy(&qinf->qi_tree_lock);
689 xfs_qm_destroy_quotainos(qinf);
690 out_free_lru:
691 list_lru_destroy(&qinf->qi_lru);
692 out_free_qinf:
693 kmem_free(qinf);
694 mp->m_quotainfo = NULL;
695 return error;
696 }
698 /*
699 * Gets called when unmounting a filesystem or when all quotas get
700 * turned off.
701 * This purges the quota inodes, destroys locks and frees itself.
702 */
704 xfs_qm_destroy_quotainfo(
705 struct xfs_mount *mp)
707 struct xfs_quotainfo *qi;
709 qi = mp->m_quotainfo;
712 unregister_shrinker(&qi->qi_shrinker);
713 list_lru_destroy(&qi->qi_lru);
714 xfs_qm_destroy_quotainos(qi);
715 mutex_destroy(&qi->qi_tree_lock);
716 mutex_destroy(&qi->qi_quotaofflock);
718 mp->m_quotainfo = NULL;
721 /*
722 * Create an inode and return with a reference already taken, but unlocked.
723 * This is how we create quota inodes.
724 */
725 STATIC int
726 xfs_qm_qino_alloc(
727 struct xfs_mount *mp,
728 struct xfs_inode **ipp,
731 struct xfs_trans *tp;
733 bool need_alloc = true;
735 *ipp = NULL;
736 /*
737 * With a superblock that doesn't have a separate pquotino, we
738 * share an inode between gquota and pquota. If the on-disk
739 * superblock has GQUOTA and the filesystem is now mounted
740 * with PQUOTA, just use sb_gquotino for sb_pquotino and
741 * vice-versa.
742 */
743 if (!xfs_has_pquotino(mp) &&
744 (flags & (XFS_QMOPT_PQUOTA|XFS_QMOPT_GQUOTA))) {
745 xfs_ino_t ino = NULLFSINO;
747 if ((flags & XFS_QMOPT_PQUOTA) &&
748 (mp->m_sb.sb_gquotino != NULLFSINO)) {
749 ino = mp->m_sb.sb_gquotino;
750 if (XFS_IS_CORRUPT(mp,
751 mp->m_sb.sb_pquotino != NULLFSINO))
752 return -EFSCORRUPTED;
753 } else if ((flags & XFS_QMOPT_GQUOTA) &&
754 (mp->m_sb.sb_pquotino != NULLFSINO)) {
755 ino = mp->m_sb.sb_pquotino;
756 if (XFS_IS_CORRUPT(mp,
757 mp->m_sb.sb_gquotino != NULLFSINO))
758 return -EFSCORRUPTED;
759 }
760 if (ino != NULLFSINO) {
761 error = xfs_iget(mp, NULL, ino, 0, 0, ipp);
764 mp->m_sb.sb_gquotino = NULLFSINO;
765 mp->m_sb.sb_pquotino = NULLFSINO;
766 need_alloc = false;
767 }
768 }
770 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_create,
771 need_alloc ? XFS_QM_QINOCREATE_SPACE_RES(mp) : 0,
772 0, 0, &tp);
773 if (error)
774 return error;
776 if (need_alloc) {
777 xfs_ino_t ino;
779 error = xfs_dialloc(&tp, 0, S_IFREG, &ino);
780 if (!error)
781 error = xfs_init_new_inode(&init_user_ns, tp, NULL, ino,
782 S_IFREG, 1, 0, 0, false, ipp);
783 if (error) {
784 xfs_trans_cancel(tp);
785 return error;
786 }
787 }
790 * Make the changes in the superblock, and log those too.
791 * sbfields arg may contain fields other than *QUOTINO;
792 * VERSIONNUM for example.
794 spin_lock(&mp->m_sb_lock);
795 if (flags & XFS_QMOPT_SBVERSION) {
796 ASSERT(!xfs_has_quota(mp));
799 mp->m_sb.sb_uquotino = NULLFSINO;
800 mp->m_sb.sb_gquotino = NULLFSINO;
801 mp->m_sb.sb_pquotino = NULLFSINO;
803 /* qflags will get updated fully _after_ quotacheck */
804 mp->m_sb.sb_qflags = mp->m_qflags & XFS_ALL_QUOTA_ACCT;
806 if (flags & XFS_QMOPT_UQUOTA)
807 mp->m_sb.sb_uquotino = (*ipp)->i_ino;
808 else if (flags & XFS_QMOPT_GQUOTA)
809 mp->m_sb.sb_gquotino = (*ipp)->i_ino;
810 else
811 mp->m_sb.sb_pquotino = (*ipp)->i_ino;
812 spin_unlock(&mp->m_sb_lock);
815 error = xfs_trans_commit(tp);
817 ASSERT(xfs_is_shutdown(mp));
818 xfs_alert(mp, "%s failed (error %d)!", __func__, error);
819 }
820 if (need_alloc)
821 xfs_finish_inode_setup(*ipp);
822 return error;
823 }
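/*
 * Zero out the counters, timers and warnings in every dquot in this buffer
 * so that quotacheck can start accounting from a clean slate, repairing any
 * dqblk that fails verification along the way.
 */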
826 STATIC void
827 xfs_qm_reset_dqcounts(
828 struct xfs_mount *mp,
829 struct xfs_buf *bp,
830 xfs_dqid_t id,
831 xfs_dqtype_t type)
832 {
833 struct xfs_dqblk *dqb;
834 int j;
836 trace_xfs_reset_dqcounts(bp, _RET_IP_);
838 /*
839 * Reset all counters and timers. They'll be
840 * started afresh by xfs_qm_quotacheck.
841 */
842 #ifdef DEBUG
843 j = (int)XFS_FSB_TO_B(mp, XFS_DQUOT_CLUSTER_SIZE_FSB) /
844 sizeof(struct xfs_dqblk);
845 ASSERT(mp->m_quotainfo->qi_dqperchunk == j);
846 #endif
847 dqb = bp->b_addr;
848 for (j = 0; j < mp->m_quotainfo->qi_dqperchunk; j++) {
849 struct xfs_disk_dquot *ddq;
851 ddq = (struct xfs_disk_dquot *)&dqb[j];
853 /*
854 * Do a sanity check, and if needed, repair the dqblk. Don't
855 * output any warnings because it's perfectly possible to
856 * find uninitialised dquot blks. See comment in
857 * xfs_dqblk_verify().
858 */
859 if (xfs_dqblk_verify(mp, &dqb[j], id + j) ||
860 (dqb[j].dd_diskdq.d_type & XFS_DQTYPE_REC_MASK) != type)
861 xfs_dqblk_repair(mp, &dqb[j], id + j, type);
863 /*
864 * Reset type in case we are reusing group quota file for
865 * project quotas or vice versa.
866 */
867 ddq->d_type = type;
873 * dquot id 0 stores the default grace period and the maximum
874 * warning limit that were set by the administrator, so we
875 * should not reset them.
877 if (ddq->d_id != 0) {
884 if (xfs_has_bigtime(mp))
885 ddq->d_type |= XFS_DQTYPE_BIGTIME;
888 if (xfs_has_crc(mp)) {
889 xfs_update_cksum((char *)&dqb[j],
890 sizeof(struct xfs_dqblk),
897 xfs_qm_reset_dqcounts_all(
898 struct xfs_mount *mp,
901 xfs_filblks_t blkcnt,
903 struct list_head *buffer_list)
911 * Blkcnt arg can be a very big number, and might even be
912 * larger than the log itself. So, we have to break it up into
913 * manageable-sized transactions.
914 * Note that we don't start a permanent transaction here; we might
915 * not be able to get a log reservation for the whole thing up front,
916 * and we don't really care to either, because we just discard
917 * everything if we were to crash in the middle of this loop.
920 error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
921 XFS_FSB_TO_DADDR(mp, bno),
922 mp->m_quotainfo->qi_dqchunklen, 0, &bp,
925 /*
926 * CRC and validation errors will return an EFSCORRUPTED here. If
927 * this occurs, re-read without CRC validation so that we can
928 * repair the damage via xfs_qm_reset_dqcounts(). This process
929 * will leave a trace in the log indicating corruption has
930 * been detected.
931 */
932 if (error == -EFSCORRUPTED) {
933 error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
934 XFS_FSB_TO_DADDR(mp, bno),
935 mp->m_quotainfo->qi_dqchunklen, 0, &bp,
942 /*
943 * A corrupt buffer might not have a verifier attached, so
944 * make sure we have the correct one attached before writeback
945 * occurs.
946 */
947 bp->b_ops = &xfs_dquot_buf_ops;
948 xfs_qm_reset_dqcounts(mp, bp, firstid, type);
949 xfs_buf_delwri_queue(bp, buffer_list);
950 xfs_buf_relse(bp);
952 /* go to the next block. */
953 bno++;
954 firstid += mp->m_quotainfo->qi_dqperchunk;
955 }
957 return error;
958 }
961 * Iterate over all allocated dquot blocks in this quota inode, zeroing all
962 * counters for every chunk of dquots that we find.
965 xfs_qm_reset_dqcounts_buf(
966 struct xfs_mount *mp,
967 struct xfs_inode *qip,
969 struct list_head *buffer_list)
971 struct xfs_bmbt_irec *map;
972 int i, nmaps; /* number of map entries */
973 int error; /* return value */
974 xfs_fileoff_t lblkno;
975 xfs_filblks_t maxlblkcnt;
977 xfs_fsblock_t rablkno;
978 xfs_filblks_t rablkcnt;
982 * This looks racy, but we can't keep an inode lock across a
983 * trans_reserve. But, this gets called during quotacheck, and that
984 * happens only at mount time which is single threaded.
986 if (qip->i_nblocks == 0)
989 map = kmem_alloc(XFS_DQITER_MAP_SIZE * sizeof(*map), 0);
992 maxlblkcnt = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
996 nmaps = XFS_DQITER_MAP_SIZE;
998 * We aren't changing the inode itself. Just changing
999 * some of its data. No new blocks are added here, and
1000 * the inode is never added to the transaction.
1002 lock_mode = xfs_ilock_data_map_shared(qip);
1003 error = xfs_bmapi_read(qip, lblkno, maxlblkcnt - lblkno,
1005 xfs_iunlock(qip, lock_mode);
1009 ASSERT(nmaps <= XFS_DQITER_MAP_SIZE);
1010 for (i = 0; i < nmaps; i++) {
1011 ASSERT(map[i].br_startblock != DELAYSTARTBLOCK);
1012 ASSERT(map[i].br_blockcount);
1015 lblkno += map[i].br_blockcount;
1017 if (map[i].br_startblock == HOLESTARTBLOCK)
1020 firstid = (xfs_dqid_t) map[i].br_startoff *
1021 mp->m_quotainfo->qi_dqperchunk;
1023 * Do a read-ahead on the next extent.
1025 if ((i+1 < nmaps) &&
1026 (map[i+1].br_startblock != HOLESTARTBLOCK)) {
1027 rablkcnt = map[i+1].br_blockcount;
1028 rablkno = map[i+1].br_startblock;
1029 while (rablkcnt--) {
1030 xfs_buf_readahead(mp->m_ddev_targp,
1031 XFS_FSB_TO_DADDR(mp, rablkno),
1032 mp->m_quotainfo->qi_dqchunklen,
1033 &xfs_dquot_buf_ops);
1038 * Iterate thru all the blks in the extent and
1039 * reset the counters of all the dquots inside them.
1041 error = xfs_qm_reset_dqcounts_all(mp, firstid,
1042 map[i].br_startblock,
1043 map[i].br_blockcount,
1044 type, buffer_list);
1047 }
1048 } while (nmaps > 0);
1051 kmem_free(map);
1052 return error;
1053 }
1055 /*
1056 * Called by dqusage_adjust in doing a quotacheck.
1057 *
1058 * Given the inode, and a dquot id, this updates both the incore dquot as well
1059 * as the buffer copy. This is so that once the quotacheck is done, we can
1060 * just log all the buffers, as opposed to logging numerous updates to
1061 * individual dquots.
1062 */
1063 STATIC int
1064 xfs_qm_quotacheck_dqadjust(
1065 struct xfs_inode *ip,
1066 xfs_dqtype_t type,
1067 xfs_qcnt_t nblks,
1068 xfs_qcnt_t rtblks)
1069 {
1070 struct xfs_mount *mp = ip->i_mount;
1071 struct xfs_dquot *dqp;
1072 xfs_dqid_t id;
1073 int error;
1075 id = xfs_qm_id_for_quotatype(ip, type);
1076 error = xfs_qm_dqget(mp, id, type, true, &dqp);
1077 if (error) {
1078 /*
1079 * Shouldn't be able to turn off quotas here.
1080 */
1081 ASSERT(error != -ESRCH);
1082 ASSERT(error != -ENOENT);
1083 return error;
1084 }
1086 trace_xfs_dqadjust(dqp);
1088 /*
1089 * Adjust the inode count and the block count to reflect this inode's
1090 * resource usage.
1091 */
1092 dqp->q_ino.count++;
1093 dqp->q_ino.reserved++;
1095 dqp->q_blk.count += nblks;
1096 dqp->q_blk.reserved += nblks;
1099 dqp->q_rtb.count += rtblks;
1100 dqp->q_rtb.reserved += rtblks;
1103 /*
1104 * Set default limits, adjust timers (since we changed usages)
1105 *
1106 * There are no timers for the default values set in the root dquot.
1107 */
1108 if (dqp->q_id) {
1109 xfs_qm_adjust_dqlimits(dqp);
1110 xfs_qm_adjust_dqtimers(dqp);
1111 }
1113 dqp->q_flags |= XFS_DQFLAG_DIRTY;
1114 xfs_qm_dqput(dqp);
1115 return 0;
1116 }
1118 /*
1119 * callback routine supplied to bulkstat(). Given an inumber, find its
1120 * dquots and update them to account for resources taken by that inode.
1121 */
1123 STATIC int
1124 xfs_qm_dqusage_adjust(
1125 struct xfs_mount *mp,
1126 struct xfs_trans *tp,
1127 xfs_ino_t ino,
1128 void *data)
1129 {
1130 struct xfs_inode *ip;
1131 xfs_qcnt_t nblks;
1132 xfs_filblks_t rtblks = 0; /* total rt blks */
1133 int error;
1135 ASSERT(XFS_IS_QUOTA_ON(mp));
1137 /*
1138 * rootino must have its resources accounted for, not so with the quota
1139 * inodes.
1140 */
1141 if (xfs_is_quota_inode(&mp->m_sb, ino))
1142 return 0;
1145 * We don't _need_ to take the ilock EXCL here because quotacheck runs
1146 * at mount time and therefore nobody will be racing chown/chproj.
1148 error = xfs_iget(mp, tp, ino, XFS_IGET_DONTCACHE, 0, &ip);
1149 if (error == -EINVAL || error == -ENOENT)
1154 ASSERT(ip->i_delayed_blks == 0);
1156 if (XFS_IS_REALTIME_INODE(ip)) {
1157 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
1159 error = xfs_iread_extents(tp, ip, XFS_DATA_FORK);
1163 xfs_bmap_count_leaves(ifp, &rtblks);
1166 nblks = (xfs_qcnt_t)ip->i_nblocks - rtblks;
1169 * Add the (disk blocks and inode) resources occupied by this
1170 * inode to its dquots. We do this adjustment in the incore dquot,
1171 * and also copy the changes to its buffer.
1172 * We don't care about putting these changes in a transaction
1173 * envelope because if we crash in the middle of a 'quotacheck'
1174 * we have to start from the beginning anyway.
1175 * Once we're done, we'll log all the dquot bufs.
1177 * The *QUOTA_ON checks below may look pretty racy, but quotachecks
1178 * and quotaoffs don't race. (Quotachecks happen at mount time only).
1180 if (XFS_IS_UQUOTA_ON(mp)) {
1181 error = xfs_qm_quotacheck_dqadjust(ip, XFS_DQTYPE_USER, nblks,
1182 rtblks);
1185 }
1187 if (XFS_IS_GQUOTA_ON(mp)) {
1188 error = xfs_qm_quotacheck_dqadjust(ip, XFS_DQTYPE_GROUP, nblks,
1189 rtblks);
1192 }
1194 if (XFS_IS_PQUOTA_ON(mp)) {
1195 error = xfs_qm_quotacheck_dqadjust(ip, XFS_DQTYPE_PROJ, nblks,
1196 rtblks);
1199 }
1202 xfs_irele(ip);
1203 return error;
1204 }
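/*
 * Write a single dirty dquot back to its backing buffer and queue that
 * buffer for delayed write; used by quotacheck once the incore counts
 * have been rebuilt.
 */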
1206 STATIC int
1207 xfs_qm_flush_one(
1208 struct xfs_dquot *dqp,
1209 void *data)
1210 {
1211 struct xfs_mount *mp = dqp->q_mount;
1212 struct list_head *buffer_list = data;
1213 struct xfs_buf *bp = NULL;
1217 if (dqp->q_flags & XFS_DQFLAG_FREEING)
1219 if (!XFS_DQ_IS_DIRTY(dqp))
1223 * The only way the dquot is already flush locked by the time quotacheck
1224 * gets here is if reclaim flushed it before the dqadjust walk dirtied
1225 * it for the final time. Quotacheck collects all dquot bufs in the
1226 * local delwri queue before dquots are dirtied, so reclaim can't have
1227 * possibly queued it for I/O. The only way out is to push the buffer to
1228 * cycle the flush lock.
1230 if (!xfs_dqflock_nowait(dqp)) {
1231 /* buf is pinned in-core by delwri list */
1232 bp = xfs_buf_incore(mp->m_ddev_targp, dqp->q_blkno,
1233 mp->m_quotainfo->qi_dqchunklen, 0);
1240 xfs_buf_delwri_pushbuf(bp, buffer_list);
1247 error = xfs_qm_dqflush(dqp, &bp);
1248 if (error)
1249 goto out_unlock;
1251 xfs_buf_delwri_queue(bp, buffer_list);
1252 xfs_buf_relse(bp);
1253 out_unlock:
1254 xfs_dqunlock(dqp);
1255 return error;
1256 }
1258 /*
1259 * Walk thru all the filesystem inodes and construct a consistent view
1260 * of the disk quota world. If the quotacheck fails, disable quotas.
1261 */
1262 STATIC int
1263 xfs_qm_quotacheck(
1264 xfs_mount_t *mp)
1265 {
1266 int error, error2;
1267 uint flags;
1268 LIST_HEAD (buffer_list);
1269 struct xfs_inode *uip = mp->m_quotainfo->qi_uquotaip;
1270 struct xfs_inode *gip = mp->m_quotainfo->qi_gquotaip;
1271 struct xfs_inode *pip = mp->m_quotainfo->qi_pquotaip;
1275 ASSERT(uip || gip || pip);
1276 ASSERT(XFS_IS_QUOTA_ON(mp));
1278 xfs_notice(mp, "Quotacheck needed: Please wait.");
1280 /*
1281 * First we go thru all the dquots on disk, USR and GRP/PRJ, and reset
1282 * their counters to zero. We need a clean slate.
1283 * We don't log our changes till later.
1284 */
1285 if (uip) {
1286 error = xfs_qm_reset_dqcounts_buf(mp, uip, XFS_DQTYPE_USER,
1287 &buffer_list);
1290 flags |= XFS_UQUOTA_CHKD;
1291 }
1293 if (gip) {
1294 error = xfs_qm_reset_dqcounts_buf(mp, gip, XFS_DQTYPE_GROUP,
1295 &buffer_list);
1298 flags |= XFS_GQUOTA_CHKD;
1299 }
1301 if (pip) {
1302 error = xfs_qm_reset_dqcounts_buf(mp, pip, XFS_DQTYPE_PROJ,
1303 &buffer_list);
1306 flags |= XFS_PQUOTA_CHKD;
1307 }
1309 error = xfs_iwalk_threaded(mp, 0, 0, xfs_qm_dqusage_adjust, 0, true,
1310 NULL);
1311 if (error)
1312 goto error_return;
1315 * We've made all the changes that we need to make incore. Flush them
1316 * down to disk buffers if everything was updated successfully.
1318 if (XFS_IS_UQUOTA_ON(mp)) {
1319 error = xfs_qm_dquot_walk(mp, XFS_DQTYPE_USER, xfs_qm_flush_one,
1320 &buffer_list);
1321 }
1322 if (XFS_IS_GQUOTA_ON(mp)) {
1323 error2 = xfs_qm_dquot_walk(mp, XFS_DQTYPE_GROUP, xfs_qm_flush_one,
1324 &buffer_list);
1325 if (!error)
1326 error = error2;
1327 }
1328 if (XFS_IS_PQUOTA_ON(mp)) {
1329 error2 = xfs_qm_dquot_walk(mp, XFS_DQTYPE_PROJ, xfs_qm_flush_one,
1330 &buffer_list);
1331 if (!error)
1332 error = error2;
1333 }
1335 error2 = xfs_buf_delwri_submit(&buffer_list);
1339 /*
1340 * We can get this error if we couldn't do a dquot allocation inside
1341 * xfs_qm_dqusage_adjust (via bulkstat). We don't care about the
1342 * dirty dquots that might be cached, we just want to get rid of them
1343 * and turn quotaoff. The dquots won't be attached to any of the inodes
1344 * at this point (because we intentionally didn't in dqget_noattach).
1345 */
1346 if (error) {
1347 xfs_qm_dqpurge_all(mp);
1348 goto error_return;
1349 }
1351 /*
1352 * If one type of quota is off, then it will lose its
1353 * quotachecked status, since we won't be doing accounting for
1354 * that type anymore.
1355 */
1356 mp->m_qflags &= ~XFS_ALL_QUOTA_CHKD;
1357 mp->m_qflags |= flags;
1359 error_return:
1360 xfs_buf_delwri_cancel(&buffer_list);
1362 if (error) {
1363 xfs_warn(mp,
1364 "Quotacheck: Unsuccessful (Error %d): Disabling quotas.",
1365 error);
1366 /*
1367 * We must turn off quotas.
1368 */
1369 ASSERT(mp->m_quotainfo != NULL);
1370 xfs_qm_destroy_quotainfo(mp);
1371 if (xfs_mount_reset_sbqflags(mp)) {
1372 xfs_warn(mp,
1373 "Quotacheck: Failed to reset quota flags.");
1374 }
1375 } else
1376 xfs_notice(mp, "Quotacheck: Done.");
1377 return error;
1378 }
1380 /*
1381 * This is called from xfs_mountfs to start quotas and initialize all
1382 * necessary data structures like quotainfo. This is also responsible for
1383 * running a quotacheck as necessary. We are guaranteed that the superblock
1384 * is consistently read in at this point.
1385 *
1386 * If we fail here, the mount will continue with quota turned off. We don't
1387 * need to indicate success or failure at all.
1388 */
1389 void
1390 xfs_qm_mount_quotas(
1391 struct xfs_mount *mp)
1392 {
1393 int error = 0;
1394 uint sbf;
1396 /*
1397 * If quotas on realtime volumes are not supported, we disable
1398 * quotas immediately.
1399 */
1400 if (mp->m_sb.sb_rextents) {
1401 xfs_notice(mp, "Cannot turn on quotas for realtime filesystem");
1402 mp->m_qflags = 0;
1403 goto write_changes;
1404 }
1406 ASSERT(XFS_IS_QUOTA_ON(mp));
1409 * Allocate the quotainfo structure inside the mount struct, and
1410 * create quotainode(s), and change/rev superblock if necessary.
1412 error = xfs_qm_init_quotainfo(mp);
1413 if (error) {
1414 /*
1415 * We must turn off quotas.
1416 */
1417 ASSERT(mp->m_quotainfo == NULL);
1418 mp->m_qflags = 0;
1419 goto write_changes;
1420 }
1422 * If any of the quotas are not consistent, do a quotacheck.
1424 if (XFS_QM_NEED_QUOTACHECK(mp)) {
1425 error = xfs_qm_quotacheck(mp);
1426 if (error) {
1427 /* Quotacheck failed and disabled quotas. */
1428 return;
1429 }
1430 }
1431 /*
1432 * If one type of quota is off, then it will lose its
1433 * quotachecked status, since we won't be doing accounting for
1434 * that type anymore.
1435 */
1436 if (!XFS_IS_UQUOTA_ON(mp))
1437 mp->m_qflags &= ~XFS_UQUOTA_CHKD;
1438 if (!XFS_IS_GQUOTA_ON(mp))
1439 mp->m_qflags &= ~XFS_GQUOTA_CHKD;
1440 if (!XFS_IS_PQUOTA_ON(mp))
1441 mp->m_qflags &= ~XFS_PQUOTA_CHKD;
1443 write_changes:
1444 /*
1445 * We actually don't have to acquire the m_sb_lock at all.
1446 * This can only be called from mount, and that's single threaded. XXX
1447 */
1448 spin_lock(&mp->m_sb_lock);
1449 sbf = mp->m_sb.sb_qflags;
1450 mp->m_sb.sb_qflags = mp->m_qflags & XFS_MOUNT_QUOTA_ALL;
1451 spin_unlock(&mp->m_sb_lock);
1453 if (sbf != (mp->m_qflags & XFS_MOUNT_QUOTA_ALL)) {
1454 if (xfs_sync_sb(mp, false)) {
1455 /*
1456 * We could only have been turning quotas off.
1457 * We aren't in very good shape actually because
1458 * the incore structures are convinced that quotas are
1459 * off, but the on disk superblock doesn't know that!
1460 */
1461 ASSERT(!(XFS_IS_QUOTA_ON(mp)));
1462 xfs_alert(mp, "%s: Superblock update failed!",
1463 __func__);
1464 }
1465 }
1467 if (error)
1468 xfs_warn(mp, "Failed to initialize disk quotas.");
1469 }
1474 * This is called after the superblock has been read in and we're ready to
1475 * iget the quota inodes.
1477 STATIC int
1478 xfs_qm_init_quotainos(
1479 struct xfs_mount *mp)
1480 {
1481 struct xfs_inode *uip = NULL;
1482 struct xfs_inode *gip = NULL;
1483 struct xfs_inode *pip = NULL;
1487 ASSERT(mp->m_quotainfo);
1490 * Get the uquota and gquota inodes
1492 if (xfs_has_quota(mp)) {
1493 if (XFS_IS_UQUOTA_ON(mp) &&
1494 mp->m_sb.sb_uquotino != NULLFSINO) {
1495 ASSERT(mp->m_sb.sb_uquotino > 0);
1496 error = xfs_iget(mp, NULL, mp->m_sb.sb_uquotino,
1501 if (XFS_IS_GQUOTA_ON(mp) &&
1502 mp->m_sb.sb_gquotino != NULLFSINO) {
1503 ASSERT(mp->m_sb.sb_gquotino > 0);
1504 error = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino,
1509 if (XFS_IS_PQUOTA_ON(mp) &&
1510 mp->m_sb.sb_pquotino != NULLFSINO) {
1511 ASSERT(mp->m_sb.sb_pquotino > 0);
1512 error = xfs_iget(mp, NULL, mp->m_sb.sb_pquotino,
1518 flags |= XFS_QMOPT_SBVERSION;
1522 * Create the three inodes, if they don't exist already. The changes
1523 * made above will get added to a transaction and logged in one of
1524 * the qino_alloc calls below. If the device is readonly,
1525 * temporarily switch to read-write to do this.
1527 if (XFS_IS_UQUOTA_ON(mp) && uip == NULL) {
1528 error = xfs_qm_qino_alloc(mp, &uip,
1529 flags | XFS_QMOPT_UQUOTA);
1533 flags &= ~XFS_QMOPT_SBVERSION;
1535 if (XFS_IS_GQUOTA_ON(mp) && gip == NULL) {
1536 error = xfs_qm_qino_alloc(mp, &gip,
1537 flags | XFS_QMOPT_GQUOTA);
1541 flags &= ~XFS_QMOPT_SBVERSION;
1543 if (XFS_IS_PQUOTA_ON(mp) && pip == NULL) {
1544 error = xfs_qm_qino_alloc(mp, &pip,
1545 flags | XFS_QMOPT_PQUOTA);
1550 mp->m_quotainfo->qi_uquotaip = uip;
1551 mp->m_quotainfo->qi_gquotaip = gip;
1552 mp->m_quotainfo->qi_pquotaip = pip;
1554 return 0;
1566 STATIC void
1567 xfs_qm_destroy_quotainos(
1568 struct xfs_quotainfo *qi)
1570 if (qi->qi_uquotaip) {
1571 xfs_irele(qi->qi_uquotaip);
1572 qi->qi_uquotaip = NULL; /* paranoia */
1574 if (qi->qi_gquotaip) {
1575 xfs_irele(qi->qi_gquotaip);
1576 qi->qi_gquotaip = NULL;
1578 if (qi->qi_pquotaip) {
1579 xfs_irele(qi->qi_pquotaip);
1580 qi->qi_pquotaip = NULL;
1581 }
1582 }
1584 STATIC void
1585 xfs_qm_dqfree_one(
1586 struct xfs_dquot *dqp)
1587 {
1588 struct xfs_mount *mp = dqp->q_mount;
1589 struct xfs_quotainfo *qi = mp->m_quotainfo;
1591 mutex_lock(&qi->qi_tree_lock);
1592 radix_tree_delete(xfs_dquot_tree(qi, xfs_dquot_type(dqp)), dqp->q_id);
1595 mutex_unlock(&qi->qi_tree_lock);
1597 xfs_qm_dqdestroy(dqp);
1600 /* --------------- utility functions for vnodeops ---------------- */
1603 /*
1604 * Given an inode, a uid, gid and prid, make sure that we have
1605 * allocated relevant dquot(s) on disk, and that we won't exceed inode
1606 * quotas by creating this file.
1607 * This also attaches dquot(s) to the given inode after locking it,
1608 * and returns the dquots corresponding to the uid and/or gid.
1609 *
1610 * in : inode (unlocked)
1611 * out : udquot, gdquot with references taken and unlocked
1612 */
1613 int
1614 xfs_qm_vop_dqalloc(
1615 struct xfs_inode *ip,
1616 kuid_t uid,
1617 kgid_t gid,
1618 prid_t prid,
1619 uint flags,
1620 struct xfs_dquot **O_udqpp,
1621 struct xfs_dquot **O_gdqpp,
1622 struct xfs_dquot **O_pdqpp)
1624 struct xfs_mount *mp = ip->i_mount;
1625 struct inode *inode = VFS_I(ip);
1626 struct user_namespace *user_ns = inode->i_sb->s_user_ns;
1627 struct xfs_dquot *uq = NULL;
1628 struct xfs_dquot *gq = NULL;
1629 struct xfs_dquot *pq = NULL;
1633 if (!XFS_IS_QUOTA_ON(mp))
1636 lockflags = XFS_ILOCK_EXCL;
1637 xfs_ilock(ip, lockflags);
1639 if ((flags & XFS_QMOPT_INHERIT) && XFS_INHERIT_GID(ip))
1640 gid = inode->i_gid;
1643 * Attach the dquot(s) to this inode, doing a dquot allocation
1644 * if necessary. The dquot(s) will not be locked.
1646 if (XFS_NOT_DQATTACHED(mp, ip)) {
1647 error = xfs_qm_dqattach_locked(ip, true);
1648 if (error) {
1649 xfs_iunlock(ip, lockflags);
1650 return error;
1651 }
1652 }
1654 if ((flags & XFS_QMOPT_UQUOTA) && XFS_IS_UQUOTA_ON(mp)) {
1656 if (!uid_eq(inode->i_uid, uid)) {
1657 /*
1658 * What we need is the dquot that has this uid, and
1659 * if we send the inode to dqget, the uid of the inode
1660 * takes priority over what's sent in the uid argument.
1661 * We must unlock inode here before calling dqget if
1662 * we're not sending the inode, because otherwise
1663 * we'll deadlock by doing trans_reserve while
1664 * holding ilock.
1665 */
1666 xfs_iunlock(ip, lockflags);
1667 error = xfs_qm_dqget(mp, from_kuid(user_ns, uid),
1668 XFS_DQTYPE_USER, true, &uq);
1670 ASSERT(error != -ENOENT);
1674 * Get the ilock in the right order.
1677 lockflags = XFS_ILOCK_SHARED;
1678 xfs_ilock(ip, lockflags);
1681 * Take an extra reference, because we'll return
1684 ASSERT(ip->i_udquot);
1685 uq = xfs_qm_dqhold(ip->i_udquot);
1688 if ((flags & XFS_QMOPT_GQUOTA) && XFS_IS_GQUOTA_ON(mp)) {
1690 if (!gid_eq(inode->i_gid, gid)) {
1691 xfs_iunlock(ip, lockflags);
1692 error = xfs_qm_dqget(mp, from_kgid(user_ns, gid),
1693 XFS_DQTYPE_GROUP, true, &gq);
1695 ASSERT(error != -ENOENT);
1699 lockflags = XFS_ILOCK_SHARED;
1700 xfs_ilock(ip, lockflags);
1702 ASSERT(ip->i_gdquot);
1703 gq = xfs_qm_dqhold(ip->i_gdquot);
1706 if ((flags & XFS_QMOPT_PQUOTA) && XFS_IS_PQUOTA_ON(mp)) {
1708 if (ip->i_projid != prid) {
1709 xfs_iunlock(ip, lockflags);
1710 error = xfs_qm_dqget(mp, prid,
1711 XFS_DQTYPE_PROJ, true, &pq);
1713 ASSERT(error != -ENOENT);
1717 lockflags = XFS_ILOCK_SHARED;
1718 xfs_ilock(ip, lockflags);
1720 ASSERT(ip->i_pdquot);
1721 pq = xfs_qm_dqhold(ip->i_pdquot);
1724 trace_xfs_dquot_dqalloc(ip);
1726 xfs_iunlock(ip, lockflags);
1748 * Actually transfer ownership, and do dquot modifications.
1749 * These were already reserved.
1750 */
1751 struct xfs_dquot *
1752 xfs_qm_vop_chown(
1753 struct xfs_trans *tp,
1754 struct xfs_inode *ip,
1755 struct xfs_dquot **IO_olddq,
1756 struct xfs_dquot *newdq)
1758 struct xfs_dquot *prevdq;
1759 uint bfield = XFS_IS_REALTIME_INODE(ip) ?
1760 XFS_TRANS_DQ_RTBCOUNT : XFS_TRANS_DQ_BCOUNT;
1763 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
1764 ASSERT(XFS_IS_QUOTA_ON(ip->i_mount));
1766 /* old dquot */
1767 prevdq = *IO_olddq;
1768 ASSERT(prevdq);
1769 ASSERT(prevdq != newdq);
1771 xfs_trans_mod_dquot(tp, prevdq, bfield, -(ip->i_nblocks));
1772 xfs_trans_mod_dquot(tp, prevdq, XFS_TRANS_DQ_ICOUNT, -1);
1774 /* the sparkling new dquot */
1775 xfs_trans_mod_dquot(tp, newdq, bfield, ip->i_nblocks);
1776 xfs_trans_mod_dquot(tp, newdq, XFS_TRANS_DQ_ICOUNT, 1);
1779 * Back when we made quota reservations for the chown, we reserved the
1780 * ondisk blocks + delalloc blocks with the new dquot. Now that we've
1781 * switched the dquots, decrease the new dquot's block reservation
1782 * (having already bumped up the real counter) so that we don't have
1783 * any reservation to give back when we commit.
1785 xfs_trans_mod_dquot(tp, newdq, XFS_TRANS_DQ_RES_BLKS,
1786 -ip->i_delayed_blks);
1788 /*
1789 * Give the incore reservation for delalloc blocks back to the old
1790 * dquot. We don't normally handle delalloc quota reservations
1791 * transactionally, so just lock the dquot and subtract from the
1792 * reservation. Dirty the transaction because it's too late to turn
1793 * back now.
1794 */
1795 tp->t_flags |= XFS_TRANS_DIRTY;
1797 ASSERT(prevdq->q_blk.reserved >= ip->i_delayed_blks);
1798 prevdq->q_blk.reserved -= ip->i_delayed_blks;
1799 xfs_dqunlock(prevdq);
1801 /*
1802 * Take an extra reference, because the inode is going to keep
1803 * this dquot pointer even after the trans_commit.
1804 */
1805 *IO_olddq = xfs_qm_dqhold(newdq);
1807 return prevdq;
1808 }
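/*
 * Make sure every distinct inode involved in a rename has its dquots
 * attached, skipping duplicate entries in the table.
 */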
1810 int
1811 xfs_qm_vop_rename_dqattach(
1812 struct xfs_inode **i_tab)
1813 {
1814 struct xfs_mount *mp = i_tab[0]->i_mount;
1815 int i;
1817 if (!XFS_IS_QUOTA_ON(mp))
1818 return 0;
1820 for (i = 0; (i < 4 && i_tab[i]); i++) {
1821 struct xfs_inode *ip = i_tab[i];
1822 int error;
1824 /*
1825 * Watch out for duplicate entries in the table.
1826 */
1825 * Watch out for duplicate entries in the table.
1827 if (i == 0 || ip != i_tab[i-1]) {
1828 if (XFS_NOT_DQATTACHED(mp, ip)) {
1829 error = xfs_qm_dqattach(ip);
1830 if (error)
1831 return error;
1832 }
1833 }
1834 }
1835 return 0;
1836 }
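/*
 * Attach the dquots that were reserved for a newly created inode and bump
 * their inode counts in the transaction.
 */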
1838 void
1839 xfs_qm_vop_create_dqattach(
1840 struct xfs_trans *tp,
1841 struct xfs_inode *ip,
1842 struct xfs_dquot *udqp,
1843 struct xfs_dquot *gdqp,
1844 struct xfs_dquot *pdqp)
1846 struct xfs_mount *mp = tp->t_mountp;
1848 if (!XFS_IS_QUOTA_ON(mp))
1851 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
1853 if (udqp && XFS_IS_UQUOTA_ON(mp)) {
1854 ASSERT(ip->i_udquot == NULL);
1855 ASSERT(i_uid_read(VFS_I(ip)) == udqp->q_id);
1857 ip->i_udquot = xfs_qm_dqhold(udqp);
1858 xfs_trans_mod_dquot(tp, udqp, XFS_TRANS_DQ_ICOUNT, 1);
1860 if (gdqp && XFS_IS_GQUOTA_ON(mp)) {
1861 ASSERT(ip->i_gdquot == NULL);
1862 ASSERT(i_gid_read(VFS_I(ip)) == gdqp->q_id);
1864 ip->i_gdquot = xfs_qm_dqhold(gdqp);
1865 xfs_trans_mod_dquot(tp, gdqp, XFS_TRANS_DQ_ICOUNT, 1);
1867 if (pdqp && XFS_IS_PQUOTA_ON(mp)) {
1868 ASSERT(ip->i_pdquot == NULL);
1869 ASSERT(ip->i_projid == pdqp->q_id);
1871 ip->i_pdquot = xfs_qm_dqhold(pdqp);
1872 xfs_trans_mod_dquot(tp, pdqp, XFS_TRANS_DQ_ICOUNT, 1);
1873 }
1874 }
1876 /* Decide if this inode's dquot is near an enforcement boundary. */
1878 xfs_inode_near_dquot_enforcement(
1879 struct xfs_inode *ip,
1882 struct xfs_dquot *dqp;
1885 /* We only care for quotas that are enabled and enforced. */
1886 dqp = xfs_inode_dquot(ip, type);
1887 if (!dqp || !xfs_dquot_is_enforced(dqp))
1890 if (xfs_dquot_res_over_limits(&dqp->q_ino) ||
1891 xfs_dquot_res_over_limits(&dqp->q_rtb))
1894 /* For space on the data device, check the various thresholds. */
1895 if (!dqp->q_prealloc_hi_wmark)
1898 if (dqp->q_blk.reserved < dqp->q_prealloc_lo_wmark)
1901 if (dqp->q_blk.reserved >= dqp->q_prealloc_hi_wmark)
1904 freesp = dqp->q_prealloc_hi_wmark - dqp->q_blk.reserved;
1905 if (freesp < dqp->q_low_space[XFS_QLOWSP_5_PCNT])
1906 return true;
1908 return false;
1909 }