// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2002 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_quota.h"
#include "xfs_qm.h"
#include "xfs_trace.h"

STATIC void	xfs_trans_alloc_dqinfo(xfs_trans_t *);
23 * Add the locked dquot to the transaction.
24 * The dquot must be locked, and it cannot be associated with any
30 struct xfs_dquot *dqp)
32 ASSERT(XFS_DQ_IS_LOCKED(dqp));
33 ASSERT(dqp->q_logitem.qli_dquot == dqp);
36 * Get a log_item_desc to point at the new item.
38 xfs_trans_add_item(tp, &dqp->q_logitem.qli_item);
42 * This is called to mark the dquot as needing
43 * to be logged when the transaction is committed. The dquot must
44 * already be associated with the given transaction.
45 * Note that it marks the entire transaction as dirty. In the ordinary
46 * case, this gets called via xfs_trans_commit, after the transaction
47 * is already dirty. However, there's nothing stop this from getting
48 * called directly, as done by xfs_qm_scall_setqlim. Hence, the TRANS_DIRTY
54 struct xfs_dquot *dqp)
56 ASSERT(XFS_DQ_IS_LOCKED(dqp));
58 tp->t_flags |= XFS_TRANS_DIRTY;
59 set_bit(XFS_LI_DIRTY, &dqp->q_logitem.qli_item.li_flags);
63 * Carry forward whatever is left of the quota blk reservation to
64 * the spanky new transaction
68 struct xfs_trans *otp,
69 struct xfs_trans *ntp)
71 struct xfs_dqtrx *oq, *nq;
73 struct xfs_dqtrx *oqa, *nqa;
74 uint64_t blk_res_used;
79 xfs_trans_alloc_dqinfo(ntp);
82 * Because the quota blk reservation is carried forward,
83 * it is also necessary to carry forward the DQ_DIRTY flag.
85 if (otp->t_flags & XFS_TRANS_DQ_DIRTY)
86 ntp->t_flags |= XFS_TRANS_DQ_DIRTY;
88 for (j = 0; j < XFS_QM_TRANS_DQTYPES; j++) {
89 oqa = otp->t_dqinfo->dqs[j];
90 nqa = ntp->t_dqinfo->dqs[j];
91 for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
94 if (oqa[i].qt_dquot == NULL)
99 if (oq->qt_blk_res && oq->qt_bcount_delta > 0)
100 blk_res_used = oq->qt_bcount_delta;
102 nq->qt_dquot = oq->qt_dquot;
103 nq->qt_bcount_delta = nq->qt_icount_delta = 0;
104 nq->qt_rtbcount_delta = 0;
107 * Transfer whatever is left of the reservations.
109 nq->qt_blk_res = oq->qt_blk_res - blk_res_used;
110 oq->qt_blk_res = blk_res_used;
112 nq->qt_rtblk_res = oq->qt_rtblk_res -
113 oq->qt_rtblk_res_used;
114 oq->qt_rtblk_res = oq->qt_rtblk_res_used;
116 nq->qt_ino_res = oq->qt_ino_res - oq->qt_ino_res_used;
117 oq->qt_ino_res = oq->qt_ino_res_used;
124 * Wrap around mod_dquot to account for both user and group quotas.
127 xfs_trans_mod_dquot_byino(
133 xfs_mount_t *mp = tp->t_mountp;
135 if (!XFS_IS_QUOTA_RUNNING(mp) ||
136 !XFS_IS_QUOTA_ON(mp) ||
137 xfs_is_quota_inode(&mp->m_sb, ip->i_ino))
140 if (tp->t_dqinfo == NULL)
141 xfs_trans_alloc_dqinfo(tp);
143 if (XFS_IS_UQUOTA_ON(mp) && ip->i_udquot)
144 (void) xfs_trans_mod_dquot(tp, ip->i_udquot, field, delta);
145 if (XFS_IS_GQUOTA_ON(mp) && ip->i_gdquot)
146 (void) xfs_trans_mod_dquot(tp, ip->i_gdquot, field, delta);
147 if (XFS_IS_PQUOTA_ON(mp) && ip->i_pdquot)
148 (void) xfs_trans_mod_dquot(tp, ip->i_pdquot, field, delta);
151 STATIC struct xfs_dqtrx *
153 struct xfs_trans *tp,
154 struct xfs_dquot *dqp)
157 struct xfs_dqtrx *qa;
159 switch (xfs_dquot_type(dqp)) {
160 case XFS_DQTYPE_USER:
161 qa = tp->t_dqinfo->dqs[XFS_QM_TRANS_USR];
163 case XFS_DQTYPE_GROUP:
164 qa = tp->t_dqinfo->dqs[XFS_QM_TRANS_GRP];
166 case XFS_DQTYPE_PROJ:
167 qa = tp->t_dqinfo->dqs[XFS_QM_TRANS_PRJ];
173 for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
174 if (qa[i].qt_dquot == NULL ||
175 qa[i].qt_dquot == dqp)
183 * Make the changes in the transaction structure.
184 * The moral equivalent to xfs_trans_mod_sb().
185 * We don't touch any fields in the dquot, so we don't care
186 * if it's locked or not (most of the time it won't be).
190 struct xfs_trans *tp,
191 struct xfs_dquot *dqp,
195 struct xfs_dqtrx *qtrx;
198 ASSERT(XFS_IS_QUOTA_RUNNING(tp->t_mountp));
201 if (tp->t_dqinfo == NULL)
202 xfs_trans_alloc_dqinfo(tp);
204 * Find either the first free slot or the slot that belongs
207 qtrx = xfs_trans_get_dqtrx(tp, dqp);
209 if (qtrx->qt_dquot == NULL)
210 qtrx->qt_dquot = dqp;
213 trace_xfs_trans_mod_dquot_before(qtrx);
214 trace_xfs_trans_mod_dquot(tp, dqp, field, delta);
220 * regular disk blk reservation
222 case XFS_TRANS_DQ_RES_BLKS:
223 qtrx->qt_blk_res += delta;
229 case XFS_TRANS_DQ_RES_INOS:
230 qtrx->qt_ino_res += delta;
236 case XFS_TRANS_DQ_BCOUNT:
237 qtrx->qt_bcount_delta += delta;
240 case XFS_TRANS_DQ_DELBCOUNT:
241 qtrx->qt_delbcnt_delta += delta;
247 case XFS_TRANS_DQ_ICOUNT:
248 if (qtrx->qt_ino_res && delta > 0) {
249 qtrx->qt_ino_res_used += delta;
250 ASSERT(qtrx->qt_ino_res >= qtrx->qt_ino_res_used);
252 qtrx->qt_icount_delta += delta;
258 case XFS_TRANS_DQ_RES_RTBLKS:
259 qtrx->qt_rtblk_res += delta;
265 case XFS_TRANS_DQ_RTBCOUNT:
266 if (qtrx->qt_rtblk_res && delta > 0) {
267 qtrx->qt_rtblk_res_used += delta;
268 ASSERT(qtrx->qt_rtblk_res >= qtrx->qt_rtblk_res_used);
270 qtrx->qt_rtbcount_delta += delta;
273 case XFS_TRANS_DQ_DELRTBCOUNT:
274 qtrx->qt_delrtb_delta += delta;
282 trace_xfs_trans_mod_dquot_after(qtrx);
284 tp->t_flags |= XFS_TRANS_DQ_DIRTY;
289 * Given an array of dqtrx structures, lock all the dquots associated and join
290 * them to the transaction, provided they have been modified. We know that the
291 * highest number of dquots of one type - usr, grp and prj - involved in a
292 * transaction is 3 so we don't need to make this very generic.
295 xfs_trans_dqlockedjoin(
296 struct xfs_trans *tp,
299 ASSERT(q[0].qt_dquot != NULL);
300 if (q[1].qt_dquot == NULL) {
301 xfs_dqlock(q[0].qt_dquot);
302 xfs_trans_dqjoin(tp, q[0].qt_dquot);
304 ASSERT(XFS_QM_TRANS_MAXDQS == 2);
305 xfs_dqlock2(q[0].qt_dquot, q[1].qt_dquot);
306 xfs_trans_dqjoin(tp, q[0].qt_dquot);
307 xfs_trans_dqjoin(tp, q[1].qt_dquot);
311 /* Apply dqtrx changes to the quota reservation counters. */
313 xfs_apply_quota_reservation_deltas(
314 struct xfs_dquot_res *res,
321 * Subtle math here: If reserved > res_used (the normal case),
322 * we're simply subtracting the unused transaction quota
323 * reservation from the dquot reservation.
325 * If, however, res_used > reserved, then we have allocated
326 * more quota blocks than were reserved for the transaction.
327 * We must add that excess to the dquot reservation since it
328 * tracks (usage + resv) and by definition we didn't reserve
331 res->reserved -= abs(reserved - res_used);
332 } else if (count_delta != 0) {
334 * These blks were never reserved, either inside a transaction
335 * or outside one (in a delayed allocation). Also, this isn't
336 * always a negative number since we sometimes deliberately
337 * skip quota reservations.
339 res->reserved += count_delta;
344 * Called by xfs_trans_commit() and similar in spirit to
345 * xfs_trans_apply_sb_deltas().
346 * Go thru all the dquots belonging to this transaction and modify the
347 * INCORE dquot to reflect the actual usages.
348 * Unreserve just the reservations done by this transaction.
349 * dquot is still left locked at exit.
352 xfs_trans_apply_dquot_deltas(
353 struct xfs_trans *tp)
356 struct xfs_dquot *dqp;
357 struct xfs_dqtrx *qtrx, *qa;
359 int64_t totalrtbdelta;
361 if (!(tp->t_flags & XFS_TRANS_DQ_DIRTY))
364 ASSERT(tp->t_dqinfo);
365 for (j = 0; j < XFS_QM_TRANS_DQTYPES; j++) {
366 qa = tp->t_dqinfo->dqs[j];
367 if (qa[0].qt_dquot == NULL)
371 * Lock all of the dquots and join them to the transaction.
373 xfs_trans_dqlockedjoin(tp, qa);
375 for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
376 uint64_t blk_res_used;
380 * The array of dquots is filled
381 * sequentially, not sparsely.
383 if ((dqp = qtrx->qt_dquot) == NULL)
386 ASSERT(XFS_DQ_IS_LOCKED(dqp));
389 * adjust the actual number of blocks used
393 * The issue here is - sometimes we don't make a blkquota
394 * reservation intentionally to be fair to users
395 * (when the amount is small). On the other hand,
396 * delayed allocs do make reservations, but that's
397 * outside of a transaction, so we have no
398 * idea how much was really reserved.
399 * So, here we've accumulated delayed allocation blks and
400 * non-delay blks. The assumption is that the
401 * delayed ones are always reserved (outside of a
402 * transaction), and the others may or may not have
403 * quota reservations.
405 totalbdelta = qtrx->qt_bcount_delta +
406 qtrx->qt_delbcnt_delta;
407 totalrtbdelta = qtrx->qt_rtbcount_delta +
408 qtrx->qt_delrtb_delta;
410 if (totalbdelta != 0 || totalrtbdelta != 0 ||
411 qtrx->qt_icount_delta != 0) {
412 trace_xfs_trans_apply_dquot_deltas_before(dqp);
413 trace_xfs_trans_apply_dquot_deltas(qtrx);
418 ASSERT(dqp->q_blk.count >= -totalbdelta);
420 if (totalrtbdelta < 0)
421 ASSERT(dqp->q_rtb.count >= -totalrtbdelta);
423 if (qtrx->qt_icount_delta < 0)
424 ASSERT(dqp->q_ino.count >= -qtrx->qt_icount_delta);
427 dqp->q_blk.count += totalbdelta;
429 if (qtrx->qt_icount_delta)
430 dqp->q_ino.count += qtrx->qt_icount_delta;
433 dqp->q_rtb.count += totalrtbdelta;
435 if (totalbdelta != 0 || totalrtbdelta != 0 ||
436 qtrx->qt_icount_delta != 0)
437 trace_xfs_trans_apply_dquot_deltas_after(dqp);
440 * Get any default limits in use.
441 * Start/reset the timer(s) if needed.
444 xfs_qm_adjust_dqlimits(dqp);
445 xfs_qm_adjust_dqtimers(dqp);
448 dqp->q_flags |= XFS_DQFLAG_DIRTY;
450 * add this to the list of items to get logged
452 xfs_trans_log_dquot(tp, dqp);
454 * Take off what's left of the original reservation.
455 * In case of delayed allocations, there's no
456 * reservation that a transaction structure knows of.
458 blk_res_used = max_t(int64_t, 0, qtrx->qt_bcount_delta);
459 xfs_apply_quota_reservation_deltas(&dqp->q_blk,
460 qtrx->qt_blk_res, blk_res_used,
461 qtrx->qt_bcount_delta);
464 * Adjust the RT reservation.
466 xfs_apply_quota_reservation_deltas(&dqp->q_rtb,
468 qtrx->qt_rtblk_res_used,
469 qtrx->qt_rtbcount_delta);
472 * Adjust the inode reservation.
474 ASSERT(qtrx->qt_ino_res >= qtrx->qt_ino_res_used);
475 xfs_apply_quota_reservation_deltas(&dqp->q_ino,
477 qtrx->qt_ino_res_used,
478 qtrx->qt_icount_delta);
480 ASSERT(dqp->q_blk.reserved >= dqp->q_blk.count);
481 ASSERT(dqp->q_ino.reserved >= dqp->q_ino.count);
482 ASSERT(dqp->q_rtb.reserved >= dqp->q_rtb.count);
488 * Release the reservations, and adjust the dquots accordingly.
489 * This is called only when the transaction is being aborted. If by
490 * any chance we have done dquot modifications incore (ie. deltas) already,
491 * we simply throw those away, since that's the expected behavior
492 * when a transaction is curtailed without a commit.
495 xfs_trans_unreserve_and_mod_dquots(
496 struct xfs_trans *tp)
499 struct xfs_dquot *dqp;
500 struct xfs_dqtrx *qtrx, *qa;
503 if (!tp->t_dqinfo || !(tp->t_flags & XFS_TRANS_DQ_DIRTY))
506 for (j = 0; j < XFS_QM_TRANS_DQTYPES; j++) {
507 qa = tp->t_dqinfo->dqs[j];
509 for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
512 * We assume that the array of dquots is filled
513 * sequentially, not sparsely.
515 if ((dqp = qtrx->qt_dquot) == NULL)
518 * Unreserve the original reservation. We don't care
519 * about the number of blocks used field, or deltas.
520 * Also we don't bother to zero the fields.
523 if (qtrx->qt_blk_res) {
526 dqp->q_blk.reserved -=
527 (xfs_qcnt_t)qtrx->qt_blk_res;
529 if (qtrx->qt_ino_res) {
534 dqp->q_ino.reserved -=
535 (xfs_qcnt_t)qtrx->qt_ino_res;
538 if (qtrx->qt_rtblk_res) {
543 dqp->q_rtb.reserved -=
544 (xfs_qcnt_t)qtrx->qt_rtblk_res;
555 struct xfs_mount *mp,
556 struct xfs_dquot *dqp,
559 enum quota_type qtype;
561 switch (xfs_dquot_type(dqp)) {
562 case XFS_DQTYPE_PROJ:
565 case XFS_DQTYPE_USER:
568 case XFS_DQTYPE_GROUP:
575 quota_send_warning(make_kqid(&init_user_ns, qtype, dqp->q_id),
576 mp->m_super->s_dev, type);
580 * Decide if we can make an additional reservation against a quota resource.
581 * Returns an inode QUOTA_NL_ warning code and whether or not it's fatal.
583 * Note that we assume that the numeric difference between the inode and block
584 * warning codes will always be 3 since it's userspace ABI now, and will never
585 * decrease the quota reservation, so the *BELOW messages are irrelevant.
589 struct xfs_dquot_res *res,
590 struct xfs_quota_limits *qlim,
594 xfs_qcnt_t hardlimit = res->hardlimit;
595 xfs_qcnt_t softlimit = res->softlimit;
596 xfs_qcnt_t total_count = res->reserved + delta;
598 BUILD_BUG_ON(QUOTA_NL_BHARDWARN != QUOTA_NL_IHARDWARN + 3);
599 BUILD_BUG_ON(QUOTA_NL_BSOFTLONGWARN != QUOTA_NL_ISOFTLONGWARN + 3);
600 BUILD_BUG_ON(QUOTA_NL_BSOFTWARN != QUOTA_NL_ISOFTWARN + 3);
604 return QUOTA_NL_NOWARN;
607 hardlimit = qlim->hard;
609 softlimit = qlim->soft;
611 if (hardlimit && total_count > hardlimit) {
613 return QUOTA_NL_IHARDWARN;
616 if (softlimit && total_count > softlimit) {
617 time64_t now = ktime_get_real_seconds();
619 if ((res->timer != 0 && now > res->timer) ||
620 (res->warnings != 0 && res->warnings >= qlim->warn)) {
622 return QUOTA_NL_ISOFTLONGWARN;
626 return QUOTA_NL_ISOFTWARN;
629 return QUOTA_NL_NOWARN;
633 * This reserves disk blocks and inodes against a dquot.
634 * Flags indicate if the dquot is to be locked here and also
635 * if the blk reservation is for RT or regular blocks.
636 * Sending in XFS_QMOPT_FORCE_RES flag skips the quota check.
640 struct xfs_trans *tp,
641 struct xfs_mount *mp,
642 struct xfs_dquot *dqp,
647 struct xfs_quotainfo *q = mp->m_quotainfo;
648 struct xfs_def_quota *defq;
649 struct xfs_dquot_res *blkres;
650 struct xfs_quota_limits *qlim;
654 defq = xfs_get_defquota(q, xfs_dquot_type(dqp));
656 if (flags & XFS_TRANS_DQ_RES_BLKS) {
657 blkres = &dqp->q_blk;
660 blkres = &dqp->q_rtb;
664 if ((flags & XFS_QMOPT_FORCE_RES) == 0 && dqp->q_id &&
665 xfs_dquot_is_enforced(dqp)) {
670 * dquot is locked already. See if we'd go over the hardlimit
671 * or exceed the timelimit if we'd reserve resources.
673 quota_nl = xfs_dqresv_check(blkres, qlim, nblks, &fatal);
674 if (quota_nl != QUOTA_NL_NOWARN) {
676 * Quota block warning codes are 3 more than the inode
677 * codes, which we check above.
679 xfs_quota_warn(mp, dqp, quota_nl + 3);
684 quota_nl = xfs_dqresv_check(&dqp->q_ino, &defq->ino, ninos,
686 if (quota_nl != QUOTA_NL_NOWARN) {
687 xfs_quota_warn(mp, dqp, quota_nl);
694 * Change the reservation, but not the actual usage.
695 * Note that q_blk.reserved = q_blk.count + resv
697 blkres->reserved += (xfs_qcnt_t)nblks;
698 dqp->q_ino.reserved += (xfs_qcnt_t)ninos;
701 * note the reservation amt in the trans struct too,
702 * so that the transaction knows how much was reserved by
703 * it against this particular dquot.
704 * We don't do this when we are reserving for a delayed allocation,
705 * because we don't have the luxury of a transaction envelope then.
708 ASSERT(tp->t_dqinfo);
709 ASSERT(flags & XFS_QMOPT_RESBLK_MASK);
711 xfs_trans_mod_dquot(tp, dqp,
712 flags & XFS_QMOPT_RESBLK_MASK,
715 xfs_trans_mod_dquot(tp, dqp,
716 XFS_TRANS_DQ_RES_INOS,
719 ASSERT(dqp->q_blk.reserved >= dqp->q_blk.count);
720 ASSERT(dqp->q_rtb.reserved >= dqp->q_rtb.count);
721 ASSERT(dqp->q_ino.reserved >= dqp->q_ino.count);
728 if (xfs_dquot_type(dqp) == XFS_DQTYPE_PROJ)
735 * Given dquot(s), make disk block and/or inode reservations against them.
736 * The fact that this does the reservation against user, group and
737 * project quotas is important, because this follows a all-or-nothing
740 * flags = XFS_QMOPT_FORCE_RES evades limit enforcement. Used by chown.
741 * XFS_QMOPT_ENOSPC returns ENOSPC not EDQUOT. Used by pquota.
742 * XFS_TRANS_DQ_RES_BLKS reserves regular disk blocks
743 * XFS_TRANS_DQ_RES_RTBLKS reserves realtime disk blocks
744 * dquots are unlocked on return, if they were not locked by caller.
747 xfs_trans_reserve_quota_bydquots(
748 struct xfs_trans *tp,
749 struct xfs_mount *mp,
750 struct xfs_dquot *udqp,
751 struct xfs_dquot *gdqp,
752 struct xfs_dquot *pdqp,
759 if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
762 if (tp && tp->t_dqinfo == NULL)
763 xfs_trans_alloc_dqinfo(tp);
765 ASSERT(flags & XFS_QMOPT_RESBLK_MASK);
768 error = xfs_trans_dqresv(tp, mp, udqp, nblks, ninos, flags);
774 error = xfs_trans_dqresv(tp, mp, gdqp, nblks, ninos, flags);
780 error = xfs_trans_dqresv(tp, mp, pdqp, nblks, ninos, flags);
786 * Didn't change anything critical, so, no need to log
791 flags |= XFS_QMOPT_FORCE_RES;
793 xfs_trans_dqresv(tp, mp, gdqp, -nblks, -ninos, flags);
795 flags |= XFS_QMOPT_FORCE_RES;
797 xfs_trans_dqresv(tp, mp, udqp, -nblks, -ninos, flags);
803 * Lock the dquot and change the reservation if we can.
804 * This doesn't change the actual usage, just the reservation.
805 * The inode sent in is locked.
808 xfs_trans_reserve_quota_nblks(
809 struct xfs_trans *tp,
810 struct xfs_inode *ip,
815 struct xfs_mount *mp = ip->i_mount;
817 if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
820 ASSERT(!xfs_is_quota_inode(&mp->m_sb, ip->i_ino));
822 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
823 ASSERT((flags & ~(XFS_QMOPT_FORCE_RES)) == XFS_TRANS_DQ_RES_RTBLKS ||
824 (flags & ~(XFS_QMOPT_FORCE_RES)) == XFS_TRANS_DQ_RES_BLKS);
827 * Reserve nblks against these dquots, with trans as the mediator.
829 return xfs_trans_reserve_quota_bydquots(tp, mp,
830 ip->i_udquot, ip->i_gdquot,
832 nblks, ninos, flags);
836 * This routine is called to allocate a quotaoff log item.
838 struct xfs_qoff_logitem *
839 xfs_trans_get_qoff_item(
840 struct xfs_trans *tp,
841 struct xfs_qoff_logitem *startqoff,
844 struct xfs_qoff_logitem *q;
848 q = xfs_qm_qoff_logitem_init(tp->t_mountp, startqoff, flags);
852 * Get a log_item_desc to point at the new item.
854 xfs_trans_add_item(tp, &q->qql_item);
860 * This is called to mark the quotaoff logitem as needing
861 * to be logged when the transaction is committed. The logitem must
862 * already be associated with the given transaction.
865 xfs_trans_log_quotaoff_item(
866 struct xfs_trans *tp,
867 struct xfs_qoff_logitem *qlp)
869 tp->t_flags |= XFS_TRANS_DIRTY;
870 set_bit(XFS_LI_DIRTY, &qlp->qql_item.li_flags);
874 xfs_trans_alloc_dqinfo(
877 tp->t_dqinfo = kmem_cache_zalloc(xfs_qm_dqtrxzone,
878 GFP_KERNEL | __GFP_NOFAIL);
882 xfs_trans_free_dqinfo(
887 kmem_cache_free(xfs_qm_dqtrxzone, tp->t_dqinfo);