/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */

#include <linux/capability.h>

#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_qm.h"
#include "xfs_trace.h"
#include "xfs_icache.h"

STATIC int	xfs_qm_log_quotaoff(xfs_mount_t *, xfs_qoff_logitem_t **, uint);
STATIC int	xfs_qm_log_quotaoff_end(xfs_mount_t *, xfs_qoff_logitem_t *,
					uint);

/*
 * Turn off quota accounting and/or enforcement for all udquots and/or
 * gdquots. Called only at unmount time.
 *
 * This assumes that there are no dquots of this file system cached
 * incore, and modifies the ondisk dquot directly. Therefore, for example,
 * it is an error to call this twice, without purging the cache.
 */
int
xfs_qm_scall_quotaoff(
	xfs_mount_t		*mp,
	uint			flags)
{
	struct xfs_quotainfo	*q = mp->m_quotainfo;
	uint			dqtype;
	int			error;
	uint			inactivate_flags;
	xfs_qoff_logitem_t	*qoffstart;

	/*
	 * No file system can have quotas enabled on disk but not in core.
	 * Note that quota utilities (like quotaoff) _expect_
	 * errno == -EEXIST here.
	 */
	if ((mp->m_qflags & flags) == 0)
		return -EEXIST;
	error = 0;

	flags &= (XFS_ALL_QUOTA_ACCT | XFS_ALL_QUOTA_ENFD);

	/*
	 * We don't want to deal with two quotaoffs messing up each other,
	 * so we're going to serialize it. quotaoff isn't exactly a performance
	 * critical thing.
	 * If quotaoff, then we must be dealing with the root filesystem.
	 */
	ASSERT(q);
	mutex_lock(&q->qi_quotaofflock);

	/*
	 * If we're just turning off quota enforcement, change mp and go.
	 */
	if ((flags & XFS_ALL_QUOTA_ACCT) == 0) {
		mp->m_qflags &= ~(flags);

		spin_lock(&mp->m_sb_lock);
		mp->m_sb.sb_qflags = mp->m_qflags;
		spin_unlock(&mp->m_sb_lock);
		mutex_unlock(&q->qi_quotaofflock);

		/* XXX what to do if error ? Revert back to old vals incore ? */
		return xfs_sync_sb(mp, false);
	}

	dqtype = 0;
	inactivate_flags = 0;
	/*
	 * If accounting is off, we must turn enforcement off, clear the
	 * quota 'CHKD' certificate to make it known that we have to
	 * do a quotacheck the next time this quota is turned on.
	 */
	if (flags & XFS_UQUOTA_ACCT) {
		dqtype |= XFS_QMOPT_UQUOTA;
		flags |= (XFS_UQUOTA_CHKD | XFS_UQUOTA_ENFD);
		inactivate_flags |= XFS_UQUOTA_ACTIVE;
	}
	if (flags & XFS_GQUOTA_ACCT) {
		dqtype |= XFS_QMOPT_GQUOTA;
		flags |= (XFS_GQUOTA_CHKD | XFS_GQUOTA_ENFD);
		inactivate_flags |= XFS_GQUOTA_ACTIVE;
	}
	if (flags & XFS_PQUOTA_ACCT) {
		dqtype |= XFS_QMOPT_PQUOTA;
		flags |= (XFS_PQUOTA_CHKD | XFS_PQUOTA_ENFD);
		inactivate_flags |= XFS_PQUOTA_ACTIVE;
	}

	/*
	 * Nothing to do?  Don't complain. This happens when we're just
	 * turning off quota enforcement.
	 */
	if ((mp->m_qflags & flags) == 0)
		goto out_unlock;

	/*
	 * Write the LI_QUOTAOFF log record, and do SB changes atomically,
	 * and synchronously. If we fail to write, we should abort the
	 * operation as it cannot be recovered safely if we crash.
	 */
	error = xfs_qm_log_quotaoff(mp, &qoffstart, flags);
	if (error)
		goto out_unlock;

	/*
	 * Next we clear the XFS_MOUNT_*DQ_ACTIVE bit(s) in the mount struct
	 * to take care of the race between dqget and quotaoff. We don't take
	 * any special locks to reset these bits. All processes need to check
	 * these bits *after* taking inode lock(s) to see if the particular
	 * quota type is in the process of being turned off. If *ACTIVE, it is
	 * guaranteed that all dquot structures and all quotainode ptrs will all
	 * stay valid as long as that inode is kept locked.
	 *
	 * There is no turning back after this.
	 */
	mp->m_qflags &= ~inactivate_flags;

	/*
	 * Give back all the dquot reference(s) held by inodes.
	 * Here we go thru every single incore inode in this file system, and
	 * do a dqrele on the i_udquot/i_gdquot that it may have.
	 * Essentially, as long as somebody has an inode locked, this guarantees
	 * that quotas will not be turned off. This is handy because in a
	 * transaction once we lock the inode(s) and check for quotaon, we can
	 * depend on the quota inodes (and other things) being valid as long as
	 * we keep the lock(s).
	 */
	xfs_qm_dqrele_all_inodes(mp, flags);

	/*
	 * Next we make the changes in the quota flag in the mount struct.
	 * This isn't protected by a particular lock directly, because we
	 * don't want to take a mrlock every time we depend on quotas being on.
	 */
	mp->m_qflags &= ~flags;

	/*
	 * Go through all the dquots of this file system and purge them,
	 * according to what was turned off.
	 */
	xfs_qm_dqpurge_all(mp, dqtype);

	/*
	 * Transactions that had started before ACTIVE state bit was cleared
	 * could have logged many dquots, so they'd have higher LSNs than
	 * the first QUOTAOFF log record does. If we happen to crash when
	 * the tail of the log has gone past the QUOTAOFF record, but
	 * before the last dquot modification, those dquots __will__
	 * recover, and that's not good.
	 *
	 * So, we have QUOTAOFF start and end logitems; the start
	 * logitem won't get overwritten until the end logitem appears...
	 */
	error = xfs_qm_log_quotaoff_end(mp, qoffstart, flags);
	if (error) {
		/* We're screwed now. Shutdown is the only option. */
		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
		goto out_unlock;
	}

	/*
	 * If all quotas are completely turned off, close shop.
	 */
	if (mp->m_qflags == 0) {
		mutex_unlock(&q->qi_quotaofflock);
		xfs_qm_destroy_quotainfo(mp);
		return 0;
	}

	/*
	 * Release our quotainode references if we don't need them anymore.
	 */
	if ((dqtype & XFS_QMOPT_UQUOTA) && q->qi_uquotaip) {
		IRELE(q->qi_uquotaip);
		q->qi_uquotaip = NULL;
	}
	if ((dqtype & XFS_QMOPT_GQUOTA) && q->qi_gquotaip) {
		IRELE(q->qi_gquotaip);
		q->qi_gquotaip = NULL;
	}
	if ((dqtype & XFS_QMOPT_PQUOTA) && q->qi_pquotaip) {
		IRELE(q->qi_pquotaip);
		q->qi_pquotaip = NULL;
	}

out_unlock:
	mutex_unlock(&q->qi_quotaofflock);
	return error;
}
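
/*
 * Truncate one of the on-disk quota inode files back to nothing. The caller
 * passes the quota inode number straight from the superblock; NULLFSINO
 * means that quota type has no inode and there is nothing to do.
 */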
STATIC int
xfs_qm_scall_trunc_qfile(
	struct xfs_mount	*mp,
	xfs_ino_t		ino)
{
	struct xfs_inode	*ip;
	struct xfs_trans	*tp;
	int			error;

	if (ino == NULLFSINO)
		return 0;

	error = xfs_iget(mp, NULL, ino, 0, 0, &ip);
	if (error)
		return error;
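
	/*
	 * Take the IOLOCK before reserving log space; the ILOCK is only taken
	 * once the transaction has been allocated, matching the usual XFS
	 * locking order.
	 */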
	xfs_ilock(ip, XFS_IOLOCK_EXCL);

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
	if (error) {
		xfs_iunlock(ip, XFS_IOLOCK_EXCL);
		goto out_put;
	}

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, 0);

	ip->i_d.di_size = 0;
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, 0);
	if (error) {
		xfs_trans_cancel(tp);
		goto out_unlock;
	}

	ASSERT(ip->i_d.di_nextents == 0);

	xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
	error = xfs_trans_commit(tp);

out_unlock:
	xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
out_put:
	IRELE(ip);
	return error;
}
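
/*
 * Truncate whichever quota inodes are selected in @flags. Flags outside
 * XFS_DQ_ALLTYPES are rejected, and the first truncation failure is returned.
 */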
int
xfs_qm_scall_trunc_qfiles(
	xfs_mount_t	*mp,
	uint		flags)
{
	int		error = -EINVAL;

	if (!xfs_sb_version_hasquota(&mp->m_sb) || flags == 0 ||
	    (flags & ~XFS_DQ_ALLTYPES)) {
		xfs_debug(mp, "%s: flags=%x m_qflags=%x",
			__func__, flags, mp->m_qflags);
		return -EINVAL;
	}

	if (flags & XFS_DQ_USER) {
		error = xfs_qm_scall_trunc_qfile(mp, mp->m_sb.sb_uquotino);
		if (error)
			return error;
	}
	if (flags & XFS_DQ_GROUP) {
		error = xfs_qm_scall_trunc_qfile(mp, mp->m_sb.sb_gquotino);
		if (error)
			return error;
	}
	if (flags & XFS_DQ_PROJ)
		error = xfs_qm_scall_trunc_qfile(mp, mp->m_sb.sb_pquotino);

	return error;
}

/*
 * Switch on (a given) quota enforcement for a filesystem.  This takes
 * effect immediately.
 * (Switching on quota accounting must be done at mount time.)
 */
int
xfs_qm_scall_quotaon(
	xfs_mount_t	*mp,
	uint		flags)
{
	int		error;
	uint		qf;

	flags &= (XFS_ALL_QUOTA_ACCT | XFS_ALL_QUOTA_ENFD);
	/*
	 * Switching on quota accounting must be done at mount time.
	 */
	flags &= ~(XFS_ALL_QUOTA_ACCT);

	if (flags == 0) {
		xfs_debug(mp, "%s: zero flags, m_qflags=%x",
			__func__, mp->m_qflags);
		return -EINVAL;
	}

	/*
	 * Can't enforce without accounting. We check the superblock
	 * qflags here instead of m_qflags because rootfs can have
	 * quota acct on ondisk without m_qflags' knowing.
	 */
	if (((mp->m_sb.sb_qflags & XFS_UQUOTA_ACCT) == 0 &&
	     (flags & XFS_UQUOTA_ENFD)) ||
	    ((mp->m_sb.sb_qflags & XFS_GQUOTA_ACCT) == 0 &&
	     (flags & XFS_GQUOTA_ENFD)) ||
	    ((mp->m_sb.sb_qflags & XFS_PQUOTA_ACCT) == 0 &&
	     (flags & XFS_PQUOTA_ENFD))) {
		xfs_debug(mp,
			"%s: Can't enforce without acct, flags=%x sbflags=%x",
			__func__, flags, mp->m_sb.sb_qflags);
		return -EINVAL;
	}
	/*
	 * If everything's up-to-date incore, then don't waste time.
	 */
	if ((mp->m_qflags & flags) == flags)
		return -EEXIST;

	/*
	 * Change sb_qflags on disk but not incore mp->qflags
	 * if this is the root filesystem.
	 */
	spin_lock(&mp->m_sb_lock);
	qf = mp->m_sb.sb_qflags;
	mp->m_sb.sb_qflags = qf | flags;
	spin_unlock(&mp->m_sb_lock);

	/*
	 * There's nothing to change if it's the same.
	 */
	if ((qf & flags) == flags)
		return -EEXIST;

	error = xfs_sync_sb(mp, false);
	if (error)
		return error;
	/*
	 * If we aren't trying to switch on quota enforcement, we are done.
	 */
	if (((mp->m_sb.sb_qflags & XFS_UQUOTA_ACCT) !=
	     (mp->m_qflags & XFS_UQUOTA_ACCT)) ||
	    ((mp->m_sb.sb_qflags & XFS_PQUOTA_ACCT) !=
	     (mp->m_qflags & XFS_PQUOTA_ACCT)) ||
	    ((mp->m_sb.sb_qflags & XFS_GQUOTA_ACCT) !=
	     (mp->m_qflags & XFS_GQUOTA_ACCT)))
		return 0;

	if (!XFS_IS_QUOTA_RUNNING(mp))
		return -ESRCH;

	/*
	 * Switch on quota enforcement in core.
	 */
	mutex_lock(&mp->m_quotainfo->qi_quotaofflock);
	mp->m_qflags |= (flags & XFS_ALL_QUOTA_ENFD);
	mutex_unlock(&mp->m_quotainfo->qi_quotaofflock);

	return 0;
}
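
/* The only qc_dqblk fields setqlim is allowed to change: limits, timers and warning counts. */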
#define XFS_QC_MASK \
	(QC_LIMIT_MASK | QC_TIMER_MASK | QC_WARNS_MASK)

/*
 * Adjust quota limits, and start/stop timers accordingly.
 */
int
xfs_qm_scall_setqlim(
	struct xfs_mount	*mp,
	xfs_dqid_t		id,
	uint			type,
	struct qc_dqblk		*newlim)
{
	struct xfs_quotainfo	*q = mp->m_quotainfo;
	struct xfs_disk_dquot	*ddq;
	struct xfs_dquot	*dqp;
	struct xfs_trans	*tp;
	struct xfs_def_quota	*defq;
	int			error;
	xfs_qcnt_t		hard, soft;

	if (newlim->d_fieldmask & ~XFS_QC_MASK)
		return -EINVAL;
	if ((newlim->d_fieldmask & XFS_QC_MASK) == 0)
		return 0;

	/*
	 * We don't want to race with a quotaoff so take the quotaoff lock.
	 * We don't hold an inode lock, so there's nothing else to stop
	 * a quotaoff from happening.
	 */
	mutex_lock(&q->qi_quotaofflock);

	/*
	 * Get the dquot (locked) before we start, as we need to do a
	 * transaction to allocate it if it doesn't exist. Once we have the
	 * dquot, unlock it so we can start the next transaction safely. We hold
	 * a reference to the dquot, so it's safe to do this unlock/lock without
	 * it being reclaimed in the mean time.
	 */
	error = xfs_qm_dqget(mp, NULL, id, type, XFS_QMOPT_DQALLOC, &dqp);
	if (error) {
		ASSERT(error != -ENOENT);
		goto out_unlock;
	}

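	/*
	 * ID 0 is the "default" dquot for this quota type: when its limits are
	 * changed below, the per-mount defaults cached in @defq (and the
	 * timer/warning limits in @q) are updated alongside the on-disk dquot.
	 */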
	defq = xfs_get_defquota(dqp, q);
	xfs_dqunlock(dqp);

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_qm_setqlim, 0, 0, 0, &tp);
	if (error)
		goto out_rele;

	xfs_dqlock(dqp);
	xfs_trans_dqjoin(tp, dqp);
	ddq = &dqp->q_core;

	/*
	 * Make sure that hardlimits are >= soft limits before changing.
	 */
	hard = (newlim->d_fieldmask & QC_SPC_HARD) ?
		(xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_spc_hardlimit) :
			be64_to_cpu(ddq->d_blk_hardlimit);
	soft = (newlim->d_fieldmask & QC_SPC_SOFT) ?
		(xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_spc_softlimit) :
			be64_to_cpu(ddq->d_blk_softlimit);
	if (hard == 0 || hard >= soft) {
		ddq->d_blk_hardlimit = cpu_to_be64(hard);
		ddq->d_blk_softlimit = cpu_to_be64(soft);
		xfs_dquot_set_prealloc_limits(dqp);
		if (id == 0) {
			defq->bhardlimit = hard;
			defq->bsoftlimit = soft;
		}
	} else {
		xfs_debug(mp, "blkhard %Ld < blksoft %Ld", hard, soft);
	}
	hard = (newlim->d_fieldmask & QC_RT_SPC_HARD) ?
		(xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_rt_spc_hardlimit) :
			be64_to_cpu(ddq->d_rtb_hardlimit);
	soft = (newlim->d_fieldmask & QC_RT_SPC_SOFT) ?
		(xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_rt_spc_softlimit) :
			be64_to_cpu(ddq->d_rtb_softlimit);
	if (hard == 0 || hard >= soft) {
		ddq->d_rtb_hardlimit = cpu_to_be64(hard);
		ddq->d_rtb_softlimit = cpu_to_be64(soft);
		if (id == 0) {
			defq->rtbhardlimit = hard;
			defq->rtbsoftlimit = soft;
		}
	} else {
		xfs_debug(mp, "rtbhard %Ld < rtbsoft %Ld", hard, soft);
	}

	hard = (newlim->d_fieldmask & QC_INO_HARD) ?
		(xfs_qcnt_t) newlim->d_ino_hardlimit :
			be64_to_cpu(ddq->d_ino_hardlimit);
	soft = (newlim->d_fieldmask & QC_INO_SOFT) ?
		(xfs_qcnt_t) newlim->d_ino_softlimit :
			be64_to_cpu(ddq->d_ino_softlimit);
	if (hard == 0 || hard >= soft) {
		ddq->d_ino_hardlimit = cpu_to_be64(hard);
		ddq->d_ino_softlimit = cpu_to_be64(soft);
		if (id == 0) {
			defq->ihardlimit = hard;
			defq->isoftlimit = soft;
		}
	} else {
		xfs_debug(mp, "ihard %Ld < isoft %Ld", hard, soft);
	}

	/*
	 * Update warnings counter(s) if requested
	 */
	if (newlim->d_fieldmask & QC_SPC_WARNS)
		ddq->d_bwarns = cpu_to_be16(newlim->d_spc_warns);
	if (newlim->d_fieldmask & QC_INO_WARNS)
		ddq->d_iwarns = cpu_to_be16(newlim->d_ino_warns);
	if (newlim->d_fieldmask & QC_RT_SPC_WARNS)
		ddq->d_rtbwarns = cpu_to_be16(newlim->d_rt_spc_warns);

	if (id == 0) {
		/*
		 * Timelimits for the super user set the relative time
		 * the other users can be over quota for this file system.
		 * If it is zero a default is used.  Ditto for the default
		 * soft and hard limit values (already done, above), and
		 * for warnings.
		 */
		if (newlim->d_fieldmask & QC_SPC_TIMER) {
			q->qi_btimelimit = newlim->d_spc_timer;
			ddq->d_btimer = cpu_to_be32(newlim->d_spc_timer);
		}
		if (newlim->d_fieldmask & QC_INO_TIMER) {
			q->qi_itimelimit = newlim->d_ino_timer;
			ddq->d_itimer = cpu_to_be32(newlim->d_ino_timer);
		}
		if (newlim->d_fieldmask & QC_RT_SPC_TIMER) {
			q->qi_rtbtimelimit = newlim->d_rt_spc_timer;
			ddq->d_rtbtimer = cpu_to_be32(newlim->d_rt_spc_timer);
		}
		if (newlim->d_fieldmask & QC_SPC_WARNS)
			q->qi_bwarnlimit = newlim->d_spc_warns;
		if (newlim->d_fieldmask & QC_INO_WARNS)
			q->qi_iwarnlimit = newlim->d_ino_warns;
		if (newlim->d_fieldmask & QC_RT_SPC_WARNS)
			q->qi_rtbwarnlimit = newlim->d_rt_spc_warns;
	} else {
		/*
		 * If the user is now over quota, start the timelimit.
		 * The user will not be 'warned'.
		 * Note that we keep the timers ticking, whether enforcement
		 * is on or off. We don't really want to bother with iterating
		 * over all ondisk dquots and turning the timers on/off.
		 */
		xfs_qm_adjust_dqtimers(mp, ddq);
	}
	dqp->dq_flags |= XFS_DQ_DIRTY;
	xfs_trans_log_dquot(tp, dqp);

	error = xfs_trans_commit(tp);

out_rele:
	xfs_qm_dqrele(dqp);
out_unlock:
	mutex_unlock(&q->qi_quotaofflock);
	return error;
}
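
/*
 * Log the QUOTAOFF end record that pairs with the start item written by
 * xfs_qm_log_quotaoff(); the start item is only released once this end
 * record is safely on disk.
 */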
STATIC int
xfs_qm_log_quotaoff_end(
	xfs_mount_t		*mp,
	xfs_qoff_logitem_t	*startqoff,
	uint			flags)
{
	xfs_trans_t		*tp;
	int			error;
	xfs_qoff_logitem_t	*qoffi;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_qm_equotaoff, 0, 0, 0, &tp);
	if (error)
		return error;

	qoffi = xfs_trans_get_qoff_item(tp, startqoff,
					flags & XFS_ALL_QUOTA_ACCT);
	xfs_trans_log_quotaoff_item(tp, qoffi);

	/*
	 * We have to make sure that the transaction is secure on disk before we
	 * return and actually stop quota accounting. So, make it synchronous.
	 * We don't care about quotaoff's performance.
	 */
	xfs_trans_set_sync(tp);
	return xfs_trans_commit(tp);
}
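
/*
 * Write the QUOTAOFF start record and the matching superblock qflags update
 * in a single synchronous transaction, handing the start log item back to the
 * caller so it can later be paired with an end record.
 */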
STATIC int
xfs_qm_log_quotaoff(
	xfs_mount_t		*mp,
	xfs_qoff_logitem_t	**qoffstartp,
	uint			flags)
{
	xfs_trans_t		*tp;
	int			error;
	xfs_qoff_logitem_t	*qoffi;

	*qoffstartp = NULL;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_qm_quotaoff, 0, 0, 0, &tp);
	if (error)
		goto out;

	qoffi = xfs_trans_get_qoff_item(tp, NULL, flags & XFS_ALL_QUOTA_ACCT);
	xfs_trans_log_quotaoff_item(tp, qoffi);

	spin_lock(&mp->m_sb_lock);
	mp->m_sb.sb_qflags = (mp->m_qflags & ~(flags)) & XFS_MOUNT_QUOTA_ALL;
	spin_unlock(&mp->m_sb_lock);

	xfs_log_sb(tp);

	/*
	 * We have to make sure that the transaction is secure on disk before we
	 * return and actually stop quota accounting. So, make it synchronous.
	 * We don't care about quotaoff's performance.
	 */
	xfs_trans_set_sync(tp);
	error = xfs_trans_commit(tp);
	if (error)
		goto out;

	*qoffstartp = qoffi;
out:
	return error;
}
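
/*
 * Return the limits, usage and timers for a single dquot to the caller,
 * converting the on-disk big-endian, FSB-based values into the byte-based
 * struct qc_dqblk format used by the generic quota interface.
 */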
int
xfs_qm_scall_getquota(
	struct xfs_mount	*mp,
	xfs_dqid_t		*id,
	uint			type,
	struct qc_dqblk		*dst,
	uint			dqget_flags)
{
	struct xfs_dquot	*dqp;
	int			error;

	/*
	 * Try to get the dquot. We don't want it allocated on disk, so
	 * we aren't passing the XFS_QMOPT_DOALLOC flag. If it doesn't
	 * exist, we'll get ENOENT back.
	 */
	error = xfs_qm_dqget(mp, NULL, *id, type, dqget_flags, &dqp);
	if (error)
		return error;

	/*
	 * If everything's NULL, this dquot doesn't quite exist as far as
	 * our utility programs are concerned.
	 */
	if (XFS_IS_DQUOT_UNINITIALIZED(dqp)) {
		error = -ENOENT;
		goto out_put;
	}

	/* Fill in the ID we actually read from disk */
	*id = be32_to_cpu(dqp->q_core.d_id);

	memset(dst, 0, sizeof(*dst));
	dst->d_spc_hardlimit =
		XFS_FSB_TO_B(mp, be64_to_cpu(dqp->q_core.d_blk_hardlimit));
	dst->d_spc_softlimit =
		XFS_FSB_TO_B(mp, be64_to_cpu(dqp->q_core.d_blk_softlimit));
	dst->d_ino_hardlimit = be64_to_cpu(dqp->q_core.d_ino_hardlimit);
	dst->d_ino_softlimit = be64_to_cpu(dqp->q_core.d_ino_softlimit);
	dst->d_space = XFS_FSB_TO_B(mp, dqp->q_res_bcount);
	dst->d_ino_count = dqp->q_res_icount;
	dst->d_spc_timer = be32_to_cpu(dqp->q_core.d_btimer);
	dst->d_ino_timer = be32_to_cpu(dqp->q_core.d_itimer);
	dst->d_ino_warns = be16_to_cpu(dqp->q_core.d_iwarns);
	dst->d_spc_warns = be16_to_cpu(dqp->q_core.d_bwarns);
	dst->d_rt_spc_hardlimit =
		XFS_FSB_TO_B(mp, be64_to_cpu(dqp->q_core.d_rtb_hardlimit));
	dst->d_rt_spc_softlimit =
		XFS_FSB_TO_B(mp, be64_to_cpu(dqp->q_core.d_rtb_softlimit));
	dst->d_rt_space = XFS_FSB_TO_B(mp, dqp->q_res_rtbcount);
	dst->d_rt_spc_timer = be32_to_cpu(dqp->q_core.d_rtbtimer);
	dst->d_rt_spc_warns = be16_to_cpu(dqp->q_core.d_rtbwarns);

	/*
	 * Internally, we don't reset all the timers when quota enforcement
	 * gets turned off. No need to confuse the user level code,
	 * so return zeroes in that case.
	 */
	if ((!XFS_IS_UQUOTA_ENFORCED(mp) &&
	     dqp->q_core.d_flags == XFS_DQ_USER) ||
	    (!XFS_IS_GQUOTA_ENFORCED(mp) &&
	     dqp->q_core.d_flags == XFS_DQ_GROUP) ||
	    (!XFS_IS_PQUOTA_ENFORCED(mp) &&
	     dqp->q_core.d_flags == XFS_DQ_PROJ)) {
		dst->d_spc_timer = 0;
		dst->d_ino_timer = 0;
		dst->d_rt_spc_timer = 0;
	}

#ifdef DEBUG
	if (((XFS_IS_UQUOTA_ENFORCED(mp) && type == XFS_DQ_USER) ||
	     (XFS_IS_GQUOTA_ENFORCED(mp) && type == XFS_DQ_GROUP) ||
	     (XFS_IS_PQUOTA_ENFORCED(mp) && type == XFS_DQ_PROJ)) &&
	    *id != 0) {
		if ((dst->d_space > dst->d_spc_softlimit) &&
		    (dst->d_spc_softlimit > 0)) {
			ASSERT(dst->d_spc_timer != 0);
		}
		if ((dst->d_ino_count > dst->d_ino_softlimit) &&
		    (dst->d_ino_softlimit > 0)) {
			ASSERT(dst->d_ino_timer != 0);
		}
	}
#endif
out_put:
	xfs_qm_dqput(dqp);
	return error;
}
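
/*
 * Per-inode callback for xfs_qm_dqrele_all_inodes(): under the inode lock,
 * drop the inode's references to the dquot types that are being turned off.
 * The quota inodes themselves never hold dquot references and are skipped.
 */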
STATIC int
xfs_dqrele_inode(
	struct xfs_inode	*ip,
	int			flags,
	void			*args)
{
	/* skip quota inodes */
	if (ip == ip->i_mount->m_quotainfo->qi_uquotaip ||
	    ip == ip->i_mount->m_quotainfo->qi_gquotaip ||
	    ip == ip->i_mount->m_quotainfo->qi_pquotaip) {
		ASSERT(ip->i_udquot == NULL);
		ASSERT(ip->i_gdquot == NULL);
		ASSERT(ip->i_pdquot == NULL);
		return 0;
	}

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	if ((flags & XFS_UQUOTA_ACCT) && ip->i_udquot) {
		xfs_qm_dqrele(ip->i_udquot);
		ip->i_udquot = NULL;
	}
	if ((flags & XFS_GQUOTA_ACCT) && ip->i_gdquot) {
		xfs_qm_dqrele(ip->i_gdquot);
		ip->i_gdquot = NULL;
	}
	if ((flags & XFS_PQUOTA_ACCT) && ip->i_pdquot) {
		xfs_qm_dqrele(ip->i_pdquot);
		ip->i_pdquot = NULL;
	}
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	return 0;
}

/*
 * Go thru all the inodes in the file system, releasing their dquots.
 *
 * Note that the mount structure gets modified to indicate that quotas are off
 * AFTER this, in the case of quotaoff.
 */
void
xfs_qm_dqrele_all_inodes(
	struct xfs_mount	*mp,
	uint			flags)
{
	ASSERT(mp->m_quotainfo);
	xfs_inode_ag_iterator_flags(mp, xfs_dqrele_inode, flags, NULL,
				    XFS_AGITER_INEW_WAIT);
}