1 // SPDX-License-Identifier: GPL-2.0
3 * Implementation of the diskquota system for the LINUX operating system. QUOTA
4 * is implemented using the BSD system call interface as the means of
5 * communication with the user level. This file contains the generic routines
6 * called by the different filesystems on allocation of an inode or block.
7 * These routines take care of the administration needed to have a consistent
8 * diskquota tracking system. The ideas of both user and group quotas are based
9 * on the Melbourne quota system as used on BSD derived systems. The internal
10 * implementation is based on one of the several variants of the LINUX
11 * inode-subsystem with added complexity of the diskquota system.
13 * Author: Marco van Wieringen <mvw@planets.elm.net>
15 * Fixes: Dmitry Gorodchanin <pgmdsg@ibi.com>, 11 Feb 96
17 * Revised list management to avoid races
18 * -- Bill Hawes, <whawes@star.net>, 9/98
20 * Fixed races in dquot_transfer(), dqget() and dquot_alloc_...().
21 * As a consequence, the locking was moved from dquot_decr_...(),
22 * dquot_incr_...() to calling functions.
23 * invalidate_dquots() now writes modified dquots.
24 * Serialized quota_off() and quota_on() for mount point.
25 * Fixed a few bugs in grow_dquots().
26 * Fixed deadlock in write_dquot() - we no longer account quotas on quota files.
28 * remove_dquot_ref() moved to inode.c - it now traverses through inodes
29 * add_dquot_ref() restarts after blocking
30 * Added check for bogus uid and fixed check for group in quotactl.
31 * Jan Kara, <jack@suse.cz>, sponsored by SuSE CR, 10-11/99
33 * Used struct list_head instead of own list struct
34 * Invalidation of referenced dquots is no longer possible
35 * Improved free_dquots list management
36 * Quota and i_blocks are now updated in one place to avoid races
37 * Warnings are now delayed so we won't block in a critical section
38 * Write updated not to require dquot lock
39 * Jan Kara, <jack@suse.cz>, 9/2000
41 * Added dynamic quota structure allocation
42 * Jan Kara <jack@suse.cz> 12/2000
44 * Rewritten quota interface. Implemented new quota format and
45 * format registration.
46 * Jan Kara, <jack@suse.cz>, 2001,2002
49 * Jan Kara, <jack@suse.cz>, 10/2002
51 * Added journalled quota support, fixed lock inversion problems
52 * Jan Kara, <jack@suse.cz>, 2003,2004
54 * (C) Copyright 1994 - 1997 Marco van Wieringen
57 #include <linux/errno.h>
58 #include <linux/kernel.h>
60 #include <linux/mount.h>
62 #include <linux/time.h>
63 #include <linux/types.h>
64 #include <linux/string.h>
65 #include <linux/fcntl.h>
66 #include <linux/stat.h>
67 #include <linux/tty.h>
68 #include <linux/file.h>
69 #include <linux/slab.h>
70 #include <linux/sysctl.h>
71 #include <linux/init.h>
72 #include <linux/module.h>
73 #include <linux/proc_fs.h>
74 #include <linux/security.h>
75 #include <linux/sched.h>
76 #include <linux/cred.h>
77 #include <linux/kmod.h>
78 #include <linux/namei.h>
79 #include <linux/capability.h>
80 #include <linux/quotaops.h>
81 #include <linux/blkdev.h>
82 #include <linux/sched/mm.h>
83 #include "../internal.h" /* ugh */
85 #include <linux/uaccess.h>
88 * There are five quota SMP locks:
89 * * dq_list_lock protects all lists with quotas and quota formats.
90 * dquot->dq_dqb_lock protects the data in dquot->dq_dqb
91 * * inode->i_lock protects inode->i_blocks, i_bytes and also guards
92 * consistency of dquot->dq_dqb with inode->i_blocks, i_bytes so that
93 * dquot_transfer() can stabilize amount it transfers
94 * * dq_data_lock protects mem_dqinfo structures and modifications of dquot
95 * pointers in the inode
96 * * dq_state_lock protects modifications of quota state (on quotaon and
97 * quotaoff) and readers who care about latest values take it as well.
99 * The spinlock ordering is hence:
100 * dq_data_lock > dq_list_lock > i_lock > dquot->dq_dqb_lock,
101 * dq_list_lock > dq_state_lock
103 * Note that some things (e.g. sb pointer, type, id) don't change during
104 * the life of the dquot structure and so needn't be protected by a lock
106 * Operations accessing dquots via inode pointers are protected by dquot_srcu.
107 * Reading a pointer needs srcu_read_lock(&dquot_srcu), and
108 * synchronize_srcu(&dquot_srcu) is called after clearing pointers from
109 * inode and before dropping dquot references to avoid use of dquots after
110 * they are freed. dq_data_lock is used to serialize the pointer setting and
111 * clearing operations.
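 *
 * For illustration, a minimal sketch of the reader pattern (real callers
 * below, e.g. __dquot_alloc_space(), follow exactly this shape):
 *
 *	idx = srcu_read_lock(&dquot_srcu);
 *	dquot = srcu_dereference(i_dquot(inode)[cnt], &dquot_srcu);
 *	... a non-NULL dquot cannot be freed until the matching unlock ...
 *	srcu_read_unlock(&dquot_srcu, idx);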
112 * Special care needs to be taken about S_NOQUOTA inode flag (marking that
113 * inode is a quota file). Functions adding pointers from inode to dquots have
114 * to check this flag under dq_data_lock and then (if S_NOQUOTA is not set) they
115 * have to do all pointer modifications before dropping dq_data_lock. This makes
116 * sure they cannot race with quotaon which first sets S_NOQUOTA flag and
117 * then drops all pointers to dquots from an inode.
119 * Each dquot has its dq_lock mutex. Dquot is locked when it is being read to
120 * memory (or space for it is being allocated) on the first dqget(), when it is
121 * being written out, and when it is being released on the last dqput(). The
122 * allocation and release operations are serialized by the dq_lock and by
123 * checking the use count in dquot_release().
125 * Lock ordering (including related VFS locks) is the following:
126 * s_umount > i_mutex > journal_lock > dquot->dq_lock > dqio_sem
129 static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_list_lock);
130 static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_state_lock);
131 __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_data_lock);
132 EXPORT_SYMBOL(dq_data_lock);
133 DEFINE_STATIC_SRCU(dquot_srcu);
135 static DECLARE_WAIT_QUEUE_HEAD(dquot_ref_wq);
137 void __quota_error(struct super_block *sb, const char *func,
138 const char *fmt, ...)
140 if (printk_ratelimit()) {
142 struct va_format vaf;
149 printk(KERN_ERR "Quota error (device %s): %s: %pV\n",
150 sb->s_id, func, &vaf);
155 EXPORT_SYMBOL(__quota_error);
157 #if defined(CONFIG_QUOTA_DEBUG) || defined(CONFIG_PRINT_QUOTA_WARNING)
158 static char *quotatypes[] = INITQFNAMES;
160 static struct quota_format_type *quota_formats; /* List of registered formats */
161 static struct quota_module_name module_names[] = INIT_QUOTA_MODULE_NAMES;
163 /* SLAB cache for dquot structures */
164 static struct kmem_cache *dquot_cachep;
166 int register_quota_format(struct quota_format_type *fmt)
168 spin_lock(&dq_list_lock);
169 fmt->qf_next = quota_formats;
171 spin_unlock(&dq_list_lock);
174 EXPORT_SYMBOL(register_quota_format);
176 void unregister_quota_format(struct quota_format_type *fmt)
178 struct quota_format_type **actqf;
180 spin_lock(&dq_list_lock);
181 for (actqf = &quota_formats; *actqf && *actqf != fmt;
182 actqf = &(*actqf)->qf_next)
185 *actqf = (*actqf)->qf_next;
186 spin_unlock(&dq_list_lock);
188 EXPORT_SYMBOL(unregister_quota_format);
190 static struct quota_format_type *find_quota_format(int id)
192 struct quota_format_type *actqf;
194 spin_lock(&dq_list_lock);
195 for (actqf = quota_formats; actqf && actqf->qf_fmt_id != id;
196 actqf = actqf->qf_next)
198 if (!actqf || !try_module_get(actqf->qf_owner)) {
201 spin_unlock(&dq_list_lock);
203 for (qm = 0; module_names[qm].qm_fmt_id &&
204 module_names[qm].qm_fmt_id != id; qm++)
206 if (!module_names[qm].qm_fmt_id ||
207 request_module(module_names[qm].qm_mod_name))
210 spin_lock(&dq_list_lock);
211 for (actqf = quota_formats; actqf && actqf->qf_fmt_id != id;
212 actqf = actqf->qf_next)
214 if (actqf && !try_module_get(actqf->qf_owner))
217 spin_unlock(&dq_list_lock);
221 static void put_quota_format(struct quota_format_type *fmt)
223 module_put(fmt->qf_owner);
227 * Dquot List Management:
228 * The quota code uses five lists for dquot management: the inuse_list,
229 * releasing_dquots, free_dquots, dqi_dirty_list, and dquot_hash[] array.
230 * A single dquot structure may be on some of those lists, depending on its current state.
233 * All dquots are placed to the end of inuse_list when first created, and this
234 * list is used for invalidate operation, which must look at every dquot.
236 * When the last reference of a dquot is dropped, the dquot is added to
237 * releasing_dquots. We'll then queue a work item which will call
238 * synchronize_srcu() and after that perform the final cleanup of all the
239 * dquots on the list. Each cleaned up dquot is moved to free_dquots list.
240 * Both releasing_dquots and free_dquots use the dq_free list_head in the dquot
243 * Unused and cleaned up dquots are in the free_dquots list and this list is
244 * searched whenever we need an available dquot. Dquots are removed from the
245 * list as soon as they are used again and dqstats.free_dquots gives the number
246 * of dquots on the list. When a dquot is invalidated it's completely released from memory.
249 * Dirty dquots are added to the dqi_dirty_list of quota_info when marked
250 * dirty, and this list is searched when writing dirty dquots back to the
251 * quota file. Note that some filesystems do dirty dquot tracking on their
252 * own (e.g. in a journal) and thus don't use dqi_dirty_list.
254 * Dquots with a specific identity (device, type and id) are placed on
255 * one of the dquot_hash[] hash chains. This provides an efficient search
256 * mechanism to locate a specific dquot.
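 *
 * Informational lifecycle sketch (all helpers are defined below):
 *
 *	get_empty_dquot() -> put_inuse() + insert_dquot_hash()	(new dquot)
 *	last dqput() -> put_releasing_dquots()			(delayed work queued)
 *	quota_release_workfn() -> put_dquot_last()		(onto free_dquots)
 *	dqget() reusing a free dquot -> remove_free_dquot()	(back in use)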
259 static LIST_HEAD(inuse_list);
260 static LIST_HEAD(free_dquots);
261 static LIST_HEAD(releasing_dquots);
262 static unsigned int dq_hash_bits, dq_hash_mask;
263 static struct hlist_head *dquot_hash;
265 struct dqstats dqstats;
266 EXPORT_SYMBOL(dqstats);
268 static qsize_t inode_get_rsv_space(struct inode *inode);
269 static qsize_t __inode_get_rsv_space(struct inode *inode);
270 static int __dquot_initialize(struct inode *inode, int type);
272 static void quota_release_workfn(struct work_struct *work);
273 static DECLARE_DELAYED_WORK(quota_release_work, quota_release_workfn);
275 static inline unsigned int
276 hashfn(const struct super_block *sb, struct kqid qid)
278 unsigned int id = from_kqid(&init_user_ns, qid);
282 tmp = (((unsigned long)sb>>L1_CACHE_SHIFT) ^ id) * (MAXQUOTAS - type);
283 return (tmp + (tmp >> dq_hash_bits)) & dq_hash_mask;
287 * Following list functions expect dq_list_lock to be held
289 static inline void insert_dquot_hash(struct dquot *dquot)
291 struct hlist_head *head;
292 head = dquot_hash + hashfn(dquot->dq_sb, dquot->dq_id);
293 hlist_add_head(&dquot->dq_hash, head);
296 static inline void remove_dquot_hash(struct dquot *dquot)
298 hlist_del_init(&dquot->dq_hash);
301 static struct dquot *find_dquot(unsigned int hashent, struct super_block *sb,
304 struct hlist_node *node;
307 hlist_for_each (node, dquot_hash+hashent) {
308 dquot = hlist_entry(node, struct dquot, dq_hash);
309 if (dquot->dq_sb == sb && qid_eq(dquot->dq_id, qid))
315 /* Add a dquot to the tail of the free list */
316 static inline void put_dquot_last(struct dquot *dquot)
318 list_add_tail(&dquot->dq_free, &free_dquots);
319 dqstats_inc(DQST_FREE_DQUOTS);
322 static inline void put_releasing_dquots(struct dquot *dquot)
324 list_add_tail(&dquot->dq_free, &releasing_dquots);
325 set_bit(DQ_RELEASING_B, &dquot->dq_flags);
328 static inline void remove_free_dquot(struct dquot *dquot)
330 if (list_empty(&dquot->dq_free))
332 list_del_init(&dquot->dq_free);
333 if (!test_bit(DQ_RELEASING_B, &dquot->dq_flags))
334 dqstats_dec(DQST_FREE_DQUOTS);
336 clear_bit(DQ_RELEASING_B, &dquot->dq_flags);
339 static inline void put_inuse(struct dquot *dquot)
341 /* We add to the back of the inuse list so we don't have to restart
342 * the traversal when we block */
343 list_add_tail(&dquot->dq_inuse, &inuse_list);
344 dqstats_inc(DQST_ALLOC_DQUOTS);
347 static inline void remove_inuse(struct dquot *dquot)
349 dqstats_dec(DQST_ALLOC_DQUOTS);
350 list_del(&dquot->dq_inuse);
353 * End of list functions needing dq_list_lock
356 static void wait_on_dquot(struct dquot *dquot)
358 mutex_lock(&dquot->dq_lock);
359 mutex_unlock(&dquot->dq_lock);
362 static inline int dquot_active(struct dquot *dquot)
364 return test_bit(DQ_ACTIVE_B, &dquot->dq_flags);
367 static inline int dquot_dirty(struct dquot *dquot)
369 return test_bit(DQ_MOD_B, &dquot->dq_flags);
372 static inline int mark_dquot_dirty(struct dquot *dquot)
374 return dquot->dq_sb->dq_op->mark_dirty(dquot);
377 /* Mark dquot dirty in atomic manner, and return its old dirty flag state */
378 int dquot_mark_dquot_dirty(struct dquot *dquot)
382 if (!dquot_active(dquot))
385 if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NOLIST_DIRTY)
386 return test_and_set_bit(DQ_MOD_B, &dquot->dq_flags);
388 /* If quota is dirty already, we don't have to acquire dq_list_lock */
389 if (dquot_dirty(dquot))
392 spin_lock(&dq_list_lock);
393 if (!test_and_set_bit(DQ_MOD_B, &dquot->dq_flags)) {
394 list_add(&dquot->dq_dirty, &sb_dqopt(dquot->dq_sb)->
395 info[dquot->dq_id.type].dqi_dirty_list);
398 spin_unlock(&dq_list_lock);
401 EXPORT_SYMBOL(dquot_mark_dquot_dirty);
403 /* Dirtify all the dquots - this can block when journalling */
404 static inline int mark_all_dquot_dirty(struct dquot __rcu * const *dquots)
410 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
411 dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
413 /* Even in case of error we have to continue */
414 ret = mark_dquot_dirty(dquot);
421 static inline void dqput_all(struct dquot **dquot)
425 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
429 static inline int clear_dquot_dirty(struct dquot *dquot)
431 if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NOLIST_DIRTY)
432 return test_and_clear_bit(DQ_MOD_B, &dquot->dq_flags);
434 spin_lock(&dq_list_lock);
435 if (!test_and_clear_bit(DQ_MOD_B, &dquot->dq_flags)) {
436 spin_unlock(&dq_list_lock);
439 list_del_init(&dquot->dq_dirty);
440 spin_unlock(&dq_list_lock);
444 void mark_info_dirty(struct super_block *sb, int type)
446 spin_lock(&dq_data_lock);
447 sb_dqopt(sb)->info[type].dqi_flags |= DQF_INFO_DIRTY;
448 spin_unlock(&dq_data_lock);
450 EXPORT_SYMBOL(mark_info_dirty);
453 * Read dquot from disk and alloc space for it
456 int dquot_acquire(struct dquot *dquot)
458 int ret = 0, ret2 = 0;
459 unsigned int memalloc;
460 struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);
462 mutex_lock(&dquot->dq_lock);
463 memalloc = memalloc_nofs_save();
464 if (!test_bit(DQ_READ_B, &dquot->dq_flags)) {
465 ret = dqopt->ops[dquot->dq_id.type]->read_dqblk(dquot);
469 /* Make sure flags update is visible after dquot has been filled */
470 smp_mb__before_atomic();
471 set_bit(DQ_READ_B, &dquot->dq_flags);
472 /* Instantiate dquot if needed */
473 if (!dquot_active(dquot) && !dquot->dq_off) {
474 ret = dqopt->ops[dquot->dq_id.type]->commit_dqblk(dquot);
475 /* Write the info if needed */
476 if (info_dirty(&dqopt->info[dquot->dq_id.type])) {
477 ret2 = dqopt->ops[dquot->dq_id.type]->write_file_info(
478 dquot->dq_sb, dquot->dq_id.type);
488 * Make sure flags update is visible after on-disk struct has been
489 * allocated. Paired with smp_rmb() in dqget().
491 smp_mb__before_atomic();
492 set_bit(DQ_ACTIVE_B, &dquot->dq_flags);
494 memalloc_nofs_restore(memalloc);
495 mutex_unlock(&dquot->dq_lock);
498 EXPORT_SYMBOL(dquot_acquire);
501 * Write dquot to disk
503 int dquot_commit(struct dquot *dquot)
506 unsigned int memalloc;
507 struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);
509 mutex_lock(&dquot->dq_lock);
510 memalloc = memalloc_nofs_save();
511 if (!clear_dquot_dirty(dquot))
513 /* A dquot can be inactive only if there was an error during read/init
514 * => we'd better not write it */
515 if (dquot_active(dquot))
516 ret = dqopt->ops[dquot->dq_id.type]->commit_dqblk(dquot);
520 memalloc_nofs_restore(memalloc);
521 mutex_unlock(&dquot->dq_lock);
524 EXPORT_SYMBOL(dquot_commit);
529 int dquot_release(struct dquot *dquot)
531 int ret = 0, ret2 = 0;
532 unsigned int memalloc;
533 struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);
535 mutex_lock(&dquot->dq_lock);
536 memalloc = memalloc_nofs_save();
537 /* Check whether we are not racing with some other dqget() */
538 if (dquot_is_busy(dquot))
540 if (dqopt->ops[dquot->dq_id.type]->release_dqblk) {
541 ret = dqopt->ops[dquot->dq_id.type]->release_dqblk(dquot);
543 if (info_dirty(&dqopt->info[dquot->dq_id.type])) {
544 ret2 = dqopt->ops[dquot->dq_id.type]->write_file_info(
545 dquot->dq_sb, dquot->dq_id.type);
550 clear_bit(DQ_ACTIVE_B, &dquot->dq_flags);
552 memalloc_nofs_restore(memalloc);
553 mutex_unlock(&dquot->dq_lock);
556 EXPORT_SYMBOL(dquot_release);
558 void dquot_destroy(struct dquot *dquot)
560 kmem_cache_free(dquot_cachep, dquot);
562 EXPORT_SYMBOL(dquot_destroy);
564 static inline void do_destroy_dquot(struct dquot *dquot)
566 dquot->dq_sb->dq_op->destroy_dquot(dquot);
569 /* Invalidate all dquots on the list. Note that this function is called after
570 * quota is disabled and pointers from inodes removed so there cannot be new
571 * quota users. There can still be some users of quotas due to inodes being
572 * just deleted or pruned by prune_icache() (those are not attached to any
573 * list) or parallel quotactl call. We have to wait for such users.
575 static void invalidate_dquots(struct super_block *sb, int type)
577 struct dquot *dquot, *tmp;
580 flush_delayed_work(&quota_release_work);
582 spin_lock(&dq_list_lock);
583 list_for_each_entry_safe(dquot, tmp, &inuse_list, dq_inuse) {
584 if (dquot->dq_sb != sb)
586 if (dquot->dq_id.type != type)
588 /* Wait for dquot users */
589 if (atomic_read(&dquot->dq_count)) {
590 atomic_inc(&dquot->dq_count);
591 spin_unlock(&dq_list_lock);
593 * Once dqput() wakes us up, we know it's time to free the dquot.
595 * IMPORTANT: we rely on the fact that there is always
596 * at most one process waiting for dquot to free.
597 * Otherwise dq_count would be > 1 and we would never wake up. */
600 wait_event(dquot_ref_wq,
601 atomic_read(&dquot->dq_count) == 1);
603 /* At this moment the dquot need not exist (it could have been
604 * reclaimed by prune_dqcache()). Hence we must restart the scan. */
609 * The last user already dropped its reference but dquot didn't
610 * get fully cleaned up yet. Restart the scan which flushes the
611 * work cleaning up released dquots.
613 if (test_bit(DQ_RELEASING_B, &dquot->dq_flags)) {
614 spin_unlock(&dq_list_lock);
618 * Quota now has no users and it has been written on last dqput(). */
621 remove_dquot_hash(dquot);
622 remove_free_dquot(dquot);
624 do_destroy_dquot(dquot);
626 spin_unlock(&dq_list_lock);
629 /* Call callback for every active dquot on given filesystem */
630 int dquot_scan_active(struct super_block *sb,
631 int (*fn)(struct dquot *dquot, unsigned long priv),
634 struct dquot *dquot, *old_dquot = NULL;
637 WARN_ON_ONCE(!rwsem_is_locked(&sb->s_umount));
639 spin_lock(&dq_list_lock);
640 list_for_each_entry(dquot, &inuse_list, dq_inuse) {
641 if (!dquot_active(dquot))
643 if (dquot->dq_sb != sb)
645 /* Now we have active dquot so we can just increase use count */
646 atomic_inc(&dquot->dq_count);
647 spin_unlock(&dq_list_lock);
651 * ->release_dquot() can be racing with us. Our reference
652 * protects us from new calls to it so just wait for any
653 * outstanding call and recheck the DQ_ACTIVE_B after that.
655 wait_on_dquot(dquot);
656 if (dquot_active(dquot)) {
657 ret = fn(dquot, priv);
661 spin_lock(&dq_list_lock);
662 /* We are safe to continue now because our dquot could not
663 * be moved out of the inuse list while we hold the reference */
665 spin_unlock(&dq_list_lock);
670 EXPORT_SYMBOL(dquot_scan_active);
672 static inline int dquot_write_dquot(struct dquot *dquot)
674 int ret = dquot->dq_sb->dq_op->write_dquot(dquot);
676 quota_error(dquot->dq_sb, "Can't write quota structure "
677 "(error %d). Quota may get out of sync!", ret);
678 /* Clear dirty bit anyway to avoid infinite loop. */
679 clear_dquot_dirty(dquot);
684 /* Write all dquot structures to quota files */
685 int dquot_writeback_dquots(struct super_block *sb, int type)
687 struct list_head dirty;
689 struct quota_info *dqopt = sb_dqopt(sb);
693 WARN_ON_ONCE(!rwsem_is_locked(&sb->s_umount));
695 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
696 if (type != -1 && cnt != type)
698 if (!sb_has_quota_active(sb, cnt))
700 spin_lock(&dq_list_lock);
701 /* Move list away to avoid livelock. */
702 list_replace_init(&dqopt->info[cnt].dqi_dirty_list, &dirty);
703 while (!list_empty(&dirty)) {
704 dquot = list_first_entry(&dirty, struct dquot,
707 WARN_ON(!dquot_active(dquot));
708 /* If the dquot is releasing we should not touch it */
709 if (test_bit(DQ_RELEASING_B, &dquot->dq_flags)) {
710 spin_unlock(&dq_list_lock);
711 flush_delayed_work(&quota_release_work);
712 spin_lock(&dq_list_lock);
716 /* Now we have an active dquot to which someone is
717 * holding a reference so we can safely just increase the use count */
720 spin_unlock(&dq_list_lock);
721 err = dquot_write_dquot(dquot);
725 spin_lock(&dq_list_lock);
727 spin_unlock(&dq_list_lock);
730 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
731 if ((cnt == type || type == -1) && sb_has_quota_active(sb, cnt)
732 && info_dirty(&dqopt->info[cnt]))
733 sb->dq_op->write_info(sb, cnt);
734 dqstats_inc(DQST_SYNCS);
738 EXPORT_SYMBOL(dquot_writeback_dquots);
740 /* Write all dquot structures to disk and make them visible to userspace */
741 int dquot_quota_sync(struct super_block *sb, int type)
743 struct quota_info *dqopt = sb_dqopt(sb);
747 ret = dquot_writeback_dquots(sb, type);
750 if (dqopt->flags & DQUOT_QUOTA_SYS_FILE)
753 /* This is not very clever (or fast) but currently I don't know of
754 * any other simple way of getting quota data to disk, and we must get
755 * it there for it to be visible to userspace... */
756 if (sb->s_op->sync_fs) {
757 ret = sb->s_op->sync_fs(sb, 1);
761 ret = sync_blockdev(sb->s_bdev);
766 * Now that everything is written we can discard the pagecache so
767 * that userspace sees the changes.
769 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
770 if (type != -1 && cnt != type)
772 if (!sb_has_quota_active(sb, cnt))
774 inode_lock(dqopt->files[cnt]);
775 truncate_inode_pages(&dqopt->files[cnt]->i_data, 0);
776 inode_unlock(dqopt->files[cnt]);
781 EXPORT_SYMBOL(dquot_quota_sync);
784 dqcache_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
787 unsigned long freed = 0;
789 spin_lock(&dq_list_lock);
790 while (!list_empty(&free_dquots) && sc->nr_to_scan) {
791 dquot = list_first_entry(&free_dquots, struct dquot, dq_free);
792 remove_dquot_hash(dquot);
793 remove_free_dquot(dquot);
795 do_destroy_dquot(dquot);
799 spin_unlock(&dq_list_lock);
804 dqcache_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
806 return vfs_pressure_ratio(
807 percpu_counter_read_positive(&dqstats.counter[DQST_FREE_DQUOTS]));
810 static struct shrinker dqcache_shrinker = {
811 .count_objects = dqcache_shrink_count,
812 .scan_objects = dqcache_shrink_scan,
813 .seeks = DEFAULT_SEEKS,
817 * Safely release dquot and put reference to dquot.
819 static void quota_release_workfn(struct work_struct *work)
822 struct list_head rls_head;
824 spin_lock(&dq_list_lock);
825 /* Exchange the list head to avoid livelock. */
826 list_replace_init(&releasing_dquots, &rls_head);
827 spin_unlock(&dq_list_lock);
828 synchronize_srcu(&dquot_srcu);
831 spin_lock(&dq_list_lock);
832 while (!list_empty(&rls_head)) {
833 dquot = list_first_entry(&rls_head, struct dquot, dq_free);
834 WARN_ON_ONCE(atomic_read(&dquot->dq_count));
836 * Note that DQ_RELEASING_B protects us from racing with
837 * invalidate_dquots() calls so we are safe to work with the
838 * dquot even after we drop dq_list_lock.
840 if (dquot_dirty(dquot)) {
841 spin_unlock(&dq_list_lock);
842 /* Commit dquot before releasing */
843 dquot_write_dquot(dquot);
846 if (dquot_active(dquot)) {
847 spin_unlock(&dq_list_lock);
848 dquot->dq_sb->dq_op->release_dquot(dquot);
851 /* Dquot is inactive and clean, now move it to free list */
852 remove_free_dquot(dquot);
853 put_dquot_last(dquot);
855 spin_unlock(&dq_list_lock);
859 * Put reference to dquot
861 void dqput(struct dquot *dquot)
865 #ifdef CONFIG_QUOTA_DEBUG
866 if (!atomic_read(&dquot->dq_count)) {
867 quota_error(dquot->dq_sb, "trying to free free dquot of %s %d",
868 quotatypes[dquot->dq_id.type],
869 from_kqid(&init_user_ns, dquot->dq_id));
873 dqstats_inc(DQST_DROPS);
875 spin_lock(&dq_list_lock);
876 if (atomic_read(&dquot->dq_count) > 1) {
877 /* We have more than one user... nothing to do */
878 atomic_dec(&dquot->dq_count);
879 /* Releasing dquot during quotaoff phase? */
880 if (!sb_has_quota_active(dquot->dq_sb, dquot->dq_id.type) &&
881 atomic_read(&dquot->dq_count) == 1)
882 wake_up(&dquot_ref_wq);
883 spin_unlock(&dq_list_lock);
887 /* Need to release dquot? */
888 #ifdef CONFIG_QUOTA_DEBUG
890 BUG_ON(!list_empty(&dquot->dq_free));
892 put_releasing_dquots(dquot);
893 atomic_dec(&dquot->dq_count);
894 spin_unlock(&dq_list_lock);
895 queue_delayed_work(system_unbound_wq, &quota_release_work, 1);
897 EXPORT_SYMBOL(dqput);
899 struct dquot *dquot_alloc(struct super_block *sb, int type)
901 return kmem_cache_zalloc(dquot_cachep, GFP_NOFS);
903 EXPORT_SYMBOL(dquot_alloc);
905 static struct dquot *get_empty_dquot(struct super_block *sb, int type)
909 dquot = sb->dq_op->alloc_dquot(sb, type);
913 mutex_init(&dquot->dq_lock);
914 INIT_LIST_HEAD(&dquot->dq_free);
915 INIT_LIST_HEAD(&dquot->dq_inuse);
916 INIT_HLIST_NODE(&dquot->dq_hash);
917 INIT_LIST_HEAD(&dquot->dq_dirty);
919 dquot->dq_id = make_kqid_invalid(type);
920 atomic_set(&dquot->dq_count, 1);
921 spin_lock_init(&dquot->dq_dqb_lock);
927 * Get reference to dquot
929 * Locking is slightly tricky here. We are guarded from parallel quotaoff()
930 * destroying our dquot by:
931 * a) checking for quota flags under dq_list_lock and
932 * b) getting a reference to dquot before we release dq_list_lock
934 struct dquot *dqget(struct super_block *sb, struct kqid qid)
936 unsigned int hashent = hashfn(sb, qid);
937 struct dquot *dquot, *empty = NULL;
939 if (!qid_has_mapping(sb->s_user_ns, qid))
940 return ERR_PTR(-EINVAL);
942 if (!sb_has_quota_active(sb, qid.type))
943 return ERR_PTR(-ESRCH);
945 spin_lock(&dq_list_lock);
946 spin_lock(&dq_state_lock);
947 if (!sb_has_quota_active(sb, qid.type)) {
948 spin_unlock(&dq_state_lock);
949 spin_unlock(&dq_list_lock);
950 dquot = ERR_PTR(-ESRCH);
953 spin_unlock(&dq_state_lock);
955 dquot = find_dquot(hashent, sb, qid);
958 spin_unlock(&dq_list_lock);
959 empty = get_empty_dquot(sb, qid.type);
961 schedule(); /* Try to wait for a moment... */
967 /* all dquots go on the inuse_list */
969 /* hash it first so it can be found */
970 insert_dquot_hash(dquot);
971 spin_unlock(&dq_list_lock);
972 dqstats_inc(DQST_LOOKUPS);
974 if (!atomic_read(&dquot->dq_count))
975 remove_free_dquot(dquot);
976 atomic_inc(&dquot->dq_count);
977 spin_unlock(&dq_list_lock);
978 dqstats_inc(DQST_CACHE_HITS);
979 dqstats_inc(DQST_LOOKUPS);
981 /* Wait for dq_lock - after this we know that either dquot_release() is
982 * already finished or it will be canceled due to dq_count > 0 test */
983 wait_on_dquot(dquot);
984 /* Read the dquot / allocate space in quota file */
985 if (!dquot_active(dquot)) {
988 err = sb->dq_op->acquire_dquot(dquot);
991 dquot = ERR_PTR(err);
996 * Make sure following reads see filled structure - paired with
997 * smp_mb__before_atomic() in dquot_acquire().
1000 #ifdef CONFIG_QUOTA_DEBUG
1001 BUG_ON(!dquot->dq_sb); /* Has somebody invalidated entry under us? */
1005 do_destroy_dquot(empty);
1009 EXPORT_SYMBOL(dqget);
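/*
 * A minimal usage sketch (illustrative only): dqget() returns a referenced
 * dquot that must be balanced by dqput().
 *
 *	struct dquot *dquot = dqget(sb, make_kqid_uid(uid));
 *
 *	if (!IS_ERR(dquot)) {
 *		... inspect dquot->dq_dqb under dquot->dq_dqb_lock ...
 *		dqput(dquot);
 *	}
 */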
1011 static inline struct dquot __rcu **i_dquot(struct inode *inode)
1013 /* Force __rcu for now until filesystems are fixed */
1014 return (struct dquot __rcu **)inode->i_sb->s_op->get_dquots(inode);
1017 static int dqinit_needed(struct inode *inode, int type)
1019 struct dquot __rcu * const *dquots;
1022 if (IS_NOQUOTA(inode))
1025 dquots = i_dquot(inode);
1027 return !dquots[type];
1028 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
1034 /* This routine is guarded by s_umount semaphore */
1035 static int add_dquot_ref(struct super_block *sb, int type)
1037 struct inode *inode, *old_inode = NULL;
1038 #ifdef CONFIG_QUOTA_DEBUG
1043 spin_lock(&sb->s_inode_list_lock);
1044 list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
1045 spin_lock(&inode->i_lock);
1046 if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) ||
1047 !atomic_read(&inode->i_writecount) ||
1048 !dqinit_needed(inode, type)) {
1049 spin_unlock(&inode->i_lock);
1053 spin_unlock(&inode->i_lock);
1054 spin_unlock(&sb->s_inode_list_lock);
1056 #ifdef CONFIG_QUOTA_DEBUG
1057 if (unlikely(inode_get_rsv_space(inode) > 0))
1061 err = __dquot_initialize(inode, type);
1068 * We hold a reference to 'inode' so it couldn't have been
1069 * removed from s_inodes list while we dropped the
1070 * s_inode_list_lock. We cannot iput the inode now as we can be
1071 * holding the last reference and we cannot iput it under
1072 * s_inode_list_lock. So we keep the reference and iput it later. */
1077 spin_lock(&sb->s_inode_list_lock);
1079 spin_unlock(&sb->s_inode_list_lock);
1082 #ifdef CONFIG_QUOTA_DEBUG
1084 quota_error(sb, "Writes happened before quota was turned on "
1085 "thus quota information is probably inconsistent. "
1086 "Please run quotacheck(8)");
1092 static void remove_dquot_ref(struct super_block *sb, int type)
1094 struct inode *inode;
1095 #ifdef CONFIG_QUOTA_DEBUG
1099 spin_lock(&sb->s_inode_list_lock);
1100 list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
1102 * We have to scan also I_NEW inodes because they can already
1103 * have quota pointer initialized. Luckily, we need to touch
1104 * only quota pointers and these have separate locking (dq_data_lock). */
1107 spin_lock(&dq_data_lock);
1108 if (!IS_NOQUOTA(inode)) {
1109 struct dquot __rcu **dquots = i_dquot(inode);
1110 struct dquot *dquot = srcu_dereference_check(
1111 dquots[type], &dquot_srcu,
1112 lockdep_is_held(&dq_data_lock));
1114 #ifdef CONFIG_QUOTA_DEBUG
1115 if (unlikely(inode_get_rsv_space(inode) > 0))
1118 rcu_assign_pointer(dquots[type], NULL);
1122 spin_unlock(&dq_data_lock);
1124 spin_unlock(&sb->s_inode_list_lock);
1125 #ifdef CONFIG_QUOTA_DEBUG
1127 printk(KERN_WARNING "VFS (%s): Writes happened after quota"
1128 " was disabled thus quota information is probably "
1129 "inconsistent. Please run quotacheck(8).\n", sb->s_id);
1134 /* Gather all references from inodes and drop them */
1135 static void drop_dquot_ref(struct super_block *sb, int type)
1138 remove_dquot_ref(sb, type);
1142 void dquot_free_reserved_space(struct dquot *dquot, qsize_t number)
1144 if (dquot->dq_dqb.dqb_rsvspace >= number)
1145 dquot->dq_dqb.dqb_rsvspace -= number;
1148 dquot->dq_dqb.dqb_rsvspace = 0;
1150 if (dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace <=
1151 dquot->dq_dqb.dqb_bsoftlimit)
1152 dquot->dq_dqb.dqb_btime = (time64_t) 0;
1153 clear_bit(DQ_BLKS_B, &dquot->dq_flags);
1156 static void dquot_decr_inodes(struct dquot *dquot, qsize_t number)
1158 if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NEGATIVE_USAGE ||
1159 dquot->dq_dqb.dqb_curinodes >= number)
1160 dquot->dq_dqb.dqb_curinodes -= number;
1162 dquot->dq_dqb.dqb_curinodes = 0;
1163 if (dquot->dq_dqb.dqb_curinodes <= dquot->dq_dqb.dqb_isoftlimit)
1164 dquot->dq_dqb.dqb_itime = (time64_t) 0;
1165 clear_bit(DQ_INODES_B, &dquot->dq_flags);
1168 static void dquot_decr_space(struct dquot *dquot, qsize_t number)
1170 if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NEGATIVE_USAGE ||
1171 dquot->dq_dqb.dqb_curspace >= number)
1172 dquot->dq_dqb.dqb_curspace -= number;
1174 dquot->dq_dqb.dqb_curspace = 0;
1175 if (dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace <=
1176 dquot->dq_dqb.dqb_bsoftlimit)
1177 dquot->dq_dqb.dqb_btime = (time64_t) 0;
1178 clear_bit(DQ_BLKS_B, &dquot->dq_flags);
1182 struct super_block *w_sb;
1183 struct kqid w_dq_id;
1187 static int warning_issued(struct dquot *dquot, const int warntype)
1189 int flag = (warntype == QUOTA_NL_BHARDWARN ||
1190 warntype == QUOTA_NL_BSOFTLONGWARN) ? DQ_BLKS_B :
1191 ((warntype == QUOTA_NL_IHARDWARN ||
1192 warntype == QUOTA_NL_ISOFTLONGWARN) ? DQ_INODES_B : 0);
1196 return test_and_set_bit(flag, &dquot->dq_flags);
1199 #ifdef CONFIG_PRINT_QUOTA_WARNING
1200 static int flag_print_warnings = 1;
1202 static int need_print_warning(struct dquot_warn *warn)
1204 if (!flag_print_warnings)
1207 switch (warn->w_dq_id.type) {
1209 return uid_eq(current_fsuid(), warn->w_dq_id.uid);
1211 return in_group_p(warn->w_dq_id.gid);
1218 /* Print warning to the user who exceeded quota */
1219 static void print_warning(struct dquot_warn *warn)
1222 struct tty_struct *tty;
1223 int warntype = warn->w_type;
1225 if (warntype == QUOTA_NL_IHARDBELOW ||
1226 warntype == QUOTA_NL_ISOFTBELOW ||
1227 warntype == QUOTA_NL_BHARDBELOW ||
1228 warntype == QUOTA_NL_BSOFTBELOW || !need_print_warning(warn))
1231 tty = get_current_tty();
1234 tty_write_message(tty, warn->w_sb->s_id);
1235 if (warntype == QUOTA_NL_ISOFTWARN || warntype == QUOTA_NL_BSOFTWARN)
1236 tty_write_message(tty, ": warning, ");
1238 tty_write_message(tty, ": write failed, ");
1239 tty_write_message(tty, quotatypes[warn->w_dq_id.type]);
1241 case QUOTA_NL_IHARDWARN:
1242 msg = " file limit reached.\r\n";
1244 case QUOTA_NL_ISOFTLONGWARN:
1245 msg = " file quota exceeded too long.\r\n";
1247 case QUOTA_NL_ISOFTWARN:
1248 msg = " file quota exceeded.\r\n";
1250 case QUOTA_NL_BHARDWARN:
1251 msg = " block limit reached.\r\n";
1253 case QUOTA_NL_BSOFTLONGWARN:
1254 msg = " block quota exceeded too long.\r\n";
1256 case QUOTA_NL_BSOFTWARN:
1257 msg = " block quota exceeded.\r\n";
1260 tty_write_message(tty, msg);
1265 static void prepare_warning(struct dquot_warn *warn, struct dquot *dquot,
1268 if (warning_issued(dquot, warntype))
1270 warn->w_type = warntype;
1271 warn->w_sb = dquot->dq_sb;
1272 warn->w_dq_id = dquot->dq_id;
1276 * Write warnings to the console and send warning messages over netlink.
1278 * Note that this function can call into tty and networking code.
1280 static void flush_warnings(struct dquot_warn *warn)
1284 for (i = 0; i < MAXQUOTAS; i++) {
1285 if (warn[i].w_type == QUOTA_NL_NOWARN)
1287 #ifdef CONFIG_PRINT_QUOTA_WARNING
1288 print_warning(&warn[i]);
1290 quota_send_warning(warn[i].w_dq_id,
1291 warn[i].w_sb->s_dev, warn[i].w_type);
1295 static int ignore_hardlimit(struct dquot *dquot)
1297 struct mem_dqinfo *info = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_id.type];
1299 return capable(CAP_SYS_RESOURCE) &&
1300 (info->dqi_format->qf_fmt_id != QFMT_VFS_OLD ||
1301 !(info->dqi_flags & DQF_ROOT_SQUASH));
1304 static int dquot_add_inodes(struct dquot *dquot, qsize_t inodes,
1305 struct dquot_warn *warn)
1310 spin_lock(&dquot->dq_dqb_lock);
1311 newinodes = dquot->dq_dqb.dqb_curinodes + inodes;
1312 if (!sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_id.type) ||
1313 test_bit(DQ_FAKE_B, &dquot->dq_flags))
1316 if (dquot->dq_dqb.dqb_ihardlimit &&
1317 newinodes > dquot->dq_dqb.dqb_ihardlimit &&
1318 !ignore_hardlimit(dquot)) {
1319 prepare_warning(warn, dquot, QUOTA_NL_IHARDWARN);
1324 if (dquot->dq_dqb.dqb_isoftlimit &&
1325 newinodes > dquot->dq_dqb.dqb_isoftlimit &&
1326 dquot->dq_dqb.dqb_itime &&
1327 ktime_get_real_seconds() >= dquot->dq_dqb.dqb_itime &&
1328 !ignore_hardlimit(dquot)) {
1329 prepare_warning(warn, dquot, QUOTA_NL_ISOFTLONGWARN);
1334 if (dquot->dq_dqb.dqb_isoftlimit &&
1335 newinodes > dquot->dq_dqb.dqb_isoftlimit &&
1336 dquot->dq_dqb.dqb_itime == 0) {
1337 prepare_warning(warn, dquot, QUOTA_NL_ISOFTWARN);
1338 dquot->dq_dqb.dqb_itime = ktime_get_real_seconds() +
1339 sb_dqopt(dquot->dq_sb)->info[dquot->dq_id.type].dqi_igrace;
1342 dquot->dq_dqb.dqb_curinodes = newinodes;
1345 spin_unlock(&dquot->dq_dqb_lock);
1349 static int dquot_add_space(struct dquot *dquot, qsize_t space,
1350 qsize_t rsv_space, unsigned int flags,
1351 struct dquot_warn *warn)
1354 struct super_block *sb = dquot->dq_sb;
1357 spin_lock(&dquot->dq_dqb_lock);
1358 if (!sb_has_quota_limits_enabled(sb, dquot->dq_id.type) ||
1359 test_bit(DQ_FAKE_B, &dquot->dq_flags))
1362 tspace = dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace
1363 + space + rsv_space;
1365 if (dquot->dq_dqb.dqb_bhardlimit &&
1366 tspace > dquot->dq_dqb.dqb_bhardlimit &&
1367 !ignore_hardlimit(dquot)) {
1368 if (flags & DQUOT_SPACE_WARN)
1369 prepare_warning(warn, dquot, QUOTA_NL_BHARDWARN);
1374 if (dquot->dq_dqb.dqb_bsoftlimit &&
1375 tspace > dquot->dq_dqb.dqb_bsoftlimit &&
1376 dquot->dq_dqb.dqb_btime &&
1377 ktime_get_real_seconds() >= dquot->dq_dqb.dqb_btime &&
1378 !ignore_hardlimit(dquot)) {
1379 if (flags & DQUOT_SPACE_WARN)
1380 prepare_warning(warn, dquot, QUOTA_NL_BSOFTLONGWARN);
1385 if (dquot->dq_dqb.dqb_bsoftlimit &&
1386 tspace > dquot->dq_dqb.dqb_bsoftlimit &&
1387 dquot->dq_dqb.dqb_btime == 0) {
1388 if (flags & DQUOT_SPACE_WARN) {
1389 prepare_warning(warn, dquot, QUOTA_NL_BSOFTWARN);
1390 dquot->dq_dqb.dqb_btime = ktime_get_real_seconds() +
1391 sb_dqopt(sb)->info[dquot->dq_id.type].dqi_bgrace;
1394 * We don't allow preallocation to exceed the softlimit, so exceeding it will always be printed
1403 * We have to be careful and go through warning generation & grace time
1404 * setting even if DQUOT_SPACE_NOFAIL is set. That's why we check it only here. */
1407 if (flags & DQUOT_SPACE_NOFAIL)
1410 dquot->dq_dqb.dqb_rsvspace += rsv_space;
1411 dquot->dq_dqb.dqb_curspace += space;
1413 spin_unlock(&dquot->dq_dqb_lock);
1417 static int info_idq_free(struct dquot *dquot, qsize_t inodes)
1421 if (test_bit(DQ_FAKE_B, &dquot->dq_flags) ||
1422 dquot->dq_dqb.dqb_curinodes <= dquot->dq_dqb.dqb_isoftlimit ||
1423 !sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_id.type))
1424 return QUOTA_NL_NOWARN;
1426 newinodes = dquot->dq_dqb.dqb_curinodes - inodes;
1427 if (newinodes <= dquot->dq_dqb.dqb_isoftlimit)
1428 return QUOTA_NL_ISOFTBELOW;
1429 if (dquot->dq_dqb.dqb_curinodes >= dquot->dq_dqb.dqb_ihardlimit &&
1430 newinodes < dquot->dq_dqb.dqb_ihardlimit)
1431 return QUOTA_NL_IHARDBELOW;
1432 return QUOTA_NL_NOWARN;
1435 static int info_bdq_free(struct dquot *dquot, qsize_t space)
1439 tspace = dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace;
1441 if (test_bit(DQ_FAKE_B, &dquot->dq_flags) ||
1442 tspace <= dquot->dq_dqb.dqb_bsoftlimit)
1443 return QUOTA_NL_NOWARN;
1445 if (tspace - space <= dquot->dq_dqb.dqb_bsoftlimit)
1446 return QUOTA_NL_BSOFTBELOW;
1447 if (tspace >= dquot->dq_dqb.dqb_bhardlimit &&
1448 tspace - space < dquot->dq_dqb.dqb_bhardlimit)
1449 return QUOTA_NL_BHARDBELOW;
1450 return QUOTA_NL_NOWARN;
1453 static int inode_quota_active(const struct inode *inode)
1455 struct super_block *sb = inode->i_sb;
1457 if (IS_NOQUOTA(inode))
1459 return sb_any_quota_loaded(sb) & ~sb_any_quota_suspended(sb);
1463 * Initialize quota pointers in inode
1465 * It is better to call this function outside of any transaction as it
1466 * might need a lot of space in journal for dquot structure allocation.
1468 static int __dquot_initialize(struct inode *inode, int type)
1470 int cnt, init_needed = 0;
1471 struct dquot __rcu **dquots;
1472 struct dquot *got[MAXQUOTAS] = {};
1473 struct super_block *sb = inode->i_sb;
1477 if (!inode_quota_active(inode))
1480 dquots = i_dquot(inode);
1482 /* First get references to structures we might need. */
1483 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1487 struct dquot *dquot;
1489 if (type != -1 && cnt != type)
1492 * The i_dquot should have been initialized in most cases,
1493 * we check it without locking here to avoid unnecessary
1494 * dqget()/dqput() calls.
1499 if (!sb_has_quota_active(sb, cnt))
1506 qid = make_kqid_uid(inode->i_uid);
1509 qid = make_kqid_gid(inode->i_gid);
1512 rc = inode->i_sb->dq_op->get_projid(inode, &projid);
1515 qid = make_kqid_projid(projid);
1518 dquot = dqget(sb, qid);
1519 if (IS_ERR(dquot)) {
1520 /* We raced with somebody turning quotas off... */
1521 if (PTR_ERR(dquot) != -ESRCH) {
1522 ret = PTR_ERR(dquot);
1530 /* All required i_dquot pointers have been initialized */
1534 spin_lock(&dq_data_lock);
1535 if (IS_NOQUOTA(inode))
1537 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1538 if (type != -1 && cnt != type)
1540 /* Avoid races with quotaoff() */
1541 if (!sb_has_quota_active(sb, cnt))
1543 /* We could race with quotaon or dqget() could have failed */
1547 rcu_assign_pointer(dquots[cnt], got[cnt]);
1550 * Make quota reservation system happy if someone
1551 * did a write before quota was turned on
1553 rsv = inode_get_rsv_space(inode);
1554 if (unlikely(rsv)) {
1555 struct dquot *dquot = srcu_dereference_check(
1556 dquots[cnt], &dquot_srcu,
1557 lockdep_is_held(&dq_data_lock));
1559 spin_lock(&inode->i_lock);
1560 /* Get reservation again under proper lock */
1561 rsv = __inode_get_rsv_space(inode);
1562 spin_lock(&dquot->dq_dqb_lock);
1563 dquot->dq_dqb.dqb_rsvspace += rsv;
1564 spin_unlock(&dquot->dq_dqb_lock);
1565 spin_unlock(&inode->i_lock);
1570 spin_unlock(&dq_data_lock);
1572 /* Drop unused references */
1578 int dquot_initialize(struct inode *inode)
1580 return __dquot_initialize(inode, -1);
1582 EXPORT_SYMBOL(dquot_initialize);
1584 bool dquot_initialize_needed(struct inode *inode)
1586 struct dquot __rcu **dquots;
1589 if (!inode_quota_active(inode))
1592 dquots = i_dquot(inode);
1593 for (i = 0; i < MAXQUOTAS; i++)
1594 if (!dquots[i] && sb_has_quota_active(inode->i_sb, i))
1598 EXPORT_SYMBOL(dquot_initialize_needed);
1601 * Release all quotas referenced by inode.
1603 * This function can only be called on inode free or when converting
1604 * a file to a quota file; in both cases there are no other users of
1605 * the i_dquot, so we needn't call synchronize_srcu() after clearing i_dquot. */
1608 static void __dquot_drop(struct inode *inode)
1611 struct dquot __rcu **dquots = i_dquot(inode);
1612 struct dquot *put[MAXQUOTAS];
1614 spin_lock(&dq_data_lock);
1615 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1616 put[cnt] = srcu_dereference_check(dquots[cnt], &dquot_srcu,
1617 lockdep_is_held(&dq_data_lock));
1618 rcu_assign_pointer(dquots[cnt], NULL);
1620 spin_unlock(&dq_data_lock);
1624 void dquot_drop(struct inode *inode)
1626 struct dquot __rcu * const *dquots;
1629 if (IS_NOQUOTA(inode))
1633 * Test before calling to rule out calls from proc and such
1634 * where we are not allowed to block. Note that this is
1635 * actually a reliable test even without the lock - the caller
1636 * must assure that nobody can come after the DQUOT_DROP and
1637 * add quota pointers back anyway.
1639 dquots = i_dquot(inode);
1640 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1645 if (cnt < MAXQUOTAS)
1646 __dquot_drop(inode);
1648 EXPORT_SYMBOL(dquot_drop);
1651 * inode_reserved_space is managed internally by quota, and protected by
1652 * i_lock similar to i_blocks+i_bytes.
1654 static qsize_t *inode_reserved_space(struct inode * inode)
1656 /* A filesystem must explicitly define its own method in order to use
1657 * the quota reservation interface */
1658 BUG_ON(!inode->i_sb->dq_op->get_reserved_space);
1659 return inode->i_sb->dq_op->get_reserved_space(inode);
1662 static qsize_t __inode_get_rsv_space(struct inode *inode)
1664 if (!inode->i_sb->dq_op->get_reserved_space)
1666 return *inode_reserved_space(inode);
1669 static qsize_t inode_get_rsv_space(struct inode *inode)
1673 if (!inode->i_sb->dq_op->get_reserved_space)
1675 spin_lock(&inode->i_lock);
1676 ret = __inode_get_rsv_space(inode);
1677 spin_unlock(&inode->i_lock);
1682 * This function updates the i_blocks+i_bytes fields and quota information
1683 * (together with appropriate checks).
1685 * NOTE: We absolutely rely on the fact that caller dirties the inode
1686 * (usually helpers in quotaops.h care about this) and holds a handle for
1687 * the current transaction so that dquot write and inode write go into the same transaction.
1692 * This operation can block, but only after everything is updated
1694 int __dquot_alloc_space(struct inode *inode, qsize_t number, int flags)
1696 int cnt, ret = 0, index;
1697 struct dquot_warn warn[MAXQUOTAS];
1698 int reserve = flags & DQUOT_SPACE_RESERVE;
1699 struct dquot __rcu **dquots;
1700 struct dquot *dquot;
1702 if (!inode_quota_active(inode)) {
1704 spin_lock(&inode->i_lock);
1705 *inode_reserved_space(inode) += number;
1706 spin_unlock(&inode->i_lock);
1708 inode_add_bytes(inode, number);
1713 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
1714 warn[cnt].w_type = QUOTA_NL_NOWARN;
1716 dquots = i_dquot(inode);
1717 index = srcu_read_lock(&dquot_srcu);
1718 spin_lock(&inode->i_lock);
1719 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1720 dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
1724 ret = dquot_add_space(dquot, 0, number, flags, &warn[cnt]);
1726 ret = dquot_add_space(dquot, number, 0, flags, &warn[cnt]);
1729 /* Back out changes we already did */
1730 for (cnt--; cnt >= 0; cnt--) {
1731 dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
1734 spin_lock(&dquot->dq_dqb_lock);
1736 dquot_free_reserved_space(dquot, number);
1738 dquot_decr_space(dquot, number);
1739 spin_unlock(&dquot->dq_dqb_lock);
1741 spin_unlock(&inode->i_lock);
1742 goto out_flush_warn;
1746 *inode_reserved_space(inode) += number;
1748 __inode_add_bytes(inode, number);
1749 spin_unlock(&inode->i_lock);
1752 goto out_flush_warn;
1753 mark_all_dquot_dirty(dquots);
1755 srcu_read_unlock(&dquot_srcu, index);
1756 flush_warnings(warn);
1760 EXPORT_SYMBOL(__dquot_alloc_space);
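/*
 * Filesystems normally reach __dquot_alloc_space() through the inline
 * wrappers in <linux/quotaops.h>. An illustrative sketch of the common
 * block-allocation path (assuming the standard dquot_alloc_block() wrapper):
 *
 *	ret = dquot_alloc_block(inode, nr);
 *	if (ret)
 *		return ret;	// over quota, warning already queued
 */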
1763 * This operation can block, but only after everything is updated
1765 int dquot_alloc_inode(struct inode *inode)
1767 int cnt, ret = 0, index;
1768 struct dquot_warn warn[MAXQUOTAS];
1769 struct dquot __rcu * const *dquots;
1770 struct dquot *dquot;
1772 if (!inode_quota_active(inode))
1774 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
1775 warn[cnt].w_type = QUOTA_NL_NOWARN;
1777 dquots = i_dquot(inode);
1778 index = srcu_read_lock(&dquot_srcu);
1779 spin_lock(&inode->i_lock);
1780 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1781 dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
1784 ret = dquot_add_inodes(dquot, 1, &warn[cnt]);
1786 for (cnt--; cnt >= 0; cnt--) {
1787 dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
1790 /* Back out changes we already did */
1791 spin_lock(&dquot->dq_dqb_lock);
1792 dquot_decr_inodes(dquot, 1);
1793 spin_unlock(&dquot->dq_dqb_lock);
1800 spin_unlock(&inode->i_lock);
1802 mark_all_dquot_dirty(dquots);
1803 srcu_read_unlock(&dquot_srcu, index);
1804 flush_warnings(warn);
1807 EXPORT_SYMBOL(dquot_alloc_inode);
1810 * Convert in-memory reserved quotas to real consumed quotas
1812 int dquot_claim_space_nodirty(struct inode *inode, qsize_t number)
1814 struct dquot __rcu **dquots;
1815 struct dquot *dquot;
1818 if (!inode_quota_active(inode)) {
1819 spin_lock(&inode->i_lock);
1820 *inode_reserved_space(inode) -= number;
1821 __inode_add_bytes(inode, number);
1822 spin_unlock(&inode->i_lock);
1826 dquots = i_dquot(inode);
1827 index = srcu_read_lock(&dquot_srcu);
1828 spin_lock(&inode->i_lock);
1829 /* Claim reserved quotas to allocated quotas */
1830 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1831 dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
1833 spin_lock(&dquot->dq_dqb_lock);
1834 if (WARN_ON_ONCE(dquot->dq_dqb.dqb_rsvspace < number))
1835 number = dquot->dq_dqb.dqb_rsvspace;
1836 dquot->dq_dqb.dqb_curspace += number;
1837 dquot->dq_dqb.dqb_rsvspace -= number;
1838 spin_unlock(&dquot->dq_dqb_lock);
1841 /* Update inode bytes */
1842 *inode_reserved_space(inode) -= number;
1843 __inode_add_bytes(inode, number);
1844 spin_unlock(&inode->i_lock);
1845 mark_all_dquot_dirty(dquots);
1846 srcu_read_unlock(&dquot_srcu, index);
1849 EXPORT_SYMBOL(dquot_claim_space_nodirty);
1852 * Convert allocated space back to in-memory reserved quotas
1854 void dquot_reclaim_space_nodirty(struct inode *inode, qsize_t number)
1856 struct dquot __rcu **dquots;
1857 struct dquot *dquot;
1860 if (!inode_quota_active(inode)) {
1861 spin_lock(&inode->i_lock);
1862 *inode_reserved_space(inode) += number;
1863 __inode_sub_bytes(inode, number);
1864 spin_unlock(&inode->i_lock);
1868 dquots = i_dquot(inode);
1869 index = srcu_read_lock(&dquot_srcu);
1870 spin_lock(&inode->i_lock);
1871 /* Return allocated quotas back to the in-memory reservations */
1872 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1873 dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
1875 spin_lock(&dquot->dq_dqb_lock);
1876 if (WARN_ON_ONCE(dquot->dq_dqb.dqb_curspace < number))
1877 number = dquot->dq_dqb.dqb_curspace;
1878 dquot->dq_dqb.dqb_rsvspace += number;
1879 dquot->dq_dqb.dqb_curspace -= number;
1880 spin_unlock(&dquot->dq_dqb_lock);
1883 /* Update inode bytes */
1884 *inode_reserved_space(inode) += number;
1885 __inode_sub_bytes(inode, number);
1886 spin_unlock(&inode->i_lock);
1887 mark_all_dquot_dirty(dquots);
1888 srcu_read_unlock(&dquot_srcu, index);
1891 EXPORT_SYMBOL(dquot_reclaim_space_nodirty);
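/*
 * Together with __dquot_alloc_space() these two helpers implement the
 * delayed-allocation flow (illustrative sketch):
 *
 *	__dquot_alloc_space(inode, n, DQUOT_SPACE_RESERVE);	// reserve at write time
 *	...
 *	dquot_claim_space_nodirty(inode, n);	// blocks were really allocated
 *	// or, when the allocation is backed out:
 *	dquot_reclaim_space_nodirty(inode, n);	// convert back to a reservation
 */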
1894 * This operation can block, but only after everything is updated
1896 void __dquot_free_space(struct inode *inode, qsize_t number, int flags)
1899 struct dquot_warn warn[MAXQUOTAS];
1900 struct dquot __rcu **dquots;
1901 struct dquot *dquot;
1902 int reserve = flags & DQUOT_SPACE_RESERVE, index;
1904 if (!inode_quota_active(inode)) {
1906 spin_lock(&inode->i_lock);
1907 *inode_reserved_space(inode) -= number;
1908 spin_unlock(&inode->i_lock);
1910 inode_sub_bytes(inode, number);
1915 dquots = i_dquot(inode);
1916 index = srcu_read_lock(&dquot_srcu);
1917 spin_lock(&inode->i_lock);
1918 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1921 warn[cnt].w_type = QUOTA_NL_NOWARN;
1922 dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
1925 spin_lock(&dquot->dq_dqb_lock);
1926 wtype = info_bdq_free(dquot, number);
1927 if (wtype != QUOTA_NL_NOWARN)
1928 prepare_warning(&warn[cnt], dquot, wtype);
1930 dquot_free_reserved_space(dquot, number);
1932 dquot_decr_space(dquot, number);
1933 spin_unlock(&dquot->dq_dqb_lock);
1936 *inode_reserved_space(inode) -= number;
1938 __inode_sub_bytes(inode, number);
1939 spin_unlock(&inode->i_lock);
1943 mark_all_dquot_dirty(dquots);
1945 srcu_read_unlock(&dquot_srcu, index);
1946 flush_warnings(warn);
1948 EXPORT_SYMBOL(__dquot_free_space);
1951 * This operation can block, but only after everything is updated
1953 void dquot_free_inode(struct inode *inode)
1956 struct dquot_warn warn[MAXQUOTAS];
1957 struct dquot __rcu * const *dquots;
1958 struct dquot *dquot;
1961 if (!inode_quota_active(inode))
1964 dquots = i_dquot(inode);
1965 index = srcu_read_lock(&dquot_srcu);
1966 spin_lock(&inode->i_lock);
1967 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1969 warn[cnt].w_type = QUOTA_NL_NOWARN;
1970 dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
1973 spin_lock(&dquot->dq_dqb_lock);
1974 wtype = info_idq_free(dquot, 1);
1975 if (wtype != QUOTA_NL_NOWARN)
1976 prepare_warning(&warn[cnt], dquot, wtype);
1977 dquot_decr_inodes(dquot, 1);
1978 spin_unlock(&dquot->dq_dqb_lock);
1980 spin_unlock(&inode->i_lock);
1981 mark_all_dquot_dirty(dquots);
1982 srcu_read_unlock(&dquot_srcu, index);
1983 flush_warnings(warn);
1985 EXPORT_SYMBOL(dquot_free_inode);
1988 * Transfer the number of inodes and blocks from one diskquota to another.
1989 * On success, dquot references in transfer_to are consumed and references
1990 * to original dquots that need to be released are placed there. On failure,
1991 * references are kept untouched.
1993 * This operation can block, but only after everything is updated
1994 * A transaction must be started when entering this function.
1996 * We are holding references on transfer_from & transfer_to, so there is
1997 * no need to protect them by srcu_read_lock().
1999 int __dquot_transfer(struct inode *inode, struct dquot **transfer_to)
2002 qsize_t rsv_space = 0;
2003 qsize_t inode_usage = 1;
2004 struct dquot __rcu **dquots;
2005 struct dquot *transfer_from[MAXQUOTAS] = {};
2006 int cnt, index, ret = 0;
2007 char is_valid[MAXQUOTAS] = {};
2008 struct dquot_warn warn_to[MAXQUOTAS];
2009 struct dquot_warn warn_from_inodes[MAXQUOTAS];
2010 struct dquot_warn warn_from_space[MAXQUOTAS];
2012 if (IS_NOQUOTA(inode))
2015 if (inode->i_sb->dq_op->get_inode_usage) {
2016 ret = inode->i_sb->dq_op->get_inode_usage(inode, &inode_usage);
2021 /* Initialize the arrays */
2022 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
2023 warn_to[cnt].w_type = QUOTA_NL_NOWARN;
2024 warn_from_inodes[cnt].w_type = QUOTA_NL_NOWARN;
2025 warn_from_space[cnt].w_type = QUOTA_NL_NOWARN;
2028 spin_lock(&dq_data_lock);
2029 spin_lock(&inode->i_lock);
2030 if (IS_NOQUOTA(inode)) { /* File without quota accounting? */
2031 spin_unlock(&inode->i_lock);
2032 spin_unlock(&dq_data_lock);
2035 cur_space = __inode_get_bytes(inode);
2036 rsv_space = __inode_get_rsv_space(inode);
2037 dquots = i_dquot(inode);
2039 * Build the transfer_from list, check limits, and update usage in
2040 * the target structures.
2042 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
2044 * Skip changes for same uid or gid or for turned off quota-type.
2046 if (!transfer_to[cnt])
2048 /* Avoid races with quotaoff() */
2049 if (!sb_has_quota_active(inode->i_sb, cnt))
2052 transfer_from[cnt] = srcu_dereference_check(dquots[cnt],
2053 &dquot_srcu, lockdep_is_held(&dq_data_lock));
2054 ret = dquot_add_inodes(transfer_to[cnt], inode_usage,
2058 ret = dquot_add_space(transfer_to[cnt], cur_space, rsv_space,
2059 DQUOT_SPACE_WARN, &warn_to[cnt]);
2061 spin_lock(&transfer_to[cnt]->dq_dqb_lock);
2062 dquot_decr_inodes(transfer_to[cnt], inode_usage);
2063 spin_unlock(&transfer_to[cnt]->dq_dqb_lock);
2068 /* Decrease usage for source structures and update quota pointers */
2069 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
2072 /* Due to IO error we might not have transfer_from[] structure */
2073 if (transfer_from[cnt]) {
2076 spin_lock(&transfer_from[cnt]->dq_dqb_lock);
2077 wtype = info_idq_free(transfer_from[cnt], inode_usage);
2078 if (wtype != QUOTA_NL_NOWARN)
2079 prepare_warning(&warn_from_inodes[cnt],
2080 transfer_from[cnt], wtype);
2081 wtype = info_bdq_free(transfer_from[cnt],
2082 cur_space + rsv_space);
2083 if (wtype != QUOTA_NL_NOWARN)
2084 prepare_warning(&warn_from_space[cnt],
2085 transfer_from[cnt], wtype);
2086 dquot_decr_inodes(transfer_from[cnt], inode_usage);
2087 dquot_decr_space(transfer_from[cnt], cur_space);
2088 dquot_free_reserved_space(transfer_from[cnt],
2090 spin_unlock(&transfer_from[cnt]->dq_dqb_lock);
2092 rcu_assign_pointer(dquots[cnt], transfer_to[cnt]);
2094 spin_unlock(&inode->i_lock);
2095 spin_unlock(&dq_data_lock);
2098 * These arrays are local and we hold dquot references so we don't need
2099 * the srcu protection but still take dquot_srcu to avoid warning in
2100 * mark_all_dquot_dirty().
2102 index = srcu_read_lock(&dquot_srcu);
2103 mark_all_dquot_dirty((struct dquot __rcu **)transfer_from);
2104 mark_all_dquot_dirty((struct dquot __rcu **)transfer_to);
2105 srcu_read_unlock(&dquot_srcu, index);
2107 flush_warnings(warn_to);
2108 flush_warnings(warn_from_inodes);
2109 flush_warnings(warn_from_space);
2110 /* Pass back references to put */
2111 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
2113 transfer_to[cnt] = transfer_from[cnt];
2116 /* Back out changes we already did */
2117 for (cnt--; cnt >= 0; cnt--) {
2120 spin_lock(&transfer_to[cnt]->dq_dqb_lock);
2121 dquot_decr_inodes(transfer_to[cnt], inode_usage);
2122 dquot_decr_space(transfer_to[cnt], cur_space);
2123 dquot_free_reserved_space(transfer_to[cnt], rsv_space);
2124 spin_unlock(&transfer_to[cnt]->dq_dqb_lock);
2126 spin_unlock(&inode->i_lock);
2127 spin_unlock(&dq_data_lock);
2128 flush_warnings(warn_to);
2131 EXPORT_SYMBOL(__dquot_transfer);
2133 /* Wrapper for transferring ownership of an inode for uid/gid only
2134 * Called from FSXXX_setattr()
2136 int dquot_transfer(struct inode *inode, struct iattr *iattr)
2138 struct dquot *transfer_to[MAXQUOTAS] = {};
2139 struct dquot *dquot;
2140 struct super_block *sb = inode->i_sb;
2143 if (!inode_quota_active(inode))
2146 if (iattr->ia_valid & ATTR_UID && !uid_eq(iattr->ia_uid, inode->i_uid)){
2147 dquot = dqget(sb, make_kqid_uid(iattr->ia_uid));
2148 if (IS_ERR(dquot)) {
2149 if (PTR_ERR(dquot) != -ESRCH) {
2150 ret = PTR_ERR(dquot);
2155 transfer_to[USRQUOTA] = dquot;
2157 if (iattr->ia_valid & ATTR_GID && !gid_eq(iattr->ia_gid, inode->i_gid)){
2158 dquot = dqget(sb, make_kqid_gid(iattr->ia_gid));
2159 if (IS_ERR(dquot)) {
2160 if (PTR_ERR(dquot) != -ESRCH) {
2161 ret = PTR_ERR(dquot);
2166 transfer_to[GRPQUOTA] = dquot;
2168 ret = __dquot_transfer(inode, transfer_to);
2170 dqput_all(transfer_to);
2173 EXPORT_SYMBOL(dquot_transfer);
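/*
 * An illustrative ->setattr() fragment (a sketch; details differ between
 * filesystems):
 *
 *	if (iattr->ia_valid & (ATTR_UID | ATTR_GID)) {
 *		error = dquot_transfer(inode, iattr);
 *		if (error)
 *			return error;
 *	}
 */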
2176 * Write info of quota file to disk
2178 int dquot_commit_info(struct super_block *sb, int type)
2180 struct quota_info *dqopt = sb_dqopt(sb);
2182 return dqopt->ops[type]->write_file_info(sb, type);
2184 EXPORT_SYMBOL(dquot_commit_info);
2186 int dquot_get_next_id(struct super_block *sb, struct kqid *qid)
2188 struct quota_info *dqopt = sb_dqopt(sb);
2190 if (!sb_has_quota_active(sb, qid->type))
2192 if (!dqopt->ops[qid->type]->get_next_id)
2194 return dqopt->ops[qid->type]->get_next_id(sb, qid);
2196 EXPORT_SYMBOL(dquot_get_next_id);

/*
 * Definitions of diskquota operations.
 */
const struct dquot_operations dquot_operations = {
	.write_dquot	= dquot_commit,
	.acquire_dquot	= dquot_acquire,
	.release_dquot	= dquot_release,
	.mark_dirty	= dquot_mark_dquot_dirty,
	.write_info	= dquot_commit_info,
	.alloc_dquot	= dquot_alloc,
	.destroy_dquot	= dquot_destroy,
	.get_next_id	= dquot_get_next_id,
};
EXPORT_SYMBOL(dquot_operations);
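
/*
 * Illustrative sketch (hypothetical "myfs"): a filesystem opts into these
 * generic operations at mount time by pointing its superblock at them:
 *
 *	sb->dq_op = &dquot_operations;
 *	sb->s_qcop = &myfs_quotactl_ops;
 *
 * where myfs_quotactl_ops is built from the dquot_* helpers below.
 * Filesystems that need extra work per operation (e.g. journalled quota)
 * wrap these helpers in their own dquot_operations instead.
 */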

/*
 * Generic helper for ->open on filesystems supporting disk quotas.
 */
int dquot_file_open(struct inode *inode, struct file *file)
{
	int error;

	error = generic_file_open(inode, file);
	if (!error && (file->f_mode & FMODE_WRITE))
		error = dquot_initialize(inode);
	return error;
}
EXPORT_SYMBOL(dquot_file_open);
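
/*
 * Illustrative sketch (hypothetical "myfs"): this helper is meant to be
 * plugged straight into file_operations so that any writer has its dquots
 * initialized on open:
 *
 *	const struct file_operations myfs_file_operations = {
 *		...
 *		.open = dquot_file_open,
 *	};
 */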

static void vfs_cleanup_quota_inode(struct super_block *sb, int type)
{
	struct quota_info *dqopt = sb_dqopt(sb);
	struct inode *inode = dqopt->files[type];

	if (!inode)
		return;
	if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE)) {
		inode_lock(inode);
		inode->i_flags &= ~S_NOQUOTA;
		inode_unlock(inode);
	}
	dqopt->files[type] = NULL;
	iput(inode);
}

/*
 * Turn quota off on a device. type == -1 ==> quotaoff for all types (umount)
 */
int dquot_disable(struct super_block *sb, int type, unsigned int flags)
{
	int cnt;
	struct quota_info *dqopt = sb_dqopt(sb);

	/* s_umount should be held in exclusive mode */
	if (WARN_ON_ONCE(down_read_trylock(&sb->s_umount)))
		up_read(&sb->s_umount);

	/* Cannot turn off usage accounting without turning off limits, or
	 * suspend quotas and simultaneously turn quotas off. */
	if ((flags & DQUOT_USAGE_ENABLED && !(flags & DQUOT_LIMITS_ENABLED))
	    || (flags & DQUOT_SUSPENDED && flags & (DQUOT_LIMITS_ENABLED |
	    DQUOT_USAGE_ENABLED)))
		return -EINVAL;

	/*
	 * Skip everything if there's nothing to do. We have to do this because
	 * sometimes we are called when fill_super() failed and calling
	 * sync_fs() in such cases does no good.
	 */
	if (!sb_any_quota_loaded(sb))
		return 0;

	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (type != -1 && cnt != type)
			continue;
		if (!sb_has_quota_loaded(sb, cnt))
			continue;

		if (flags & DQUOT_SUSPENDED) {
			spin_lock(&dq_state_lock);
			dqopt->flags |=
				dquot_state_flag(DQUOT_SUSPENDED, cnt);
			spin_unlock(&dq_state_lock);
		} else {
			spin_lock(&dq_state_lock);
			dqopt->flags &= ~dquot_state_flag(flags, cnt);
			/* Turning off suspended quotas? */
			if (!sb_has_quota_loaded(sb, cnt) &&
			    sb_has_quota_suspended(sb, cnt)) {
				dqopt->flags &= ~dquot_state_flag(
							DQUOT_SUSPENDED, cnt);
				spin_unlock(&dq_state_lock);
				vfs_cleanup_quota_inode(sb, cnt);
				continue;
			}
			spin_unlock(&dq_state_lock);
		}

		/* We still have to keep quota loaded? */
		if (sb_has_quota_loaded(sb, cnt) && !(flags & DQUOT_SUSPENDED))
			continue;

		/* Note: these are blocking operations */
		drop_dquot_ref(sb, cnt);
		invalidate_dquots(sb, cnt);
		/*
		 * Now all dquots should be invalidated, all writes done so we
		 * should be only users of the info. No locks needed.
		 */
		if (info_dirty(&dqopt->info[cnt]))
			sb->dq_op->write_info(sb, cnt);
		if (dqopt->ops[cnt]->free_file_info)
			dqopt->ops[cnt]->free_file_info(sb, cnt);
		put_quota_format(dqopt->info[cnt].dqi_format);
		dqopt->info[cnt].dqi_flags = 0;
		dqopt->info[cnt].dqi_igrace = 0;
		dqopt->info[cnt].dqi_bgrace = 0;
		dqopt->ops[cnt] = NULL;
	}

	/* Skip syncing and setting flags if quota files are hidden */
	if (dqopt->flags & DQUOT_QUOTA_SYS_FILE)
		goto put_inodes;

	/* Sync the superblock so that buffers with quota data are written to
	 * disk (and so userspace sees correct data afterwards). */
	if (sb->s_op->sync_fs)
		sb->s_op->sync_fs(sb, 1);
	sync_blockdev(sb->s_bdev);
	/* Now the quota files are just ordinary files and we can set the
	 * inode flags back. Moreover we discard the pagecache so that
	 * userspace sees the writes we did bypassing the pagecache. We
	 * must also discard the blockdev buffers so that we see the
	 * changes done by userspace on the next quotaon() */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		if (!sb_has_quota_loaded(sb, cnt) && dqopt->files[cnt]) {
			inode_lock(dqopt->files[cnt]);
			truncate_inode_pages(&dqopt->files[cnt]->i_data, 0);
			inode_unlock(dqopt->files[cnt]);
		}
	if (sb->s_bdev)
		invalidate_bdev(sb->s_bdev);
put_inodes:
	/* We are done when suspending quotas */
	if (flags & DQUOT_SUSPENDED)
		return 0;

	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		if (!sb_has_quota_loaded(sb, cnt))
			vfs_cleanup_quota_inode(sb, cnt);
	return 0;
}
EXPORT_SYMBOL(dquot_disable);
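
/*
 * Examples of flag combinations accepted above (for illustration only):
 *
 *	dquot_disable(sb, USRQUOTA,
 *		      DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);
 *		- turn user quotas fully off
 *	dquot_disable(sb, -1, DQUOT_SUSPENDED);
 *		- suspend all quota types (e.g. on remount read-only),
 *		  to be undone later by dquot_resume()
 */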

int dquot_quota_off(struct super_block *sb, int type)
{
	return dquot_disable(sb, type,
			     DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);
}
EXPORT_SYMBOL(dquot_quota_off);

/*
 *	Turn quotas on on a device
 */

static int vfs_setup_quota_inode(struct inode *inode, int type)
{
	struct super_block *sb = inode->i_sb;
	struct quota_info *dqopt = sb_dqopt(sb);

	if (is_bad_inode(inode))
		return -EUCLEAN;
	if (!S_ISREG(inode->i_mode))
		return -EACCES;
	if (IS_RDONLY(inode))
		return -EROFS;
	if (sb_has_quota_loaded(sb, type))
		return -EBUSY;

	/*
	 * Quota files should never be encrypted. They should be thought of as
	 * filesystem metadata, not user data. New-style internal quota files
	 * cannot be encrypted by users anyway, but old-style external quota
	 * files could potentially be incorrectly created in an encrypted
	 * directory, hence this explicit check. Some reasons why encrypted
	 * quota files don't work include: (1) some filesystems that support
	 * encryption don't handle it in their quota_read and quota_write, and
	 * (2) cleaning up encrypted quota files at unmount would need special
	 * consideration, as quota files are cleaned up later than user files.
	 */
	if (IS_ENCRYPTED(inode))
		return -EINVAL;

	dqopt->files[type] = igrab(inode);
	if (!dqopt->files[type])
		return -EIO;
	if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE)) {
		/* We don't want quota and atime on quota files (deadlocks
		 * possible) Also nobody should write to the file - we use
		 * special IO operations which ignore the immutable bit. */
		inode_lock(inode);
		inode->i_flags |= S_NOQUOTA;
		inode_unlock(inode);
		/*
		 * When S_NOQUOTA is set, remove dquot references as no more
		 * references can be added
		 */
		__dquot_drop(inode);
	}
	return 0;
}

int dquot_load_quota_sb(struct super_block *sb, int type, int format_id,
	unsigned int flags)
{
	struct quota_format_type *fmt = find_quota_format(format_id);
	struct quota_info *dqopt = sb_dqopt(sb);
	int error;

	/* Just unsuspend quotas? */
	BUG_ON(flags & DQUOT_SUSPENDED);
	/* s_umount should be held in exclusive mode */
	if (WARN_ON_ONCE(down_read_trylock(&sb->s_umount)))
		up_read(&sb->s_umount);

	if (!fmt)
		return -ESRCH;
	if (!sb->s_op->quota_write || !sb->s_op->quota_read ||
	    (type == PRJQUOTA && sb->dq_op->get_projid == NULL)) {
		error = -EINVAL;
		goto out_fmt;
	}
	/* Filesystems outside of init_user_ns not yet supported */
	if (sb->s_user_ns != &init_user_ns) {
		error = -EINVAL;
		goto out_fmt;
	}
	/* Usage always has to be set... */
	if (!(flags & DQUOT_USAGE_ENABLED)) {
		error = -EINVAL;
		goto out_fmt;
	}
	if (sb_has_quota_loaded(sb, type)) {
		error = -EBUSY;
		goto out_fmt;
	}

	if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE)) {
		/* As we bypass the pagecache we must now flush all the
		 * dirty data and invalidate caches so that kernel sees
		 * changes from userspace. It is not enough to just flush
		 * the quota file since if blocksize < pagesize, invalidation
		 * of the cache could fail because of other unrelated dirty
		 * data */
		sync_filesystem(sb);
		invalidate_bdev(sb->s_bdev);
	}

	error = -EINVAL;
	if (!fmt->qf_ops->check_quota_file(sb, type))
		goto out_fmt;

	dqopt->ops[type] = fmt->qf_ops;
	dqopt->info[type].dqi_format = fmt;
	dqopt->info[type].dqi_fmt_id = format_id;
	INIT_LIST_HEAD(&dqopt->info[type].dqi_dirty_list);
	error = dqopt->ops[type]->read_file_info(sb, type);
	if (error < 0)
		goto out_fmt;
	if (dqopt->flags & DQUOT_QUOTA_SYS_FILE) {
		spin_lock(&dq_data_lock);
		dqopt->info[type].dqi_flags |= DQF_SYS_FILE;
		spin_unlock(&dq_data_lock);
	}
	spin_lock(&dq_state_lock);
	dqopt->flags |= dquot_state_flag(flags, type);
	spin_unlock(&dq_state_lock);

	error = add_dquot_ref(sb, type);
	if (error)
		dquot_disable(sb, type,
			      DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);

	return error;
out_fmt:
	put_quota_format(fmt);
	return error;
}
EXPORT_SYMBOL(dquot_load_quota_sb);
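
/*
 * Illustrative sketch (assumed usage, not taken from this file): a
 * filesystem that manages quota file storage itself, with no VFS-visible
 * quota inode, marks the files as hidden system files and then enables
 * accounting, optionally without enforcement:
 *
 *	sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE;
 *	err = dquot_load_quota_sb(sb, USRQUOTA, QFMT_VFS_V1,
 *				  DQUOT_USAGE_ENABLED);
 *
 * The format id and flag choice here are example values, not requirements.
 */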

/*
 * More powerful function for turning on quotas on given quota inode allowing
 * setting of individual quota flags
 */
int dquot_load_quota_inode(struct inode *inode, int type, int format_id,
	unsigned int flags)
{
	int err;

	err = vfs_setup_quota_inode(inode, type);
	if (err < 0)
		return err;
	err = dquot_load_quota_sb(inode->i_sb, type, format_id, flags);
	if (err < 0)
		vfs_cleanup_quota_inode(inode->i_sb, type);
	return err;
}
EXPORT_SYMBOL(dquot_load_quota_inode);

/* Reenable quotas on remount RW */
int dquot_resume(struct super_block *sb, int type)
{
	struct quota_info *dqopt = sb_dqopt(sb);
	int ret = 0, cnt;
	unsigned int flags;

	/* s_umount should be held in exclusive mode */
	if (WARN_ON_ONCE(down_read_trylock(&sb->s_umount)))
		up_read(&sb->s_umount);

	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (type != -1 && cnt != type)
			continue;
		if (!sb_has_quota_suspended(sb, cnt))
			continue;

		spin_lock(&dq_state_lock);
		flags = dqopt->flags & dquot_state_flag(DQUOT_USAGE_ENABLED |
							DQUOT_LIMITS_ENABLED,
							cnt);
		dqopt->flags &= ~dquot_state_flag(DQUOT_STATE_FLAGS, cnt);
		spin_unlock(&dq_state_lock);

		flags = dquot_generic_flag(flags, cnt);
		ret = dquot_load_quota_sb(sb, cnt, dqopt->info[cnt].dqi_fmt_id,
					  flags);
		if (ret < 0)
			vfs_cleanup_quota_inode(sb, cnt);
	}

	return ret;
}
EXPORT_SYMBOL(dquot_resume);
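
/*
 * Typical caller (illustrative sketch): a filesystem's remount path pairs
 * this with dquot_suspend() when flipping between read-only and read-write:
 *
 *	if (*flags & SB_RDONLY)
 *		err = dquot_suspend(sb, -1);
 *	else
 *		err = dquot_resume(sb, -1);
 */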

int dquot_quota_on(struct super_block *sb, int type, int format_id,
		   const struct path *path)
{
	int error = security_quota_on(path->dentry);

	if (error)
		return error;
	/* Quota file not on the same filesystem? */
	if (path->dentry->d_sb != sb)
		error = -EXDEV;
	else
		error = dquot_load_quota_inode(d_inode(path->dentry), type,
					       format_id, DQUOT_USAGE_ENABLED |
					       DQUOT_LIMITS_ENABLED);
	return error;
}
EXPORT_SYMBOL(dquot_quota_on);
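
/*
 * Illustrative sketch (hypothetical "myfs"): filesystems with visible
 * quota files usually publish the helpers in this file as their quotactl
 * operations:
 *
 *	static const struct quotactl_ops myfs_quotactl_ops = {
 *		.quota_on	= dquot_quota_on,
 *		.quota_off	= dquot_quota_off,
 *		.quota_sync	= dquot_quota_sync,
 *		.get_state	= dquot_get_state,
 *		.set_info	= dquot_set_dqinfo,
 *		.get_dqblk	= dquot_get_dqblk,
 *		.set_dqblk	= dquot_set_dqblk,
 *	};
 */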

/*
 * This function is used when filesystem needs to initialize quotas
 * during mount time.
 */
int dquot_quota_on_mount(struct super_block *sb, char *qf_name,
			 int format_id, int type)
{
	struct dentry *dentry;
	int error;

	dentry = lookup_positive_unlocked(qf_name, sb->s_root, strlen(qf_name));
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	error = security_quota_on(dentry);
	if (!error)
		error = dquot_load_quota_inode(d_inode(dentry), type, format_id,
				DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);

	dput(dentry);
	return error;
}
EXPORT_SYMBOL(dquot_quota_on_mount);

static int dquot_quota_enable(struct super_block *sb, unsigned int flags)
{
	int ret;
	int type;
	struct quota_info *dqopt = sb_dqopt(sb);

	if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE))
		return -ENOSYS;
	/* Accounting cannot be turned on while fs is mounted */
	flags &= ~(FS_QUOTA_UDQ_ACCT | FS_QUOTA_GDQ_ACCT | FS_QUOTA_PDQ_ACCT);
	if (!flags)
		return -EINVAL;
	for (type = 0; type < MAXQUOTAS; type++) {
		if (!(flags & qtype_enforce_flag(type)))
			continue;
		/* Can't enforce without accounting */
		if (!sb_has_quota_usage_enabled(sb, type)) {
			ret = -EINVAL;
			goto out_err;
		}
		if (sb_has_quota_limits_enabled(sb, type)) {
			ret = -EBUSY;
			goto out_err;
		}
		spin_lock(&dq_state_lock);
		dqopt->flags |= dquot_state_flag(DQUOT_LIMITS_ENABLED, type);
		spin_unlock(&dq_state_lock);
	}
	return 0;
out_err:
	/* Backout enforcement enablement we already did */
	for (type--; type >= 0; type--) {
		if (flags & qtype_enforce_flag(type))
			dquot_disable(sb, type, DQUOT_LIMITS_ENABLED);
	}
	/* Error code translation for better compatibility with XFS */
	if (ret == -EBUSY)
		ret = -EEXIST;
	return ret;
}

static int dquot_quota_disable(struct super_block *sb, unsigned int flags)
{
	int ret;
	int type;
	struct quota_info *dqopt = sb_dqopt(sb);

	if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE))
		return -ENOSYS;
	/*
	 * We don't support turning off accounting via quotactl. In principle
	 * quota infrastructure can do this but filesystems don't expect
	 * userspace to be able to do it.
	 */
	if (flags &
		  (FS_QUOTA_UDQ_ACCT | FS_QUOTA_GDQ_ACCT | FS_QUOTA_PDQ_ACCT))
		return -EOPNOTSUPP;

	/* Filter out limits not enabled */
	for (type = 0; type < MAXQUOTAS; type++)
		if (!sb_has_quota_limits_enabled(sb, type))
			flags &= ~qtype_enforce_flag(type);
	/* Nothing left? */
	if (!flags)
		return -EEXIST;
	for (type = 0; type < MAXQUOTAS; type++) {
		if (flags & qtype_enforce_flag(type)) {
			ret = dquot_disable(sb, type, DQUOT_LIMITS_ENABLED);
			if (ret < 0)
				goto out_err;
		}
	}
	return 0;
out_err:
	/* Backout enforcement disabling we already did */
	for (type--; type >= 0; type--) {
		if (flags & qtype_enforce_flag(type)) {
			spin_lock(&dq_state_lock);
			dqopt->flags |=
				dquot_state_flag(DQUOT_LIMITS_ENABLED, type);
			spin_unlock(&dq_state_lock);
		}
	}
	return ret;
}

/* Generic routine for getting common part of quota structure */
static void do_get_dqblk(struct dquot *dquot, struct qc_dqblk *di)
{
	struct mem_dqblk *dm = &dquot->dq_dqb;

	memset(di, 0, sizeof(*di));
	spin_lock(&dquot->dq_dqb_lock);
	di->d_spc_hardlimit = dm->dqb_bhardlimit;
	di->d_spc_softlimit = dm->dqb_bsoftlimit;
	di->d_ino_hardlimit = dm->dqb_ihardlimit;
	di->d_ino_softlimit = dm->dqb_isoftlimit;
	di->d_space = dm->dqb_curspace + dm->dqb_rsvspace;
	di->d_ino_count = dm->dqb_curinodes;
	di->d_spc_timer = dm->dqb_btime;
	di->d_ino_timer = dm->dqb_itime;
	spin_unlock(&dquot->dq_dqb_lock);
}

int dquot_get_dqblk(struct super_block *sb, struct kqid qid,
		    struct qc_dqblk *di)
{
	struct dquot *dquot;

	dquot = dqget(sb, qid);
	if (IS_ERR(dquot))
		return PTR_ERR(dquot);
	do_get_dqblk(dquot, di);
	dqput(dquot);

	return 0;
}
EXPORT_SYMBOL(dquot_get_dqblk);
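
/*
 * Note on units: struct qc_dqblk carries space limits and usage in bytes
 * (d_spc_*, d_space). Conversion to and from the 1KB block units of the
 * quotactl(2) userspace interface happens in the generic quotactl code,
 * not here.
 */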

int dquot_get_next_dqblk(struct super_block *sb, struct kqid *qid,
			 struct qc_dqblk *di)
{
	struct dquot *dquot;
	int err;

	if (!sb->dq_op->get_next_id)
		return -ENOSYS;
	err = sb->dq_op->get_next_id(sb, qid);
	if (err < 0)
		return err;
	dquot = dqget(sb, *qid);
	if (IS_ERR(dquot))
		return PTR_ERR(dquot);
	do_get_dqblk(dquot, di);
	dqput(dquot);

	return 0;
}
EXPORT_SYMBOL(dquot_get_next_dqblk);

#define VFS_QC_MASK \
	(QC_SPACE | QC_SPC_SOFT | QC_SPC_HARD | \
	 QC_INO_COUNT | QC_INO_SOFT | QC_INO_HARD | \
	 QC_SPC_TIMER | QC_INO_TIMER)

/* Generic routine for setting common part of quota structure */
static int do_set_dqblk(struct dquot *dquot, struct qc_dqblk *di)
{
	struct mem_dqblk *dm = &dquot->dq_dqb;
	int check_blim = 0, check_ilim = 0;
	struct mem_dqinfo *dqi = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_id.type];

	if (di->d_fieldmask & ~VFS_QC_MASK)
		return -EINVAL;

	if (((di->d_fieldmask & QC_SPC_SOFT) &&
	     di->d_spc_softlimit > dqi->dqi_max_spc_limit) ||
	    ((di->d_fieldmask & QC_SPC_HARD) &&
	     di->d_spc_hardlimit > dqi->dqi_max_spc_limit) ||
	    ((di->d_fieldmask & QC_INO_SOFT) &&
	     (di->d_ino_softlimit > dqi->dqi_max_ino_limit)) ||
	    ((di->d_fieldmask & QC_INO_HARD) &&
	     (di->d_ino_hardlimit > dqi->dqi_max_ino_limit)))
		return -ERANGE;

	spin_lock(&dquot->dq_dqb_lock);
	if (di->d_fieldmask & QC_SPACE) {
		dm->dqb_curspace = di->d_space - dm->dqb_rsvspace;
		check_blim = 1;
		set_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags);
	}

	if (di->d_fieldmask & QC_SPC_SOFT)
		dm->dqb_bsoftlimit = di->d_spc_softlimit;
	if (di->d_fieldmask & QC_SPC_HARD)
		dm->dqb_bhardlimit = di->d_spc_hardlimit;
	if (di->d_fieldmask & (QC_SPC_SOFT | QC_SPC_HARD)) {
		check_blim = 1;
		set_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags);
	}

	if (di->d_fieldmask & QC_INO_COUNT) {
		dm->dqb_curinodes = di->d_ino_count;
		check_ilim = 1;
		set_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags);
	}

	if (di->d_fieldmask & QC_INO_SOFT)
		dm->dqb_isoftlimit = di->d_ino_softlimit;
	if (di->d_fieldmask & QC_INO_HARD)
		dm->dqb_ihardlimit = di->d_ino_hardlimit;
	if (di->d_fieldmask & (QC_INO_SOFT | QC_INO_HARD)) {
		check_ilim = 1;
		set_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags);
	}

	if (di->d_fieldmask & QC_SPC_TIMER) {
		dm->dqb_btime = di->d_spc_timer;
		check_blim = 1;
		set_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags);
	}

	if (di->d_fieldmask & QC_INO_TIMER) {
		dm->dqb_itime = di->d_ino_timer;
		check_ilim = 1;
		set_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags);
	}

	if (check_blim) {
		if (!dm->dqb_bsoftlimit ||
		    dm->dqb_curspace + dm->dqb_rsvspace <= dm->dqb_bsoftlimit) {
			dm->dqb_btime = 0;
			clear_bit(DQ_BLKS_B, &dquot->dq_flags);
		} else if (!(di->d_fieldmask & QC_SPC_TIMER))
			/* Set grace only if user hasn't provided his own... */
			dm->dqb_btime = ktime_get_real_seconds() + dqi->dqi_bgrace;
	}
	if (check_ilim) {
		if (!dm->dqb_isoftlimit ||
		    dm->dqb_curinodes <= dm->dqb_isoftlimit) {
			dm->dqb_itime = 0;
			clear_bit(DQ_INODES_B, &dquot->dq_flags);
		} else if (!(di->d_fieldmask & QC_INO_TIMER))
			/* Set grace only if user hasn't provided his own... */
			dm->dqb_itime = ktime_get_real_seconds() + dqi->dqi_igrace;
	}
	if (dm->dqb_bhardlimit || dm->dqb_bsoftlimit || dm->dqb_ihardlimit ||
	    dm->dqb_isoftlimit)
		clear_bit(DQ_FAKE_B, &dquot->dq_flags);
	else
		set_bit(DQ_FAKE_B, &dquot->dq_flags);
	spin_unlock(&dquot->dq_dqb_lock);
	mark_dquot_dirty(dquot);

	return 0;
}

int dquot_set_dqblk(struct super_block *sb, struct kqid qid,
		    struct qc_dqblk *di)
{
	struct dquot *dquot;
	int rc;

	dquot = dqget(sb, qid);
	if (IS_ERR(dquot)) {
		rc = PTR_ERR(dquot);
		goto out;
	}
	rc = do_set_dqblk(dquot, di);
	dqput(dquot);
out:
	return rc;
}
EXPORT_SYMBOL(dquot_set_dqblk);
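
/*
 * Illustrative sketch (hypothetical values): setting a 1GiB soft and 2GiB
 * hard space limit for a user could look like:
 *
 *	struct qc_dqblk di = {
 *		.d_fieldmask	 = QC_SPC_SOFT | QC_SPC_HARD,
 *		.d_spc_softlimit = 1ULL << 30,
 *		.d_spc_hardlimit = 2ULL << 30,
 *	};
 *	int err = dquot_set_dqblk(sb, make_kqid_uid(uid), &di);
 *
 * Fields without their bit set in d_fieldmask are left untouched.
 */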

/* Generic routine for getting common part of quota file information */
int dquot_get_state(struct super_block *sb, struct qc_state *state)
{
	struct mem_dqinfo *mi;
	struct qc_type_state *tstate;
	struct quota_info *dqopt = sb_dqopt(sb);
	int type;

	memset(state, 0, sizeof(*state));
	for (type = 0; type < MAXQUOTAS; type++) {
		if (!sb_has_quota_active(sb, type))
			continue;
		tstate = state->s_state + type;
		mi = sb_dqopt(sb)->info + type;
		tstate->flags = QCI_ACCT_ENABLED;
		spin_lock(&dq_data_lock);
		if (mi->dqi_flags & DQF_SYS_FILE)
			tstate->flags |= QCI_SYSFILE;
		if (mi->dqi_flags & DQF_ROOT_SQUASH)
			tstate->flags |= QCI_ROOT_SQUASH;
		if (sb_has_quota_limits_enabled(sb, type))
			tstate->flags |= QCI_LIMITS_ENFORCED;
		tstate->spc_timelimit = mi->dqi_bgrace;
		tstate->ino_timelimit = mi->dqi_igrace;
		if (dqopt->files[type]) {
			tstate->ino = dqopt->files[type]->i_ino;
			tstate->blocks = dqopt->files[type]->i_blocks;
		}
		tstate->nextents = 1;	/* We don't know... */
		spin_unlock(&dq_data_lock);
	}
	return 0;
}
EXPORT_SYMBOL(dquot_get_state);

/* Generic routine for setting common part of quota file information */
int dquot_set_dqinfo(struct super_block *sb, int type, struct qc_info *ii)
{
	struct mem_dqinfo *mi;

	if ((ii->i_fieldmask & QC_WARNS_MASK) ||
	    (ii->i_fieldmask & QC_RT_SPC_TIMER))
		return -EINVAL;
	if (!sb_has_quota_active(sb, type))
		return -ESRCH;
	mi = sb_dqopt(sb)->info + type;
	if (ii->i_fieldmask & QC_FLAGS) {
		if ((ii->i_flags & QCI_ROOT_SQUASH &&
		     mi->dqi_format->qf_fmt_id != QFMT_VFS_OLD))
			return -EINVAL;
	}
	spin_lock(&dq_data_lock);
	if (ii->i_fieldmask & QC_SPC_TIMER)
		mi->dqi_bgrace = ii->i_spc_timelimit;
	if (ii->i_fieldmask & QC_INO_TIMER)
		mi->dqi_igrace = ii->i_ino_timelimit;
	if (ii->i_fieldmask & QC_FLAGS) {
		if (ii->i_flags & QCI_ROOT_SQUASH)
			mi->dqi_flags |= DQF_ROOT_SQUASH;
		else
			mi->dqi_flags &= ~DQF_ROOT_SQUASH;
	}
	spin_unlock(&dq_data_lock);
	mark_info_dirty(sb, type);
	/* Force write to disk */
	return sb->dq_op->write_info(sb, type);
}
EXPORT_SYMBOL(dquot_set_dqinfo);

const struct quotactl_ops dquot_quotactl_sysfile_ops = {
	.quota_enable	= dquot_quota_enable,
	.quota_disable	= dquot_quota_disable,
	.quota_sync	= dquot_quota_sync,
	.get_state	= dquot_get_state,
	.set_info	= dquot_set_dqinfo,
	.get_dqblk	= dquot_get_dqblk,
	.get_nextdqblk	= dquot_get_next_dqblk,
	.set_dqblk	= dquot_set_dqblk
};
EXPORT_SYMBOL(dquot_quotactl_sysfile_ops);
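
/*
 * This table is for filesystems whose quota files are hidden system inodes
 * (ext4 with the quota feature, for instance): quota_on/quota_off make no
 * sense there, so enforcement is toggled via ->quota_enable and
 * ->quota_disable while accounting stays on for the whole mount.
 */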

static int do_proc_dqstats(struct ctl_table *table, int write,
		     void *buffer, size_t *lenp, loff_t *ppos)
{
	unsigned int type = (unsigned long *)table->data - dqstats.stat;
	s64 value = percpu_counter_sum(&dqstats.counter[type]);

	/* Filter negative values for non-monotonic counters */
	if (value < 0 && (type == DQST_ALLOC_DQUOTS ||
			  type == DQST_FREE_DQUOTS))
		value = 0;

	/* Update global table */
	dqstats.stat[type] = value;
	return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
}
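
/*
 * Note: percpu_counter_sum() may transiently observe a negative value for
 * the allocated/free dquot counters while increments and decrements race
 * on other CPUs, which is why the handler above clamps those two to zero.
 */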

static struct ctl_table fs_dqstats_table[] = {
	{
		.procname	= "lookups",
		.data		= &dqstats.stat[DQST_LOOKUPS],
		.maxlen		= sizeof(unsigned long),
		.mode		= 0444,
		.proc_handler	= do_proc_dqstats,
	},
	{
		.procname	= "drops",
		.data		= &dqstats.stat[DQST_DROPS],
		.maxlen		= sizeof(unsigned long),
		.mode		= 0444,
		.proc_handler	= do_proc_dqstats,
	},
	{
		.procname	= "reads",
		.data		= &dqstats.stat[DQST_READS],
		.maxlen		= sizeof(unsigned long),
		.mode		= 0444,
		.proc_handler	= do_proc_dqstats,
	},
	{
		.procname	= "writes",
		.data		= &dqstats.stat[DQST_WRITES],
		.maxlen		= sizeof(unsigned long),
		.mode		= 0444,
		.proc_handler	= do_proc_dqstats,
	},
	{
		.procname	= "cache_hits",
		.data		= &dqstats.stat[DQST_CACHE_HITS],
		.maxlen		= sizeof(unsigned long),
		.mode		= 0444,
		.proc_handler	= do_proc_dqstats,
	},
	{
		.procname	= "allocated_dquots",
		.data		= &dqstats.stat[DQST_ALLOC_DQUOTS],
		.maxlen		= sizeof(unsigned long),
		.mode		= 0444,
		.proc_handler	= do_proc_dqstats,
	},
	{
		.procname	= "free_dquots",
		.data		= &dqstats.stat[DQST_FREE_DQUOTS],
		.maxlen		= sizeof(unsigned long),
		.mode		= 0444,
		.proc_handler	= do_proc_dqstats,
	},
	{
		.procname	= "syncs",
		.data		= &dqstats.stat[DQST_SYNCS],
		.maxlen		= sizeof(unsigned long),
		.mode		= 0444,
		.proc_handler	= do_proc_dqstats,
	},
#ifdef CONFIG_PRINT_QUOTA_WARNING
	{
		.procname	= "warnings",
		.data		= &flag_print_warnings,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
#endif
	{ },
};

static struct ctl_table fs_table[] = {
	{
		.procname	= "quota",
		.mode		= 0555,
		.child		= fs_dqstats_table,
	},
	{ },
};

static struct ctl_table sys_table[] = {
	{
		.procname	= "fs",
		.mode		= 0555,
		.child		= fs_table,
	},
	{ },
};

static int __init dquot_init(void)
{
	int i, ret;
	unsigned long nr_hash, order;

	printk(KERN_NOTICE "VFS: Disk quotas %s\n", __DQUOT_VERSION__);

	register_sysctl_table(sys_table);

	dquot_cachep = kmem_cache_create("dquot",
			sizeof(struct dquot), sizeof(unsigned long) * 4,
			(SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
				SLAB_MEM_SPREAD|SLAB_PANIC),
			NULL);

	order = 0;
	dquot_hash = (struct hlist_head *)__get_free_pages(GFP_KERNEL, order);
	if (!dquot_hash)
		panic("Cannot create dquot hash table");

	for (i = 0; i < _DQST_DQSTAT_LAST; i++) {
		ret = percpu_counter_init(&dqstats.counter[i], 0, GFP_KERNEL);
		if (ret)
			panic("Cannot create dquot stat counters");
	}

	/* Find power-of-two hlist_heads which can fit into allocation */
	nr_hash = (1UL << order) * PAGE_SIZE / sizeof(struct hlist_head);
	dq_hash_bits = ilog2(nr_hash);

	nr_hash = 1UL << dq_hash_bits;
	dq_hash_mask = nr_hash - 1;
	for (i = 0; i < nr_hash; i++)
		INIT_HLIST_HEAD(dquot_hash + i);

	pr_info("VFS: Dquot-cache hash table entries: %ld (order %ld,"
		" %ld bytes)\n", nr_hash, order, (PAGE_SIZE << order));

	if (register_shrinker(&dqcache_shrinker))
		panic("Cannot register dquot shrinker");

	return 0;
}
fs_initcall(dquot_init);