1 // SPDX-License-Identifier: GPL-2.0
3 * Implementation of the diskquota system for the LINUX operating system. QUOTA
4 * is implemented using the BSD system call interface as the means of
5 * communication with the user level. This file contains the generic routines
6 * called by the different filesystems on allocation of an inode or block.
7 * These routines take care of the administration needed to have a consistent
8 * diskquota tracking system. The ideas of both user and group quotas are based
9 * on the Melbourne quota system as used on BSD derived systems. The internal
10 * implementation is based on one of the several variants of the LINUX
11 * inode-subsystem with added complexity of the diskquota system.
13 * Author: Marco van Wieringen <mvw@planets.elm.net>
15 * Fixes: Dmitry Gorodchanin <pgmdsg@ibi.com>, 11 Feb 96
17 * Revised list management to avoid races
18 * -- Bill Hawes, <whawes@star.net>, 9/98
20 * Fixed races in dquot_transfer(), dqget() and dquot_alloc_...().
21 * As the consequence the locking was moved from dquot_decr_...(),
22 * dquot_incr_...() to calling functions.
23 * invalidate_dquots() now writes modified dquots.
24 * Serialized quota_off() and quota_on() for mount point.
25 * Fixed a few bugs in grow_dquots().
26 * Fixed deadlock in write_dquot() - we no longer account quotas on
28 * remove_dquot_ref() moved to inode.c - it now traverses through inodes
29 * add_dquot_ref() restarts after blocking
30 * Added check for bogus uid and fixed check for group in quotactl.
31 * Jan Kara, <jack@suse.cz>, sponsored by SuSE CR, 10-11/99
33 * Used struct list_head instead of own list struct
34 * Invalidation of referenced dquots is no longer possible
35 * Improved free_dquots list management
36 * Quota and i_blocks are now updated in one place to avoid races
37 * Warnings are now delayed so we won't block in critical section
38 * Write updated not to require dquot lock
39 * Jan Kara, <jack@suse.cz>, 9/2000
41 * Added dynamic quota structure allocation
42 * Jan Kara <jack@suse.cz> 12/2000
44 * Rewritten quota interface. Implemented new quota format and
45 * formats registering.
46 * Jan Kara, <jack@suse.cz>, 2001,2002
49 * Jan Kara, <jack@suse.cz>, 10/2002
51 * Added journalled quota support, fix lock inversion problems
52 * Jan Kara, <jack@suse.cz>, 2003,2004
54 * (C) Copyright 1994 - 1997 Marco van Wieringen
57 #include <linux/errno.h>
58 #include <linux/kernel.h>
60 #include <linux/mount.h>
62 #include <linux/time.h>
63 #include <linux/types.h>
64 #include <linux/string.h>
65 #include <linux/fcntl.h>
66 #include <linux/stat.h>
67 #include <linux/tty.h>
68 #include <linux/file.h>
69 #include <linux/slab.h>
70 #include <linux/sysctl.h>
71 #include <linux/init.h>
72 #include <linux/module.h>
73 #include <linux/proc_fs.h>
74 #include <linux/security.h>
75 #include <linux/sched.h>
76 #include <linux/cred.h>
77 #include <linux/kmod.h>
78 #include <linux/namei.h>
79 #include <linux/capability.h>
80 #include <linux/quotaops.h>
81 #include <linux/blkdev.h>
82 #include <linux/sched/mm.h>
83 #include "../internal.h" /* ugh */
85 #include <linux/uaccess.h>
88 * There are five quota SMP locks:
89 * * dq_list_lock protects all lists with quotas and quota formats.
90 * * dquot->dq_dqb_lock protects data from dq_dqb
91 * * inode->i_lock protects inode->i_blocks, i_bytes and also guards
92 * consistency of dquot->dq_dqb with inode->i_blocks, i_bytes so that
93 * dquot_transfer() can stabilize amount it transfers
94 * * dq_data_lock protects mem_dqinfo structures and modifications of dquot
95 * pointers in the inode
96 * * dq_state_lock protects modifications of quota state (on quotaon and
97 * quotaoff) and readers who care about latest values take it as well.
99 * The spinlock ordering is hence:
100 * dq_data_lock > dq_list_lock > i_lock > dquot->dq_dqb_lock,
101 * dq_list_lock > dq_state_lock
103 * Note that some things (e.g. the sb pointer, type and id) don't change during
104 * the life of the dquot structure and so need not be protected by a lock
106 * Operations accessing dquots via inode pointers are protected by dquot_srcu.
107 * Reading such a pointer needs srcu_read_lock(&dquot_srcu), and
108 * synchronize_srcu(&dquot_srcu) is called after clearing pointers from
109 * inode and before dropping dquot references to avoid use of dquots after
110 * they are freed. dq_data_lock is used to serialize the pointer setting and
111 * clearing operations.
112 * Special care needs to be taken about S_NOQUOTA inode flag (marking that
113 * inode is a quota file). Functions adding pointers from inode to dquots have
114 * to check this flag under dq_data_lock and then (if S_NOQUOTA is not set) they
115 * have to do all pointer modifications before dropping dq_data_lock. This makes
116 * sure they cannot race with quotaon which first sets S_NOQUOTA flag and
117 * then drops all pointers to dquots from an inode.
119 * Each dquot has its dq_lock mutex. Dquot is locked when it is being read to
120 * memory (or space for it is being allocated) on the first dqget(), when it is
121 * being written out, and when it is being released on the last dqput(). The
122 * allocation and release operations are serialized by the dq_lock and by
123 * checking the use count in dquot_release().
125 * Lock ordering (including related VFS locks) is the following:
126 * s_umount > i_mutex > journal_lock > dquot->dq_lock > dqio_sem
129 static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_list_lock);
130 static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_state_lock);
131 __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_data_lock);
132 EXPORT_SYMBOL(dq_data_lock);
133 DEFINE_STATIC_SRCU(dquot_srcu);
135 static DECLARE_WAIT_QUEUE_HEAD(dquot_ref_wq);
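/*
 * Illustrative sketch, not part of the quota core (the function name is
 * hypothetical): the canonical pattern for code that reaches dquots through
 * inode pointers, following the locking rules documented above. The real
 * charging paths (see __dquot_alloc_space() below) additionally go through
 * dquot_add_space() for limit checking; this sketch only shows the lock
 * nesting.
 */
static inline void example_charge_inode_bytes(struct inode *inode, qsize_t bytes)
{
	struct dquot __rcu **dquots =
		(struct dquot __rcu **)inode->i_sb->s_op->get_dquots(inode);
	struct dquot *dquot;
	int cnt, idx;

	idx = srcu_read_lock(&dquot_srcu);	/* pin the dquot pointers */
	spin_lock(&inode->i_lock);		/* stabilize i_blocks/i_bytes */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
		if (!dquot)
			continue;
		spin_lock(&dquot->dq_dqb_lock);	/* innermost lock */
		dquot->dq_dqb.dqb_curspace += bytes;
		spin_unlock(&dquot->dq_dqb_lock);
	}
	__inode_add_bytes(inode, bytes);
	spin_unlock(&inode->i_lock);
	srcu_read_unlock(&dquot_srcu, idx);
}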
137 void __quota_error(struct super_block *sb, const char *func,
138 const char *fmt, ...)
140 if (printk_ratelimit()) {
142 struct va_format vaf;
149 printk(KERN_ERR "Quota error (device %s): %s: %pV\n",
150 sb->s_id, func, &vaf);
155 EXPORT_SYMBOL(__quota_error);
157 #if defined(CONFIG_QUOTA_DEBUG) || defined(CONFIG_PRINT_QUOTA_WARNING)
158 static char *quotatypes[] = INITQFNAMES;
160 static struct quota_format_type *quota_formats; /* List of registered formats */
161 static struct quota_module_name module_names[] = INIT_QUOTA_MODULE_NAMES;
163 /* SLAB cache for dquot structures */
164 static struct kmem_cache *dquot_cachep;
166 int register_quota_format(struct quota_format_type *fmt)
168 spin_lock(&dq_list_lock);
169 fmt->qf_next = quota_formats;
171 spin_unlock(&dq_list_lock);
174 EXPORT_SYMBOL(register_quota_format);
176 void unregister_quota_format(struct quota_format_type *fmt)
178 struct quota_format_type **actqf;
180 spin_lock(&dq_list_lock);
181 for (actqf = &quota_formats; *actqf && *actqf != fmt;
182 actqf = &(*actqf)->qf_next)
185 *actqf = (*actqf)->qf_next;
186 spin_unlock(&dq_list_lock);
188 EXPORT_SYMBOL(unregister_quota_format);
190 static struct quota_format_type *find_quota_format(int id)
192 struct quota_format_type *actqf;
194 spin_lock(&dq_list_lock);
195 for (actqf = quota_formats; actqf && actqf->qf_fmt_id != id;
196 actqf = actqf->qf_next)
198 if (!actqf || !try_module_get(actqf->qf_owner)) {
201 spin_unlock(&dq_list_lock);
203 for (qm = 0; module_names[qm].qm_fmt_id &&
204 module_names[qm].qm_fmt_id != id; qm++)
206 if (!module_names[qm].qm_fmt_id ||
207 request_module(module_names[qm].qm_mod_name))
210 spin_lock(&dq_list_lock);
211 for (actqf = quota_formats; actqf && actqf->qf_fmt_id != id;
212 actqf = actqf->qf_next)
214 if (actqf && !try_module_get(actqf->qf_owner))
217 spin_unlock(&dq_list_lock);
221 static void put_quota_format(struct quota_format_type *fmt)
223 module_put(fmt->qf_owner);
227 * Dquot List Management:
228 * The quota code uses five lists for dquot management: the inuse_list,
229 * releasing_dquots, free_dquots, dqi_dirty_list, and dquot_hash[] array.
230 * A single dquot structure may be on some of those lists, depending on
233 * All dquots are placed to the end of inuse_list when first created, and this
234 * list is used for invalidate operation, which must look at every dquot.
236 * When the last reference of a dquot is dropped, the dquot is added to
237 * releasing_dquots. We'll then queue work item which will call
238 * synchronize_srcu() and after that perform the final cleanup of all the
239 * dquots on the list. Each cleaned up dquot is moved to free_dquots list.
240 * Both releasing_dquots and free_dquots use the dq_free list_head in the dquot
243 * Unused and cleaned up dquots are in the free_dquots list and this list is
244 * searched whenever we need an available dquot. Dquots are removed from the
245 * list as soon as they are used again and dqstats.free_dquots gives the number
246 * of dquots on the list. When dquot is invalidated it's completely released
249 * Dirty dquots are added to the dqi_dirty_list of quota_info when they are
250 * marked dirty, and this list is searched when writing dirty dquots back to
251 * quota file. Note that some filesystems do dirty dquot tracking on their
252 * own (e.g. in a journal) and thus don't use dqi_dirty_list.
254 * Dquots with a specific identity (device, type and id) are placed on
255 * one of the dquot_hash[] hash chains. This provides an efficient search
256 * mechanism to locate a specific dquot.
259 static LIST_HEAD(inuse_list);
260 static LIST_HEAD(free_dquots);
261 static LIST_HEAD(releasing_dquots);
262 static unsigned int dq_hash_bits, dq_hash_mask;
263 static struct hlist_head *dquot_hash;
265 struct dqstats dqstats;
266 EXPORT_SYMBOL(dqstats);
268 static qsize_t inode_get_rsv_space(struct inode *inode);
269 static qsize_t __inode_get_rsv_space(struct inode *inode);
270 static int __dquot_initialize(struct inode *inode, int type);
272 static void quota_release_workfn(struct work_struct *work);
273 static DECLARE_DELAYED_WORK(quota_release_work, quota_release_workfn);
275 static inline unsigned int
276 hashfn(const struct super_block *sb, struct kqid qid)
278 unsigned int id = from_kqid(&init_user_ns, qid);
282 tmp = (((unsigned long)sb>>L1_CACHE_SHIFT) ^ id) * (MAXQUOTAS - type);
283 return (tmp + (tmp >> dq_hash_bits)) & dq_hash_mask;
287 * Following list functions expect dq_list_lock to be held
289 static inline void insert_dquot_hash(struct dquot *dquot)
291 struct hlist_head *head;
292 head = dquot_hash + hashfn(dquot->dq_sb, dquot->dq_id);
293 hlist_add_head(&dquot->dq_hash, head);
296 static inline void remove_dquot_hash(struct dquot *dquot)
298 hlist_del_init(&dquot->dq_hash);
301 static struct dquot *find_dquot(unsigned int hashent, struct super_block *sb,
306 hlist_for_each_entry(dquot, dquot_hash+hashent, dq_hash)
307 if (dquot->dq_sb == sb && qid_eq(dquot->dq_id, qid))
313 /* Add a dquot to the tail of the free list */
314 static inline void put_dquot_last(struct dquot *dquot)
316 list_add_tail(&dquot->dq_free, &free_dquots);
317 dqstats_inc(DQST_FREE_DQUOTS);
320 static inline void put_releasing_dquots(struct dquot *dquot)
322 list_add_tail(&dquot->dq_free, &releasing_dquots);
323 set_bit(DQ_RELEASING_B, &dquot->dq_flags);
326 static inline void remove_free_dquot(struct dquot *dquot)
328 if (list_empty(&dquot->dq_free))
330 list_del_init(&dquot->dq_free);
331 if (!test_bit(DQ_RELEASING_B, &dquot->dq_flags))
332 dqstats_dec(DQST_FREE_DQUOTS);
334 clear_bit(DQ_RELEASING_B, &dquot->dq_flags);
337 static inline void put_inuse(struct dquot *dquot)
339 /* We add to the back of the inuse list so that a traversal of this
340 * list does not have to restart after we block */
341 list_add_tail(&dquot->dq_inuse, &inuse_list);
342 dqstats_inc(DQST_ALLOC_DQUOTS);
345 static inline void remove_inuse(struct dquot *dquot)
347 dqstats_dec(DQST_ALLOC_DQUOTS);
348 list_del(&dquot->dq_inuse);
351 * End of list functions needing dq_list_lock
354 static void wait_on_dquot(struct dquot *dquot)
356 mutex_lock(&dquot->dq_lock);
357 mutex_unlock(&dquot->dq_lock);
360 static inline int dquot_active(struct dquot *dquot)
362 return test_bit(DQ_ACTIVE_B, &dquot->dq_flags);
365 static inline int dquot_dirty(struct dquot *dquot)
367 return test_bit(DQ_MOD_B, &dquot->dq_flags);
370 static inline int mark_dquot_dirty(struct dquot *dquot)
372 return dquot->dq_sb->dq_op->mark_dirty(dquot);
375 /* Mark dquot dirty in an atomic manner, and return its old dirty flag state */
376 int dquot_mark_dquot_dirty(struct dquot *dquot)
380 if (!dquot_active(dquot))
383 if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NOLIST_DIRTY)
384 return test_and_set_bit(DQ_MOD_B, &dquot->dq_flags);
386 /* If quota is dirty already, we don't have to acquire dq_list_lock */
387 if (dquot_dirty(dquot))
390 spin_lock(&dq_list_lock);
391 if (!test_and_set_bit(DQ_MOD_B, &dquot->dq_flags)) {
392 list_add(&dquot->dq_dirty, &sb_dqopt(dquot->dq_sb)->
393 info[dquot->dq_id.type].dqi_dirty_list);
396 spin_unlock(&dq_list_lock);
399 EXPORT_SYMBOL(dquot_mark_dquot_dirty);
401 /* Dirtify all the dquots - this can block when journalling */
402 static inline int mark_all_dquot_dirty(struct dquot __rcu * const *dquots)
408 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
409 dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
411 /* Even in case of error we have to continue */
412 ret = mark_dquot_dirty(dquot);
419 static inline void dqput_all(struct dquot **dquot)
423 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
427 static inline int clear_dquot_dirty(struct dquot *dquot)
429 if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NOLIST_DIRTY)
430 return test_and_clear_bit(DQ_MOD_B, &dquot->dq_flags);
432 spin_lock(&dq_list_lock);
433 if (!test_and_clear_bit(DQ_MOD_B, &dquot->dq_flags)) {
434 spin_unlock(&dq_list_lock);
437 list_del_init(&dquot->dq_dirty);
438 spin_unlock(&dq_list_lock);
442 void mark_info_dirty(struct super_block *sb, int type)
444 spin_lock(&dq_data_lock);
445 sb_dqopt(sb)->info[type].dqi_flags |= DQF_INFO_DIRTY;
446 spin_unlock(&dq_data_lock);
448 EXPORT_SYMBOL(mark_info_dirty);
451 * Read dquot from disk and alloc space for it
454 int dquot_acquire(struct dquot *dquot)
456 int ret = 0, ret2 = 0;
457 unsigned int memalloc;
458 struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);
460 mutex_lock(&dquot->dq_lock);
461 memalloc = memalloc_nofs_save();
462 if (!test_bit(DQ_READ_B, &dquot->dq_flags)) {
463 ret = dqopt->ops[dquot->dq_id.type]->read_dqblk(dquot);
467 /* Make sure flags update is visible after dquot has been filled */
468 smp_mb__before_atomic();
469 set_bit(DQ_READ_B, &dquot->dq_flags);
470 /* Instantiate dquot if needed */
471 if (!dquot_active(dquot) && !dquot->dq_off) {
472 ret = dqopt->ops[dquot->dq_id.type]->commit_dqblk(dquot);
473 /* Write the info if needed */
474 if (info_dirty(&dqopt->info[dquot->dq_id.type])) {
475 ret2 = dqopt->ops[dquot->dq_id.type]->write_file_info(
476 dquot->dq_sb, dquot->dq_id.type);
486 * Make sure flags update is visible after on-disk struct has been
487 * allocated. Paired with smp_rmb() in dqget().
489 smp_mb__before_atomic();
490 set_bit(DQ_ACTIVE_B, &dquot->dq_flags);
492 memalloc_nofs_restore(memalloc);
493 mutex_unlock(&dquot->dq_lock);
496 EXPORT_SYMBOL(dquot_acquire);
499 * Write dquot to disk
501 int dquot_commit(struct dquot *dquot)
504 unsigned int memalloc;
505 struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);
507 mutex_lock(&dquot->dq_lock);
508 memalloc = memalloc_nofs_save();
509 if (!clear_dquot_dirty(dquot))
511 /* A dquot can be inactive only if there was an error during read/init
512 * => we'd better not write it */
513 if (dquot_active(dquot))
514 ret = dqopt->ops[dquot->dq_id.type]->commit_dqblk(dquot);
518 memalloc_nofs_restore(memalloc);
519 mutex_unlock(&dquot->dq_lock);
522 EXPORT_SYMBOL(dquot_commit);
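/*
 * Illustrative sketch, assuming a hypothetical journalling filesystem
 * "myfs" (none of the myfs_* names exist in the tree): such filesystems
 * typically wrap dquot_commit() in their own ->write_dquot so that the
 * quota update becomes part of a transaction.
 */
#if 0	/* example only */
static int myfs_write_dquot(struct dquot *dquot)
{
	myfs_handle_t *handle = myfs_journal_start(dquot->dq_sb);
	int ret;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	ret = dquot_commit(dquot);
	myfs_journal_stop(handle);
	return ret;
}
#endif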
527 int dquot_release(struct dquot *dquot)
529 int ret = 0, ret2 = 0;
530 unsigned int memalloc;
531 struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);
533 mutex_lock(&dquot->dq_lock);
534 memalloc = memalloc_nofs_save();
535 /* Check whether we are not racing with some other dqget() */
536 if (dquot_is_busy(dquot))
538 if (dqopt->ops[dquot->dq_id.type]->release_dqblk) {
539 ret = dqopt->ops[dquot->dq_id.type]->release_dqblk(dquot);
541 if (info_dirty(&dqopt->info[dquot->dq_id.type])) {
542 ret2 = dqopt->ops[dquot->dq_id.type]->write_file_info(
543 dquot->dq_sb, dquot->dq_id.type);
548 clear_bit(DQ_ACTIVE_B, &dquot->dq_flags);
550 memalloc_nofs_restore(memalloc);
551 mutex_unlock(&dquot->dq_lock);
554 EXPORT_SYMBOL(dquot_release);
556 void dquot_destroy(struct dquot *dquot)
558 kmem_cache_free(dquot_cachep, dquot);
560 EXPORT_SYMBOL(dquot_destroy);
562 static inline void do_destroy_dquot(struct dquot *dquot)
564 dquot->dq_sb->dq_op->destroy_dquot(dquot);
567 /* Invalidate all dquots on the list. Note that this function is called after
568 * quota is disabled and pointers from inodes removed so there cannot be new
569 * quota users. There can still be some users of quotas due to inodes being
570 * just deleted or pruned by prune_icache() (those are not attached to any
571 * list) or parallel quotactl call. We have to wait for such users.
573 static void invalidate_dquots(struct super_block *sb, int type)
575 struct dquot *dquot, *tmp;
578 flush_delayed_work(&quota_release_work);
580 spin_lock(&dq_list_lock);
581 list_for_each_entry_safe(dquot, tmp, &inuse_list, dq_inuse) {
582 if (dquot->dq_sb != sb)
584 if (dquot->dq_id.type != type)
586 /* Wait for dquot users */
587 if (atomic_read(&dquot->dq_count)) {
588 atomic_inc(&dquot->dq_count);
589 spin_unlock(&dq_list_lock);
591 * Once dqput() wakes us up, we know it's time to free
593 * IMPORTANT: we rely on the fact that there is always
594 * at most one process waiting for dquot to free.
595 * Otherwise dq_count would be > 1 and we would never
598 wait_event(dquot_ref_wq,
599 atomic_read(&dquot->dq_count) == 1);
601 /* At this moment the dquot need not exist (it could have been
602 * reclaimed by prune_dqcache()). Hence we must
607 * The last user already dropped its reference but dquot didn't
608 * get fully cleaned up yet. Restart the scan which flushes the
609 * work cleaning up released dquots.
611 if (test_bit(DQ_RELEASING_B, &dquot->dq_flags)) {
612 spin_unlock(&dq_list_lock);
616 * Quota now has no users and it has been written on last
619 remove_dquot_hash(dquot);
620 remove_free_dquot(dquot);
622 do_destroy_dquot(dquot);
624 spin_unlock(&dq_list_lock);
627 /* Call callback for every active dquot on given filesystem */
628 int dquot_scan_active(struct super_block *sb,
629 int (*fn)(struct dquot *dquot, unsigned long priv),
632 struct dquot *dquot, *old_dquot = NULL;
635 WARN_ON_ONCE(!rwsem_is_locked(&sb->s_umount));
637 spin_lock(&dq_list_lock);
638 list_for_each_entry(dquot, &inuse_list, dq_inuse) {
639 if (!dquot_active(dquot))
641 if (dquot->dq_sb != sb)
643 /* Now we have active dquot so we can just increase use count */
644 atomic_inc(&dquot->dq_count);
645 spin_unlock(&dq_list_lock);
649 * ->release_dquot() can be racing with us. Our reference
650 * protects us from new calls to it so just wait for any
651 * outstanding call and recheck the DQ_ACTIVE_B after that.
653 wait_on_dquot(dquot);
654 if (dquot_active(dquot)) {
655 ret = fn(dquot, priv);
659 spin_lock(&dq_list_lock);
660 /* We are safe to continue now because our dquot could not
661 * be moved out of the inuse list while we hold the reference */
663 spin_unlock(&dq_list_lock);
668 EXPORT_SYMBOL(dquot_scan_active);
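/*
 * Illustrative sketch (hypothetical callback, not used anywhere): the
 * callback passed to dquot_scan_active() is invoked with a referenced,
 * active dquot; a negative return value aborts the scan and is passed
 * back to the caller.
 */
static inline int example_count_active_dquot(struct dquot *dquot,
					     unsigned long priv)
{
	unsigned long *count = (unsigned long *)priv;

	(*count)++;
	return 0;
}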
670 static inline int dquot_write_dquot(struct dquot *dquot)
672 int ret = dquot->dq_sb->dq_op->write_dquot(dquot);
674 quota_error(dquot->dq_sb, "Can't write quota structure "
675 "(error %d). Quota may get out of sync!", ret);
676 /* Clear dirty bit anyway to avoid infinite loop. */
677 clear_dquot_dirty(dquot);
682 /* Write all dquot structures to quota files */
683 int dquot_writeback_dquots(struct super_block *sb, int type)
685 struct list_head dirty;
687 struct quota_info *dqopt = sb_dqopt(sb);
691 WARN_ON_ONCE(!rwsem_is_locked(&sb->s_umount));
693 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
694 if (type != -1 && cnt != type)
696 if (!sb_has_quota_active(sb, cnt))
698 spin_lock(&dq_list_lock);
699 /* Move list away to avoid livelock. */
700 list_replace_init(&dqopt->info[cnt].dqi_dirty_list, &dirty);
701 while (!list_empty(&dirty)) {
702 dquot = list_first_entry(&dirty, struct dquot,
705 WARN_ON(!dquot_active(dquot));
706 /* If the dquot is releasing we should not touch it */
707 if (test_bit(DQ_RELEASING_B, &dquot->dq_flags)) {
708 spin_unlock(&dq_list_lock);
709 flush_delayed_work(&quota_release_work);
710 spin_lock(&dq_list_lock);
714 /* Now we have active dquot from which someone is
715 * holding reference so we can safely just increase
718 spin_unlock(&dq_list_lock);
719 err = dquot_write_dquot(dquot);
723 spin_lock(&dq_list_lock);
725 spin_unlock(&dq_list_lock);
728 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
729 if ((cnt == type || type == -1) && sb_has_quota_active(sb, cnt)
730 && info_dirty(&dqopt->info[cnt]))
731 sb->dq_op->write_info(sb, cnt);
732 dqstats_inc(DQST_SYNCS);
736 EXPORT_SYMBOL(dquot_writeback_dquots);
738 /* Write all dquot structures to disk and make them visible from userspace */
739 int dquot_quota_sync(struct super_block *sb, int type)
741 struct quota_info *dqopt = sb_dqopt(sb);
745 ret = dquot_writeback_dquots(sb, type);
748 if (dqopt->flags & DQUOT_QUOTA_SYS_FILE)
751 /* This is not very clever (or fast) but currently I don't know of
752 * any other simple way of getting quota data to disk and we must get
753 * it there for it to be visible to userspace... */
754 if (sb->s_op->sync_fs) {
755 ret = sb->s_op->sync_fs(sb, 1);
759 ret = sync_blockdev(sb->s_bdev);
764 * Now when everything is written we can discard the pagecache so
765 * that userspace sees the changes.
767 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
768 if (type != -1 && cnt != type)
770 if (!sb_has_quota_active(sb, cnt))
772 inode_lock(dqopt->files[cnt]);
773 truncate_inode_pages(&dqopt->files[cnt]->i_data, 0);
774 inode_unlock(dqopt->files[cnt]);
779 EXPORT_SYMBOL(dquot_quota_sync);
782 dqcache_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
785 unsigned long freed = 0;
787 spin_lock(&dq_list_lock);
788 while (!list_empty(&free_dquots) && sc->nr_to_scan) {
789 dquot = list_first_entry(&free_dquots, struct dquot, dq_free);
790 remove_dquot_hash(dquot);
791 remove_free_dquot(dquot);
793 do_destroy_dquot(dquot);
797 spin_unlock(&dq_list_lock);
802 dqcache_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
804 return vfs_pressure_ratio(
805 percpu_counter_read_positive(&dqstats.counter[DQST_FREE_DQUOTS]));
808 static struct shrinker dqcache_shrinker = {
809 .count_objects = dqcache_shrink_count,
810 .scan_objects = dqcache_shrink_scan,
811 .seeks = DEFAULT_SEEKS,
815 * Safely release dquot and put reference to dquot.
817 static void quota_release_workfn(struct work_struct *work)
820 struct list_head rls_head;
822 spin_lock(&dq_list_lock);
823 /* Exchange the list head to avoid livelock. */
824 list_replace_init(&releasing_dquots, &rls_head);
825 spin_unlock(&dq_list_lock);
826 synchronize_srcu(&dquot_srcu);
829 spin_lock(&dq_list_lock);
830 while (!list_empty(&rls_head)) {
831 dquot = list_first_entry(&rls_head, struct dquot, dq_free);
832 WARN_ON_ONCE(atomic_read(&dquot->dq_count));
834 * Note that DQ_RELEASING_B protects us from racing with
835 * invalidate_dquots() calls so we are safe to work with the
836 * dquot even after we drop dq_list_lock.
838 if (dquot_dirty(dquot)) {
839 spin_unlock(&dq_list_lock);
840 /* Commit dquot before releasing */
841 dquot_write_dquot(dquot);
844 if (dquot_active(dquot)) {
845 spin_unlock(&dq_list_lock);
846 dquot->dq_sb->dq_op->release_dquot(dquot);
849 /* Dquot is inactive and clean, now move it to free list */
850 remove_free_dquot(dquot);
851 put_dquot_last(dquot);
853 spin_unlock(&dq_list_lock);
857 * Put reference to dquot
859 void dqput(struct dquot *dquot)
863 #ifdef CONFIG_QUOTA_DEBUG
864 if (!atomic_read(&dquot->dq_count)) {
865 quota_error(dquot->dq_sb, "trying to free free dquot of %s %d",
866 quotatypes[dquot->dq_id.type],
867 from_kqid(&init_user_ns, dquot->dq_id));
871 dqstats_inc(DQST_DROPS);
873 spin_lock(&dq_list_lock);
874 if (atomic_read(&dquot->dq_count) > 1) {
875 /* We have more than one user... nothing to do */
876 atomic_dec(&dquot->dq_count);
877 /* Releasing dquot during quotaoff phase? */
878 if (!sb_has_quota_active(dquot->dq_sb, dquot->dq_id.type) &&
879 atomic_read(&dquot->dq_count) == 1)
880 wake_up(&dquot_ref_wq);
881 spin_unlock(&dq_list_lock);
885 /* Need to release dquot? */
886 #ifdef CONFIG_QUOTA_DEBUG
888 BUG_ON(!list_empty(&dquot->dq_free));
890 put_releasing_dquots(dquot);
891 atomic_dec(&dquot->dq_count);
892 spin_unlock(&dq_list_lock);
893 queue_delayed_work(system_unbound_wq, &quota_release_work, 1);
895 EXPORT_SYMBOL(dqput);
897 struct dquot *dquot_alloc(struct super_block *sb, int type)
899 return kmem_cache_zalloc(dquot_cachep, GFP_NOFS);
901 EXPORT_SYMBOL(dquot_alloc);
903 static struct dquot *get_empty_dquot(struct super_block *sb, int type)
907 dquot = sb->dq_op->alloc_dquot(sb, type);
911 mutex_init(&dquot->dq_lock);
912 INIT_LIST_HEAD(&dquot->dq_free);
913 INIT_LIST_HEAD(&dquot->dq_inuse);
914 INIT_HLIST_NODE(&dquot->dq_hash);
915 INIT_LIST_HEAD(&dquot->dq_dirty);
917 dquot->dq_id = make_kqid_invalid(type);
918 atomic_set(&dquot->dq_count, 1);
919 spin_lock_init(&dquot->dq_dqb_lock);
925 * Get reference to dquot
927 * Locking is slightly tricky here. We are guarded from parallel quotaoff()
928 * destroying our dquot by:
929 * a) checking for quota flags under dq_list_lock and
930 * b) getting a reference to dquot before we release dq_list_lock
932 struct dquot *dqget(struct super_block *sb, struct kqid qid)
934 unsigned int hashent = hashfn(sb, qid);
935 struct dquot *dquot, *empty = NULL;
937 if (!qid_has_mapping(sb->s_user_ns, qid))
938 return ERR_PTR(-EINVAL);
940 if (!sb_has_quota_active(sb, qid.type))
941 return ERR_PTR(-ESRCH);
943 spin_lock(&dq_list_lock);
944 spin_lock(&dq_state_lock);
945 if (!sb_has_quota_active(sb, qid.type)) {
946 spin_unlock(&dq_state_lock);
947 spin_unlock(&dq_list_lock);
948 dquot = ERR_PTR(-ESRCH);
951 spin_unlock(&dq_state_lock);
953 dquot = find_dquot(hashent, sb, qid);
956 spin_unlock(&dq_list_lock);
957 empty = get_empty_dquot(sb, qid.type);
959 schedule(); /* Try to wait for a moment... */
965 /* all dquots go on the inuse_list */
967 /* hash it first so it can be found */
968 insert_dquot_hash(dquot);
969 spin_unlock(&dq_list_lock);
970 dqstats_inc(DQST_LOOKUPS);
972 if (!atomic_read(&dquot->dq_count))
973 remove_free_dquot(dquot);
974 atomic_inc(&dquot->dq_count);
975 spin_unlock(&dq_list_lock);
976 dqstats_inc(DQST_CACHE_HITS);
977 dqstats_inc(DQST_LOOKUPS);
979 /* Wait for dq_lock - after this we know that either dquot_release() is
980 * already finished or it will be canceled due to dq_count > 0 test */
981 wait_on_dquot(dquot);
982 /* Read the dquot / allocate space in quota file */
983 if (!dquot_active(dquot)) {
986 err = sb->dq_op->acquire_dquot(dquot);
989 dquot = ERR_PTR(err);
994 * Make sure following reads see filled structure - paired with
995 * smp_mb__before_atomic() in dquot_acquire().
998 #ifdef CONFIG_QUOTA_DEBUG
999 BUG_ON(!dquot->dq_sb); /* Has somebody invalidated entry under us? */
1003 do_destroy_dquot(empty);
1007 EXPORT_SYMBOL(dqget);
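/*
 * Illustrative sketch (hypothetical helper, not used by the quota core):
 * the usual way a caller obtains and drops a dquot reference. Compare
 * with dquot_transfer() below, which follows the same pattern.
 */
static inline int example_peek_user_dquot(struct super_block *sb, kuid_t uid)
{
	struct dquot *dquot;

	dquot = dqget(sb, make_kqid_uid(uid));
	if (IS_ERR(dquot)) {
		/* -ESRCH just means quota is not enabled for this type */
		return PTR_ERR(dquot) == -ESRCH ? 0 : PTR_ERR(dquot);
	}
	/* ... inspect dquot->dq_dqb under dquot->dq_dqb_lock ... */
	dqput(dquot);
	return 0;
}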
1009 static inline struct dquot __rcu **i_dquot(struct inode *inode)
1011 /* Force __rcu for now until filesystems are fixed */
1012 return (struct dquot __rcu **)inode->i_sb->s_op->get_dquots(inode);
1015 static int dqinit_needed(struct inode *inode, int type)
1017 struct dquot __rcu * const *dquots;
1020 if (IS_NOQUOTA(inode))
1023 dquots = i_dquot(inode);
1025 return !dquots[type];
1026 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
1032 /* This routine is guarded by s_umount semaphore */
1033 static int add_dquot_ref(struct super_block *sb, int type)
1035 struct inode *inode, *old_inode = NULL;
1036 #ifdef CONFIG_QUOTA_DEBUG
1041 spin_lock(&sb->s_inode_list_lock);
1042 list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
1043 spin_lock(&inode->i_lock);
1044 if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) ||
1045 !atomic_read(&inode->i_writecount) ||
1046 !dqinit_needed(inode, type)) {
1047 spin_unlock(&inode->i_lock);
1051 spin_unlock(&inode->i_lock);
1052 spin_unlock(&sb->s_inode_list_lock);
1054 #ifdef CONFIG_QUOTA_DEBUG
1055 if (unlikely(inode_get_rsv_space(inode) > 0))
1059 err = __dquot_initialize(inode, type);
1066 * We hold a reference to 'inode' so it couldn't have been
1067 * removed from s_inodes list while we dropped the
1068 * s_inode_list_lock. We cannot iput the inode now as we can be
1069 * holding the last reference and we cannot iput it under
1070 * s_inode_list_lock. So we keep the reference and iput it
1075 spin_lock(&sb->s_inode_list_lock);
1077 spin_unlock(&sb->s_inode_list_lock);
1080 #ifdef CONFIG_QUOTA_DEBUG
1082 quota_error(sb, "Writes happened before quota was turned on "
1083 "thus quota information is probably inconsistent. "
1084 "Please run quotacheck(8)");
1090 static void remove_dquot_ref(struct super_block *sb, int type)
1092 struct inode *inode;
1093 #ifdef CONFIG_QUOTA_DEBUG
1097 spin_lock(&sb->s_inode_list_lock);
1098 list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
1100 * We have to scan also I_NEW inodes because they can already
1101 * have quota pointer initialized. Luckily, we need to touch
1102 * only quota pointers and these have separate locking
1105 spin_lock(&dq_data_lock);
1106 if (!IS_NOQUOTA(inode)) {
1107 struct dquot __rcu **dquots = i_dquot(inode);
1108 struct dquot *dquot = srcu_dereference_check(
1109 dquots[type], &dquot_srcu,
1110 lockdep_is_held(&dq_data_lock));
1112 #ifdef CONFIG_QUOTA_DEBUG
1113 if (unlikely(inode_get_rsv_space(inode) > 0))
1116 rcu_assign_pointer(dquots[type], NULL);
1120 spin_unlock(&dq_data_lock);
1122 spin_unlock(&sb->s_inode_list_lock);
1123 #ifdef CONFIG_QUOTA_DEBUG
1125 printk(KERN_WARNING "VFS (%s): Writes happened after quota"
1126 " was disabled thus quota information is probably "
1127 "inconsistent. Please run quotacheck(8).\n", sb->s_id);
1132 /* Gather all references from inodes and drop them */
1133 static void drop_dquot_ref(struct super_block *sb, int type)
1136 remove_dquot_ref(sb, type);
1140 void dquot_free_reserved_space(struct dquot *dquot, qsize_t number)
1142 if (dquot->dq_dqb.dqb_rsvspace >= number)
1143 dquot->dq_dqb.dqb_rsvspace -= number;
1146 dquot->dq_dqb.dqb_rsvspace = 0;
1148 if (dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace <=
1149 dquot->dq_dqb.dqb_bsoftlimit)
1150 dquot->dq_dqb.dqb_btime = (time64_t) 0;
1151 clear_bit(DQ_BLKS_B, &dquot->dq_flags);
1154 static void dquot_decr_inodes(struct dquot *dquot, qsize_t number)
1156 if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NEGATIVE_USAGE ||
1157 dquot->dq_dqb.dqb_curinodes >= number)
1158 dquot->dq_dqb.dqb_curinodes -= number;
1160 dquot->dq_dqb.dqb_curinodes = 0;
1161 if (dquot->dq_dqb.dqb_curinodes <= dquot->dq_dqb.dqb_isoftlimit)
1162 dquot->dq_dqb.dqb_itime = (time64_t) 0;
1163 clear_bit(DQ_INODES_B, &dquot->dq_flags);
1166 static void dquot_decr_space(struct dquot *dquot, qsize_t number)
1168 if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NEGATIVE_USAGE ||
1169 dquot->dq_dqb.dqb_curspace >= number)
1170 dquot->dq_dqb.dqb_curspace -= number;
1172 dquot->dq_dqb.dqb_curspace = 0;
1173 if (dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace <=
1174 dquot->dq_dqb.dqb_bsoftlimit)
1175 dquot->dq_dqb.dqb_btime = (time64_t) 0;
1176 clear_bit(DQ_BLKS_B, &dquot->dq_flags);
1180 struct super_block *w_sb;
1181 struct kqid w_dq_id;
1185 static int warning_issued(struct dquot *dquot, const int warntype)
1187 int flag = (warntype == QUOTA_NL_BHARDWARN ||
1188 warntype == QUOTA_NL_BSOFTLONGWARN) ? DQ_BLKS_B :
1189 ((warntype == QUOTA_NL_IHARDWARN ||
1190 warntype == QUOTA_NL_ISOFTLONGWARN) ? DQ_INODES_B : 0);
1194 return test_and_set_bit(flag, &dquot->dq_flags);
1197 #ifdef CONFIG_PRINT_QUOTA_WARNING
1198 static int flag_print_warnings = 1;
1200 static int need_print_warning(struct dquot_warn *warn)
1202 if (!flag_print_warnings)
1205 switch (warn->w_dq_id.type) {
1207 return uid_eq(current_fsuid(), warn->w_dq_id.uid);
1209 return in_group_p(warn->w_dq_id.gid);
1217 /* Print a warning to the user who exceeded quota */
1217 static void print_warning(struct dquot_warn *warn)
1220 struct tty_struct *tty;
1221 int warntype = warn->w_type;
1223 if (warntype == QUOTA_NL_IHARDBELOW ||
1224 warntype == QUOTA_NL_ISOFTBELOW ||
1225 warntype == QUOTA_NL_BHARDBELOW ||
1226 warntype == QUOTA_NL_BSOFTBELOW || !need_print_warning(warn))
1229 tty = get_current_tty();
1232 tty_write_message(tty, warn->w_sb->s_id);
1233 if (warntype == QUOTA_NL_ISOFTWARN || warntype == QUOTA_NL_BSOFTWARN)
1234 tty_write_message(tty, ": warning, ");
1236 tty_write_message(tty, ": write failed, ");
1237 tty_write_message(tty, quotatypes[warn->w_dq_id.type]);
1239 case QUOTA_NL_IHARDWARN:
1240 msg = " file limit reached.\r\n";
1242 case QUOTA_NL_ISOFTLONGWARN:
1243 msg = " file quota exceeded too long.\r\n";
1245 case QUOTA_NL_ISOFTWARN:
1246 msg = " file quota exceeded.\r\n";
1248 case QUOTA_NL_BHARDWARN:
1249 msg = " block limit reached.\r\n";
1251 case QUOTA_NL_BSOFTLONGWARN:
1252 msg = " block quota exceeded too long.\r\n";
1254 case QUOTA_NL_BSOFTWARN:
1255 msg = " block quota exceeded.\r\n";
1258 tty_write_message(tty, msg);
1263 static void prepare_warning(struct dquot_warn *warn, struct dquot *dquot,
1266 if (warning_issued(dquot, warntype))
1268 warn->w_type = warntype;
1269 warn->w_sb = dquot->dq_sb;
1270 warn->w_dq_id = dquot->dq_id;
1274 * Write warnings to the console and send warning messages over netlink.
1276 * Note that this function can call into tty and networking code.
1278 static void flush_warnings(struct dquot_warn *warn)
1282 for (i = 0; i < MAXQUOTAS; i++) {
1283 if (warn[i].w_type == QUOTA_NL_NOWARN)
1285 #ifdef CONFIG_PRINT_QUOTA_WARNING
1286 print_warning(&warn[i]);
1288 quota_send_warning(warn[i].w_dq_id,
1289 warn[i].w_sb->s_dev, warn[i].w_type);
1293 static int ignore_hardlimit(struct dquot *dquot)
1295 struct mem_dqinfo *info = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_id.type];
1297 return capable(CAP_SYS_RESOURCE) &&
1298 (info->dqi_format->qf_fmt_id != QFMT_VFS_OLD ||
1299 !(info->dqi_flags & DQF_ROOT_SQUASH));
1302 static int dquot_add_inodes(struct dquot *dquot, qsize_t inodes,
1303 struct dquot_warn *warn)
1308 spin_lock(&dquot->dq_dqb_lock);
1309 newinodes = dquot->dq_dqb.dqb_curinodes + inodes;
1310 if (!sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_id.type) ||
1311 test_bit(DQ_FAKE_B, &dquot->dq_flags))
1314 if (dquot->dq_dqb.dqb_ihardlimit &&
1315 newinodes > dquot->dq_dqb.dqb_ihardlimit &&
1316 !ignore_hardlimit(dquot)) {
1317 prepare_warning(warn, dquot, QUOTA_NL_IHARDWARN);
1322 if (dquot->dq_dqb.dqb_isoftlimit &&
1323 newinodes > dquot->dq_dqb.dqb_isoftlimit &&
1324 dquot->dq_dqb.dqb_itime &&
1325 ktime_get_real_seconds() >= dquot->dq_dqb.dqb_itime &&
1326 !ignore_hardlimit(dquot)) {
1327 prepare_warning(warn, dquot, QUOTA_NL_ISOFTLONGWARN);
1332 if (dquot->dq_dqb.dqb_isoftlimit &&
1333 newinodes > dquot->dq_dqb.dqb_isoftlimit &&
1334 dquot->dq_dqb.dqb_itime == 0) {
1335 prepare_warning(warn, dquot, QUOTA_NL_ISOFTWARN);
1336 dquot->dq_dqb.dqb_itime = ktime_get_real_seconds() +
1337 sb_dqopt(dquot->dq_sb)->info[dquot->dq_id.type].dqi_igrace;
1340 dquot->dq_dqb.dqb_curinodes = newinodes;
1343 spin_unlock(&dquot->dq_dqb_lock);
1347 static int dquot_add_space(struct dquot *dquot, qsize_t space,
1348 qsize_t rsv_space, unsigned int flags,
1349 struct dquot_warn *warn)
1352 struct super_block *sb = dquot->dq_sb;
1355 spin_lock(&dquot->dq_dqb_lock);
1356 if (!sb_has_quota_limits_enabled(sb, dquot->dq_id.type) ||
1357 test_bit(DQ_FAKE_B, &dquot->dq_flags))
1360 tspace = dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace
1361 + space + rsv_space;
1363 if (dquot->dq_dqb.dqb_bhardlimit &&
1364 tspace > dquot->dq_dqb.dqb_bhardlimit &&
1365 !ignore_hardlimit(dquot)) {
1366 if (flags & DQUOT_SPACE_WARN)
1367 prepare_warning(warn, dquot, QUOTA_NL_BHARDWARN);
1372 if (dquot->dq_dqb.dqb_bsoftlimit &&
1373 tspace > dquot->dq_dqb.dqb_bsoftlimit &&
1374 dquot->dq_dqb.dqb_btime &&
1375 ktime_get_real_seconds() >= dquot->dq_dqb.dqb_btime &&
1376 !ignore_hardlimit(dquot)) {
1377 if (flags & DQUOT_SPACE_WARN)
1378 prepare_warning(warn, dquot, QUOTA_NL_BSOFTLONGWARN);
1383 if (dquot->dq_dqb.dqb_bsoftlimit &&
1384 tspace > dquot->dq_dqb.dqb_bsoftlimit &&
1385 dquot->dq_dqb.dqb_btime == 0) {
1386 if (flags & DQUOT_SPACE_WARN) {
1387 prepare_warning(warn, dquot, QUOTA_NL_BSOFTWARN);
1388 dquot->dq_dqb.dqb_btime = ktime_get_real_seconds() +
1389 sb_dqopt(sb)->info[dquot->dq_id.type].dqi_bgrace;
1392 * We don't allow preallocation to exceed softlimit so exceeding will
1401 * We have to be careful and go through warning generation & grace time
1402 * setting even if DQUOT_SPACE_NOFAIL is set. That's why we check it
1405 if (flags & DQUOT_SPACE_NOFAIL)
1408 dquot->dq_dqb.dqb_rsvspace += rsv_space;
1409 dquot->dq_dqb.dqb_curspace += space;
1411 spin_unlock(&dquot->dq_dqb_lock);
1415 static int info_idq_free(struct dquot *dquot, qsize_t inodes)
1419 if (test_bit(DQ_FAKE_B, &dquot->dq_flags) ||
1420 dquot->dq_dqb.dqb_curinodes <= dquot->dq_dqb.dqb_isoftlimit ||
1421 !sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_id.type))
1422 return QUOTA_NL_NOWARN;
1424 newinodes = dquot->dq_dqb.dqb_curinodes - inodes;
1425 if (newinodes <= dquot->dq_dqb.dqb_isoftlimit)
1426 return QUOTA_NL_ISOFTBELOW;
1427 if (dquot->dq_dqb.dqb_curinodes >= dquot->dq_dqb.dqb_ihardlimit &&
1428 newinodes < dquot->dq_dqb.dqb_ihardlimit)
1429 return QUOTA_NL_IHARDBELOW;
1430 return QUOTA_NL_NOWARN;
1433 static int info_bdq_free(struct dquot *dquot, qsize_t space)
1437 tspace = dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace;
1439 if (test_bit(DQ_FAKE_B, &dquot->dq_flags) ||
1440 tspace <= dquot->dq_dqb.dqb_bsoftlimit)
1441 return QUOTA_NL_NOWARN;
1443 if (tspace - space <= dquot->dq_dqb.dqb_bsoftlimit)
1444 return QUOTA_NL_BSOFTBELOW;
1445 if (tspace >= dquot->dq_dqb.dqb_bhardlimit &&
1446 tspace - space < dquot->dq_dqb.dqb_bhardlimit)
1447 return QUOTA_NL_BHARDBELOW;
1448 return QUOTA_NL_NOWARN;
1451 static int inode_quota_active(const struct inode *inode)
1453 struct super_block *sb = inode->i_sb;
1455 if (IS_NOQUOTA(inode))
1457 return sb_any_quota_loaded(sb) & ~sb_any_quota_suspended(sb);
1461 * Initialize quota pointers in inode
1463 * It is better to call this function outside of any transaction as it
1464 * might need a lot of space in journal for dquot structure allocation.
1466 static int __dquot_initialize(struct inode *inode, int type)
1468 int cnt, init_needed = 0;
1469 struct dquot __rcu **dquots;
1470 struct dquot *got[MAXQUOTAS] = {};
1471 struct super_block *sb = inode->i_sb;
1475 if (!inode_quota_active(inode))
1478 dquots = i_dquot(inode);
1480 /* First get references to structures we might need. */
1481 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1485 struct dquot *dquot;
1487 if (type != -1 && cnt != type)
1490 * The i_dquot should have been initialized in most cases,
1491 * we check it without locking here to avoid unnecessary
1492 * dqget()/dqput() calls.
1497 if (!sb_has_quota_active(sb, cnt))
1504 qid = make_kqid_uid(inode->i_uid);
1507 qid = make_kqid_gid(inode->i_gid);
1510 rc = inode->i_sb->dq_op->get_projid(inode, &projid);
1513 qid = make_kqid_projid(projid);
1516 dquot = dqget(sb, qid);
1517 if (IS_ERR(dquot)) {
1518 /* We raced with somebody turning quotas off... */
1519 if (PTR_ERR(dquot) != -ESRCH) {
1520 ret = PTR_ERR(dquot);
1528 /* All required i_dquot has been initialized */
1532 spin_lock(&dq_data_lock);
1533 if (IS_NOQUOTA(inode))
1535 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1536 if (type != -1 && cnt != type)
1538 /* Avoid races with quotaoff() */
1539 if (!sb_has_quota_active(sb, cnt))
1541 /* We could race with quotaon or dqget() could have failed */
1545 rcu_assign_pointer(dquots[cnt], got[cnt]);
1548 * Make quota reservation system happy if someone
1549 * did a write before quota was turned on
1551 rsv = inode_get_rsv_space(inode);
1552 if (unlikely(rsv)) {
1553 struct dquot *dquot = srcu_dereference_check(
1554 dquots[cnt], &dquot_srcu,
1555 lockdep_is_held(&dq_data_lock));
1557 spin_lock(&inode->i_lock);
1558 /* Get reservation again under proper lock */
1559 rsv = __inode_get_rsv_space(inode);
1560 spin_lock(&dquot->dq_dqb_lock);
1561 dquot->dq_dqb.dqb_rsvspace += rsv;
1562 spin_unlock(&dquot->dq_dqb_lock);
1563 spin_unlock(&inode->i_lock);
1568 spin_unlock(&dq_data_lock);
1570 /* Drop unused references */
1576 int dquot_initialize(struct inode *inode)
1578 return __dquot_initialize(inode, -1);
1580 EXPORT_SYMBOL(dquot_initialize);
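/*
 * Illustrative sketch (hypothetical caller): filesystems are expected to
 * call dquot_initialize() on the inodes involved before operations that
 * charge or transfer quota, typically at the start of ->create, ->unlink,
 * ->setattr and similar methods.
 */
static inline int example_prepare_quota_op(struct inode *dir,
					   struct inode *inode)
{
	int ret = dquot_initialize(dir);

	if (ret)
		return ret;
	return inode ? dquot_initialize(inode) : 0;
}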
1582 bool dquot_initialize_needed(struct inode *inode)
1584 struct dquot __rcu **dquots;
1587 if (!inode_quota_active(inode))
1590 dquots = i_dquot(inode);
1591 for (i = 0; i < MAXQUOTAS; i++)
1592 if (!dquots[i] && sb_has_quota_active(inode->i_sb, i))
1596 EXPORT_SYMBOL(dquot_initialize_needed);
1599 * Release all quotas referenced by inode.
1601 * This function should only be called on inode free or when converting
1602 * a file to a quota file; there are no other users of i_dquot in
1603 * either case, so we needn't call synchronize_srcu() after
1606 static void __dquot_drop(struct inode *inode)
1609 struct dquot __rcu **dquots = i_dquot(inode);
1610 struct dquot *put[MAXQUOTAS];
1612 spin_lock(&dq_data_lock);
1613 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1614 put[cnt] = srcu_dereference_check(dquots[cnt], &dquot_srcu,
1615 lockdep_is_held(&dq_data_lock));
1616 rcu_assign_pointer(dquots[cnt], NULL);
1618 spin_unlock(&dq_data_lock);
1622 void dquot_drop(struct inode *inode)
1624 struct dquot __rcu * const *dquots;
1627 if (IS_NOQUOTA(inode))
1631 * Test before calling to rule out calls from proc and such
1632 * where we are not allowed to block. Note that this is
1633 * actually reliable test even without the lock - the caller
1634 * must assure that nobody can come after the DQUOT_DROP and
1635 * add quota pointers back anyway.
1637 dquots = i_dquot(inode);
1638 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1643 if (cnt < MAXQUOTAS)
1644 __dquot_drop(inode);
1646 EXPORT_SYMBOL(dquot_drop);
1649 * inode_reserved_space is managed internally by quota, and protected by
1650 * i_lock similar to i_blocks+i_bytes.
1652 static qsize_t *inode_reserved_space(struct inode * inode)
1654 /* The filesystem must explicitly define its own method in order to use
1655 * the quota reservation interface */
1656 BUG_ON(!inode->i_sb->dq_op->get_reserved_space);
1657 return inode->i_sb->dq_op->get_reserved_space(inode);
1660 static qsize_t __inode_get_rsv_space(struct inode *inode)
1662 if (!inode->i_sb->dq_op->get_reserved_space)
1664 return *inode_reserved_space(inode);
1667 static qsize_t inode_get_rsv_space(struct inode *inode)
1671 if (!inode->i_sb->dq_op->get_reserved_space)
1673 spin_lock(&inode->i_lock);
1674 ret = __inode_get_rsv_space(inode);
1675 spin_unlock(&inode->i_lock);
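/*
 * Illustrative sketch, assuming a hypothetical filesystem "myfs" (the
 * MYFS_I()/i_reserved_quota names do not exist in the tree): a filesystem
 * opts into the reservation interface by returning a pointer to a qsize_t
 * embedded in its own inode info via ->get_reserved_space in its
 * dquot_operations; the field is then guarded by inode->i_lock as
 * described above.
 */
#if 0	/* example only */
static qsize_t *myfs_get_reserved_space(struct inode *inode)
{
	return &MYFS_I(inode)->i_reserved_quota;
}
#endif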
1680 * This function updates the i_blocks+i_bytes fields and quota information
1681 * (together with appropriate checks).
1683 * NOTE: We absolutely rely on the fact that caller dirties the inode
1684 * (usually helpers in quotaops.h care about this) and holds a handle for
1685 * the current transaction so that dquot write and inode write go into the
1690 * This operation can block, but only after everything is updated
1692 int __dquot_alloc_space(struct inode *inode, qsize_t number, int flags)
1694 int cnt, ret = 0, index;
1695 struct dquot_warn warn[MAXQUOTAS];
1696 int reserve = flags & DQUOT_SPACE_RESERVE;
1697 struct dquot __rcu **dquots;
1698 struct dquot *dquot;
1700 if (!inode_quota_active(inode)) {
1702 spin_lock(&inode->i_lock);
1703 *inode_reserved_space(inode) += number;
1704 spin_unlock(&inode->i_lock);
1706 inode_add_bytes(inode, number);
1711 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
1712 warn[cnt].w_type = QUOTA_NL_NOWARN;
1714 dquots = i_dquot(inode);
1715 index = srcu_read_lock(&dquot_srcu);
1716 spin_lock(&inode->i_lock);
1717 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1718 dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
1722 ret = dquot_add_space(dquot, 0, number, flags, &warn[cnt]);
1724 ret = dquot_add_space(dquot, number, 0, flags, &warn[cnt]);
1727 /* Back out changes we already did */
1728 for (cnt--; cnt >= 0; cnt--) {
1729 dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
1732 spin_lock(&dquot->dq_dqb_lock);
1734 dquot_free_reserved_space(dquot, number);
1736 dquot_decr_space(dquot, number);
1737 spin_unlock(&dquot->dq_dqb_lock);
1739 spin_unlock(&inode->i_lock);
1740 goto out_flush_warn;
1744 *inode_reserved_space(inode) += number;
1746 __inode_add_bytes(inode, number);
1747 spin_unlock(&inode->i_lock);
1750 goto out_flush_warn;
1751 mark_all_dquot_dirty(dquots);
1753 srcu_read_unlock(&dquot_srcu, index);
1754 flush_warnings(warn);
1758 EXPORT_SYMBOL(__dquot_alloc_space);
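/*
 * Illustrative sketch (hypothetical caller): filesystems normally charge
 * space through the wrappers in quotaops.h, which call __dquot_alloc_space()
 * and take care of dirtying the inode; the caller is expected to hold a
 * transaction handle so the dquot and inode updates end up in the same
 * commit.
 */
static inline int example_charge_one_block(struct inode *inode)
{
	int ret = dquot_alloc_block(inode, 1);	/* may return -EDQUOT */

	if (ret)
		return ret;
	/* ... allocate the block on disk; on failure undo the charge
	 * with dquot_free_block(inode, 1) ... */
	return 0;
}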
1761 * This operation can block, but only after everything is updated
1763 int dquot_alloc_inode(struct inode *inode)
1765 int cnt, ret = 0, index;
1766 struct dquot_warn warn[MAXQUOTAS];
1767 struct dquot __rcu * const *dquots;
1768 struct dquot *dquot;
1770 if (!inode_quota_active(inode))
1772 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
1773 warn[cnt].w_type = QUOTA_NL_NOWARN;
1775 dquots = i_dquot(inode);
1776 index = srcu_read_lock(&dquot_srcu);
1777 spin_lock(&inode->i_lock);
1778 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1779 dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
1782 ret = dquot_add_inodes(dquot, 1, &warn[cnt]);
1784 for (cnt--; cnt >= 0; cnt--) {
1785 dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
1788 /* Back out changes we already did */
1789 spin_lock(&dquot->dq_dqb_lock);
1790 dquot_decr_inodes(dquot, 1);
1791 spin_unlock(&dquot->dq_dqb_lock);
1798 spin_unlock(&inode->i_lock);
1800 mark_all_dquot_dirty(dquots);
1801 srcu_read_unlock(&dquot_srcu, index);
1802 flush_warnings(warn);
1805 EXPORT_SYMBOL(dquot_alloc_inode);
1808 * Convert in-memory reserved quotas to real consumed quotas
1810 int dquot_claim_space_nodirty(struct inode *inode, qsize_t number)
1812 struct dquot __rcu **dquots;
1813 struct dquot *dquot;
1816 if (!inode_quota_active(inode)) {
1817 spin_lock(&inode->i_lock);
1818 *inode_reserved_space(inode) -= number;
1819 __inode_add_bytes(inode, number);
1820 spin_unlock(&inode->i_lock);
1824 dquots = i_dquot(inode);
1825 index = srcu_read_lock(&dquot_srcu);
1826 spin_lock(&inode->i_lock);
1827 /* Claim reserved quotas to allocated quotas */
1828 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1829 dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
1831 spin_lock(&dquot->dq_dqb_lock);
1832 if (WARN_ON_ONCE(dquot->dq_dqb.dqb_rsvspace < number))
1833 number = dquot->dq_dqb.dqb_rsvspace;
1834 dquot->dq_dqb.dqb_curspace += number;
1835 dquot->dq_dqb.dqb_rsvspace -= number;
1836 spin_unlock(&dquot->dq_dqb_lock);
1839 /* Update inode bytes */
1840 *inode_reserved_space(inode) -= number;
1841 __inode_add_bytes(inode, number);
1842 spin_unlock(&inode->i_lock);
1843 mark_all_dquot_dirty(dquots);
1844 srcu_read_unlock(&dquot_srcu, index);
1847 EXPORT_SYMBOL(dquot_claim_space_nodirty);
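/*
 * Illustrative sketch (hypothetical delayed-allocation caller): a
 * filesystem reserves quota at write time and converts the reservation
 * into real usage once blocks are actually allocated, using the
 * quotaops.h wrappers around __dquot_alloc_space() and
 * dquot_claim_space_nodirty().
 */
static inline int example_delalloc_one_block(struct inode *inode)
{
	int ret = dquot_reserve_block(inode, 1);	/* charge reservation */

	if (ret)
		return ret;
	/* ... later, when the block is really allocated at writeback ... */
	ret = dquot_claim_block(inode, 1);		/* reservation -> usage */
	if (ret)
		dquot_release_reservation_block(inode, 1);
	return ret;
}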
1850 * Convert allocated space back to in-memory reserved quotas
1852 void dquot_reclaim_space_nodirty(struct inode *inode, qsize_t number)
1854 struct dquot __rcu **dquots;
1855 struct dquot *dquot;
1858 if (!inode_quota_active(inode)) {
1859 spin_lock(&inode->i_lock);
1860 *inode_reserved_space(inode) += number;
1861 __inode_sub_bytes(inode, number);
1862 spin_unlock(&inode->i_lock);
1866 dquots = i_dquot(inode);
1867 index = srcu_read_lock(&dquot_srcu);
1868 spin_lock(&inode->i_lock);
1869 /* Return claimed space back to in-memory reservations */
1870 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1871 dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
1873 spin_lock(&dquot->dq_dqb_lock);
1874 if (WARN_ON_ONCE(dquot->dq_dqb.dqb_curspace < number))
1875 number = dquot->dq_dqb.dqb_curspace;
1876 dquot->dq_dqb.dqb_rsvspace += number;
1877 dquot->dq_dqb.dqb_curspace -= number;
1878 spin_unlock(&dquot->dq_dqb_lock);
1881 /* Update inode bytes */
1882 *inode_reserved_space(inode) += number;
1883 __inode_sub_bytes(inode, number);
1884 spin_unlock(&inode->i_lock);
1885 mark_all_dquot_dirty(dquots);
1886 srcu_read_unlock(&dquot_srcu, index);
1889 EXPORT_SYMBOL(dquot_reclaim_space_nodirty);
1892 * This operation can block, but only after everything is updated
1894 void __dquot_free_space(struct inode *inode, qsize_t number, int flags)
1897 struct dquot_warn warn[MAXQUOTAS];
1898 struct dquot __rcu **dquots;
1899 struct dquot *dquot;
1900 int reserve = flags & DQUOT_SPACE_RESERVE, index;
1902 if (!inode_quota_active(inode)) {
1904 spin_lock(&inode->i_lock);
1905 *inode_reserved_space(inode) -= number;
1906 spin_unlock(&inode->i_lock);
1908 inode_sub_bytes(inode, number);
1913 dquots = i_dquot(inode);
1914 index = srcu_read_lock(&dquot_srcu);
1915 spin_lock(&inode->i_lock);
1916 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1919 warn[cnt].w_type = QUOTA_NL_NOWARN;
1920 dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
1923 spin_lock(&dquot->dq_dqb_lock);
1924 wtype = info_bdq_free(dquot, number);
1925 if (wtype != QUOTA_NL_NOWARN)
1926 prepare_warning(&warn[cnt], dquot, wtype);
1928 dquot_free_reserved_space(dquot, number);
1930 dquot_decr_space(dquot, number);
1931 spin_unlock(&dquot->dq_dqb_lock);
1934 *inode_reserved_space(inode) -= number;
1936 __inode_sub_bytes(inode, number);
1937 spin_unlock(&inode->i_lock);
1941 mark_all_dquot_dirty(dquots);
1943 srcu_read_unlock(&dquot_srcu, index);
1944 flush_warnings(warn);
1946 EXPORT_SYMBOL(__dquot_free_space);
1949 * This operation can block, but only after everything is updated
1951 void dquot_free_inode(struct inode *inode)
1954 struct dquot_warn warn[MAXQUOTAS];
1955 struct dquot __rcu * const *dquots;
1956 struct dquot *dquot;
1959 if (!inode_quota_active(inode))
1962 dquots = i_dquot(inode);
1963 index = srcu_read_lock(&dquot_srcu);
1964 spin_lock(&inode->i_lock);
1965 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1967 warn[cnt].w_type = QUOTA_NL_NOWARN;
1968 dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
1971 spin_lock(&dquot->dq_dqb_lock);
1972 wtype = info_idq_free(dquot, 1);
1973 if (wtype != QUOTA_NL_NOWARN)
1974 prepare_warning(&warn[cnt], dquot, wtype);
1975 dquot_decr_inodes(dquot, 1);
1976 spin_unlock(&dquot->dq_dqb_lock);
1978 spin_unlock(&inode->i_lock);
1979 mark_all_dquot_dirty(dquots);
1980 srcu_read_unlock(&dquot_srcu, index);
1981 flush_warnings(warn);
1983 EXPORT_SYMBOL(dquot_free_inode);
1986 * Transfer the number of inodes and blocks from one diskquota to another.
1987 * On success, dquot references in transfer_to are consumed and references
1988 * to original dquots that need to be released are placed there. On failure,
1989 * references are kept untouched.
1991 * This operation can block, but only after everything is updated
1992 * A transaction must be started when entering this function.
1994 * We are holding reference on transfer_from & transfer_to, no need to
1995 * protect them by srcu_read_lock().
1997 int __dquot_transfer(struct inode *inode, struct dquot **transfer_to)
2000 qsize_t rsv_space = 0;
2001 qsize_t inode_usage = 1;
2002 struct dquot __rcu **dquots;
2003 struct dquot *transfer_from[MAXQUOTAS] = {};
2004 int cnt, index, ret = 0;
2005 char is_valid[MAXQUOTAS] = {};
2006 struct dquot_warn warn_to[MAXQUOTAS];
2007 struct dquot_warn warn_from_inodes[MAXQUOTAS];
2008 struct dquot_warn warn_from_space[MAXQUOTAS];
2010 if (IS_NOQUOTA(inode))
2013 if (inode->i_sb->dq_op->get_inode_usage) {
2014 ret = inode->i_sb->dq_op->get_inode_usage(inode, &inode_usage);
2019 /* Initialize the arrays */
2020 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
2021 warn_to[cnt].w_type = QUOTA_NL_NOWARN;
2022 warn_from_inodes[cnt].w_type = QUOTA_NL_NOWARN;
2023 warn_from_space[cnt].w_type = QUOTA_NL_NOWARN;
2026 spin_lock(&dq_data_lock);
2027 spin_lock(&inode->i_lock);
2028 if (IS_NOQUOTA(inode)) { /* File without quota accounting? */
2029 spin_unlock(&inode->i_lock);
2030 spin_unlock(&dq_data_lock);
2033 cur_space = __inode_get_bytes(inode);
2034 rsv_space = __inode_get_rsv_space(inode);
2035 dquots = i_dquot(inode);
2037 * Build the transfer_from list, check limits, and update usage in
2038 * the target structures.
2040 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
2042 * Skip changes for same uid or gid or for turned off quota-type.
2044 if (!transfer_to[cnt])
2046 /* Avoid races with quotaoff() */
2047 if (!sb_has_quota_active(inode->i_sb, cnt))
2050 transfer_from[cnt] = srcu_dereference_check(dquots[cnt],
2051 &dquot_srcu, lockdep_is_held(&dq_data_lock));
2052 ret = dquot_add_inodes(transfer_to[cnt], inode_usage,
2056 ret = dquot_add_space(transfer_to[cnt], cur_space, rsv_space,
2057 DQUOT_SPACE_WARN, &warn_to[cnt]);
2059 spin_lock(&transfer_to[cnt]->dq_dqb_lock);
2060 dquot_decr_inodes(transfer_to[cnt], inode_usage);
2061 spin_unlock(&transfer_to[cnt]->dq_dqb_lock);
2066 /* Decrease usage for source structures and update quota pointers */
2067 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
2070 /* Due to IO error we might not have transfer_from[] structure */
2071 if (transfer_from[cnt]) {
2074 spin_lock(&transfer_from[cnt]->dq_dqb_lock);
2075 wtype = info_idq_free(transfer_from[cnt], inode_usage);
2076 if (wtype != QUOTA_NL_NOWARN)
2077 prepare_warning(&warn_from_inodes[cnt],
2078 transfer_from[cnt], wtype);
2079 wtype = info_bdq_free(transfer_from[cnt],
2080 cur_space + rsv_space);
2081 if (wtype != QUOTA_NL_NOWARN)
2082 prepare_warning(&warn_from_space[cnt],
2083 transfer_from[cnt], wtype);
2084 dquot_decr_inodes(transfer_from[cnt], inode_usage);
2085 dquot_decr_space(transfer_from[cnt], cur_space);
2086 dquot_free_reserved_space(transfer_from[cnt],
2088 spin_unlock(&transfer_from[cnt]->dq_dqb_lock);
2090 rcu_assign_pointer(dquots[cnt], transfer_to[cnt]);
2092 spin_unlock(&inode->i_lock);
2093 spin_unlock(&dq_data_lock);
2096 * These arrays are local and we hold dquot references so we don't need
2097 * the srcu protection but still take dquot_srcu to avoid warning in
2098 * mark_all_dquot_dirty().
2100 index = srcu_read_lock(&dquot_srcu);
2101 mark_all_dquot_dirty((struct dquot __rcu **)transfer_from);
2102 mark_all_dquot_dirty((struct dquot __rcu **)transfer_to);
2103 srcu_read_unlock(&dquot_srcu, index);
2105 flush_warnings(warn_to);
2106 flush_warnings(warn_from_inodes);
2107 flush_warnings(warn_from_space);
2108 /* Pass back references to put */
2109 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
2111 transfer_to[cnt] = transfer_from[cnt];
2114 /* Back out changes we already did */
2115 for (cnt--; cnt >= 0; cnt--) {
2118 spin_lock(&transfer_to[cnt]->dq_dqb_lock);
2119 dquot_decr_inodes(transfer_to[cnt], inode_usage);
2120 dquot_decr_space(transfer_to[cnt], cur_space);
2121 dquot_free_reserved_space(transfer_to[cnt], rsv_space);
2122 spin_unlock(&transfer_to[cnt]->dq_dqb_lock);
2124 spin_unlock(&inode->i_lock);
2125 spin_unlock(&dq_data_lock);
2126 flush_warnings(warn_to);
2129 EXPORT_SYMBOL(__dquot_transfer);
2131 /* Wrapper for transferring ownership of an inode for uid/gid only
2132 * Called from FSXXX_setattr()
2134 int dquot_transfer(struct user_namespace *mnt_userns, struct inode *inode,
2135 struct iattr *iattr)
2137 struct dquot *transfer_to[MAXQUOTAS] = {};
2138 struct dquot *dquot;
2139 struct super_block *sb = inode->i_sb;
2142 if (!inode_quota_active(inode))
2145 if (i_uid_needs_update(mnt_userns, iattr, inode)) {
2146 kuid_t kuid = from_vfsuid(mnt_userns, i_user_ns(inode),
2149 dquot = dqget(sb, make_kqid_uid(kuid));
2150 if (IS_ERR(dquot)) {
2151 if (PTR_ERR(dquot) != -ESRCH) {
2152 ret = PTR_ERR(dquot);
2157 transfer_to[USRQUOTA] = dquot;
2159 if (i_gid_needs_update(mnt_userns, iattr, inode)) {
2160 kgid_t kgid = from_vfsgid(mnt_userns, i_user_ns(inode),
2163 dquot = dqget(sb, make_kqid_gid(kgid));
2164 if (IS_ERR(dquot)) {
2165 if (PTR_ERR(dquot) != -ESRCH) {
2166 ret = PTR_ERR(dquot);
2171 transfer_to[GRPQUOTA] = dquot;
2173 ret = __dquot_transfer(inode, transfer_to);
2175 dqput_all(transfer_to);
2178 EXPORT_SYMBOL(dquot_transfer);
2181 * Write info of quota file to disk
2183 int dquot_commit_info(struct super_block *sb, int type)
2185 struct quota_info *dqopt = sb_dqopt(sb);
2187 return dqopt->ops[type]->write_file_info(sb, type);
2189 EXPORT_SYMBOL(dquot_commit_info);
2191 int dquot_get_next_id(struct super_block *sb, struct kqid *qid)
2193 struct quota_info *dqopt = sb_dqopt(sb);
2195 if (!sb_has_quota_active(sb, qid->type))
2197 if (!dqopt->ops[qid->type]->get_next_id)
2199 return dqopt->ops[qid->type]->get_next_id(sb, qid);
2201 EXPORT_SYMBOL(dquot_get_next_id);
/*
 * Definitions of diskquota operations.
 */
const struct dquot_operations dquot_operations = {
	.write_dquot = dquot_commit,
	.acquire_dquot = dquot_acquire,
	.release_dquot = dquot_release,
	.mark_dirty = dquot_mark_dquot_dirty,
	.write_info = dquot_commit_info,
	.alloc_dquot = dquot_alloc,
	.destroy_dquot = dquot_destroy,
	.get_next_id = dquot_get_next_id,
};
EXPORT_SYMBOL(dquot_operations);

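/*
 * Illustrative sketch (not part of this file): a filesystem relying on the
 * generic dquot code points its super block at these operations while
 * filling the super block, along the lines of:
 *
 *	sb->dq_op = &dquot_operations;
 *	sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP;
 */
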
/*
 * Generic helper for ->open on filesystems supporting disk quotas.
 */
int dquot_file_open(struct inode *inode, struct file *file)
{
	int error;

	error = generic_file_open(inode, file);
	if (!error && (file->f_mode & FMODE_WRITE))
		error = dquot_initialize(inode);
	return error;
}
EXPORT_SYMBOL(dquot_file_open);

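/*
 * Illustrative sketch (not part of this file): filesystems typically use
 * this helper directly as the ->open method of their regular-file
 * file_operations so quota structures exist before the first write
 * (myfs naming is hypothetical):
 *
 *	const struct file_operations myfs_file_operations = {
 *		.open = dquot_file_open,
 *		...
 *	};
 */
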
static void vfs_cleanup_quota_inode(struct super_block *sb, int type)
{
	struct quota_info *dqopt = sb_dqopt(sb);
	struct inode *inode = dqopt->files[type];

	if (!inode)
		return;
	if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE)) {
		inode_lock(inode);
		inode->i_flags &= ~S_NOQUOTA;
		inode_unlock(inode);
	}
	dqopt->files[type] = NULL;
	iput(inode);
}

/*
 * Turn quota off on a device. type == -1 ==> quotaoff for all types (umount)
 */
int dquot_disable(struct super_block *sb, int type, unsigned int flags)
{
	int cnt;
	struct quota_info *dqopt = sb_dqopt(sb);

	/* s_umount should be held in exclusive mode */
	if (WARN_ON_ONCE(down_read_trylock(&sb->s_umount)))
		up_read(&sb->s_umount);

	/* Cannot turn off usage accounting without turning off limits, or
	 * suspend quotas and simultaneously turn quotas off. */
	if ((flags & DQUOT_USAGE_ENABLED && !(flags & DQUOT_LIMITS_ENABLED))
	    || (flags & DQUOT_SUSPENDED && flags & (DQUOT_LIMITS_ENABLED |
	    DQUOT_USAGE_ENABLED)))
		return -EINVAL;

	/*
	 * Skip everything if there's nothing to do. We have to do this because
	 * sometimes we are called when fill_super() failed and calling
	 * sync_fs() in such cases does no good.
	 */
	if (!sb_any_quota_loaded(sb))
		return 0;

	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (type != -1 && cnt != type)
			continue;
		if (!sb_has_quota_loaded(sb, cnt))
			continue;

		if (flags & DQUOT_SUSPENDED) {
			spin_lock(&dq_state_lock);
			dqopt->flags |=
				dquot_state_flag(DQUOT_SUSPENDED, cnt);
			spin_unlock(&dq_state_lock);
		} else {
			spin_lock(&dq_state_lock);
			dqopt->flags &= ~dquot_state_flag(flags, cnt);
			/* Turning off suspended quotas? */
			if (!sb_has_quota_loaded(sb, cnt) &&
			    sb_has_quota_suspended(sb, cnt)) {
				dqopt->flags &= ~dquot_state_flag(
							DQUOT_SUSPENDED, cnt);
				spin_unlock(&dq_state_lock);
				vfs_cleanup_quota_inode(sb, cnt);
				continue;
			}
			spin_unlock(&dq_state_lock);
		}

		/* We still have to keep quota loaded? */
		if (sb_has_quota_loaded(sb, cnt) && !(flags & DQUOT_SUSPENDED))
			continue;

		/* Note: these are blocking operations */
		drop_dquot_ref(sb, cnt);
		invalidate_dquots(sb, cnt);
		/*
		 * Now all dquots should be invalidated, all writes done so we
		 * should be only users of the info. No locks needed.
		 */
		if (info_dirty(&dqopt->info[cnt]))
			sb->dq_op->write_info(sb, cnt);
		if (dqopt->ops[cnt]->free_file_info)
			dqopt->ops[cnt]->free_file_info(sb, cnt);
		put_quota_format(dqopt->info[cnt].dqi_format);
		dqopt->info[cnt].dqi_flags = 0;
		dqopt->info[cnt].dqi_igrace = 0;
		dqopt->info[cnt].dqi_bgrace = 0;
		dqopt->ops[cnt] = NULL;
	}

	/* Skip syncing and setting flags if quota files are hidden */
	if (dqopt->flags & DQUOT_QUOTA_SYS_FILE)
		goto put_inodes;

	/* Sync the superblock so that buffers with quota data are written to
	 * disk (and so userspace sees correct data afterwards). */
	if (sb->s_op->sync_fs)
		sb->s_op->sync_fs(sb, 1);
	sync_blockdev(sb->s_bdev);
	/* Now the quota files are just ordinary files and we can set the
	 * inode flags back. Moreover we discard the pagecache so that
	 * userspace sees the writes we did bypassing the pagecache. We
	 * must also discard the blockdev buffers so that we see the
	 * changes done by userspace on the next quotaon() */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		if (!sb_has_quota_loaded(sb, cnt) && dqopt->files[cnt]) {
			inode_lock(dqopt->files[cnt]);
			truncate_inode_pages(&dqopt->files[cnt]->i_data, 0);
			inode_unlock(dqopt->files[cnt]);
		}
	if (sb->s_bdev)
		invalidate_bdev(sb->s_bdev);
put_inodes:
	/* We are done when suspending quotas */
	if (flags & DQUOT_SUSPENDED)
		return 0;

	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		if (!sb_has_quota_loaded(sb, cnt))
			vfs_cleanup_quota_inode(sb, cnt);
	return 0;
}
EXPORT_SYMBOL(dquot_disable);

int dquot_quota_off(struct super_block *sb, int type)
{
	return dquot_disable(sb, type,
			     DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);
}
EXPORT_SYMBOL(dquot_quota_off);

/*
 * Turn quotas on for a device
 */

static int vfs_setup_quota_inode(struct inode *inode, int type)
{
	struct super_block *sb = inode->i_sb;
	struct quota_info *dqopt = sb_dqopt(sb);

	if (is_bad_inode(inode))
		return -EUCLEAN;
	if (!S_ISREG(inode->i_mode))
		return -EACCES;
	if (IS_RDONLY(inode))
		return -EROFS;
	if (sb_has_quota_loaded(sb, type))
		return -EBUSY;

	/*
	 * Quota files should never be encrypted. They should be thought of as
	 * filesystem metadata, not user data. New-style internal quota files
	 * cannot be encrypted by users anyway, but old-style external quota
	 * files could potentially be incorrectly created in an encrypted
	 * directory, hence this explicit check. Some reasons why encrypted
	 * quota files don't work include: (1) some filesystems that support
	 * encryption don't handle it in their quota_read and quota_write, and
	 * (2) cleaning up encrypted quota files at unmount would need special
	 * consideration, as quota files are cleaned up later than user files.
	 */
	if (IS_ENCRYPTED(inode))
		return -EINVAL;

	dqopt->files[type] = igrab(inode);
	if (!dqopt->files[type])
		return -EIO;
	if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE)) {
		/* We don't want quota and atime on quota files (deadlocks
		 * possible). Also nobody should write to the file - we use
		 * special IO operations which ignore the immutable bit. */
		inode_lock(inode);
		inode->i_flags |= S_NOQUOTA;
		inode_unlock(inode);
		/*
		 * When S_NOQUOTA is set, remove dquot references as no more
		 * references can be added
		 */
		__dquot_drop(inode);
	}
	return 0;
}

int dquot_load_quota_sb(struct super_block *sb, int type, int format_id,
	unsigned int flags)
{
	struct quota_format_type *fmt = find_quota_format(format_id);
	struct quota_info *dqopt = sb_dqopt(sb);
	int error;

	/* Just unsuspend quotas? */
	BUG_ON(flags & DQUOT_SUSPENDED);
	/* s_umount should be held in exclusive mode */
	if (WARN_ON_ONCE(down_read_trylock(&sb->s_umount)))
		up_read(&sb->s_umount);

	if (!fmt)
		return -ESRCH;
	if (!sb->s_op->quota_write || !sb->s_op->quota_read ||
	    (type == PRJQUOTA && sb->dq_op->get_projid == NULL)) {
		error = -EINVAL;
		goto out_fmt;
	}
	/* Filesystems outside of init_user_ns not yet supported */
	if (sb->s_user_ns != &init_user_ns) {
		error = -EINVAL;
		goto out_fmt;
	}
	/* Usage always has to be set... */
	if (!(flags & DQUOT_USAGE_ENABLED)) {
		error = -EINVAL;
		goto out_fmt;
	}
	if (sb_has_quota_loaded(sb, type)) {
		error = -EBUSY;
		goto out_fmt;
	}

	if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE)) {
		/* As we bypass the pagecache we must now flush all the
		 * dirty data and invalidate caches so that kernel sees
		 * changes from userspace. It is not enough to just flush
		 * the quota file since if blocksize < pagesize, invalidation
		 * of the cache could fail because of other unrelated dirty
		 * data */
		sync_filesystem(sb);
		invalidate_bdev(sb->s_bdev);
	}

	error = -EINVAL;
	if (!fmt->qf_ops->check_quota_file(sb, type))
		goto out_fmt;

	dqopt->ops[type] = fmt->qf_ops;
	dqopt->info[type].dqi_format = fmt;
	dqopt->info[type].dqi_fmt_id = format_id;
	INIT_LIST_HEAD(&dqopt->info[type].dqi_dirty_list);
	error = dqopt->ops[type]->read_file_info(sb, type);
	if (error < 0)
		goto out_fmt;
	if (dqopt->flags & DQUOT_QUOTA_SYS_FILE) {
		spin_lock(&dq_data_lock);
		dqopt->info[type].dqi_flags |= DQF_SYS_FILE;
		spin_unlock(&dq_data_lock);
	}
	spin_lock(&dq_state_lock);
	dqopt->flags |= dquot_state_flag(flags, type);
	spin_unlock(&dq_state_lock);

	error = add_dquot_ref(sb, type);
	if (error)
		dquot_disable(sb, type,
			      DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);

	return error;
out_fmt:
	put_quota_format(fmt);

	return error;
}
EXPORT_SYMBOL(dquot_load_quota_sb);

/*
 * More powerful function for turning on quotas on a given quota inode,
 * allowing setting of individual quota flags
 */
int dquot_load_quota_inode(struct inode *inode, int type, int format_id,
	unsigned int flags)
{
	int err;

	err = vfs_setup_quota_inode(inode, type);
	if (err < 0)
		return err;
	err = dquot_load_quota_sb(inode->i_sb, type, format_id, flags);
	if (err < 0)
		vfs_cleanup_quota_inode(inode->i_sb, type);
	return err;
}
EXPORT_SYMBOL(dquot_load_quota_inode);

/* Reenable quotas on remount RW */
int dquot_resume(struct super_block *sb, int type)
{
	struct quota_info *dqopt = sb_dqopt(sb);
	int ret = 0, cnt;
	unsigned int flags;

	/* s_umount should be held in exclusive mode */
	if (WARN_ON_ONCE(down_read_trylock(&sb->s_umount)))
		up_read(&sb->s_umount);

	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (type != -1 && cnt != type)
			continue;
		if (!sb_has_quota_suspended(sb, cnt))
			continue;

		spin_lock(&dq_state_lock);
		flags = dqopt->flags & dquot_state_flag(DQUOT_USAGE_ENABLED |
							DQUOT_LIMITS_ENABLED,
							cnt);
		dqopt->flags &= ~dquot_state_flag(DQUOT_STATE_FLAGS, cnt);
		spin_unlock(&dq_state_lock);

		flags = dquot_generic_flag(flags, cnt);
		ret = dquot_load_quota_sb(sb, cnt, dqopt->info[cnt].dqi_fmt_id,
					  flags);
		if (ret < 0)
			vfs_cleanup_quota_inode(sb, cnt);
	}

	return ret;
}
EXPORT_SYMBOL(dquot_resume);

int dquot_quota_on(struct super_block *sb, int type, int format_id,
		   const struct path *path)
{
	int error = security_quota_on(path->dentry);
	if (error)
		return error;
	/* Quota file not on the same filesystem? */
	if (path->dentry->d_sb != sb)
		error = -EXDEV;
	else
		error = dquot_load_quota_inode(d_inode(path->dentry), type,
					       format_id, DQUOT_USAGE_ENABLED |
					       DQUOT_LIMITS_ENABLED);
	return error;
}
EXPORT_SYMBOL(dquot_quota_on);

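/*
 * Illustrative sketch (not part of this file): this helper backs ->quota_on
 * for filesystems with visible quota files, and is ultimately reached from
 * userspace via quotactl(2), roughly (device and file paths are examples):
 *
 *	quotactl(QCMD(Q_QUOTAON, USRQUOTA), "/dev/sda1", QFMT_VFS_V1,
 *		 "/mnt/aquota.user");
 */
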
/*
 * This function is used when a filesystem needs to initialize quotas
 * during mount time.
 */
int dquot_quota_on_mount(struct super_block *sb, char *qf_name,
		int format_id, int type)
{
	struct dentry *dentry;
	int error;

	dentry = lookup_positive_unlocked(qf_name, sb->s_root, strlen(qf_name));
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	error = security_quota_on(dentry);
	if (!error)
		error = dquot_load_quota_inode(d_inode(dentry), type, format_id,
				DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);

	dput(dentry);
	return error;
}
EXPORT_SYMBOL(dquot_quota_on_mount);

static int dquot_quota_enable(struct super_block *sb, unsigned int flags)
{
	int ret;
	int type;
	struct quota_info *dqopt = sb_dqopt(sb);

	if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE))
		return -ENOSYS;
	/* Accounting cannot be turned on while fs is mounted */
	flags &= ~(FS_QUOTA_UDQ_ACCT | FS_QUOTA_GDQ_ACCT | FS_QUOTA_PDQ_ACCT);
	if (!flags)
		return -EINVAL;
	for (type = 0; type < MAXQUOTAS; type++) {
		if (!(flags & qtype_enforce_flag(type)))
			continue;
		/* Can't enforce without accounting */
		if (!sb_has_quota_usage_enabled(sb, type)) {
			ret = -EINVAL;
			goto out_err;
		}
		if (sb_has_quota_limits_enabled(sb, type)) {
			ret = -EBUSY;
			goto out_err;
		}
		spin_lock(&dq_state_lock);
		dqopt->flags |= dquot_state_flag(DQUOT_LIMITS_ENABLED, type);
		spin_unlock(&dq_state_lock);
	}
	return 0;
out_err:
	/* Backout enforcement enablement we already did */
	for (type--; type >= 0; type--) {
		if (flags & qtype_enforce_flag(type))
			dquot_disable(sb, type, DQUOT_LIMITS_ENABLED);
	}
	/* Error code translation for better compatibility with XFS */
	if (ret == -EBUSY)
		ret = -EEXIST;
	return ret;
}

static int dquot_quota_disable(struct super_block *sb, unsigned int flags)
{
	int ret;
	int type;
	struct quota_info *dqopt = sb_dqopt(sb);

	if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE))
		return -ENOSYS;
	/*
	 * We don't support turning off accounting via quotactl. In principle
	 * quota infrastructure can do this but filesystems don't expect
	 * userspace to be able to do it.
	 */
	if (flags &
		  (FS_QUOTA_UDQ_ACCT | FS_QUOTA_GDQ_ACCT | FS_QUOTA_PDQ_ACCT))
		return -EOPNOTSUPP;

	/* Filter out limits not enabled */
	for (type = 0; type < MAXQUOTAS; type++)
		if (!sb_has_quota_limits_enabled(sb, type))
			flags &= ~qtype_enforce_flag(type);
	/* Nothing left? */
	if (!flags)
		return -EEXIST;
	for (type = 0; type < MAXQUOTAS; type++) {
		if (flags & qtype_enforce_flag(type)) {
			ret = dquot_disable(sb, type, DQUOT_LIMITS_ENABLED);
			if (ret < 0)
				goto out_err;
		}
	}
	return 0;
out_err:
	/* Backout enforcement disabling we already did */
	for (type--; type >= 0; type--) {
		if (flags & qtype_enforce_flag(type)) {
			spin_lock(&dq_state_lock);
			dqopt->flags |=
				dquot_state_flag(DQUOT_LIMITS_ENABLED, type);
			spin_unlock(&dq_state_lock);
		}
	}
	return ret;
}

/* Generic routine for getting common part of quota structure */
static void do_get_dqblk(struct dquot *dquot, struct qc_dqblk *di)
{
	struct mem_dqblk *dm = &dquot->dq_dqb;

	memset(di, 0, sizeof(*di));
	spin_lock(&dquot->dq_dqb_lock);
	di->d_spc_hardlimit = dm->dqb_bhardlimit;
	di->d_spc_softlimit = dm->dqb_bsoftlimit;
	di->d_ino_hardlimit = dm->dqb_ihardlimit;
	di->d_ino_softlimit = dm->dqb_isoftlimit;
	di->d_space = dm->dqb_curspace + dm->dqb_rsvspace;
	di->d_ino_count = dm->dqb_curinodes;
	di->d_spc_timer = dm->dqb_btime;
	di->d_ino_timer = dm->dqb_itime;
	spin_unlock(&dquot->dq_dqb_lock);
}

int dquot_get_dqblk(struct super_block *sb, struct kqid qid,
		    struct qc_dqblk *di)
{
	struct dquot *dquot;

	dquot = dqget(sb, qid);
	if (IS_ERR(dquot))
		return PTR_ERR(dquot);
	do_get_dqblk(dquot, di);
	dqput(dquot);

	return 0;
}
EXPORT_SYMBOL(dquot_get_dqblk);

int dquot_get_next_dqblk(struct super_block *sb, struct kqid *qid,
			 struct qc_dqblk *di)
{
	struct dquot *dquot;
	int err;

	if (!sb->dq_op->get_next_id)
		return -ENOSYS;
	err = sb->dq_op->get_next_id(sb, qid);
	if (err < 0)
		return err;
	dquot = dqget(sb, *qid);
	if (IS_ERR(dquot))
		return PTR_ERR(dquot);
	do_get_dqblk(dquot, di);
	dqput(dquot);

	return 0;
}
EXPORT_SYMBOL(dquot_get_next_dqblk);

#define VFS_QC_MASK \
	(QC_SPACE | QC_SPC_SOFT | QC_SPC_HARD | \
	 QC_INO_COUNT | QC_INO_SOFT | QC_INO_HARD | \
	 QC_SPC_TIMER | QC_INO_TIMER)

/* Generic routine for setting common part of quota structure */
static int do_set_dqblk(struct dquot *dquot, struct qc_dqblk *di)
{
	struct mem_dqblk *dm = &dquot->dq_dqb;
	int check_blim = 0, check_ilim = 0;
	struct mem_dqinfo *dqi = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_id.type];

	if (di->d_fieldmask & ~VFS_QC_MASK)
		return -EINVAL;

	if (((di->d_fieldmask & QC_SPC_SOFT) &&
	     di->d_spc_softlimit > dqi->dqi_max_spc_limit) ||
	    ((di->d_fieldmask & QC_SPC_HARD) &&
	     di->d_spc_hardlimit > dqi->dqi_max_spc_limit) ||
	    ((di->d_fieldmask & QC_INO_SOFT) &&
	     (di->d_ino_softlimit > dqi->dqi_max_ino_limit)) ||
	    ((di->d_fieldmask & QC_INO_HARD) &&
	     (di->d_ino_hardlimit > dqi->dqi_max_ino_limit)))
		return -ERANGE;

	spin_lock(&dquot->dq_dqb_lock);
	if (di->d_fieldmask & QC_SPACE) {
		dm->dqb_curspace = di->d_space - dm->dqb_rsvspace;
		check_blim = 1;
		set_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags);
	}

	if (di->d_fieldmask & QC_SPC_SOFT)
		dm->dqb_bsoftlimit = di->d_spc_softlimit;
	if (di->d_fieldmask & QC_SPC_HARD)
		dm->dqb_bhardlimit = di->d_spc_hardlimit;
	if (di->d_fieldmask & (QC_SPC_SOFT | QC_SPC_HARD)) {
		check_blim = 1;
		set_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags);
	}

	if (di->d_fieldmask & QC_INO_COUNT) {
		dm->dqb_curinodes = di->d_ino_count;
		check_ilim = 1;
		set_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags);
	}

	if (di->d_fieldmask & QC_INO_SOFT)
		dm->dqb_isoftlimit = di->d_ino_softlimit;
	if (di->d_fieldmask & QC_INO_HARD)
		dm->dqb_ihardlimit = di->d_ino_hardlimit;
	if (di->d_fieldmask & (QC_INO_SOFT | QC_INO_HARD)) {
		check_ilim = 1;
		set_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags);
	}

	if (di->d_fieldmask & QC_SPC_TIMER) {
		dm->dqb_btime = di->d_spc_timer;
		check_blim = 1;
		set_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags);
	}

	if (di->d_fieldmask & QC_INO_TIMER) {
		dm->dqb_itime = di->d_ino_timer;
		check_ilim = 1;
		set_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags);
	}

	if (check_blim) {
		if (!dm->dqb_bsoftlimit ||
		    dm->dqb_curspace + dm->dqb_rsvspace <= dm->dqb_bsoftlimit) {
			dm->dqb_btime = 0;
			clear_bit(DQ_BLKS_B, &dquot->dq_flags);
		} else if (!(di->d_fieldmask & QC_SPC_TIMER))
			/* Set grace only if user hasn't provided his own... */
			dm->dqb_btime = ktime_get_real_seconds() + dqi->dqi_bgrace;
	}
	if (check_ilim) {
		if (!dm->dqb_isoftlimit ||
		    dm->dqb_curinodes <= dm->dqb_isoftlimit) {
			dm->dqb_itime = 0;
			clear_bit(DQ_INODES_B, &dquot->dq_flags);
		} else if (!(di->d_fieldmask & QC_INO_TIMER))
			/* Set grace only if user hasn't provided his own... */
			dm->dqb_itime = ktime_get_real_seconds() + dqi->dqi_igrace;
	}
	if (dm->dqb_bhardlimit || dm->dqb_bsoftlimit || dm->dqb_ihardlimit ||
	    dm->dqb_isoftlimit)
		clear_bit(DQ_FAKE_B, &dquot->dq_flags);
	else
		set_bit(DQ_FAKE_B, &dquot->dq_flags);
	spin_unlock(&dquot->dq_dqb_lock);
	mark_dquot_dirty(dquot);

	return 0;
}

int dquot_set_dqblk(struct super_block *sb, struct kqid qid,
		    struct qc_dqblk *di)
{
	struct dquot *dquot;
	int rc;

	dquot = dqget(sb, qid);
	if (IS_ERR(dquot)) {
		rc = PTR_ERR(dquot);
		goto out;
	}
	rc = do_set_dqblk(dquot, di);
	dqput(dquot);
out:
	return rc;
}
EXPORT_SYMBOL(dquot_set_dqblk);

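/*
 * Illustrative sketch (not part of this file): a Q_SETQUOTA-style caller
 * fills only the fields named by d_fieldmask; space limits are in bytes.
 * For example, to set space limits for one user (sb and uid assumed to be
 * in scope):
 *
 *	struct qc_dqblk di = {
 *		.d_fieldmask = QC_SPC_SOFT | QC_SPC_HARD,
 *		.d_spc_softlimit = 100 << 20,	// 100 MiB
 *		.d_spc_hardlimit = 200 << 20,	// 200 MiB
 *	};
 *	int rc = dquot_set_dqblk(sb, make_kqid_uid(KUIDT_INIT(uid)), &di);
 */
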
/* Generic routine for getting common part of quota file information */
int dquot_get_state(struct super_block *sb, struct qc_state *state)
{
	struct mem_dqinfo *mi;
	struct qc_type_state *tstate;
	struct quota_info *dqopt = sb_dqopt(sb);
	int type;

	memset(state, 0, sizeof(*state));
	for (type = 0; type < MAXQUOTAS; type++) {
		if (!sb_has_quota_active(sb, type))
			continue;
		tstate = state->s_state + type;
		mi = sb_dqopt(sb)->info + type;
		tstate->flags = QCI_ACCT_ENABLED;
		spin_lock(&dq_data_lock);
		if (mi->dqi_flags & DQF_SYS_FILE)
			tstate->flags |= QCI_SYSFILE;
		if (mi->dqi_flags & DQF_ROOT_SQUASH)
			tstate->flags |= QCI_ROOT_SQUASH;
		if (sb_has_quota_limits_enabled(sb, type))
			tstate->flags |= QCI_LIMITS_ENFORCED;
		tstate->spc_timelimit = mi->dqi_bgrace;
		tstate->ino_timelimit = mi->dqi_igrace;
		if (dqopt->files[type]) {
			tstate->ino = dqopt->files[type]->i_ino;
			tstate->blocks = dqopt->files[type]->i_blocks;
		}
		tstate->nextents = 1;	/* We don't know... */
		spin_unlock(&dq_data_lock);
	}
	return 0;
}
EXPORT_SYMBOL(dquot_get_state);

/* Generic routine for setting common part of quota file information */
int dquot_set_dqinfo(struct super_block *sb, int type, struct qc_info *ii)
{
	struct mem_dqinfo *mi;
	int err = 0;

	if ((ii->i_fieldmask & QC_WARNS_MASK) ||
	    (ii->i_fieldmask & QC_RT_SPC_TIMER))
		return -EINVAL;
	if (!sb_has_quota_active(sb, type))
		return -ESRCH;
	mi = sb_dqopt(sb)->info + type;
	if (ii->i_fieldmask & QC_FLAGS) {
		if ((ii->i_flags & QCI_ROOT_SQUASH &&
		     mi->dqi_format->qf_fmt_id != QFMT_VFS_OLD))
			return -EINVAL;
	}
	spin_lock(&dq_data_lock);
	if (ii->i_fieldmask & QC_SPC_TIMER)
		mi->dqi_bgrace = ii->i_spc_timelimit;
	if (ii->i_fieldmask & QC_INO_TIMER)
		mi->dqi_igrace = ii->i_ino_timelimit;
	if (ii->i_fieldmask & QC_FLAGS) {
		if (ii->i_flags & QCI_ROOT_SQUASH)
			mi->dqi_flags |= DQF_ROOT_SQUASH;
		else
			mi->dqi_flags &= ~DQF_ROOT_SQUASH;
	}
	spin_unlock(&dq_data_lock);
	mark_info_dirty(sb, type);
	/* Force write to disk */
	sb->dq_op->write_info(sb, type);
	return err;
}
EXPORT_SYMBOL(dquot_set_dqinfo);

const struct quotactl_ops dquot_quotactl_sysfile_ops = {
	.quota_enable = dquot_quota_enable,
	.quota_disable = dquot_quota_disable,
	.quota_sync = dquot_quota_sync,
	.get_state = dquot_get_state,
	.set_info = dquot_set_dqinfo,
	.get_dqblk = dquot_get_dqblk,
	.get_nextdqblk = dquot_get_next_dqblk,
	.set_dqblk = dquot_set_dqblk
};
EXPORT_SYMBOL(dquot_quotactl_sysfile_ops);

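/*
 * Illustrative sketch (not part of this file): filesystems that keep their
 * quota state in hidden system files (DQUOT_QUOTA_SYS_FILE set) wire these
 * quotactl operations up at mount time:
 *
 *	sb->s_qcop = &dquot_quotactl_sysfile_ops;
 */
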
static int do_proc_dqstats(struct ctl_table *table, int write,
		     void *buffer, size_t *lenp, loff_t *ppos)
{
	unsigned int type = (unsigned long *)table->data - dqstats.stat;
	s64 value = percpu_counter_sum(&dqstats.counter[type]);

	/* Filter negative values for non-monotonic counters */
	if (value < 0 && (type == DQST_ALLOC_DQUOTS ||
			  type == DQST_FREE_DQUOTS))
		value = 0;

	/* Update global table */
	dqstats.stat[type] = value;
	return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
}

static struct ctl_table fs_dqstats_table[] = {
	{
		.procname = "lookups",
		.data = &dqstats.stat[DQST_LOOKUPS],
		.maxlen = sizeof(unsigned long),
		.mode = 0444,
		.proc_handler = do_proc_dqstats,
	},
	{
		.procname = "drops",
		.data = &dqstats.stat[DQST_DROPS],
		.maxlen = sizeof(unsigned long),
		.mode = 0444,
		.proc_handler = do_proc_dqstats,
	},
	{
		.procname = "reads",
		.data = &dqstats.stat[DQST_READS],
		.maxlen = sizeof(unsigned long),
		.mode = 0444,
		.proc_handler = do_proc_dqstats,
	},
	{
		.procname = "writes",
		.data = &dqstats.stat[DQST_WRITES],
		.maxlen = sizeof(unsigned long),
		.mode = 0444,
		.proc_handler = do_proc_dqstats,
	},
	{
		.procname = "cache_hits",
		.data = &dqstats.stat[DQST_CACHE_HITS],
		.maxlen = sizeof(unsigned long),
		.mode = 0444,
		.proc_handler = do_proc_dqstats,
	},
	{
		.procname = "allocated_dquots",
		.data = &dqstats.stat[DQST_ALLOC_DQUOTS],
		.maxlen = sizeof(unsigned long),
		.mode = 0444,
		.proc_handler = do_proc_dqstats,
	},
	{
		.procname = "free_dquots",
		.data = &dqstats.stat[DQST_FREE_DQUOTS],
		.maxlen = sizeof(unsigned long),
		.mode = 0444,
		.proc_handler = do_proc_dqstats,
	},
	{
		.procname = "syncs",
		.data = &dqstats.stat[DQST_SYNCS],
		.maxlen = sizeof(unsigned long),
		.mode = 0444,
		.proc_handler = do_proc_dqstats,
	},
#ifdef CONFIG_PRINT_QUOTA_WARNING
	{
		.procname = "warnings",
		.data = &flag_print_warnings,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec,
	},
#endif
	{ },
};

static struct ctl_table fs_table[] = {
	{
		.procname = "quota",
		.mode = 0555,
		.child = fs_dqstats_table,
	},
	{ },
};

static struct ctl_table sys_table[] = {
	{
		.procname = "fs",
		.mode = 0555,
		.child = fs_table,
	},
	{ },
};

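/*
 * The tables above surface the dquot statistics under /proc/sys/fs/quota/,
 * e.g. (shell sketch):
 *
 *	$ cat /proc/sys/fs/quota/lookups
 *	$ cat /proc/sys/fs/quota/free_dquots
 */
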
static int __init dquot_init(void)
{
	int i, ret;
	unsigned long nr_hash, order;

	printk(KERN_NOTICE "VFS: Disk quotas %s\n", __DQUOT_VERSION__);

	register_sysctl_table(sys_table);

	dquot_cachep = kmem_cache_create("dquot",
			sizeof(struct dquot), sizeof(unsigned long) * 4,
			(SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
				SLAB_MEM_SPREAD|SLAB_PANIC),
			NULL);

	order = 0;
	dquot_hash = (struct hlist_head *)__get_free_pages(GFP_KERNEL, order);
	if (!dquot_hash)
		panic("Cannot create dquot hash table");

	for (i = 0; i < _DQST_DQSTAT_LAST; i++) {
		ret = percpu_counter_init(&dqstats.counter[i], 0, GFP_KERNEL);
		if (ret)
			panic("Cannot create dquot stat counters");
	}

	/* Find power-of-two hlist_heads which can fit into allocation */
	nr_hash = (1UL << order) * PAGE_SIZE / sizeof(struct hlist_head);
	dq_hash_bits = ilog2(nr_hash);

	nr_hash = 1UL << dq_hash_bits;
	dq_hash_mask = nr_hash - 1;
	for (i = 0; i < nr_hash; i++)
		INIT_HLIST_HEAD(dquot_hash + i);

	pr_info("VFS: Dquot-cache hash table entries: %ld (order %ld,"
		" %ld bytes)\n", nr_hash, order, (PAGE_SIZE << order));

	if (register_shrinker(&dqcache_shrinker, "dquota-cache"))
		panic("Cannot register dquot shrinker");

	return 0;
}
fs_initcall(dquot_init);