// SPDX-License-Identifier: GPL-2.0
/*
 * Implementation of the diskquota system for the LINUX operating system. QUOTA
 * is implemented using the BSD system call interface as the means of
 * communication with the user level. This file contains the generic routines
 * called by the different filesystems on allocation of an inode or block.
 * These routines take care of the administration needed to have a consistent
 * diskquota tracking system. The ideas of both user and group quotas are based
 * on the Melbourne quota system as used on BSD derived systems. The internal
 * implementation is based on one of the several variants of the LINUX
 * inode-subsystem with added complexity of the diskquota system.
 *
 * Author:	Marco van Wieringen <mvw@planets.elm.net>
 *
 * Fixes:	Dmitry Gorodchanin <pgmdsg@ibi.com>, 11 Feb 96
 *
 *		Revised list management to avoid races
 *		-- Bill Hawes, <whawes@star.net>, 9/98
 *
 *		Fixed races in dquot_transfer(), dqget() and dquot_alloc_...().
 *		As a consequence the locking was moved from dquot_decr_...(),
 *		dquot_incr_...() to the calling functions.
 *		invalidate_dquots() now writes modified dquots.
 *		Serialized quota_off() and quota_on() for mount point.
 *		Fixed a few bugs in grow_dquots().
 *		Fixed deadlock in write_dquot() - we no longer account quotas on
 *		quota files
 *		remove_dquot_ref() moved to inode.c - it now traverses through inodes
 *		add_dquot_ref() restarts after blocking
 *		Added check for bogus uid and fixed check for group in quotactl.
 *		Jan Kara, <jack@suse.cz>, sponsored by SuSE CR, 10-11/99
 *
 *		Used struct list_head instead of own list struct
 *		Invalidation of referenced dquots is no longer possible
 *		Improved free_dquots list management
 *		Quota and i_blocks are now updated in one place to avoid races
 *		Warnings are now delayed so we won't block in critical section
 *		Write updated not to require dquot lock
 *		Jan Kara, <jack@suse.cz>, 9/2000
 *
 *		Added dynamic quota structure allocation
 *		Jan Kara <jack@suse.cz> 12/2000
 *
 *		Rewritten quota interface. Implemented new quota format and
 *		formats registering.
 *		Jan Kara, <jack@suse.cz>, 2001,2002
 *
 *		New SMP locking.
 *		Jan Kara, <jack@suse.cz>, 10/2002
 *
 *		Added journalled quota support, fix lock inversion problems
 *		Jan Kara, <jack@suse.cz>, 2003,2004
 *
 * (C) Copyright 1994 - 1997 Marco van Wieringen
 */
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/mm.h>
#include <linux/time.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/tty.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/security.h>
#include <linux/sched.h>
#include <linux/cred.h>
#include <linux/kmod.h>
#include <linux/namei.h>
#include <linux/capability.h>
#include <linux/quotaops.h>
#include <linux/blkdev.h>
#include <linux/sched/mm.h>
#include "../internal.h" /* ugh */

#include <linux/uaccess.h>
/*
 * There are five quota SMP locks:
 * * dq_list_lock protects all lists with quotas and quota formats.
 * * dquot->dq_dqb_lock protects data from dq_dqb
 * * inode->i_lock protects inode->i_blocks, i_bytes and also guards
 *   consistency of dquot->dq_dqb with inode->i_blocks, i_bytes so that
 *   dquot_transfer() can stabilize the amount it transfers
 * * dq_data_lock protects mem_dqinfo structures and modifications of dquot
 *   pointers in the inode
 * * dq_state_lock protects modifications of quota state (on quotaon and
 *   quotaoff) and readers who care about latest values take it as well.
 *
 * The spinlock ordering is hence:
 *   dq_data_lock > dq_list_lock > i_lock > dquot->dq_dqb_lock,
 *   dq_list_lock > dq_state_lock
 *
 * Note that some things (e.g. sb pointer, type, id) don't change during
 * the life of the dquot structure and so need not be protected by a lock.
 *
 * Operations accessing dquots via inode pointers are protected by dquot_srcu.
 * Readers of the dquot pointers need srcu_read_lock(&dquot_srcu), and
 * synchronize_srcu(&dquot_srcu) is called after clearing pointers from
 * inode and before dropping dquot references to avoid use of dquots after
 * they are freed. dq_data_lock is used to serialize the pointer setting and
 * clearing operations.
 * Special care needs to be taken about S_NOQUOTA inode flag (marking that
 * inode is a quota file). Functions adding pointers from inode to dquots have
 * to check this flag under dq_data_lock and then (if S_NOQUOTA is not set) they
 * have to do all pointer modifications before dropping dq_data_lock. This makes
 * sure they cannot race with quotaon which first sets S_NOQUOTA flag and
 * then drops all pointers to dquots from an inode.
 *
 * Each dquot has its dq_lock mutex. Dquot is locked when it is being read to
 * memory (or space for it is being allocated) on the first dqget(), when it is
 * being written out, and when it is being released on the last dqput(). The
 * allocation and release operations are serialized by the dq_lock and by
 * checking the use count in dquot_release().
 *
 * Lock ordering (including related VFS locks) is the following:
 *   s_umount > i_mutex > journal_lock > dquot->dq_lock > dqio_sem
 */
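/*
 * Illustrative sketch (a summary of the rules above, not additional
 * semantics): code nesting several of these spinlocks must follow the
 * ordering above. __dquot_transfer() below, for instance, acquires them as:
 *
 *	spin_lock(&dq_data_lock);
 *	spin_lock(&inode->i_lock);
 *	...
 *	spin_lock(&transfer_to[cnt]->dq_dqb_lock);	(innermost)
 *	spin_unlock(&transfer_to[cnt]->dq_dqb_lock);
 *	...
 *	spin_unlock(&inode->i_lock);
 *	spin_unlock(&dq_data_lock);
 */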
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_list_lock);
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_state_lock);
__cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_data_lock);
EXPORT_SYMBOL(dq_data_lock);
DEFINE_STATIC_SRCU(dquot_srcu);

static DECLARE_WAIT_QUEUE_HEAD(dquot_ref_wq);

void __quota_error(struct super_block *sb, const char *func,
		   const char *fmt, ...)
{
	if (printk_ratelimit()) {
		va_list args;
		struct va_format vaf;

		va_start(args, fmt);

		vaf.fmt = fmt;
		vaf.va = &args;

		printk(KERN_ERR "Quota error (device %s): %s: %pV\n",
		       sb->s_id, func, &vaf);

		va_end(args);
	}
}
EXPORT_SYMBOL(__quota_error);

#if defined(CONFIG_QUOTA_DEBUG) || defined(CONFIG_PRINT_QUOTA_WARNING)
static char *quotatypes[] = INITQFNAMES;
#endif
static struct quota_format_type *quota_formats;	/* List of registered formats */
static struct quota_module_name module_names[] = INIT_QUOTA_MODULE_NAMES;

/* SLAB cache for dquot structures */
static struct kmem_cache *dquot_cachep;

int register_quota_format(struct quota_format_type *fmt)
{
	spin_lock(&dq_list_lock);
	fmt->qf_next = quota_formats;
	quota_formats = fmt;
	spin_unlock(&dq_list_lock);
	return 0;
}
EXPORT_SYMBOL(register_quota_format);
void unregister_quota_format(struct quota_format_type *fmt)
{
	struct quota_format_type **actqf;

	spin_lock(&dq_list_lock);
	for (actqf = &quota_formats; *actqf && *actqf != fmt;
	     actqf = &(*actqf)->qf_next)
		;
	if (*actqf)
		*actqf = (*actqf)->qf_next;
	spin_unlock(&dq_list_lock);
}
EXPORT_SYMBOL(unregister_quota_format);
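/*
 * Illustrative sketch (assumption, not taken from this file): a quota
 * format driver registers itself on module load and unregisters on unload,
 * e.g.:
 *
 *	static struct quota_format_type myfmt_quota_format = {
 *		.qf_fmt_id	= QFMT_VFS_V1,
 *		.qf_ops		= &myfmt_ops,
 *		.qf_owner	= THIS_MODULE,
 *	};
 *
 *	static int __init init_myfmt_quota_format(void)
 *	{
 *		return register_quota_format(&myfmt_quota_format);
 *	}
 *
 *	static void __exit exit_myfmt_quota_format(void)
 *	{
 *		unregister_quota_format(&myfmt_quota_format);
 *	}
 */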
static struct quota_format_type *find_quota_format(int id)
{
	struct quota_format_type *actqf;

	spin_lock(&dq_list_lock);
	for (actqf = quota_formats; actqf && actqf->qf_fmt_id != id;
	     actqf = actqf->qf_next)
		;
	if (!actqf || !try_module_get(actqf->qf_owner)) {
		int qm;

		spin_unlock(&dq_list_lock);

		for (qm = 0; module_names[qm].qm_fmt_id &&
			     module_names[qm].qm_fmt_id != id; qm++)
			;
		if (!module_names[qm].qm_fmt_id ||
		    request_module(module_names[qm].qm_mod_name))
			return NULL;

		spin_lock(&dq_list_lock);
		for (actqf = quota_formats; actqf && actqf->qf_fmt_id != id;
		     actqf = actqf->qf_next)
			;
		if (actqf && !try_module_get(actqf->qf_owner))
			actqf = NULL;
	}
	spin_unlock(&dq_list_lock);
	return actqf;
}

static void put_quota_format(struct quota_format_type *fmt)
{
	module_put(fmt->qf_owner);
}

/*
 * Dquot List Management:
 * The quota code uses five lists for dquot management: the inuse_list,
 * releasing_dquots, free_dquots, dqi_dirty_list, and dquot_hash[] array.
 * A single dquot structure may be on some of those lists, depending on
 * its current state.
 *
 * All dquots are placed to the end of inuse_list when first created, and this
 * list is used for invalidate operation, which must look at every dquot.
 *
 * When the last reference of a dquot is dropped, the dquot is added to
 * releasing_dquots. We'll then queue work item which will call
 * synchronize_srcu() and after that perform the final cleanup of all the
 * dquots on the list. Each cleaned up dquot is moved to free_dquots list.
 * Both releasing_dquots and free_dquots use the dq_free list_head in the dquot
 * struct.
 *
 * Unused and cleaned up dquots are in the free_dquots list and this list is
 * searched whenever we need an available dquot. Dquots are removed from the
 * list as soon as they are used again and dqstats.free_dquots gives the number
 * of dquots on the list. When dquot is invalidated it's completely released
 * from memory.
 *
 * Dirty dquots are added to the dqi_dirty_list of quota_info when they are
 * marked dirty, and this list is searched when writing dirty dquots back to
 * quota file. Note that some filesystems do dirty dquot tracking on their
 * own (e.g. in a journal) and thus don't use dqi_dirty_list.
 *
 * Dquots with a specific identity (device, type and id) are placed on
 * one of the dquot_hash[] hash chains. This provides an efficient search
 * mechanism to locate a specific dquot.
 */
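/*
 * Illustrative lifecycle (a summary of the rules above, not additional
 * semantics): a dquot created by dqget() goes onto inuse_list and one hash
 * chain; the last dqput() moves it to releasing_dquots; after the final
 * cleanup in quota_release_workfn() it sits on free_dquots until it is
 * either reused by dqget(), reclaimed by the shrinker, or destroyed by
 * invalidate_dquots().
 */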
static LIST_HEAD(inuse_list);
static LIST_HEAD(free_dquots);
static LIST_HEAD(releasing_dquots);
static unsigned int dq_hash_bits, dq_hash_mask;
static struct hlist_head *dquot_hash;

struct dqstats dqstats;
EXPORT_SYMBOL(dqstats);

static qsize_t inode_get_rsv_space(struct inode *inode);
static qsize_t __inode_get_rsv_space(struct inode *inode);
static int __dquot_initialize(struct inode *inode, int type);

static void quota_release_workfn(struct work_struct *work);
static DECLARE_DELAYED_WORK(quota_release_work, quota_release_workfn);

static inline unsigned int
hashfn(const struct super_block *sb, struct kqid qid)
{
	unsigned int id = from_kqid(&init_user_ns, qid);
	int type = qid.type;
	unsigned long tmp;

	tmp = (((unsigned long)sb >> L1_CACHE_SHIFT) ^ id) * (MAXQUOTAS - type);
	return (tmp + (tmp >> dq_hash_bits)) & dq_hash_mask;
}

/*
 * Following list functions expect dq_list_lock to be held
 */
static inline void insert_dquot_hash(struct dquot *dquot)
{
	struct hlist_head *head;

	head = dquot_hash + hashfn(dquot->dq_sb, dquot->dq_id);
	hlist_add_head(&dquot->dq_hash, head);
}

static inline void remove_dquot_hash(struct dquot *dquot)
{
	hlist_del_init(&dquot->dq_hash);
}

static struct dquot *find_dquot(unsigned int hashent, struct super_block *sb,
				struct kqid qid)
{
	struct dquot *dquot;

	hlist_for_each_entry(dquot, dquot_hash+hashent, dq_hash)
		if (dquot->dq_sb == sb && qid_eq(dquot->dq_id, qid))
			return dquot;

	return NULL;
}

/* Add a dquot to the tail of the free list */
static inline void put_dquot_last(struct dquot *dquot)
{
	list_add_tail(&dquot->dq_free, &free_dquots);
	dqstats_inc(DQST_FREE_DQUOTS);
}

static inline void put_releasing_dquots(struct dquot *dquot)
{
	list_add_tail(&dquot->dq_free, &releasing_dquots);
	set_bit(DQ_RELEASING_B, &dquot->dq_flags);
}

static inline void remove_free_dquot(struct dquot *dquot)
{
	if (list_empty(&dquot->dq_free))
		return;
	list_del_init(&dquot->dq_free);
	if (!test_bit(DQ_RELEASING_B, &dquot->dq_flags))
		dqstats_dec(DQST_FREE_DQUOTS);
	else
		clear_bit(DQ_RELEASING_B, &dquot->dq_flags);
}

static inline void put_inuse(struct dquot *dquot)
{
	/* We add to the back of inuse list so we don't have to restart
	 * when traversing this list and we block */
	list_add_tail(&dquot->dq_inuse, &inuse_list);
	dqstats_inc(DQST_ALLOC_DQUOTS);
}

static inline void remove_inuse(struct dquot *dquot)
{
	dqstats_dec(DQST_ALLOC_DQUOTS);
	list_del(&dquot->dq_inuse);
}
/*
 * End of list functions needing dq_list_lock
 */
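/*
 * Wait for any pending first-time read, write-out or release of the dquot
 * to finish. Cycling dq_lock is sufficient because each of those
 * operations holds dq_lock for its whole critical section (see
 * dquot_acquire() and dquot_release() below).
 */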
static void wait_on_dquot(struct dquot *dquot)
{
	mutex_lock(&dquot->dq_lock);
	mutex_unlock(&dquot->dq_lock);
}

static inline int dquot_active(struct dquot *dquot)
{
	return test_bit(DQ_ACTIVE_B, &dquot->dq_flags);
}

static inline int dquot_dirty(struct dquot *dquot)
{
	return test_bit(DQ_MOD_B, &dquot->dq_flags);
}

static inline int mark_dquot_dirty(struct dquot *dquot)
{
	return dquot->dq_sb->dq_op->mark_dirty(dquot);
}

/* Mark dquot dirty in atomic manner, and return its old dirty flag state */
int dquot_mark_dquot_dirty(struct dquot *dquot)
{
	int ret = 1;

	if (!dquot_active(dquot))
		return 0;

	if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NOLIST_DIRTY)
		return test_and_set_bit(DQ_MOD_B, &dquot->dq_flags);

	/* If quota is dirty already, we don't have to acquire dq_list_lock */
	if (dquot_dirty(dquot))
		return 1;

	spin_lock(&dq_list_lock);
	if (!test_and_set_bit(DQ_MOD_B, &dquot->dq_flags)) {
		list_add(&dquot->dq_dirty, &sb_dqopt(dquot->dq_sb)->
				info[dquot->dq_id.type].dqi_dirty_list);
		ret = 0;
	}
	spin_unlock(&dq_list_lock);
	return ret;
}
EXPORT_SYMBOL(dquot_mark_dquot_dirty);

/* Dirtify all the dquots - this can block when journalling */
static inline int mark_all_dquot_dirty(struct dquot __rcu * const *dquots)
{
	int ret, err, cnt;
	struct dquot *dquot;

	ret = err = 0;
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
		if (dquot)
			/* Even in case of error we have to continue */
			ret = mark_dquot_dirty(dquot);
		if (!err && ret < 0)
			err = ret;
	}
	return err;
}

static inline void dqput_all(struct dquot **dquot)
{
	unsigned int cnt;

	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		dqput(dquot[cnt]);
}

static inline int clear_dquot_dirty(struct dquot *dquot)
{
	if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NOLIST_DIRTY)
		return test_and_clear_bit(DQ_MOD_B, &dquot->dq_flags);

	spin_lock(&dq_list_lock);
	if (!test_and_clear_bit(DQ_MOD_B, &dquot->dq_flags)) {
		spin_unlock(&dq_list_lock);
		return 0;
	}
	list_del_init(&dquot->dq_dirty);
	spin_unlock(&dq_list_lock);
	return 1;
}

void mark_info_dirty(struct super_block *sb, int type)
{
	spin_lock(&dq_data_lock);
	sb_dqopt(sb)->info[type].dqi_flags |= DQF_INFO_DIRTY;
	spin_unlock(&dq_data_lock);
}
EXPORT_SYMBOL(mark_info_dirty);
/*
 * Read dquot from disk and alloc space for it
 */
int dquot_acquire(struct dquot *dquot)
{
	int ret = 0, ret2 = 0;
	unsigned int memalloc;
	struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);

	mutex_lock(&dquot->dq_lock);
	memalloc = memalloc_nofs_save();
	if (!test_bit(DQ_READ_B, &dquot->dq_flags)) {
		ret = dqopt->ops[dquot->dq_id.type]->read_dqblk(dquot);
		if (ret < 0)
			goto out_iolock;
	}
	/* Make sure flags update is visible after dquot has been filled */
	smp_mb__before_atomic();
	set_bit(DQ_READ_B, &dquot->dq_flags);
	/* Instantiate dquot if needed */
	if (!dquot_active(dquot) && !dquot->dq_off) {
		ret = dqopt->ops[dquot->dq_id.type]->commit_dqblk(dquot);
		/* Write the info if needed */
		if (info_dirty(&dqopt->info[dquot->dq_id.type])) {
			ret2 = dqopt->ops[dquot->dq_id.type]->write_file_info(
					dquot->dq_sb, dquot->dq_id.type);
		}
		if (ret < 0)
			goto out_iolock;
		if (ret2 < 0) {
			ret = ret2;
			goto out_iolock;
		}
	}
	/*
	 * Make sure flags update is visible after on-disk struct has been
	 * allocated. Paired with smp_rmb() in dqget().
	 */
	smp_mb__before_atomic();
	set_bit(DQ_ACTIVE_B, &dquot->dq_flags);
out_iolock:
	memalloc_nofs_restore(memalloc);
	mutex_unlock(&dquot->dq_lock);
	return ret;
}
EXPORT_SYMBOL(dquot_acquire);
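/*
 * Illustrative sketch (assumption, not taken from this file): journalled
 * filesystems usually wrap dquot_acquire() in their ->acquire_dquot so the
 * quota-file update becomes part of a transaction, e.g.:
 *
 *	static int myfs_acquire_dquot(struct dquot *dquot)
 *	{
 *		handle = myfs_journal_start(dquot->dq_sb, ...);
 *		if (IS_ERR(handle))
 *			return PTR_ERR(handle);
 *		ret = dquot_acquire(dquot);
 *		err = myfs_journal_stop(handle);
 *		return ret ?: err;
 *	}
 */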
/*
 * Write dquot to disk
 */
int dquot_commit(struct dquot *dquot)
{
	int ret = 0;
	unsigned int memalloc;
	struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);

	mutex_lock(&dquot->dq_lock);
	memalloc = memalloc_nofs_save();
	if (!clear_dquot_dirty(dquot))
		goto out_lock;
	/* An inactive dquot can only exist if there was an error during
	 * read/init => we'd better not write it */
	if (dquot_active(dquot))
		ret = dqopt->ops[dquot->dq_id.type]->commit_dqblk(dquot);
	else
		ret = -EIO;
out_lock:
	memalloc_nofs_restore(memalloc);
	mutex_unlock(&dquot->dq_lock);
	return ret;
}
EXPORT_SYMBOL(dquot_commit);

/*
 * Release dquot
 */
int dquot_release(struct dquot *dquot)
{
	int ret = 0, ret2 = 0;
	unsigned int memalloc;
	struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);

	mutex_lock(&dquot->dq_lock);
	memalloc = memalloc_nofs_save();
	/* Check whether we are not racing with some other dqget() */
	if (dquot_is_busy(dquot))
		goto out_dqlock;
	if (dqopt->ops[dquot->dq_id.type]->release_dqblk) {
		ret = dqopt->ops[dquot->dq_id.type]->release_dqblk(dquot);
		/* Write the info */
		if (info_dirty(&dqopt->info[dquot->dq_id.type])) {
			ret2 = dqopt->ops[dquot->dq_id.type]->write_file_info(
						dquot->dq_sb, dquot->dq_id.type);
		}
		if (ret >= 0)
			ret = ret2;
	}
	clear_bit(DQ_ACTIVE_B, &dquot->dq_flags);
out_dqlock:
	memalloc_nofs_restore(memalloc);
	mutex_unlock(&dquot->dq_lock);
	return ret;
}
EXPORT_SYMBOL(dquot_release);

void dquot_destroy(struct dquot *dquot)
{
	kmem_cache_free(dquot_cachep, dquot);
}
EXPORT_SYMBOL(dquot_destroy);

static inline void do_destroy_dquot(struct dquot *dquot)
{
	dquot->dq_sb->dq_op->destroy_dquot(dquot);
}
/* Invalidate all dquots on the list. Note that this function is called after
 * quota is disabled and pointers from inodes removed so there cannot be new
 * quota users. There can still be some users of quotas due to inodes being
 * just deleted or pruned by prune_icache() (those are not attached to any
 * list) or parallel quotactl call. We have to wait for such users.
 */
static void invalidate_dquots(struct super_block *sb, int type)
{
	struct dquot *dquot, *tmp;

restart:
	flush_delayed_work(&quota_release_work);

	spin_lock(&dq_list_lock);
	list_for_each_entry_safe(dquot, tmp, &inuse_list, dq_inuse) {
		if (dquot->dq_sb != sb)
			continue;
		if (dquot->dq_id.type != type)
			continue;
		/* Wait for dquot users */
		if (atomic_read(&dquot->dq_count)) {
			atomic_inc(&dquot->dq_count);
			spin_unlock(&dq_list_lock);
			/*
			 * Once dqput() wakes us up, we know it's time to free
			 * the dquot.
			 * IMPORTANT: we rely on the fact that there is always
			 * at most one process waiting for dquot to free.
			 * Otherwise dq_count would be > 1 and we would never
			 * wake up.
			 */
			wait_event(dquot_ref_wq,
				   atomic_read(&dquot->dq_count) == 1);
			dqput(dquot);
			/* At this moment the dquot need not exist (it could
			 * be reclaimed by prune_dqcache()). Hence we must
			 * restart. */
			goto restart;
		}
		/*
		 * The last user already dropped its reference but dquot didn't
		 * get fully cleaned up yet. Restart the scan which flushes the
		 * work cleaning up released dquots.
		 */
		if (test_bit(DQ_RELEASING_B, &dquot->dq_flags)) {
			spin_unlock(&dq_list_lock);
			goto restart;
		}
		/*
		 * Quota now has no users and it has been written on last
		 * dqput()
		 */
		remove_dquot_hash(dquot);
		remove_free_dquot(dquot);
		remove_inuse(dquot);
		do_destroy_dquot(dquot);
	}
	spin_unlock(&dq_list_lock);
}

/* Call callback for every active dquot on given filesystem */
int dquot_scan_active(struct super_block *sb,
		      int (*fn)(struct dquot *dquot, unsigned long priv),
		      unsigned long priv)
{
	struct dquot *dquot, *old_dquot = NULL;
	int ret = 0;

	WARN_ON_ONCE(!rwsem_is_locked(&sb->s_umount));

	spin_lock(&dq_list_lock);
	list_for_each_entry(dquot, &inuse_list, dq_inuse) {
		if (!dquot_active(dquot))
			continue;
		if (dquot->dq_sb != sb)
			continue;
		/* Now we have active dquot so we can just increase use count */
		atomic_inc(&dquot->dq_count);
		spin_unlock(&dq_list_lock);
		dqput(old_dquot);
		old_dquot = dquot;
		/*
		 * ->release_dquot() can be racing with us. Our reference
		 * protects us from new calls to it so just wait for any
		 * outstanding call and recheck the DQ_ACTIVE_B after that.
		 */
		wait_on_dquot(dquot);
		if (dquot_active(dquot)) {
			ret = fn(dquot, priv);
			if (ret < 0)
				goto out;
		}
		spin_lock(&dq_list_lock);
		/* We are safe to continue now because our dquot could not
		 * be moved out of the inuse list while we hold the reference */
	}
	spin_unlock(&dq_list_lock);
out:
	dqput(old_dquot);
	return ret;
}
EXPORT_SYMBOL(dquot_scan_active);

static inline int dquot_write_dquot(struct dquot *dquot)
{
	int ret = dquot->dq_sb->dq_op->write_dquot(dquot);
	if (ret < 0) {
		quota_error(dquot->dq_sb, "Can't write quota structure "
			    "(error %d). Quota may get out of sync!", ret);
		/* Clear dirty bit anyway to avoid infinite loop. */
		clear_dquot_dirty(dquot);
	}
	return ret;
}
/* Write all dquot structures to quota files */
int dquot_writeback_dquots(struct super_block *sb, int type)
{
	struct list_head dirty;
	struct dquot *dquot;
	struct quota_info *dqopt = sb_dqopt(sb);
	int cnt;
	int err, ret = 0;

	WARN_ON_ONCE(!rwsem_is_locked(&sb->s_umount));

	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (type != -1 && cnt != type)
			continue;
		if (!sb_has_quota_active(sb, cnt))
			continue;
		spin_lock(&dq_list_lock);
		/* Move list away to avoid livelock. */
		list_replace_init(&dqopt->info[cnt].dqi_dirty_list, &dirty);
		while (!list_empty(&dirty)) {
			dquot = list_first_entry(&dirty, struct dquot,
						 dq_dirty);

			WARN_ON(!dquot_active(dquot));
			/* If the dquot is releasing we should not touch it */
			if (test_bit(DQ_RELEASING_B, &dquot->dq_flags)) {
				spin_unlock(&dq_list_lock);
				flush_delayed_work(&quota_release_work);
				spin_lock(&dq_list_lock);
				continue;
			}

			/* Now we have active dquot from which someone is
			 * holding reference so we can safely just increase
			 * use count */
			dqgrab(dquot);
			spin_unlock(&dq_list_lock);
			err = dquot_write_dquot(dquot);
			if (err && !ret)
				ret = err;
			dqput(dquot);
			spin_lock(&dq_list_lock);
		}
		spin_unlock(&dq_list_lock);
	}

	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		if ((cnt == type || type == -1) && sb_has_quota_active(sb, cnt)
		    && info_dirty(&dqopt->info[cnt]))
			sb->dq_op->write_info(sb, cnt);
	dqstats_inc(DQST_SYNCS);

	return ret;
}
EXPORT_SYMBOL(dquot_writeback_dquots);

/* Write all dquot structures to disk and make them visible from userspace */
int dquot_quota_sync(struct super_block *sb, int type)
{
	struct quota_info *dqopt = sb_dqopt(sb);
	int cnt;
	int ret;

	ret = dquot_writeback_dquots(sb, type);
	if (ret)
		return ret;
	if (dqopt->flags & DQUOT_QUOTA_SYS_FILE)
		return 0;

	/* This is not very clever (and fast) but currently I don't know about
	 * any other simple way of getting quota data to disk and we must get
	 * them there for userspace to be visible... */
	if (sb->s_op->sync_fs) {
		ret = sb->s_op->sync_fs(sb, 1);
		if (ret)
			return ret;
	}
	ret = sync_blockdev(sb->s_bdev);
	if (ret)
		return ret;

	/*
	 * Now when everything is written we can discard the pagecache so
	 * that userspace sees the changes.
	 */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (type != -1 && cnt != type)
			continue;
		if (!sb_has_quota_active(sb, cnt))
			continue;
		inode_lock(dqopt->files[cnt]);
		truncate_inode_pages(&dqopt->files[cnt]->i_data, 0);
		inode_unlock(dqopt->files[cnt]);
	}

	return 0;
}
EXPORT_SYMBOL(dquot_quota_sync);

static unsigned long
dqcache_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	struct dquot *dquot;
	unsigned long freed = 0;

	spin_lock(&dq_list_lock);
	while (!list_empty(&free_dquots) && sc->nr_to_scan) {
		dquot = list_first_entry(&free_dquots, struct dquot, dq_free);
		remove_dquot_hash(dquot);
		remove_free_dquot(dquot);
		remove_inuse(dquot);
		do_destroy_dquot(dquot);
		sc->nr_to_scan--;
		freed++;
	}
	spin_unlock(&dq_list_lock);
	return freed;
}

static unsigned long
dqcache_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
	return vfs_pressure_ratio(
	percpu_counter_read_positive(&dqstats.counter[DQST_FREE_DQUOTS]));
}
/*
 * Safely release dquot and put reference to dquot.
 */
static void quota_release_workfn(struct work_struct *work)
{
	struct dquot *dquot;
	struct list_head rls_head;

	spin_lock(&dq_list_lock);
	/* Exchange the list head to avoid livelock. */
	list_replace_init(&releasing_dquots, &rls_head);
	spin_unlock(&dq_list_lock);
	synchronize_srcu(&dquot_srcu);

restart:
	spin_lock(&dq_list_lock);
	while (!list_empty(&rls_head)) {
		dquot = list_first_entry(&rls_head, struct dquot, dq_free);
		WARN_ON_ONCE(atomic_read(&dquot->dq_count));
		/*
		 * Note that DQ_RELEASING_B protects us from racing with
		 * invalidate_dquots() calls so we are safe to work with the
		 * dquot even after we drop dq_list_lock.
		 */
		if (dquot_dirty(dquot)) {
			spin_unlock(&dq_list_lock);
			/* Commit dquot before releasing */
			dquot_write_dquot(dquot);
			goto restart;
		}
		if (dquot_active(dquot)) {
			spin_unlock(&dq_list_lock);
			dquot->dq_sb->dq_op->release_dquot(dquot);
			goto restart;
		}
		/* Dquot is inactive and clean, now move it to free list */
		remove_free_dquot(dquot);
		put_dquot_last(dquot);
	}
	spin_unlock(&dq_list_lock);
}

/*
 * Put reference to dquot
 */
void dqput(struct dquot *dquot)
{
	if (!dquot)
		return;
#ifdef CONFIG_QUOTA_DEBUG
	if (!atomic_read(&dquot->dq_count)) {
		quota_error(dquot->dq_sb, "trying to free free dquot of %s %d",
			    quotatypes[dquot->dq_id.type],
			    from_kqid(&init_user_ns, dquot->dq_id));
		BUG();
	}
#endif
	dqstats_inc(DQST_DROPS);

	spin_lock(&dq_list_lock);
	if (atomic_read(&dquot->dq_count) > 1) {
		/* We have more than one user... nothing to do */
		atomic_dec(&dquot->dq_count);
		/* Releasing dquot during quotaoff phase? */
		if (!sb_has_quota_active(dquot->dq_sb, dquot->dq_id.type) &&
		    atomic_read(&dquot->dq_count) == 1)
			wake_up(&dquot_ref_wq);
		spin_unlock(&dq_list_lock);
		return;
	}

	/* Need to release dquot? */
	WARN_ON_ONCE(!list_empty(&dquot->dq_free));
	put_releasing_dquots(dquot);
	atomic_dec(&dquot->dq_count);
	spin_unlock(&dq_list_lock);
	queue_delayed_work(system_unbound_wq, &quota_release_work, 1);
}
EXPORT_SYMBOL(dqput);

struct dquot *dquot_alloc(struct super_block *sb, int type)
{
	return kmem_cache_zalloc(dquot_cachep, GFP_NOFS);
}
EXPORT_SYMBOL(dquot_alloc);

static struct dquot *get_empty_dquot(struct super_block *sb, int type)
{
	struct dquot *dquot;

	dquot = sb->dq_op->alloc_dquot(sb, type);
	if (!dquot)
		return NULL;

	mutex_init(&dquot->dq_lock);
	INIT_LIST_HEAD(&dquot->dq_free);
	INIT_LIST_HEAD(&dquot->dq_inuse);
	INIT_HLIST_NODE(&dquot->dq_hash);
	INIT_LIST_HEAD(&dquot->dq_dirty);
	dquot->dq_sb = sb;
	dquot->dq_id = make_kqid_invalid(type);
	atomic_set(&dquot->dq_count, 1);
	spin_lock_init(&dquot->dq_dqb_lock);

	return dquot;
}
/*
 * Get reference to dquot
 *
 * Locking is slightly tricky here. We are guarded from parallel quotaoff()
 * destroying our dquot by:
 *   a) checking for quota flags under dq_list_lock and
 *   b) getting a reference to dquot before we release dq_list_lock
 */
struct dquot *dqget(struct super_block *sb, struct kqid qid)
{
	unsigned int hashent = hashfn(sb, qid);
	struct dquot *dquot, *empty = NULL;

	if (!qid_has_mapping(sb->s_user_ns, qid))
		return ERR_PTR(-EINVAL);

	if (!sb_has_quota_active(sb, qid.type))
		return ERR_PTR(-ESRCH);
we_slept:
	spin_lock(&dq_list_lock);
	spin_lock(&dq_state_lock);
	if (!sb_has_quota_active(sb, qid.type)) {
		spin_unlock(&dq_state_lock);
		spin_unlock(&dq_list_lock);
		dquot = ERR_PTR(-ESRCH);
		goto out;
	}
	spin_unlock(&dq_state_lock);

	dquot = find_dquot(hashent, sb, qid);
	if (!dquot) {
		if (!empty) {
			spin_unlock(&dq_list_lock);
			empty = get_empty_dquot(sb, qid.type);
			if (!empty)
				schedule();	/* Try to wait for a moment... */
			goto we_slept;
		}
		dquot = empty;
		empty = NULL;
		dquot->dq_id = qid;
		/* all dquots go on the inuse_list */
		put_inuse(dquot);
		/* hash it first so it can be found */
		insert_dquot_hash(dquot);
		spin_unlock(&dq_list_lock);
		dqstats_inc(DQST_LOOKUPS);
	} else {
		if (!atomic_read(&dquot->dq_count))
			remove_free_dquot(dquot);
		atomic_inc(&dquot->dq_count);
		spin_unlock(&dq_list_lock);
		dqstats_inc(DQST_CACHE_HITS);
		dqstats_inc(DQST_LOOKUPS);
	}
	/* Wait for dq_lock - after this we know that either dquot_release() is
	 * already finished or it will be canceled due to dq_count > 0 test */
	wait_on_dquot(dquot);
	/* Read the dquot / allocate space in quota file */
	if (!dquot_active(dquot)) {
		int err;

		err = sb->dq_op->acquire_dquot(dquot);
		if (err < 0) {
			dqput(dquot);
			dquot = ERR_PTR(err);
			goto out;
		}
	}
	/*
	 * Make sure following reads see filled structure - paired with
	 * smp_mb__before_atomic() in dquot_acquire().
	 */
	smp_rmb();
	/* Has somebody invalidated entry under us? */
	WARN_ON_ONCE(hlist_unhashed(&dquot->dq_hash));
out:
	if (empty)
		do_destroy_dquot(empty);

	return dquot;
}
EXPORT_SYMBOL(dqget);
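/*
 * Illustrative usage sketch (assumption, not taken from this file): callers
 * pair dqget() with dqput() and must check for an error pointer, e.g.:
 *
 *	struct dquot *dquot = dqget(sb, make_kqid_uid(uid));
 *
 *	if (!IS_ERR(dquot)) {
 *		... read dquot->dq_dqb under dquot->dq_dqb_lock ...
 *		dqput(dquot);
 *	}
 */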
static inline struct dquot __rcu **i_dquot(struct inode *inode)
{
	return inode->i_sb->s_op->get_dquots(inode);
}

static int dqinit_needed(struct inode *inode, int type)
{
	struct dquot __rcu * const *dquots;
	int cnt;

	if (IS_NOQUOTA(inode))
		return 0;

	dquots = i_dquot(inode);
	if (type != -1)
		return !dquots[type];
	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		if (!dquots[cnt])
			return 1;
	return 0;
}

/* This routine is guarded by s_umount semaphore */
static int add_dquot_ref(struct super_block *sb, int type)
{
	struct inode *inode, *old_inode = NULL;
#ifdef CONFIG_QUOTA_DEBUG
	int reserved = 0;
#endif
	int err = 0;

	spin_lock(&sb->s_inode_list_lock);
	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
		spin_lock(&inode->i_lock);
		if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) ||
		    !atomic_read(&inode->i_writecount) ||
		    !dqinit_needed(inode, type)) {
			spin_unlock(&inode->i_lock);
			continue;
		}
		__iget(inode);
		spin_unlock(&inode->i_lock);
		spin_unlock(&sb->s_inode_list_lock);

#ifdef CONFIG_QUOTA_DEBUG
		if (unlikely(inode_get_rsv_space(inode) > 0))
			reserved = 1;
#endif
		iput(old_inode);
		err = __dquot_initialize(inode, type);
		if (err) {
			iput(inode);
			goto out;
		}

		/*
		 * We hold a reference to 'inode' so it couldn't have been
		 * removed from s_inodes list while we dropped the
		 * s_inode_list_lock. We cannot iput the inode now as we can be
		 * holding the last reference and we cannot iput it under
		 * s_inode_list_lock. So we keep the reference and iput it
		 * later.
		 */
		old_inode = inode;
		cond_resched();
		spin_lock(&sb->s_inode_list_lock);
	}
	spin_unlock(&sb->s_inode_list_lock);
	iput(old_inode);
out:
#ifdef CONFIG_QUOTA_DEBUG
	if (reserved) {
		quota_error(sb, "Writes happened before quota was turned on "
			"thus quota information is probably inconsistent. "
			"Please run quotacheck(8)");
	}
#endif
	return err;
}

static void remove_dquot_ref(struct super_block *sb, int type)
{
	struct inode *inode;
#ifdef CONFIG_QUOTA_DEBUG
	int reserved = 0;
#endif

	spin_lock(&sb->s_inode_list_lock);
	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
		/*
		 * We have to scan also I_NEW inodes because they can already
		 * have quota pointer initialized. Luckily, we need to touch
		 * only quota pointers and these have separate locking
		 * (dq_data_lock).
		 */
		spin_lock(&dq_data_lock);
		if (!IS_NOQUOTA(inode)) {
			struct dquot __rcu **dquots = i_dquot(inode);
			struct dquot *dquot = srcu_dereference_check(
				dquots[type], &dquot_srcu,
				lockdep_is_held(&dq_data_lock));

#ifdef CONFIG_QUOTA_DEBUG
			if (unlikely(inode_get_rsv_space(inode) > 0))
				reserved = 1;
#endif
			rcu_assign_pointer(dquots[type], NULL);
			if (dquot)
				dqput(dquot);
		}
		spin_unlock(&dq_data_lock);
	}
	spin_unlock(&sb->s_inode_list_lock);
#ifdef CONFIG_QUOTA_DEBUG
	if (reserved) {
		printk(KERN_WARNING "VFS (%s): Writes happened after quota"
			" was disabled thus quota information is probably "
			"inconsistent. Please run quotacheck(8).\n", sb->s_id);
	}
#endif
}

/* Gather all references from inodes and drop them */
static void drop_dquot_ref(struct super_block *sb, int type)
{
	if (sb->dq_op) {
		remove_dquot_ref(sb, type);
		synchronize_srcu(&dquot_srcu);
	}
}
void dquot_free_reserved_space(struct dquot *dquot, qsize_t number)
{
	if (dquot->dq_dqb.dqb_rsvspace >= number)
		dquot->dq_dqb.dqb_rsvspace -= number;
	else {
		WARN_ON_ONCE(1);
		dquot->dq_dqb.dqb_rsvspace = 0;
	}
	if (dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace <=
	    dquot->dq_dqb.dqb_bsoftlimit)
		dquot->dq_dqb.dqb_btime = (time64_t) 0;
	clear_bit(DQ_BLKS_B, &dquot->dq_flags);
}

static void dquot_decr_inodes(struct dquot *dquot, qsize_t number)
{
	if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NEGATIVE_USAGE ||
	    dquot->dq_dqb.dqb_curinodes >= number)
		dquot->dq_dqb.dqb_curinodes -= number;
	else
		dquot->dq_dqb.dqb_curinodes = 0;
	if (dquot->dq_dqb.dqb_curinodes <= dquot->dq_dqb.dqb_isoftlimit)
		dquot->dq_dqb.dqb_itime = (time64_t) 0;
	clear_bit(DQ_INODES_B, &dquot->dq_flags);
}

static void dquot_decr_space(struct dquot *dquot, qsize_t number)
{
	if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NEGATIVE_USAGE ||
	    dquot->dq_dqb.dqb_curspace >= number)
		dquot->dq_dqb.dqb_curspace -= number;
	else
		dquot->dq_dqb.dqb_curspace = 0;
	if (dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace <=
	    dquot->dq_dqb.dqb_bsoftlimit)
		dquot->dq_dqb.dqb_btime = (time64_t) 0;
	clear_bit(DQ_BLKS_B, &dquot->dq_flags);
}

struct dquot_warn {
	struct super_block *w_sb;
	struct kqid w_dq_id;
	short w_type;
};

static int warning_issued(struct dquot *dquot, const int warntype)
{
	int flag = (warntype == QUOTA_NL_BHARDWARN ||
		warntype == QUOTA_NL_BSOFTLONGWARN) ? DQ_BLKS_B :
		((warntype == QUOTA_NL_IHARDWARN ||
		warntype == QUOTA_NL_ISOFTLONGWARN) ? DQ_INODES_B : 0);

	if (!flag)
		return 0;
	return test_and_set_bit(flag, &dquot->dq_flags);
}

#ifdef CONFIG_PRINT_QUOTA_WARNING
static int flag_print_warnings = 1;

static int need_print_warning(struct dquot_warn *warn)
{
	if (!flag_print_warnings)
		return 0;

	switch (warn->w_dq_id.type) {
	case USRQUOTA:
		return uid_eq(current_fsuid(), warn->w_dq_id.uid);
	case GRPQUOTA:
		return in_group_p(warn->w_dq_id.gid);
	case PRJQUOTA:
		return 1;
	}
	return 0;
}
/* Print warning to user which exceeded quota */
static void print_warning(struct dquot_warn *warn)
{
	char *msg = NULL;
	struct tty_struct *tty;
	int warntype = warn->w_type;

	if (warntype == QUOTA_NL_IHARDBELOW ||
	    warntype == QUOTA_NL_ISOFTBELOW ||
	    warntype == QUOTA_NL_BHARDBELOW ||
	    warntype == QUOTA_NL_BSOFTBELOW || !need_print_warning(warn))
		return;

	tty = get_current_tty();
	if (!tty)
		return;
	tty_write_message(tty, warn->w_sb->s_id);
	if (warntype == QUOTA_NL_ISOFTWARN || warntype == QUOTA_NL_BSOFTWARN)
		tty_write_message(tty, ": warning, ");
	else
		tty_write_message(tty, ": write failed, ");
	tty_write_message(tty, quotatypes[warn->w_dq_id.type]);
	switch (warntype) {
	case QUOTA_NL_IHARDWARN:
		msg = " file limit reached.\r\n";
		break;
	case QUOTA_NL_ISOFTLONGWARN:
		msg = " file quota exceeded too long.\r\n";
		break;
	case QUOTA_NL_ISOFTWARN:
		msg = " file quota exceeded.\r\n";
		break;
	case QUOTA_NL_BHARDWARN:
		msg = " block limit reached.\r\n";
		break;
	case QUOTA_NL_BSOFTLONGWARN:
		msg = " block quota exceeded too long.\r\n";
		break;
	case QUOTA_NL_BSOFTWARN:
		msg = " block quota exceeded.\r\n";
		break;
	}
	tty_write_message(tty, msg);
	tty_kref_put(tty);
}
#endif

static void prepare_warning(struct dquot_warn *warn, struct dquot *dquot,
			    int warntype)
{
	if (warning_issued(dquot, warntype))
		return;
	warn->w_type = warntype;
	warn->w_sb = dquot->dq_sb;
	warn->w_dq_id = dquot->dq_id;
}

/*
 * Write warnings to the console and send warning messages over netlink.
 *
 * Note that this function can call into tty and networking code.
 */
static void flush_warnings(struct dquot_warn *warn)
{
	int i;

	for (i = 0; i < MAXQUOTAS; i++) {
		if (warn[i].w_type == QUOTA_NL_NOWARN)
			continue;
#ifdef CONFIG_PRINT_QUOTA_WARNING
		print_warning(&warn[i]);
#endif
		quota_send_warning(warn[i].w_dq_id,
				   warn[i].w_sb->s_dev, warn[i].w_type);
	}
}

static int ignore_hardlimit(struct dquot *dquot)
{
	struct mem_dqinfo *info = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_id.type];

	return capable(CAP_SYS_RESOURCE) &&
	       (info->dqi_format->qf_fmt_id != QFMT_VFS_OLD ||
		!(info->dqi_flags & DQF_ROOT_SQUASH));
}
static int dquot_add_inodes(struct dquot *dquot, qsize_t inodes,
			    struct dquot_warn *warn)
{
	qsize_t newinodes;
	int ret = 0;

	spin_lock(&dquot->dq_dqb_lock);
	newinodes = dquot->dq_dqb.dqb_curinodes + inodes;
	if (!sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_id.type) ||
	    test_bit(DQ_FAKE_B, &dquot->dq_flags))
		goto add;

	if (dquot->dq_dqb.dqb_ihardlimit &&
	    newinodes > dquot->dq_dqb.dqb_ihardlimit &&
	    !ignore_hardlimit(dquot)) {
		prepare_warning(warn, dquot, QUOTA_NL_IHARDWARN);
		ret = -EDQUOT;
		goto out;
	}

	if (dquot->dq_dqb.dqb_isoftlimit &&
	    newinodes > dquot->dq_dqb.dqb_isoftlimit &&
	    dquot->dq_dqb.dqb_itime &&
	    ktime_get_real_seconds() >= dquot->dq_dqb.dqb_itime &&
	    !ignore_hardlimit(dquot)) {
		prepare_warning(warn, dquot, QUOTA_NL_ISOFTLONGWARN);
		ret = -EDQUOT;
		goto out;
	}

	if (dquot->dq_dqb.dqb_isoftlimit &&
	    newinodes > dquot->dq_dqb.dqb_isoftlimit &&
	    dquot->dq_dqb.dqb_itime == 0) {
		prepare_warning(warn, dquot, QUOTA_NL_ISOFTWARN);
		dquot->dq_dqb.dqb_itime = ktime_get_real_seconds() +
		    sb_dqopt(dquot->dq_sb)->info[dquot->dq_id.type].dqi_igrace;
	}
add:
	dquot->dq_dqb.dqb_curinodes = newinodes;

out:
	spin_unlock(&dquot->dq_dqb_lock);
	return ret;
}

static int dquot_add_space(struct dquot *dquot, qsize_t space,
			   qsize_t rsv_space, unsigned int flags,
			   struct dquot_warn *warn)
{
	qsize_t tspace;
	struct super_block *sb = dquot->dq_sb;
	int ret = 0;

	spin_lock(&dquot->dq_dqb_lock);
	if (!sb_has_quota_limits_enabled(sb, dquot->dq_id.type) ||
	    test_bit(DQ_FAKE_B, &dquot->dq_flags))
		goto finish;

	tspace = dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace
		+ space + rsv_space;

	if (dquot->dq_dqb.dqb_bhardlimit &&
	    tspace > dquot->dq_dqb.dqb_bhardlimit &&
	    !ignore_hardlimit(dquot)) {
		if (flags & DQUOT_SPACE_WARN)
			prepare_warning(warn, dquot, QUOTA_NL_BHARDWARN);
		ret = -EDQUOT;
		goto finish;
	}

	if (dquot->dq_dqb.dqb_bsoftlimit &&
	    tspace > dquot->dq_dqb.dqb_bsoftlimit &&
	    dquot->dq_dqb.dqb_btime &&
	    ktime_get_real_seconds() >= dquot->dq_dqb.dqb_btime &&
	    !ignore_hardlimit(dquot)) {
		if (flags & DQUOT_SPACE_WARN)
			prepare_warning(warn, dquot, QUOTA_NL_BSOFTLONGWARN);
		ret = -EDQUOT;
		goto finish;
	}

	if (dquot->dq_dqb.dqb_bsoftlimit &&
	    tspace > dquot->dq_dqb.dqb_bsoftlimit &&
	    dquot->dq_dqb.dqb_btime == 0) {
		if (flags & DQUOT_SPACE_WARN) {
			prepare_warning(warn, dquot, QUOTA_NL_BSOFTWARN);
			dquot->dq_dqb.dqb_btime = ktime_get_real_seconds() +
			    sb_dqopt(sb)->info[dquot->dq_id.type].dqi_bgrace;
		} else {
			/*
			 * We don't allow preallocation to exceed softlimit so
			 * exceeding will be always printed
			 */
			ret = -EDQUOT;
			goto finish;
		}
	}
finish:
	/*
	 * We have to be careful and go through warning generation & grace time
	 * setting even if DQUOT_SPACE_NOFAIL is set. That's why we check it
	 * only here.
	 */
	if (flags & DQUOT_SPACE_NOFAIL)
		ret = 0;
	if (!ret) {
		dquot->dq_dqb.dqb_rsvspace += rsv_space;
		dquot->dq_dqb.dqb_curspace += space;
	}
	spin_unlock(&dquot->dq_dqb_lock);
	return ret;
}

static int info_idq_free(struct dquot *dquot, qsize_t inodes)
{
	qsize_t newinodes;

	if (test_bit(DQ_FAKE_B, &dquot->dq_flags) ||
	    dquot->dq_dqb.dqb_curinodes <= dquot->dq_dqb.dqb_isoftlimit ||
	    !sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_id.type))
		return QUOTA_NL_NOWARN;

	newinodes = dquot->dq_dqb.dqb_curinodes - inodes;
	if (newinodes <= dquot->dq_dqb.dqb_isoftlimit)
		return QUOTA_NL_ISOFTBELOW;
	if (dquot->dq_dqb.dqb_curinodes >= dquot->dq_dqb.dqb_ihardlimit &&
	    newinodes < dquot->dq_dqb.dqb_ihardlimit)
		return QUOTA_NL_IHARDBELOW;
	return QUOTA_NL_NOWARN;
}

static int info_bdq_free(struct dquot *dquot, qsize_t space)
{
	qsize_t tspace;

	tspace = dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace;

	if (test_bit(DQ_FAKE_B, &dquot->dq_flags) ||
	    tspace <= dquot->dq_dqb.dqb_bsoftlimit)
		return QUOTA_NL_NOWARN;

	if (tspace - space <= dquot->dq_dqb.dqb_bsoftlimit)
		return QUOTA_NL_BSOFTBELOW;
	if (tspace >= dquot->dq_dqb.dqb_bhardlimit &&
	    tspace - space < dquot->dq_dqb.dqb_bhardlimit)
		return QUOTA_NL_BHARDBELOW;
	return QUOTA_NL_NOWARN;
}
static int inode_quota_active(const struct inode *inode)
{
	struct super_block *sb = inode->i_sb;

	if (IS_NOQUOTA(inode))
		return 0;
	return sb_any_quota_loaded(sb) & ~sb_any_quota_suspended(sb);
}

/*
 * Initialize quota pointers in inode
 *
 * It is better to call this function outside of any transaction as it
 * might need a lot of space in journal for dquot structure allocation.
 */
static int __dquot_initialize(struct inode *inode, int type)
{
	int cnt, init_needed = 0;
	struct dquot __rcu **dquots;
	struct dquot *got[MAXQUOTAS] = {};
	struct super_block *sb = inode->i_sb;
	qsize_t rsv;
	int ret = 0;

	if (!inode_quota_active(inode))
		return 0;

	dquots = i_dquot(inode);

	/* First get references to structures we might need. */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		struct kqid qid;
		kprojid_t projid;
		int rc;
		struct dquot *dquot;

		if (type != -1 && cnt != type)
			continue;
		/*
		 * The i_dquot should have been initialized in most cases,
		 * we check it without locking here to avoid unnecessary
		 * dqget()/dqput() calls.
		 */
		if (dquots[cnt])
			continue;

		if (!sb_has_quota_active(sb, cnt))
			continue;

		init_needed = 1;

		switch (cnt) {
		case USRQUOTA:
			qid = make_kqid_uid(inode->i_uid);
			break;
		case GRPQUOTA:
			qid = make_kqid_gid(inode->i_gid);
			break;
		case PRJQUOTA:
			rc = inode->i_sb->dq_op->get_projid(inode, &projid);
			if (rc)
				continue;
			qid = make_kqid_projid(projid);
			break;
		}
		dquot = dqget(sb, qid);
		if (IS_ERR(dquot)) {
			/* We raced with somebody turning quotas off... */
			if (PTR_ERR(dquot) != -ESRCH) {
				ret = PTR_ERR(dquot);
				goto out_put;
			}
			dquot = NULL;
		}
		got[cnt] = dquot;
	}

	/* All required i_dquot has been initialized */
	if (!init_needed)
		return 0;

	spin_lock(&dq_data_lock);
	if (IS_NOQUOTA(inode))
		goto out_lock;
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (type != -1 && cnt != type)
			continue;
		/* Avoid races with quotaoff() */
		if (!sb_has_quota_active(sb, cnt))
			continue;
		/* We could race with quotaon or dqget() could have failed */
		if (!got[cnt])
			continue;
		if (!dquots[cnt]) {
			rcu_assign_pointer(dquots[cnt], got[cnt]);
			got[cnt] = NULL;
			/*
			 * Make quota reservation system happy if someone
			 * did a write before quota was turned on
			 */
			rsv = inode_get_rsv_space(inode);
			if (unlikely(rsv)) {
				struct dquot *dquot = srcu_dereference_check(
					dquots[cnt], &dquot_srcu,
					lockdep_is_held(&dq_data_lock));

				spin_lock(&inode->i_lock);
				/* Get reservation again under proper lock */
				rsv = __inode_get_rsv_space(inode);
				spin_lock(&dquot->dq_dqb_lock);
				dquot->dq_dqb.dqb_rsvspace += rsv;
				spin_unlock(&dquot->dq_dqb_lock);
				spin_unlock(&inode->i_lock);
			}
		}
	}
out_lock:
	spin_unlock(&dq_data_lock);
out_put:
	/* Drop unused references */
	dqput_all(got);

	return ret;
}

int dquot_initialize(struct inode *inode)
{
	return __dquot_initialize(inode, -1);
}
EXPORT_SYMBOL(dquot_initialize);

bool dquot_initialize_needed(struct inode *inode)
{
	struct dquot __rcu **dquots;
	int i;

	if (!inode_quota_active(inode))
		return false;

	dquots = i_dquot(inode);
	for (i = 0; i < MAXQUOTAS; i++)
		if (!dquots[i] && sb_has_quota_active(inode->i_sb, i))
			return true;
	return false;
}
EXPORT_SYMBOL(dquot_initialize_needed);

/*
 * Release all quotas referenced by inode.
 *
 * This function is only called on inode free or when converting
 * a file to a quota file; there are no other users for the i_dquot in
 * both cases, so we needn't call synchronize_srcu() after
 * clearing the i_dquot pointers.
 */
static void __dquot_drop(struct inode *inode)
{
	int cnt;
	struct dquot __rcu **dquots = i_dquot(inode);
	struct dquot *put[MAXQUOTAS];

	spin_lock(&dq_data_lock);
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		put[cnt] = srcu_dereference_check(dquots[cnt], &dquot_srcu,
					lockdep_is_held(&dq_data_lock));
		rcu_assign_pointer(dquots[cnt], NULL);
	}
	spin_unlock(&dq_data_lock);
	dqput_all(put);
}

void dquot_drop(struct inode *inode)
{
	struct dquot __rcu * const *dquots;
	int cnt;

	if (IS_NOQUOTA(inode))
		return;

	/*
	 * Test before calling to rule out calls from proc and such
	 * where we are not allowed to block. Note that this is
	 * actually reliable test even without the lock - the caller
	 * must assure that nobody can come after the DQUOT_DROP and
	 * add quota pointers back anyway.
	 */
	dquots = i_dquot(inode);
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (dquots[cnt])
			break;
	}

	if (cnt < MAXQUOTAS)
		__dquot_drop(inode);
}
EXPORT_SYMBOL(dquot_drop);

/*
 * inode_reserved_space is managed internally by quota, and protected by
 * i_lock similar to i_blocks+i_bytes.
 */
static qsize_t *inode_reserved_space(struct inode *inode)
{
	/* Filesystem must explicitly define its own method in order to use
	 * quota reservation interface */
	BUG_ON(!inode->i_sb->dq_op->get_reserved_space);
	return inode->i_sb->dq_op->get_reserved_space(inode);
}

static qsize_t __inode_get_rsv_space(struct inode *inode)
{
	if (!inode->i_sb->dq_op->get_reserved_space)
		return 0;
	return *inode_reserved_space(inode);
}

static qsize_t inode_get_rsv_space(struct inode *inode)
{
	qsize_t ret;

	if (!inode->i_sb->dq_op->get_reserved_space)
		return 0;
	spin_lock(&inode->i_lock);
	ret = __inode_get_rsv_space(inode);
	spin_unlock(&inode->i_lock);
	return ret;
}
/*
 * This function updates the i_blocks+i_bytes fields and quota information
 * (together with appropriate checks).
 *
 * NOTE: We absolutely rely on the fact that caller dirties the inode
 * (usually helpers in quotaops.h care about this) and holds a handle for
 * the current transaction so that dquot write and inode write go into the
 * same transaction.
 */

/*
 * This operation can block, but only after everything is updated
 */
int __dquot_alloc_space(struct inode *inode, qsize_t number, int flags)
{
	int cnt, ret = 0, index;
	struct dquot_warn warn[MAXQUOTAS];
	int reserve = flags & DQUOT_SPACE_RESERVE;
	struct dquot __rcu **dquots;
	struct dquot *dquot;

	if (!inode_quota_active(inode)) {
		if (reserve) {
			spin_lock(&inode->i_lock);
			*inode_reserved_space(inode) += number;
			spin_unlock(&inode->i_lock);
		} else {
			inode_add_bytes(inode, number);
		}
		goto out;
	}

	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		warn[cnt].w_type = QUOTA_NL_NOWARN;

	dquots = i_dquot(inode);
	index = srcu_read_lock(&dquot_srcu);
	spin_lock(&inode->i_lock);
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
		if (!dquot)
			continue;
		if (reserve)
			ret = dquot_add_space(dquot, 0, number, flags, &warn[cnt]);
		else
			ret = dquot_add_space(dquot, number, 0, flags, &warn[cnt]);
		if (ret) {
			/* Back out changes we already did */
			for (cnt--; cnt >= 0; cnt--) {
				dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
				if (!dquot)
					continue;
				spin_lock(&dquot->dq_dqb_lock);
				if (reserve)
					dquot_free_reserved_space(dquot, number);
				else
					dquot_decr_space(dquot, number);
				spin_unlock(&dquot->dq_dqb_lock);
			}
			spin_unlock(&inode->i_lock);
			goto out_flush_warn;
		}
	}
	if (reserve)
		*inode_reserved_space(inode) += number;
	else
		__inode_add_bytes(inode, number);
	spin_unlock(&inode->i_lock);

	if (reserve)
		goto out_flush_warn;
	mark_all_dquot_dirty(dquots);
out_flush_warn:
	srcu_read_unlock(&dquot_srcu, index);
	flush_warnings(warn);
out:
	return ret;
}
EXPORT_SYMBOL(__dquot_alloc_space);

/*
 * This operation can block, but only after everything is updated
 */
int dquot_alloc_inode(struct inode *inode)
{
	int cnt, ret = 0, index;
	struct dquot_warn warn[MAXQUOTAS];
	struct dquot __rcu * const *dquots;
	struct dquot *dquot;

	if (!inode_quota_active(inode))
		return 0;
	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		warn[cnt].w_type = QUOTA_NL_NOWARN;

	dquots = i_dquot(inode);
	index = srcu_read_lock(&dquot_srcu);
	spin_lock(&inode->i_lock);
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
		if (!dquot)
			continue;
		ret = dquot_add_inodes(dquot, 1, &warn[cnt]);
		if (ret) {
			for (cnt--; cnt >= 0; cnt--) {
				dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
				if (!dquot)
					continue;
				/* Back out changes we already did */
				spin_lock(&dquot->dq_dqb_lock);
				dquot_decr_inodes(dquot, 1);
				spin_unlock(&dquot->dq_dqb_lock);
			}
			goto warn_put_all;
		}
	}

warn_put_all:
	spin_unlock(&inode->i_lock);
	if (ret == 0)
		mark_all_dquot_dirty(dquots);
	srcu_read_unlock(&dquot_srcu, index);
	flush_warnings(warn);
	return ret;
}
EXPORT_SYMBOL(dquot_alloc_inode);

/*
 * Convert in-memory reserved quotas to real consumed quotas
 */
void dquot_claim_space_nodirty(struct inode *inode, qsize_t number)
{
	struct dquot __rcu **dquots;
	struct dquot *dquot;
	int cnt, index;

	if (!inode_quota_active(inode)) {
		spin_lock(&inode->i_lock);
		*inode_reserved_space(inode) -= number;
		__inode_add_bytes(inode, number);
		spin_unlock(&inode->i_lock);
		return;
	}

	dquots = i_dquot(inode);
	index = srcu_read_lock(&dquot_srcu);
	spin_lock(&inode->i_lock);
	/* Claim reserved quotas to allocated quotas */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
		if (dquot) {
			spin_lock(&dquot->dq_dqb_lock);
			if (WARN_ON_ONCE(dquot->dq_dqb.dqb_rsvspace < number))
				number = dquot->dq_dqb.dqb_rsvspace;
			dquot->dq_dqb.dqb_curspace += number;
			dquot->dq_dqb.dqb_rsvspace -= number;
			spin_unlock(&dquot->dq_dqb_lock);
		}
	}
	/* Update inode bytes */
	*inode_reserved_space(inode) -= number;
	__inode_add_bytes(inode, number);
	spin_unlock(&inode->i_lock);
	mark_all_dquot_dirty(dquots);
	srcu_read_unlock(&dquot_srcu, index);
}
EXPORT_SYMBOL(dquot_claim_space_nodirty);
/*
 * Convert allocated space back to in-memory reserved quotas
 */
void dquot_reclaim_space_nodirty(struct inode *inode, qsize_t number)
{
	struct dquot __rcu **dquots;
	struct dquot *dquot;
	int cnt, index;

	if (!inode_quota_active(inode)) {
		spin_lock(&inode->i_lock);
		*inode_reserved_space(inode) += number;
		__inode_sub_bytes(inode, number);
		spin_unlock(&inode->i_lock);
		return;
	}

	dquots = i_dquot(inode);
	index = srcu_read_lock(&dquot_srcu);
	spin_lock(&inode->i_lock);
	/* Return allocated quotas back to the reservation */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
		if (dquot) {
			spin_lock(&dquot->dq_dqb_lock);
			if (WARN_ON_ONCE(dquot->dq_dqb.dqb_curspace < number))
				number = dquot->dq_dqb.dqb_curspace;
			dquot->dq_dqb.dqb_rsvspace += number;
			dquot->dq_dqb.dqb_curspace -= number;
			spin_unlock(&dquot->dq_dqb_lock);
		}
	}
	/* Update inode bytes */
	*inode_reserved_space(inode) += number;
	__inode_sub_bytes(inode, number);
	spin_unlock(&inode->i_lock);
	mark_all_dquot_dirty(dquots);
	srcu_read_unlock(&dquot_srcu, index);
}
EXPORT_SYMBOL(dquot_reclaim_space_nodirty);

/*
 * This operation can block, but only after everything is updated
 */
void __dquot_free_space(struct inode *inode, qsize_t number, int flags)
{
	unsigned int cnt;
	struct dquot_warn warn[MAXQUOTAS];
	struct dquot __rcu **dquots;
	struct dquot *dquot;
	int reserve = flags & DQUOT_SPACE_RESERVE, index;

	if (!inode_quota_active(inode)) {
		if (reserve) {
			spin_lock(&inode->i_lock);
			*inode_reserved_space(inode) -= number;
			spin_unlock(&inode->i_lock);
		} else {
			inode_sub_bytes(inode, number);
		}
		return;
	}

	dquots = i_dquot(inode);
	index = srcu_read_lock(&dquot_srcu);
	spin_lock(&inode->i_lock);
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		int wtype;

		warn[cnt].w_type = QUOTA_NL_NOWARN;
		dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
		if (!dquot)
			continue;
		spin_lock(&dquot->dq_dqb_lock);
		wtype = info_bdq_free(dquot, number);
		if (wtype != QUOTA_NL_NOWARN)
			prepare_warning(&warn[cnt], dquot, wtype);
		if (reserve)
			dquot_free_reserved_space(dquot, number);
		else
			dquot_decr_space(dquot, number);
		spin_unlock(&dquot->dq_dqb_lock);
	}
	if (reserve)
		*inode_reserved_space(inode) -= number;
	else
		__inode_sub_bytes(inode, number);
	spin_unlock(&inode->i_lock);

	if (reserve)
		goto out_unlock;
	mark_all_dquot_dirty(dquots);
out_unlock:
	srcu_read_unlock(&dquot_srcu, index);
	flush_warnings(warn);
}
EXPORT_SYMBOL(__dquot_free_space);

/*
 * This operation can block, but only after everything is updated
 */
void dquot_free_inode(struct inode *inode)
{
	unsigned int cnt;
	struct dquot_warn warn[MAXQUOTAS];
	struct dquot __rcu * const *dquots;
	struct dquot *dquot;
	int index;

	if (!inode_quota_active(inode))
		return;

	dquots = i_dquot(inode);
	index = srcu_read_lock(&dquot_srcu);
	spin_lock(&inode->i_lock);
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		int wtype;

		warn[cnt].w_type = QUOTA_NL_NOWARN;
		dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
		if (!dquot)
			continue;
		spin_lock(&dquot->dq_dqb_lock);
		wtype = info_idq_free(dquot, 1);
		if (wtype != QUOTA_NL_NOWARN)
			prepare_warning(&warn[cnt], dquot, wtype);
		dquot_decr_inodes(dquot, 1);
		spin_unlock(&dquot->dq_dqb_lock);
	}
	spin_unlock(&inode->i_lock);
	mark_all_dquot_dirty(dquots);
	srcu_read_unlock(&dquot_srcu, index);
	flush_warnings(warn);
}
EXPORT_SYMBOL(dquot_free_inode);
/*
 * Transfer the number of inodes and blocks from one diskquota to another.
 * On success, dquot references in transfer_to are consumed and references
 * to original dquots that need to be released are placed there. On failure,
 * references are kept untouched.
 *
 * This operation can block, but only after everything is updated
 * A transaction must be started when entering this function.
 *
 * We are holding reference on transfer_from & transfer_to, no need to
 * protect them by srcu_read_lock().
 */
int __dquot_transfer(struct inode *inode, struct dquot **transfer_to)
{
	qsize_t cur_space;
	qsize_t rsv_space = 0;
	qsize_t inode_usage = 1;
	struct dquot __rcu **dquots;
	struct dquot *transfer_from[MAXQUOTAS] = {};
	int cnt, index, ret = 0;
	char is_valid[MAXQUOTAS] = {};
	struct dquot_warn warn_to[MAXQUOTAS];
	struct dquot_warn warn_from_inodes[MAXQUOTAS];
	struct dquot_warn warn_from_space[MAXQUOTAS];

	if (IS_NOQUOTA(inode))
		return 0;

	if (inode->i_sb->dq_op->get_inode_usage) {
		ret = inode->i_sb->dq_op->get_inode_usage(inode, &inode_usage);
		if (ret)
			return ret;
	}

	/* Initialize the arrays */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		warn_to[cnt].w_type = QUOTA_NL_NOWARN;
		warn_from_inodes[cnt].w_type = QUOTA_NL_NOWARN;
		warn_from_space[cnt].w_type = QUOTA_NL_NOWARN;
	}

	spin_lock(&dq_data_lock);
	spin_lock(&inode->i_lock);
	if (IS_NOQUOTA(inode)) {	/* File without quota accounting? */
		spin_unlock(&inode->i_lock);
		spin_unlock(&dq_data_lock);
		return 0;
	}
	cur_space = __inode_get_bytes(inode);
	rsv_space = __inode_get_rsv_space(inode);
	dquots = i_dquot(inode);
	/*
	 * Build the transfer_from list, check limits, and update usage in
	 * the target structures.
	 */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		/*
		 * Skip changes for same uid or gid or for turned off quota-type.
		 */
		if (!transfer_to[cnt])
			continue;
		/* Avoid races with quotaoff() */
		if (!sb_has_quota_active(inode->i_sb, cnt))
			continue;
		is_valid[cnt] = 1;
		transfer_from[cnt] = srcu_dereference_check(dquots[cnt],
				&dquot_srcu, lockdep_is_held(&dq_data_lock));
		ret = dquot_add_inodes(transfer_to[cnt], inode_usage,
				       &warn_to[cnt]);
		if (ret)
			goto over_quota;
		ret = dquot_add_space(transfer_to[cnt], cur_space, rsv_space,
				      DQUOT_SPACE_WARN, &warn_to[cnt]);
		if (ret) {
			spin_lock(&transfer_to[cnt]->dq_dqb_lock);
			dquot_decr_inodes(transfer_to[cnt], inode_usage);
			spin_unlock(&transfer_to[cnt]->dq_dqb_lock);
			goto over_quota;
		}
	}

	/* Decrease usage for source structures and update quota pointers */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (!is_valid[cnt])
			continue;
		/* Due to IO error we might not have transfer_from[] structure */
		if (transfer_from[cnt]) {
			int wtype;

			spin_lock(&transfer_from[cnt]->dq_dqb_lock);
			wtype = info_idq_free(transfer_from[cnt], inode_usage);
			if (wtype != QUOTA_NL_NOWARN)
				prepare_warning(&warn_from_inodes[cnt],
						transfer_from[cnt], wtype);
			wtype = info_bdq_free(transfer_from[cnt],
					      cur_space + rsv_space);
			if (wtype != QUOTA_NL_NOWARN)
				prepare_warning(&warn_from_space[cnt],
						transfer_from[cnt], wtype);
			dquot_decr_inodes(transfer_from[cnt], inode_usage);
			dquot_decr_space(transfer_from[cnt], cur_space);
			dquot_free_reserved_space(transfer_from[cnt],
						  rsv_space);
			spin_unlock(&transfer_from[cnt]->dq_dqb_lock);
		}
		rcu_assign_pointer(dquots[cnt], transfer_to[cnt]);
	}
	spin_unlock(&inode->i_lock);
	spin_unlock(&dq_data_lock);

	/*
	 * These arrays are local and we hold dquot references so we don't need
	 * the srcu protection but still take dquot_srcu to avoid warning in
	 * mark_all_dquot_dirty().
	 */
	index = srcu_read_lock(&dquot_srcu);
	mark_all_dquot_dirty((struct dquot __rcu **)transfer_from);
	mark_all_dquot_dirty((struct dquot __rcu **)transfer_to);
	srcu_read_unlock(&dquot_srcu, index);

	flush_warnings(warn_to);
	flush_warnings(warn_from_inodes);
	flush_warnings(warn_from_space);
	/* Pass back references to put */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		if (is_valid[cnt])
			transfer_to[cnt] = transfer_from[cnt];
	return 0;
over_quota:
	/* Back out changes we already did */
	for (cnt--; cnt >= 0; cnt--) {
		if (!is_valid[cnt])
			continue;
		spin_lock(&transfer_to[cnt]->dq_dqb_lock);
		dquot_decr_inodes(transfer_to[cnt], inode_usage);
		dquot_decr_space(transfer_to[cnt], cur_space);
		dquot_free_reserved_space(transfer_to[cnt], rsv_space);
		spin_unlock(&transfer_to[cnt]->dq_dqb_lock);
	}
	spin_unlock(&inode->i_lock);
	spin_unlock(&dq_data_lock);
	flush_warnings(warn_to);
	return ret;
}
EXPORT_SYMBOL(__dquot_transfer);
/* Wrapper for transferring ownership of an inode for uid/gid only
 * Called from FSXXX_setattr()
 */
int dquot_transfer(struct mnt_idmap *idmap, struct inode *inode,
		   struct iattr *iattr)
{
	struct dquot *transfer_to[MAXQUOTAS] = {};
	struct dquot *dquot;
	struct super_block *sb = inode->i_sb;
	int ret;

	if (!inode_quota_active(inode))
		return 0;

	if (i_uid_needs_update(idmap, iattr, inode)) {
		kuid_t kuid = from_vfsuid(idmap, i_user_ns(inode),
					  iattr->ia_vfsuid);

		dquot = dqget(sb, make_kqid_uid(kuid));
		if (IS_ERR(dquot)) {
			if (PTR_ERR(dquot) != -ESRCH) {
				ret = PTR_ERR(dquot);
				goto out_put;
			}
			dquot = NULL;
		}
		transfer_to[USRQUOTA] = dquot;
	}
	if (i_gid_needs_update(idmap, iattr, inode)) {
		kgid_t kgid = from_vfsgid(idmap, i_user_ns(inode),
					  iattr->ia_vfsgid);

		dquot = dqget(sb, make_kqid_gid(kgid));
		if (IS_ERR(dquot)) {
			if (PTR_ERR(dquot) != -ESRCH) {
				ret = PTR_ERR(dquot);
				goto out_put;
			}
			dquot = NULL;
		}
		transfer_to[GRPQUOTA] = dquot;
	}
	ret = __dquot_transfer(inode, transfer_to);
out_put:
	dqput_all(transfer_to);
	return ret;
}
EXPORT_SYMBOL(dquot_transfer);
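/*
 * Illustrative sketch (assumption, not taken from this file): a
 * filesystem's ->setattr typically calls dquot_transfer() before applying
 * the new owner, e.g.:
 *
 *	static int myfs_setattr(struct mnt_idmap *idmap,
 *				struct dentry *dentry, struct iattr *iattr)
 *	{
 *		struct inode *inode = d_inode(dentry);
 *		int error;
 *		...
 *		if (i_uid_needs_update(idmap, iattr, inode) ||
 *		    i_gid_needs_update(idmap, iattr, inode)) {
 *			error = dquot_transfer(idmap, inode, iattr);
 *			if (error)
 *				return error;
 *		}
 *		...
 *	}
 */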
/*
 * Write info of quota file to disk
 */
int dquot_commit_info(struct super_block *sb, int type)
{
	struct quota_info *dqopt = sb_dqopt(sb);

	return dqopt->ops[type]->write_file_info(sb, type);
}
EXPORT_SYMBOL(dquot_commit_info);

int dquot_get_next_id(struct super_block *sb, struct kqid *qid)
{
	struct quota_info *dqopt = sb_dqopt(sb);

	if (!sb_has_quota_active(sb, qid->type))
		return -ESRCH;
	if (!dqopt->ops[qid->type]->get_next_id)
		return -ENOSYS;
	return dqopt->ops[qid->type]->get_next_id(sb, qid);
}
EXPORT_SYMBOL(dquot_get_next_id);

/*
 * Definitions of diskquota operations.
 */
const struct dquot_operations dquot_operations = {
	.write_dquot	= dquot_commit,
	.acquire_dquot	= dquot_acquire,
	.release_dquot	= dquot_release,
	.mark_dirty	= dquot_mark_dquot_dirty,
	.write_info	= dquot_commit_info,
	.alloc_dquot	= dquot_alloc,
	.destroy_dquot	= dquot_destroy,
	.get_next_id	= dquot_get_next_id,
};
EXPORT_SYMBOL(dquot_operations);
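/*
 * Illustrative sketch (assumption, not taken from this file): a filesystem
 * relying on the generic implementation wires these operations up while
 * filling its super_block, e.g.:
 *
 *	sb->dq_op = &dquot_operations;
 *	sb->s_qcop = &dquot_quotactl_sysfile_ops;	(or its own qcop)
 *	sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP;
 */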

/*
 * Generic helper for ->open on filesystems supporting disk quotas.
 */
int dquot_file_open(struct inode *inode, struct file *file)
{
	int error;

	error = generic_file_open(inode, file);
	if (!error && (file->f_mode & FMODE_WRITE))
		error = dquot_initialize(inode);
	return error;
}
EXPORT_SYMBOL(dquot_file_open);
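
/*
 * Example (sketch): dquot_file_open() is normally used verbatim as the
 * ->open method so dquots are attached before the first write. This
 * file_operations instance is purely illustrative.
 */
static const struct file_operations example_file_ops __maybe_unused = {
	.llseek		= generic_file_llseek,
	.open		= dquot_file_open,
	.read_iter	= generic_file_read_iter,
	.write_iter	= generic_file_write_iter,
};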

static void vfs_cleanup_quota_inode(struct super_block *sb, int type)
{
	struct quota_info *dqopt = sb_dqopt(sb);
	struct inode *inode = dqopt->files[type];

	if (!inode)
		return;
	if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE)) {
		inode_lock(inode);
		inode->i_flags &= ~S_NOQUOTA;
		inode_unlock(inode);
	}
	dqopt->files[type] = NULL;
	iput(inode);
}

/*
 * Turn quota off on a device. type == -1 ==> quotaoff for all types (umount)
 */
int dquot_disable(struct super_block *sb, int type, unsigned int flags)
{
	int cnt;
	struct quota_info *dqopt = sb_dqopt(sb);

	/* s_umount should be held in exclusive mode */
	if (WARN_ON_ONCE(down_read_trylock(&sb->s_umount)))
		up_read(&sb->s_umount);

	/* Cannot turn off usage accounting without turning off limits, nor
	 * suspend quotas while simultaneously turning them off. */
	if ((flags & DQUOT_USAGE_ENABLED && !(flags & DQUOT_LIMITS_ENABLED))
	    || (flags & DQUOT_SUSPENDED && flags & (DQUOT_LIMITS_ENABLED |
	    DQUOT_USAGE_ENABLED)))
		return -EINVAL;

	/*
	 * Skip everything if there's nothing to do. We have to do this because
	 * sometimes we are called when fill_super() failed and calling
	 * sync_fs() in such cases does no good.
	 */
	if (!sb_any_quota_loaded(sb))
		return 0;

	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (type != -1 && cnt != type)
			continue;
		if (!sb_has_quota_loaded(sb, cnt))
			continue;

		if (flags & DQUOT_SUSPENDED) {
			spin_lock(&dq_state_lock);
			dqopt->flags |=
				dquot_state_flag(DQUOT_SUSPENDED, cnt);
			spin_unlock(&dq_state_lock);
		} else {
			spin_lock(&dq_state_lock);
			dqopt->flags &= ~dquot_state_flag(flags, cnt);
			/* Turning off suspended quotas? */
			if (!sb_has_quota_loaded(sb, cnt) &&
			    sb_has_quota_suspended(sb, cnt)) {
				dqopt->flags &= ~dquot_state_flag(
							DQUOT_SUSPENDED, cnt);
				spin_unlock(&dq_state_lock);
				vfs_cleanup_quota_inode(sb, cnt);
				continue;
			}
			spin_unlock(&dq_state_lock);
		}

		/* We still have to keep quota loaded? */
		if (sb_has_quota_loaded(sb, cnt) && !(flags & DQUOT_SUSPENDED))
			continue;

		/* Note: these are blocking operations */
		drop_dquot_ref(sb, cnt);
		invalidate_dquots(sb, cnt);
		/*
		 * Now all dquots should be invalidated, all writes done so we
		 * should be only users of the info. No locks needed.
		 */
		if (info_dirty(&dqopt->info[cnt]))
			sb->dq_op->write_info(sb, cnt);
		if (dqopt->ops[cnt]->free_file_info)
			dqopt->ops[cnt]->free_file_info(sb, cnt);
		put_quota_format(dqopt->info[cnt].dqi_format);
		dqopt->info[cnt].dqi_flags = 0;
		dqopt->info[cnt].dqi_igrace = 0;
		dqopt->info[cnt].dqi_bgrace = 0;
		dqopt->ops[cnt] = NULL;
	}

	/* Skip syncing and setting flags if quota files are hidden */
	if (dqopt->flags & DQUOT_QUOTA_SYS_FILE)
		goto put_inodes;

	/* Sync the superblock so that buffers with quota data are written to
	 * disk (and so userspace sees correct data afterwards). */
	if (sb->s_op->sync_fs)
		sb->s_op->sync_fs(sb, 1);
	sync_blockdev(sb->s_bdev);
	/* Now the quota files are just ordinary files and we can set the
	 * inode flags back. Moreover we discard the pagecache so that
	 * userspace sees the writes we did bypassing the pagecache. We
	 * must also discard the blockdev buffers so that we see the
	 * changes done by userspace on the next quotaon() */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		if (!sb_has_quota_loaded(sb, cnt) && dqopt->files[cnt]) {
			inode_lock(dqopt->files[cnt]);
			truncate_inode_pages(&dqopt->files[cnt]->i_data, 0);
			inode_unlock(dqopt->files[cnt]);
		}
	if (sb->s_bdev)
		invalidate_bdev(sb->s_bdev);
put_inodes:
	/* We are done when suspending quotas */
	if (flags & DQUOT_SUSPENDED)
		return 0;

	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		if (!sb_has_quota_loaded(sb, cnt))
			vfs_cleanup_quota_inode(sb, cnt);
	return 0;
}
EXPORT_SYMBOL(dquot_disable);

int dquot_quota_off(struct super_block *sb, int type)
{
	return dquot_disable(sb, type,
			     DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);
}
EXPORT_SYMBOL(dquot_quota_off);

/*
 * Turn quotas on for a device
 */

static int vfs_setup_quota_inode(struct inode *inode, int type)
{
	struct super_block *sb = inode->i_sb;
	struct quota_info *dqopt = sb_dqopt(sb);

	if (is_bad_inode(inode))
		return -EUCLEAN;
	if (!S_ISREG(inode->i_mode))
		return -EACCES;
	if (IS_RDONLY(inode))
		return -EROFS;
	if (sb_has_quota_loaded(sb, type))
		return -EBUSY;

	/*
	 * Quota files should never be encrypted. They should be thought of as
	 * filesystem metadata, not user data. New-style internal quota files
	 * cannot be encrypted by users anyway, but old-style external quota
	 * files could potentially be incorrectly created in an encrypted
	 * directory, hence this explicit check. Some reasons why encrypted
	 * quota files don't work include: (1) some filesystems that support
	 * encryption don't handle it in their quota_read and quota_write, and
	 * (2) cleaning up encrypted quota files at unmount would need special
	 * consideration, as quota files are cleaned up later than user files.
	 */
	if (IS_ENCRYPTED(inode))
		return -EINVAL;

	dqopt->files[type] = igrab(inode);
	if (!dqopt->files[type])
		return -EIO;
	if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE)) {
		/* We don't want quota and atime on quota files (deadlocks
		 * possible) Also nobody should write to the file - we use
		 * special IO operations which ignore the immutable bit. */
		inode_lock(inode);
		inode->i_flags |= S_NOQUOTA;
		inode_unlock(inode);
		/*
		 * When S_NOQUOTA is set, remove dquot references as no more
		 * references can be added
		 */
		__dquot_drop(inode);
	}
	return 0;
}

int dquot_load_quota_sb(struct super_block *sb, int type, int format_id,
	unsigned int flags)
{
	struct quota_format_type *fmt = find_quota_format(format_id);
	struct quota_info *dqopt = sb_dqopt(sb);
	int error;

	lockdep_assert_held_write(&sb->s_umount);

	/* Just unsuspend quotas? */
	if (WARN_ON_ONCE(flags & DQUOT_SUSPENDED))
		return -EINVAL;

	if (!fmt)
		return -ESRCH;
	if (!sb->dq_op || !sb->s_qcop ||
	    (type == PRJQUOTA && sb->dq_op->get_projid == NULL)) {
		error = -EINVAL;
		goto out_fmt;
	}
	/* Filesystems outside of init_user_ns not yet supported */
	if (sb->s_user_ns != &init_user_ns) {
		error = -EINVAL;
		goto out_fmt;
	}
	/* Usage always has to be set... */
	if (!(flags & DQUOT_USAGE_ENABLED)) {
		error = -EINVAL;
		goto out_fmt;
	}
	if (sb_has_quota_loaded(sb, type)) {
		error = -EBUSY;
		goto out_fmt;
	}

	if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE)) {
		/* As we bypass the pagecache we must now flush all the
		 * dirty data and invalidate caches so that kernel sees
		 * changes from userspace. It is not enough to just flush
		 * the quota file since if blocksize < pagesize, invalidation
		 * of the cache could fail because of other unrelated dirty
		 * data */
		sync_filesystem(sb);
		invalidate_bdev(sb->s_bdev);
	}

	error = -EINVAL;
	if (!fmt->qf_ops->check_quota_file(sb, type))
		goto out_fmt;

	dqopt->ops[type] = fmt->qf_ops;
	dqopt->info[type].dqi_format = fmt;
	dqopt->info[type].dqi_fmt_id = format_id;
	INIT_LIST_HEAD(&dqopt->info[type].dqi_dirty_list);
	error = dqopt->ops[type]->read_file_info(sb, type);
	if (error < 0)
		goto out_fmt;
	if (dqopt->flags & DQUOT_QUOTA_SYS_FILE) {
		spin_lock(&dq_data_lock);
		dqopt->info[type].dqi_flags |= DQF_SYS_FILE;
		spin_unlock(&dq_data_lock);
	}
	spin_lock(&dq_state_lock);
	dqopt->flags |= dquot_state_flag(flags, type);
	spin_unlock(&dq_state_lock);

	error = add_dquot_ref(sb, type);
	if (error)
		dquot_disable(sb, type,
			      DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);

	return error;
out_fmt:
	put_quota_format(fmt);

	return error;
}
EXPORT_SYMBOL(dquot_load_quota_sb);

/*
 * More powerful function for turning on quotas on given quota inode allowing
 * setting of individual quota flags
 */
int dquot_load_quota_inode(struct inode *inode, int type, int format_id,
	unsigned int flags)
{
	int err;

	err = vfs_setup_quota_inode(inode, type);
	if (err < 0)
		return err;
	err = dquot_load_quota_sb(inode->i_sb, type, format_id, flags);
	if (err < 0)
		vfs_cleanup_quota_inode(inode->i_sb, type);
	return err;
}
EXPORT_SYMBOL(dquot_load_quota_inode);

/* Reenable quotas on remount RW */
int dquot_resume(struct super_block *sb, int type)
{
	struct quota_info *dqopt = sb_dqopt(sb);
	int ret = 0, cnt;
	unsigned int flags;

	/* s_umount should be held in exclusive mode */
	if (WARN_ON_ONCE(down_read_trylock(&sb->s_umount)))
		up_read(&sb->s_umount);

	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (type != -1 && cnt != type)
			continue;
		if (!sb_has_quota_suspended(sb, cnt))
			continue;

		spin_lock(&dq_state_lock);
		flags = dqopt->flags & dquot_state_flag(DQUOT_USAGE_ENABLED |
							DQUOT_LIMITS_ENABLED,
							cnt);
		dqopt->flags &= ~dquot_state_flag(DQUOT_STATE_FLAGS, cnt);
		spin_unlock(&dq_state_lock);

		flags = dquot_generic_flag(flags, cnt);
		ret = dquot_load_quota_sb(sb, cnt, dqopt->info[cnt].dqi_fmt_id,
					  flags);
		if (ret < 0)
			vfs_cleanup_quota_inode(sb, cnt);
	}

	return ret;
}
EXPORT_SYMBOL(dquot_resume);
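
/*
 * Example (sketch): a typical remount path suspends quotas when going
 * read-only and calls dquot_resume() when going back to read-write.
 * dquot_suspend() is the quotaops.h wrapper around
 * dquot_disable(sb, type, DQUOT_SUSPENDED). The function name below is
 * hypothetical.
 */
static int __maybe_unused example_reconfigure(struct super_block *sb,
					      unsigned long new_flags)
{
	if (new_flags & SB_RDONLY)
		return dquot_suspend(sb, -1);	/* all quota types */
	return dquot_resume(sb, -1);
}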

int dquot_quota_on(struct super_block *sb, int type, int format_id,
		   const struct path *path)
{
	int error = security_quota_on(path->dentry);
	if (error)
		return error;
	/* Quota file not on the same filesystem? */
	if (path->dentry->d_sb != sb)
		error = -EXDEV;
	else
		error = dquot_load_quota_inode(d_inode(path->dentry), type,
					       format_id, DQUOT_USAGE_ENABLED |
					       DQUOT_LIMITS_ENABLED);
	return error;
}
EXPORT_SYMBOL(dquot_quota_on);
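
/*
 * Example (sketch): a filesystem whose quota files are ordinary, visible
 * files (the classic aquota.* setup) can use the generic helpers for its
 * quotactl operations directly; only the structure name is hypothetical.
 */
static const struct quotactl_ops example_quotactl_ops __maybe_unused = {
	.quota_on	= dquot_quota_on,
	.quota_off	= dquot_quota_off,
	.quota_sync	= dquot_quota_sync,
	.get_state	= dquot_get_state,
	.set_info	= dquot_set_dqinfo,
	.get_dqblk	= dquot_get_dqblk,
	.set_dqblk	= dquot_set_dqblk,
};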

/*
 * This function is used when filesystem needs to initialize quotas
 * during mount time.
 */
int dquot_quota_on_mount(struct super_block *sb, char *qf_name,
		int format_id, int type)
{
	struct dentry *dentry;
	int error;

	dentry = lookup_positive_unlocked(qf_name, sb->s_root, strlen(qf_name));
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	error = security_quota_on(dentry);
	if (!error)
		error = dquot_load_quota_inode(d_inode(dentry), type, format_id,
				DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);

	dput(dentry);
	return error;
}
EXPORT_SYMBOL(dquot_quota_on_mount);
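
/*
 * Example (sketch): with journalled quota mount options (e.g.
 * "usrjquota=aquota.user,jqfmt=vfsv0") a filesystem would enable quota
 * from its mount path roughly like this. The file name and format id
 * are illustrative.
 */
static int __maybe_unused example_quota_on_mount(struct super_block *sb)
{
	return dquot_quota_on_mount(sb, "aquota.user", QFMT_VFS_V0,
				    USRQUOTA);
}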

static int dquot_quota_enable(struct super_block *sb, unsigned int flags)
{
	int ret;
	int type;
	struct quota_info *dqopt = sb_dqopt(sb);

	if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE))
		return -ENOSYS;
	/* Accounting cannot be turned on while fs is mounted */
	flags &= ~(FS_QUOTA_UDQ_ACCT | FS_QUOTA_GDQ_ACCT | FS_QUOTA_PDQ_ACCT);
	if (!flags)
		return -EINVAL;
	for (type = 0; type < MAXQUOTAS; type++) {
		if (!(flags & qtype_enforce_flag(type)))
			continue;
		/* Can't enforce without accounting */
		if (!sb_has_quota_usage_enabled(sb, type)) {
			ret = -EINVAL;
			goto out_err;
		}
		if (sb_has_quota_limits_enabled(sb, type)) {
			ret = -EBUSY;
			goto out_err;
		}
		spin_lock(&dq_state_lock);
		dqopt->flags |= dquot_state_flag(DQUOT_LIMITS_ENABLED, type);
		spin_unlock(&dq_state_lock);
	}
	return 0;
out_err:
	/* Backout enforcement enablement we already did */
	for (type--; type >= 0; type--) {
		if (flags & qtype_enforce_flag(type))
			dquot_disable(sb, type, DQUOT_LIMITS_ENABLED);
	}
	/* Error code translation for better compatibility with XFS */
	if (ret == -EBUSY)
		ret = -EEXIST;
	return ret;
}

static int dquot_quota_disable(struct super_block *sb, unsigned int flags)
{
	int ret;
	int type;
	struct quota_info *dqopt = sb_dqopt(sb);

	if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE))
		return -ENOSYS;
	/*
	 * We don't support turning off accounting via quotactl. In principle
	 * quota infrastructure can do this but filesystems don't expect
	 * userspace to be able to do it.
	 */
	if (flags &
		  (FS_QUOTA_UDQ_ACCT | FS_QUOTA_GDQ_ACCT | FS_QUOTA_PDQ_ACCT))
		return -EOPNOTSUPP;

	/* Filter out limits not enabled */
	for (type = 0; type < MAXQUOTAS; type++)
		if (!sb_has_quota_limits_enabled(sb, type))
			flags &= ~qtype_enforce_flag(type);
	/* Nothing left? */
	if (!flags)
		return -EEXIST;
	for (type = 0; type < MAXQUOTAS; type++) {
		if (flags & qtype_enforce_flag(type)) {
			ret = dquot_disable(sb, type, DQUOT_LIMITS_ENABLED);
			if (ret < 0)
				goto out_err;
		}
	}
	return 0;
out_err:
	/* Backout enforcement disabling we already did */
	for (type--; type >= 0; type--) {
		if (flags & qtype_enforce_flag(type)) {
			spin_lock(&dq_state_lock);
			dqopt->flags |=
				dquot_state_flag(DQUOT_LIMITS_ENABLED, type);
			spin_unlock(&dq_state_lock);
		}
	}
	return ret;
}

/* Generic routine for getting common part of quota structure */
static void do_get_dqblk(struct dquot *dquot, struct qc_dqblk *di)
{
	struct mem_dqblk *dm = &dquot->dq_dqb;

	memset(di, 0, sizeof(*di));
	spin_lock(&dquot->dq_dqb_lock);
	di->d_spc_hardlimit = dm->dqb_bhardlimit;
	di->d_spc_softlimit = dm->dqb_bsoftlimit;
	di->d_ino_hardlimit = dm->dqb_ihardlimit;
	di->d_ino_softlimit = dm->dqb_isoftlimit;
	di->d_space = dm->dqb_curspace + dm->dqb_rsvspace;
	di->d_ino_count = dm->dqb_curinodes;
	di->d_spc_timer = dm->dqb_btime;
	di->d_ino_timer = dm->dqb_itime;
	spin_unlock(&dquot->dq_dqb_lock);
}

int dquot_get_dqblk(struct super_block *sb, struct kqid qid,
		    struct qc_dqblk *di)
{
	struct dquot *dquot;

	dquot = dqget(sb, qid);
	if (IS_ERR(dquot))
		return PTR_ERR(dquot);
	do_get_dqblk(dquot, di);
	dqput(dquot);

	return 0;
}
EXPORT_SYMBOL(dquot_get_dqblk);
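
/*
 * Example (sketch): fetching usage and limits for a single user id. The
 * conversion through make_kuid()/make_kqid_uid() is the standard way to
 * build a struct kqid; the function name is hypothetical.
 */
static int __maybe_unused example_get_user_quota(struct super_block *sb,
						 uid_t uid,
						 struct qc_dqblk *di)
{
	struct kqid qid = make_kqid_uid(make_kuid(current_user_ns(), uid));

	if (!qid_valid(qid))
		return -EINVAL;
	return dquot_get_dqblk(sb, qid, di);
}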

int dquot_get_next_dqblk(struct super_block *sb, struct kqid *qid,
			 struct qc_dqblk *di)
{
	struct dquot *dquot;
	int err;

	if (!sb->dq_op->get_next_id)
		return -ENOSYS;
	err = sb->dq_op->get_next_id(sb, qid);
	if (err < 0)
		return err;
	dquot = dqget(sb, *qid);
	if (IS_ERR(dquot))
		return PTR_ERR(dquot);
	do_get_dqblk(dquot, di);
	dqput(dquot);

	return 0;
}
EXPORT_SYMBOL(dquot_get_next_dqblk);

#define VFS_QC_MASK \
	(QC_SPACE | QC_SPC_SOFT | QC_SPC_HARD | \
	 QC_INO_COUNT | QC_INO_SOFT | QC_INO_HARD | \
	 QC_SPC_TIMER | QC_INO_TIMER)

/* Generic routine for setting common part of quota structure */
static int do_set_dqblk(struct dquot *dquot, struct qc_dqblk *di)
{
	struct mem_dqblk *dm = &dquot->dq_dqb;
	int check_blim = 0, check_ilim = 0;
	struct mem_dqinfo *dqi = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_id.type];

	if (di->d_fieldmask & ~VFS_QC_MASK)
		return -EINVAL;

	if (((di->d_fieldmask & QC_SPC_SOFT) &&
	     di->d_spc_softlimit > dqi->dqi_max_spc_limit) ||
	    ((di->d_fieldmask & QC_SPC_HARD) &&
	     di->d_spc_hardlimit > dqi->dqi_max_spc_limit) ||
	    ((di->d_fieldmask & QC_INO_SOFT) &&
	     (di->d_ino_softlimit > dqi->dqi_max_ino_limit)) ||
	    ((di->d_fieldmask & QC_INO_HARD) &&
	     (di->d_ino_hardlimit > dqi->dqi_max_ino_limit)))
		return -ERANGE;

	spin_lock(&dquot->dq_dqb_lock);
	if (di->d_fieldmask & QC_SPACE) {
		dm->dqb_curspace = di->d_space - dm->dqb_rsvspace;
		check_blim = 1;
		set_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags);
	}

	if (di->d_fieldmask & QC_SPC_SOFT)
		dm->dqb_bsoftlimit = di->d_spc_softlimit;
	if (di->d_fieldmask & QC_SPC_HARD)
		dm->dqb_bhardlimit = di->d_spc_hardlimit;
	if (di->d_fieldmask & (QC_SPC_SOFT | QC_SPC_HARD)) {
		check_blim = 1;
		set_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags);
	}

	if (di->d_fieldmask & QC_INO_COUNT) {
		dm->dqb_curinodes = di->d_ino_count;
		check_ilim = 1;
		set_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags);
	}

	if (di->d_fieldmask & QC_INO_SOFT)
		dm->dqb_isoftlimit = di->d_ino_softlimit;
	if (di->d_fieldmask & QC_INO_HARD)
		dm->dqb_ihardlimit = di->d_ino_hardlimit;
	if (di->d_fieldmask & (QC_INO_SOFT | QC_INO_HARD)) {
		check_ilim = 1;
		set_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags);
	}

	if (di->d_fieldmask & QC_SPC_TIMER) {
		dm->dqb_btime = di->d_spc_timer;
		check_blim = 1;
		set_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags);
	}

	if (di->d_fieldmask & QC_INO_TIMER) {
		dm->dqb_itime = di->d_ino_timer;
		check_ilim = 1;
		set_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags);
	}

	if (check_blim) {
		if (!dm->dqb_bsoftlimit ||
		    dm->dqb_curspace + dm->dqb_rsvspace <= dm->dqb_bsoftlimit) {
			dm->dqb_btime = 0;
			clear_bit(DQ_BLKS_B, &dquot->dq_flags);
		} else if (!(di->d_fieldmask & QC_SPC_TIMER))
			/* Set grace only if user hasn't provided his own... */
			dm->dqb_btime = ktime_get_real_seconds() + dqi->dqi_bgrace;
	}
	if (check_ilim) {
		if (!dm->dqb_isoftlimit ||
		    dm->dqb_curinodes <= dm->dqb_isoftlimit) {
			dm->dqb_itime = 0;
			clear_bit(DQ_INODES_B, &dquot->dq_flags);
		} else if (!(di->d_fieldmask & QC_INO_TIMER))
			/* Set grace only if user hasn't provided his own... */
			dm->dqb_itime = ktime_get_real_seconds() + dqi->dqi_igrace;
	}
	if (dm->dqb_bhardlimit || dm->dqb_bsoftlimit || dm->dqb_ihardlimit ||
	    dm->dqb_isoftlimit)
		clear_bit(DQ_FAKE_B, &dquot->dq_flags);
	else
		set_bit(DQ_FAKE_B, &dquot->dq_flags);
	spin_unlock(&dquot->dq_dqb_lock);
	mark_dquot_dirty(dquot);

	return 0;
}

int dquot_set_dqblk(struct super_block *sb, struct kqid qid,
		  struct qc_dqblk *di)
{
	struct dquot *dquot;
	int rc;

	dquot = dqget(sb, qid);
	if (IS_ERR(dquot)) {
		rc = PTR_ERR(dquot);
		goto out;
	}
	rc = do_set_dqblk(dquot, di);
	dqput(dquot);
out:
	return rc;
}
EXPORT_SYMBOL(dquot_set_dqblk);
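
/*
 * Example (sketch): setting only the space limits for an id. Fields not
 * selected in d_fieldmask keep their current values, so callers can
 * update limits without touching usage or timers. The limit values are
 * illustrative; d_spc_* limits are in bytes.
 */
static int __maybe_unused example_set_space_limits(struct super_block *sb,
						   struct kqid qid)
{
	struct qc_dqblk di = {
		.d_fieldmask	 = QC_SPC_SOFT | QC_SPC_HARD,
		.d_spc_softlimit = 1024ULL * 1024 * 1024,	/* 1 GiB */
		.d_spc_hardlimit = 2048ULL * 1024 * 1024,	/* 2 GiB */
	};

	return dquot_set_dqblk(sb, qid, &di);
}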

/* Generic routine for getting common part of quota file information */
int dquot_get_state(struct super_block *sb, struct qc_state *state)
{
	struct mem_dqinfo *mi;
	struct qc_type_state *tstate;
	struct quota_info *dqopt = sb_dqopt(sb);
	int type;

	memset(state, 0, sizeof(*state));
	for (type = 0; type < MAXQUOTAS; type++) {
		if (!sb_has_quota_active(sb, type))
			continue;
		tstate = state->s_state + type;
		mi = sb_dqopt(sb)->info + type;
		tstate->flags = QCI_ACCT_ENABLED;
		spin_lock(&dq_data_lock);
		if (mi->dqi_flags & DQF_SYS_FILE)
			tstate->flags |= QCI_SYSFILE;
		if (mi->dqi_flags & DQF_ROOT_SQUASH)
			tstate->flags |= QCI_ROOT_SQUASH;
		if (sb_has_quota_limits_enabled(sb, type))
			tstate->flags |= QCI_LIMITS_ENFORCED;
		tstate->spc_timelimit = mi->dqi_bgrace;
		tstate->ino_timelimit = mi->dqi_igrace;
		if (dqopt->files[type]) {
			tstate->ino = dqopt->files[type]->i_ino;
			tstate->blocks = dqopt->files[type]->i_blocks;
		}
		tstate->nextents = 1;	/* We don't know... */
		spin_unlock(&dq_data_lock);
	}
	return 0;
}
EXPORT_SYMBOL(dquot_get_state);

/* Generic routine for setting common part of quota file information */
int dquot_set_dqinfo(struct super_block *sb, int type, struct qc_info *ii)
{
	struct mem_dqinfo *mi;

	if ((ii->i_fieldmask & QC_WARNS_MASK) ||
	    (ii->i_fieldmask & QC_RT_SPC_TIMER))
		return -EINVAL;
	if (!sb_has_quota_active(sb, type))
		return -ESRCH;
	mi = sb_dqopt(sb)->info + type;
	if (ii->i_fieldmask & QC_FLAGS) {
		if ((ii->i_flags & QCI_ROOT_SQUASH &&
		     mi->dqi_format->qf_fmt_id != QFMT_VFS_OLD))
			return -EINVAL;
	}
	spin_lock(&dq_data_lock);
	if (ii->i_fieldmask & QC_SPC_TIMER)
		mi->dqi_bgrace = ii->i_spc_timelimit;
	if (ii->i_fieldmask & QC_INO_TIMER)
		mi->dqi_igrace = ii->i_ino_timelimit;
	if (ii->i_fieldmask & QC_FLAGS) {
		if (ii->i_flags & QCI_ROOT_SQUASH)
			mi->dqi_flags |= DQF_ROOT_SQUASH;
		else
			mi->dqi_flags &= ~DQF_ROOT_SQUASH;
	}
	spin_unlock(&dq_data_lock);
	mark_info_dirty(sb, type);
	/* Force write to disk */
	return sb->dq_op->write_info(sb, type);
}
EXPORT_SYMBOL(dquot_set_dqinfo);

const struct quotactl_ops dquot_quotactl_sysfile_ops = {
	.quota_enable	= dquot_quota_enable,
	.quota_disable	= dquot_quota_disable,
	.quota_sync	= dquot_quota_sync,
	.get_state	= dquot_get_state,
	.set_info	= dquot_set_dqinfo,
	.get_dqblk	= dquot_get_dqblk,
	.get_nextdqblk	= dquot_get_next_dqblk,
	.set_dqblk	= dquot_set_dqblk
};
EXPORT_SYMBOL(dquot_quotactl_sysfile_ops);
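
/*
 * Example (sketch): filesystems that keep quota in hidden system inodes
 * mark the superblock with DQUOT_QUOTA_SYS_FILE before loading quota;
 * the quota_enable/quota_disable methods above only operate when that
 * flag is set. The lookup of the internal inode is left out and all
 * names here are illustrative.
 */
static int __maybe_unused example_enable_sys_quota(struct super_block *sb,
						   struct inode *quota_inode,
						   int type)
{
	/* Quota files are filesystem metadata, invisible to userspace */
	sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE;
	return dquot_load_quota_inode(quota_inode, type, QFMT_VFS_V1,
				      DQUOT_USAGE_ENABLED |
				      DQUOT_LIMITS_ENABLED);
}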

static int do_proc_dqstats(struct ctl_table *table, int write,
		     void *buffer, size_t *lenp, loff_t *ppos)
{
	unsigned int type = (unsigned long *)table->data - dqstats.stat;
	s64 value = percpu_counter_sum(&dqstats.counter[type]);

	/* Filter negative values for non-monotonic counters */
	if (value < 0 && (type == DQST_ALLOC_DQUOTS ||
			  type == DQST_FREE_DQUOTS))
		value = 0;

	/* Update global table */
	dqstats.stat[type] = value;
	return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
}

static struct ctl_table fs_dqstats_table[] = {
	{
		.procname	= "lookups",
		.data		= &dqstats.stat[DQST_LOOKUPS],
		.maxlen		= sizeof(unsigned long),
		.mode		= 0444,
		.proc_handler	= do_proc_dqstats,
	},
	{
		.procname	= "drops",
		.data		= &dqstats.stat[DQST_DROPS],
		.maxlen		= sizeof(unsigned long),
		.mode		= 0444,
		.proc_handler	= do_proc_dqstats,
	},
	{
		.procname	= "reads",
		.data		= &dqstats.stat[DQST_READS],
		.maxlen		= sizeof(unsigned long),
		.mode		= 0444,
		.proc_handler	= do_proc_dqstats,
	},
	{
		.procname	= "writes",
		.data		= &dqstats.stat[DQST_WRITES],
		.maxlen		= sizeof(unsigned long),
		.mode		= 0444,
		.proc_handler	= do_proc_dqstats,
	},
	{
		.procname	= "cache_hits",
		.data		= &dqstats.stat[DQST_CACHE_HITS],
		.maxlen		= sizeof(unsigned long),
		.mode		= 0444,
		.proc_handler	= do_proc_dqstats,
	},
	{
		.procname	= "allocated_dquots",
		.data		= &dqstats.stat[DQST_ALLOC_DQUOTS],
		.maxlen		= sizeof(unsigned long),
		.mode		= 0444,
		.proc_handler	= do_proc_dqstats,
	},
	{
		.procname	= "free_dquots",
		.data		= &dqstats.stat[DQST_FREE_DQUOTS],
		.maxlen		= sizeof(unsigned long),
		.mode		= 0444,
		.proc_handler	= do_proc_dqstats,
	},
	{
		.procname	= "syncs",
		.data		= &dqstats.stat[DQST_SYNCS],
		.maxlen		= sizeof(unsigned long),
		.mode		= 0444,
		.proc_handler	= do_proc_dqstats,
	},
#ifdef CONFIG_PRINT_QUOTA_WARNING
	{
		.procname	= "warnings",
		.data		= &flag_print_warnings,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
#endif
	{ },
};

static int __init dquot_init(void)
{
	int i, ret;
	unsigned long nr_hash, order;
	struct shrinker *dqcache_shrinker;

	printk(KERN_NOTICE "VFS: Disk quotas %s\n", __DQUOT_VERSION__);

	register_sysctl_init("fs/quota", fs_dqstats_table);

	dquot_cachep = kmem_cache_create("dquot",
			sizeof(struct dquot), sizeof(unsigned long) * 4,
			(SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
				SLAB_PANIC),
			NULL);

	order = 0;
	dquot_hash = (struct hlist_head *)__get_free_pages(GFP_KERNEL, order);
	if (!dquot_hash)
		panic("Cannot create dquot hash table");

	for (i = 0; i < _DQST_DQSTAT_LAST; i++) {
		ret = percpu_counter_init(&dqstats.counter[i], 0, GFP_KERNEL);
		if (ret)
			panic("Cannot create dquot stat counters");
	}

	/* Find power-of-two hlist_heads which can fit into allocation */
	nr_hash = (1UL << order) * PAGE_SIZE / sizeof(struct hlist_head);
	dq_hash_bits = ilog2(nr_hash);

	nr_hash = 1UL << dq_hash_bits;
	dq_hash_mask = nr_hash - 1;
	for (i = 0; i < nr_hash; i++)
		INIT_HLIST_HEAD(dquot_hash + i);

	pr_info("VFS: Dquot-cache hash table entries: %ld (order %ld,"
		" %ld bytes)\n", nr_hash, order, (PAGE_SIZE << order));

	dqcache_shrinker = shrinker_alloc(0, "dquota-cache");
	if (!dqcache_shrinker)
		panic("Cannot allocate dquot shrinker");

	dqcache_shrinker->count_objects = dqcache_shrink_count;
	dqcache_shrinker->scan_objects = dqcache_shrink_scan;

	shrinker_register(dqcache_shrinker);

	return 0;
}
fs_initcall(dquot_init);