1 // SPDX-License-Identifier: GPL-2.0-only
/*
 * vfsv0 quota IO operations on file
 */
6 #include <linux/errno.h>
8 #include <linux/mount.h>
9 #include <linux/dqblk_v2.h>
10 #include <linux/kernel.h>
11 #include <linux/init.h>
12 #include <linux/module.h>
13 #include <linux/slab.h>
14 #include <linux/quotaops.h>
16 #include <asm/byteorder.h>
18 #include "quota_tree.h"
/* Module metadata for the shared quota-tree (trie) format support. */
20 MODULE_AUTHOR("Jan Kara");
21 MODULE_DESCRIPTION("Quota trie support");
22 MODULE_LICENSE("GPL");
/* Compile in extra internal consistency checks used below. */
24 #define __QUOTA_QT_PARANOIA
/*
 * Compute the reference-array index for on-disk quota @id inside the tree
 * block at level @depth.  epb = 32-bit references per usable block.
 * NOTE(review): body appears truncated in this extract — the arithmetic that
 * reduces @id to an index and the return statement are not visible; confirm
 * against the full file.
 */
26 static int __get_index(struct qtree_mem_dqinfo *info, qid_t id, int depth)
28 unsigned int epb = info->dqi_usable_bs >> 2;
/* Convert tree depth (root-first) to the power-of-epb level (leaf-first). */
30 depth = info->dqi_qtree_depth - depth - 1;
/*
 * Same as __get_index() but takes a kernel-internal struct kqid, converting
 * it to the on-disk qid_t first (quota files use init_user_ns ids).
 */
36 static int get_index(struct qtree_mem_dqinfo *info, struct kqid qid, int depth)
38 qid_t id = from_kqid(&init_user_ns, qid);
40 return __get_index(info, id, depth);
43 /* Number of dquot entries that fit in one data block (after its header) */
44 static int qtree_dqstr_in_blk(struct qtree_mem_dqinfo *info)
46 return (info->dqi_usable_bs - sizeof(struct qt_disk_dqdbheader))
47 / info->dqi_entry_size;
/*
 * Read quota block @blk into @buf via the filesystem's quota_read hook.
 * The buffer is zeroed first so a short read leaves a defined tail.
 * Returns the quota_read result (bytes read or negative errno).
 */
50 static ssize_t read_blk(struct qtree_mem_dqinfo *info, uint blk, char *buf)
52 struct super_block *sb = info->dqi_sb;
54 memset(buf, 0, info->dqi_usable_bs);
55 return sb->s_op->quota_read(sb, info->dqi_type, buf,
56 info->dqi_usable_bs, (loff_t)blk << info->dqi_blocksize_bits);
/*
 * Write quota block @blk from @buf via the filesystem's quota_write hook.
 * A short write is treated as failure and logged.
 * NOTE(review): the declaration of @ret and the tail of the function
 * (error-code normalization and return) are missing from this extract.
 */
59 static ssize_t write_blk(struct qtree_mem_dqinfo *info, uint blk, char *buf)
61 struct super_block *sb = info->dqi_sb;
64 ret = sb->s_op->quota_write(sb, info->dqi_type, buf,
65 info->dqi_usable_bs, (loff_t)blk << info->dqi_blocksize_bits);
/* Anything other than a full-block write is an error. */
66 if (ret != info->dqi_usable_bs) {
67 quota_error(sb, "dquota write failed");
/*
 * Validate that an on-disk value @val (named @val_name for diagnostics)
 * lies within [min_val, max_val]; log a quota error when it does not.
 * NOTE(review): the return statements (-EUCLEAN on failure, 0 on success
 * in upstream) are not visible in this extract — confirm.
 */
74 static inline int do_check_range(struct super_block *sb, const char *val_name,
75 uint val, uint min_val, uint max_val)
77 if (val < min_val || val > max_val) {
78 quota_error(sb, "Getting %s %u out of range %u-%u",
79 val_name, val, min_val, max_val);
/*
 * Sanity-check the free-list links in a data-block header read from disk:
 * both dqdh_next_free and dqdh_prev_free must reference an existing block
 * (0 meaning "none").  Guards against corrupted/malicious quota files.
 */
86 static int check_dquot_block_header(struct qtree_mem_dqinfo *info,
87 struct qt_disk_dqdbheader *dh)
91 err = do_check_range(info->dqi_sb, "dqdh_next_free",
92 le32_to_cpu(dh->dqdh_next_free), 0,
93 info->dqi_blocks - 1);
96 err = do_check_range(info->dqi_sb, "dqdh_prev_free",
97 le32_to_cpu(dh->dqdh_prev_free), 0,
98 info->dqi_blocks - 1);
103 /* Remove an empty block from the free-block list and return its number */
104 static int get_free_dqblk(struct qtree_mem_dqinfo *info)
106 char *buf = kmalloc(info->dqi_usable_bs, GFP_NOFS);
107 struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf;
/* Fast path: reuse the head of the on-disk free-block list. */
112 if (info->dqi_free_blk) {
113 blk = info->dqi_free_blk;
114 ret = read_blk(info, blk, buf);
/* Validate the header before trusting its free-list links. */
117 ret = check_dquot_block_header(info, dh);
/* Pop the block: new list head is its next pointer. */
120 info->dqi_free_blk = le32_to_cpu(dh->dqdh_next_free);
/* Slow path (else branch): grow the quota file by one zeroed block. */
123 memset(buf, 0, info->dqi_usable_bs);
124 /* Writing the zeroed block assures the block is allocated on disk. */
125 ret = write_blk(info, info->dqi_blocks, buf);
128 blk = info->dqi_blocks++;
/* Free-list head / block count changed — persist info later. */
130 mark_info_dirty(info->dqi_sb, info->dqi_type);
137 /* Insert an empty block at the head of the free-block list */
138 static int put_free_dqblk(struct qtree_mem_dqinfo *info, char *buf, uint blk)
140 struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf;
/* Link the block in front of the current free-list head; it carries no
 * entries and no back-pointer (the list is singly used from the head). */
143 dh->dqdh_next_free = cpu_to_le32(info->dqi_free_blk);
144 dh->dqdh_prev_free = cpu_to_le32(0);
145 dh->dqdh_entries = cpu_to_le16(0);
146 err = write_blk(info, blk, buf);
149 info->dqi_free_blk = blk;
150 mark_info_dirty(info->dqi_sb, info->dqi_type);
154 /* Remove given block from the doubly-linked list of blocks with free entries */
155 static int remove_free_dqentry(struct qtree_mem_dqinfo *info, char *buf,
158 char *tmpbuf = kmalloc(info->dqi_usable_bs, GFP_NOFS);
159 struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf;
160 uint nextblk = le32_to_cpu(dh->dqdh_next_free);
161 uint prevblk = le32_to_cpu(dh->dqdh_prev_free);
/* Fix the successor's back-pointer to skip this block.
 * NOTE(review): the surrounding "if (nextblk)" guard and error paths are
 * missing from this extract. */
167 err = read_blk(info, nextblk, tmpbuf);
170 ((struct qt_disk_dqdbheader *)tmpbuf)->dqdh_prev_free =
172 err = write_blk(info, nextblk, tmpbuf);
/* Fix the predecessor's forward pointer; when there is no predecessor
 * the list head (dqi_free_entry) is updated instead. */
177 err = read_blk(info, prevblk, tmpbuf);
180 ((struct qt_disk_dqdbheader *)tmpbuf)->dqdh_next_free =
182 err = write_blk(info, prevblk, tmpbuf);
186 info->dqi_free_entry = nextblk;
187 mark_info_dirty(info->dqi_sb, info->dqi_type);
/* Clear this block's own links now that it is unlinked. */
190 dh->dqdh_next_free = dh->dqdh_prev_free = cpu_to_le32(0);
191 /* No matter whether the write succeeds, the block is out of the list */
192 if (write_blk(info, blk, buf) < 0)
193 quota_error(info->dqi_sb, "Can't write block (%u) "
194 "with free entries", blk);
201 /* Insert given block at the beginning of the list of blocks with free entries */
202 static int insert_free_dqentry(struct qtree_mem_dqinfo *info, char *buf,
205 char *tmpbuf = kmalloc(info->dqi_usable_bs, GFP_NOFS);
206 struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf;
/* New head: point forward at the old head, no predecessor. */
211 dh->dqdh_next_free = cpu_to_le32(info->dqi_free_entry);
212 dh->dqdh_prev_free = cpu_to_le32(0);
213 err = write_blk(info, blk, buf);
/* If the list was non-empty, fix the old head's back-pointer. */
216 if (info->dqi_free_entry) {
217 err = read_blk(info, info->dqi_free_entry, tmpbuf);
220 ((struct qt_disk_dqdbheader *)tmpbuf)->dqdh_prev_free =
222 err = write_blk(info, info->dqi_free_entry, tmpbuf);
227 info->dqi_free_entry = blk;
228 mark_info_dirty(info->dqi_sb, info->dqi_type);
235 /* Is the dquot entry at @disk free (i.e. entirely zero bytes)? */
236 int qtree_entry_unused(struct qtree_mem_dqinfo *info, char *disk)
/* Scan every byte of the entry.
 * NOTE(review): the loop body (the non-zero check and returns) is missing
 * from this extract — upstream returns 0 on the first non-zero byte. */
240 for (i = 0; i < info->dqi_entry_size; i++)
245 EXPORT_SYMBOL(qtree_entry_unused);
247 /* Find space for a dquot entry; returns the data block number, sets *err */
248 static uint find_free_dqentry(struct qtree_mem_dqinfo *info,
249 struct dquot *dquot, int *err)
252 struct qt_disk_dqdbheader *dh;
253 char *buf = kmalloc(info->dqi_usable_bs, GFP_NOFS);
261 dh = (struct qt_disk_dqdbheader *)buf;
/* Prefer a block already known to have a free entry. */
262 if (info->dqi_free_entry) {
263 blk = info->dqi_free_entry;
264 *err = read_blk(info, blk, buf);
267 *err = check_dquot_block_header(info, dh);
/* Otherwise allocate a fresh (zeroed) data block. */
271 blk = get_free_dqblk(info);
277 memset(buf, 0, info->dqi_usable_bs);
278 /* This is enough as the block is already zeroed and the entry
279 * list is empty... */
280 info->dqi_free_entry = blk;
281 mark_info_dirty(dquot->dq_sb, dquot->dq_id.type);
283 /* Will the block be full after taking this entry? Then unlink it
284 * from the free-entry list before use. */
284 if (le16_to_cpu(dh->dqdh_entries) + 1 >= qtree_dqstr_in_blk(info)) {
285 *err = remove_free_dqentry(info, buf, blk);
287 quota_error(dquot->dq_sb, "Can't remove block (%u) "
288 "from entry free list", blk);
/* Claim one entry slot in the header count. */
292 le16_add_cpu(&dh->dqdh_entries, 1);
293 /* Find the first free structure in the block */
294 ddquot = buf + sizeof(struct qt_disk_dqdbheader);
295 for (i = 0; i < qtree_dqstr_in_blk(info); i++) {
296 if (qtree_entry_unused(info, ddquot))
298 ddquot += info->dqi_entry_size;
300 #ifdef __QUOTA_QT_PARANOIA
/* The header said there was room — not finding a slot means corruption. */
301 if (i == qtree_dqstr_in_blk(info)) {
302 quota_error(dquot->dq_sb, "Data block full but it shouldn't");
/* Persist the updated header/entry count. */
307 *err = write_blk(info, blk, buf);
309 quota_error(dquot->dq_sb, "Can't write quota data block %u",
/* Record the byte offset of the claimed entry in the quota file. */
313 dquot->dq_off = ((loff_t)blk << info->dqi_blocksize_bits) +
314 sizeof(struct qt_disk_dqdbheader) +
315 i * info->dqi_entry_size;
323 /* Insert a reference to the dquot structure into the trie (recursive) */
324 static int do_insert_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot,
325 uint *treeblk, int depth)
327 char *buf = kmalloc(info->dqi_usable_bs, GFP_NOFS);
/* newson: a child reference was created; newact: *treeblk itself was
 * freshly allocated (must be released again on failure). */
328 int ret = 0, newson = 0, newact = 0;
/* *treeblk == 0 means this tree block does not exist yet — allocate it. */
335 ret = get_free_dqblk(info);
339 memset(buf, 0, info->dqi_usable_bs);
/* Existing block: read it so we can follow/patch the reference. */
342 ret = read_blk(info, *treeblk, buf);
344 quota_error(dquot->dq_sb, "Can't read tree quota "
345 "block %u", *treeblk);
350 newblk = le32_to_cpu(ref[get_index(info, dquot->dq_id, depth)]);
/* At the last tree level the reference points at a data block. */
353 if (depth == info->dqi_qtree_depth - 1) {
354 #ifdef __QUOTA_QT_PARANOIA
/* Inserting over a live reference would corrupt the tree. */
356 quota_error(dquot->dq_sb, "Inserting already present "
357 "quota entry (block %u)",
358 le32_to_cpu(ref[get_index(info,
359 dquot->dq_id, depth)]));
364 newblk = find_free_dqentry(info, dquot, &ret);
/* Interior level: recurse one level down. */
366 ret = do_insert_tree(info, dquot, &newblk, depth+1);
/* A new child was attached — write the updated reference array. */
368 if (newson && ret >= 0) {
369 ref[get_index(info, dquot->dq_id, depth)] =
371 ret = write_blk(info, *treeblk, buf);
372 } else if (newact && ret < 0) {
/* Failure below us: release the tree block we just allocated. */
373 put_free_dqblk(info, buf, *treeblk);
380 /* Wrapper for inserting a quota structure into the tree, starting at root */
381 static inline int dq_insert_tree(struct qtree_mem_dqinfo *info,
/* QT_TREEOFF is the block number of the tree root. */
384 int tmp = QT_TREEOFF;
386 #ifdef __QUOTA_QT_PARANOIA
/* The quota file must at least contain the root block. */
387 if (info->dqi_blocks <= QT_TREEOFF) {
388 quota_error(dquot->dq_sb, "Quota tree root isn't allocated!");
392 return do_insert_tree(info, dquot, &tmp, 0);
396 * Write a dquot to the quota file.  We don't have to be afraid of
 * deadlocks as we never have quotas on quota files themselves.
399 int qtree_write_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
401 int type = dquot->dq_id.type;
402 struct super_block *sb = dquot->dq_sb;
404 char *ddquot = kmalloc(info->dqi_entry_size, GFP_NOFS);
409 /* dq_off is guarded by dqio_sem */
/* No on-disk slot yet: insert the dquot into the tree first. */
410 if (!dquot->dq_off) {
411 ret = dq_insert_tree(info, dquot);
413 quota_error(sb, "Error %zd occurred while creating "
/* Serialize the in-memory dquot into the on-disk format under the
 * dqb lock so we snapshot a consistent state. */
419 spin_lock(&dquot->dq_dqb_lock);
420 info->dqi_ops->mem2disk_dqblk(ddquot, dquot);
421 spin_unlock(&dquot->dq_dqb_lock);
422 ret = sb->s_op->quota_write(sb, type, ddquot, info->dqi_entry_size,
/* A short write is a failure. */
424 if (ret != info->dqi_entry_size) {
425 quota_error(sb, "dquota write failed");
431 dqstats_inc(DQST_WRITES);
436 EXPORT_SYMBOL(qtree_write_dquot);
438 /* Free a dquot entry in data block @blk */
439 static int free_dqentry(struct qtree_mem_dqinfo *info, struct dquot *dquot,
442 struct qt_disk_dqdbheader *dh;
443 char *buf = kmalloc(info->dqi_usable_bs, GFP_NOFS);
/* Cross-check: dq_off must actually fall inside @blk. */
448 if (dquot->dq_off >> info->dqi_blocksize_bits != blk) {
449 quota_error(dquot->dq_sb, "Quota structure has offset to "
450 "other block (%u) than it should (%u)", blk,
451 (uint)(dquot->dq_off >> info->dqi_blocksize_bits));
455 ret = read_blk(info, blk, buf);
457 quota_error(dquot->dq_sb, "Can't read quota data block %u",
461 dh = (struct qt_disk_dqdbheader *)buf;
462 ret = check_dquot_block_header(info, dh);
/* Drop one entry from the header's count. */
465 le16_add_cpu(&dh->dqdh_entries, -1);
466 if (!le16_to_cpu(dh->dqdh_entries)) { /* Block got completely free? */
/* Unlink from the free-entry list and return to the free-block pool. */
467 ret = remove_free_dqentry(info, buf, blk);
469 ret = put_free_dqblk(info, buf, blk);
471 quota_error(dquot->dq_sb, "Can't move quota data block "
472 "(%u) to free list", blk);
/* Block still has entries: zero out just this entry in place. */
477 (dquot->dq_off & ((1 << info->dqi_blocksize_bits) - 1)),
478 0, info->dqi_entry_size);
/* If the block was full before this removal, it now has a free
 * entry and must rejoin the free-entry list. */
479 if (le16_to_cpu(dh->dqdh_entries) ==
480 qtree_dqstr_in_blk(info) - 1) {
481 /* Insert will write the block itself */
482 ret = insert_free_dqentry(info, buf, blk);
484 quota_error(dquot->dq_sb, "Can't insert quota "
485 "data block (%u) to free entry list", blk);
489 ret = write_blk(info, blk, buf);
491 quota_error(dquot->dq_sb, "Can't write quota "
492 "data block %u", blk);
497 dquot->dq_off = 0; /* Quota is now unattached */
503 /* Remove the reference to a dquot from the tree (recursive) */
504 static int remove_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot,
505 uint *blk, int depth)
507 char *buf = kmalloc(info->dqi_usable_bs, GFP_NOFS);
510 __le32 *ref = (__le32 *)buf;
514 ret = read_blk(info, *blk, buf);
516 quota_error(dquot->dq_sb, "Can't read quota data block %u",
520 newblk = le32_to_cpu(ref[get_index(info, dquot->dq_id, depth)]);
/* Validate the on-disk reference before following it. */
521 if (newblk < QT_TREEOFF || newblk >= info->dqi_blocks) {
522 quota_error(dquot->dq_sb, "Getting block too big (%u >= %u)",
523 newblk, info->dqi_blocks);
/* Leaf level: the reference points to a data block holding the entry. */
528 if (depth == info->dqi_qtree_depth - 1) {
529 ret = free_dqentry(info, dquot, newblk);
532 ret = remove_tree(info, dquot, &newblk, depth+1);
/* Child disappeared (newblk cleared to 0): clear our reference too. */
534 if (ret >= 0 && !newblk) {
536 ref[get_index(info, dquot->dq_id, depth)] = cpu_to_le32(0);
537 /* Did the whole reference array become empty? */
538 for (i = 0; i < (info->dqi_usable_bs >> 2) && !ref[i]; i++)
540 /* Don't put the root block into the free block list */
541 if (i == (info->dqi_usable_bs >> 2)
542 && *blk != QT_TREEOFF) {
543 put_free_dqblk(info, buf, *blk);
/* Otherwise just persist the cleared reference. */
546 ret = write_blk(info, *blk, buf);
548 quota_error(dquot->dq_sb,
549 "Can't write quota tree block %u",
558 /* Delete a dquot from the tree, starting the walk at the root block */
559 int qtree_delete_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
561 uint tmp = QT_TREEOFF;
/* dq_off == 0 means the dquot was never written — nothing to remove. */
563 if (!dquot->dq_off) /* Even not allocated? */
565 return remove_tree(info, dquot, &tmp, 0);
567 EXPORT_SYMBOL(qtree_delete_dquot);
569 /* Find the entry for @dquot in data block @blk; returns its file offset */
570 static loff_t find_block_dqentry(struct qtree_mem_dqinfo *info,
571 struct dquot *dquot, uint blk)
573 char *buf = kmalloc(info->dqi_usable_bs, GFP_NOFS);
580 ret = read_blk(info, blk, buf);
582 quota_error(dquot->dq_sb, "Can't read quota tree "
/* Linearly scan the entries after the block header for a matching id. */
586 ddquot = buf + sizeof(struct qt_disk_dqdbheader);
587 for (i = 0; i < qtree_dqstr_in_blk(info); i++) {
588 if (info->dqi_ops->is_id(ddquot, dquot))
590 ddquot += info->dqi_entry_size;
/* The tree referenced this block, so a missing entry means corruption. */
592 if (i == qtree_dqstr_in_blk(info)) {
593 quota_error(dquot->dq_sb,
594 "Quota for id %u referenced but not present",
595 from_kqid(&init_user_ns, dquot->dq_id));
/* Translate (block, slot index) into an absolute file offset. */
599 ret = ((loff_t)blk << info->dqi_blocksize_bits) + sizeof(struct
600 qt_disk_dqdbheader) + i * info->dqi_entry_size;
607 /* Find the entry for a given id in the tree (recursive descent) */
608 static loff_t find_tree_dqentry(struct qtree_mem_dqinfo *info,
609 struct dquot *dquot, uint blk, int depth)
611 char *buf = kmalloc(info->dqi_usable_bs, GFP_NOFS);
613 __le32 *ref = (__le32 *)buf;
617 ret = read_blk(info, blk, buf);
619 quota_error(dquot->dq_sb, "Can't read quota tree block %u",
/* Follow the reference for this id at the current level. */
624 blk = le32_to_cpu(ref[get_index(info, dquot->dq_id, depth)]);
625 if (!blk) /* No reference? Entry does not exist. */
/* Validate the on-disk block number before recursing into it. */
627 if (blk < QT_TREEOFF || blk >= info->dqi_blocks) {
628 quota_error(dquot->dq_sb, "Getting block too big (%u >= %u)",
629 blk, info->dqi_blocks);
/* Interior level → keep descending; last level → scan the data block. */
634 if (depth < info->dqi_qtree_depth - 1)
635 ret = find_tree_dqentry(info, dquot, blk, depth+1);
637 ret = find_block_dqentry(info, dquot, blk);
643 /* Find the file offset of the entry for a given id — wrapper from the root */
644 static inline loff_t find_dqentry(struct qtree_mem_dqinfo *info,
647 return find_tree_dqentry(info, dquot, QT_TREEOFF, 0);
/*
 * Read a dquot from the quota file into memory.  Locates the entry if its
 * offset is unknown, reads the on-disk structure, and converts it to the
 * in-memory representation.  A missing entry yields a zeroed, DQ_FAKE dquot.
 */
650 int qtree_read_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
652 int type = dquot->dq_id.type;
653 struct super_block *sb = dquot->dq_sb;
658 #ifdef __QUOTA_QT_PARANOIA
659 /* Invalidated quota? */
660 if (!sb_dqopt(dquot->dq_sb)->files[type]) {
661 quota_error(sb, "Quota invalidated while reading!");
665 /* Do we know the offset of the dquot entry in the quota file? */
666 if (!dquot->dq_off) {
667 offset = find_dqentry(info, dquot);
668 if (offset <= 0) { /* Entry not present? */
670 quota_error(sb,"Can't read quota structure "
672 from_kqid(&init_user_ns,
/* Not on disk: mark fake and hand back zeroed limits/usage. */
675 set_bit(DQ_FAKE_B, &dquot->dq_flags);
676 memset(&dquot->dq_dqb, 0, sizeof(struct mem_dqblk));
680 dquot->dq_off = offset;
682 ddquot = kmalloc(info->dqi_entry_size, GFP_NOFS);
685 ret = sb->s_op->quota_read(sb, type, ddquot, info->dqi_entry_size,
/* Short read is an error: fall back to a zeroed fake dquot. */
687 if (ret != info->dqi_entry_size) {
690 quota_error(sb, "Error while reading quota structure for id %u",
691 from_kqid(&init_user_ns, dquot->dq_id));
692 set_bit(DQ_FAKE_B, &dquot->dq_flags);
693 memset(&dquot->dq_dqb, 0, sizeof(struct mem_dqblk));
/* Convert disk → memory format under the dqb lock; an entry with no
 * limits at all is treated as fake (exists only to carry usage). */
697 spin_lock(&dquot->dq_dqb_lock);
698 info->dqi_ops->disk2mem_dqblk(dquot, ddquot);
699 if (!dquot->dq_dqb.dqb_bhardlimit &&
700 !dquot->dq_dqb.dqb_bsoftlimit &&
701 !dquot->dq_dqb.dqb_ihardlimit &&
702 !dquot->dq_dqb.dqb_isoftlimit)
703 set_bit(DQ_FAKE_B, &dquot->dq_flags);
704 spin_unlock(&dquot->dq_dqb_lock);
707 dqstats_inc(DQST_READS);
710 EXPORT_SYMBOL(qtree_read_dquot);
712 /* Check whether the dquot should be deleted from disk. We know we are
713 * the only one operating on the dquot (thanks to dq_lock) */
714 int qtree_release_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
/* A fake dquot with no current usage holds no information — delete it. */
716 if (test_bit(DQ_FAKE_B, &dquot->dq_flags) &&
717 !(dquot->dq_dqb.dqb_curinodes | dquot->dq_dqb.dqb_curspace))
718 return qtree_delete_dquot(info, dquot);
721 EXPORT_SYMBOL(qtree_release_dquot);
/*
 * Find the next allocated quota id >= *id by walking the tree from @blk at
 * @depth; on success *id is advanced to the found id.  Used by Q_GETNEXTQUOTA.
 */
723 static int find_next_id(struct qtree_mem_dqinfo *info, qid_t *id,
724 unsigned int blk, int depth)
726 char *buf = kmalloc(info->dqi_usable_bs, GFP_NOFS);
727 __le32 *ref = (__le32 *)buf;
729 unsigned int epb = info->dqi_usable_bs >> 2;
/* level_inc = number of ids spanned by one reference at this depth. */
730 unsigned int level_inc = 1;
736 for (i = depth; i < info->dqi_qtree_depth - 1; i++)
739 ret = read_blk(info, blk, buf);
741 quota_error(info->dqi_sb,
742 "Can't read quota tree block %u", blk);
/* Scan references starting at the slot covering *id. */
745 for (i = __get_index(info, *id, depth); i < epb; i++) {
/* Empty subtree: skip ahead by level_inc ids.
 * NOTE(review): the id-advance statements are missing in this extract. */
746 if (ref[i] == cpu_to_le32(0)) {
750 if (depth == info->dqi_qtree_depth - 1) {
/* Non-empty interior reference: recurse one level down. */
754 ret = find_next_id(info, id, le32_to_cpu(ref[i]), depth + 1);
/* Public wrapper: find the next existing quota id >= *qid, updating *qid. */
767 int qtree_get_next_id(struct qtree_mem_dqinfo *info, struct kqid *qid)
769 qid_t id = from_kqid(&init_user_ns, *qid);
772 ret = find_next_id(info, &id, QT_TREEOFF, 0);
/* Convert the found on-disk id back into a kernel kqid of the same type. */
775 *qid = make_kqid(&init_user_ns, qid->type, id);
778 EXPORT_SYMBOL(qtree_get_next_id);