// SPDX-License-Identifier: GPL-2.0+
/*
 * cpfile.c - NILFS checkpoint file.
 *
 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
 *
 * Written by Koji Sato.
 */

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/errno.h>
#include <linux/nilfs2_api.h>		/* nilfs_cpstat */
#include <linux/nilfs2_ondisk.h>	/* nilfs_inode, nilfs_checkpoint */
#include "mdt.h"
#include "cpfile.h"

static inline unsigned long
nilfs_cpfile_checkpoints_per_block(const struct inode *cpfile)
{
	return NILFS_MDT(cpfile)->mi_entries_per_block;
}

/* block number from the beginning of the file */
static unsigned long
nilfs_cpfile_get_blkoff(const struct inode *cpfile, __u64 cno)
{
	__u64 tcno = cno + NILFS_MDT(cpfile)->mi_first_entry_offset - 1;

	do_div(tcno, nilfs_cpfile_checkpoints_per_block(cpfile));
	return (unsigned long)tcno;
}

/* offset in block */
static unsigned long
nilfs_cpfile_get_offset(const struct inode *cpfile, __u64 cno)
{
	__u64 tcno = cno + NILFS_MDT(cpfile)->mi_first_entry_offset - 1;

	return do_div(tcno, nilfs_cpfile_checkpoints_per_block(cpfile));
}

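/*
 * Layout note: checkpoint numbers map linearly onto cpfile blocks.
 * With cpb = mi_entries_per_block and ofs = mi_first_entry_offset,
 * checkpoint cno lives in block (cno + ofs - 1) / cpb at slot
 * (cno + ofs - 1) % cpb.  For a purely hypothetical geometry of
 * cpb = 8 and ofs = 1, checkpoint 8 would land in block 1, slot 0.
 */
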
static __u64 nilfs_cpfile_first_checkpoint_in_block(const struct inode *cpfile,
						    unsigned long blkoff)
{
	return (__u64)nilfs_cpfile_checkpoints_per_block(cpfile) * blkoff
		+ 1 - NILFS_MDT(cpfile)->mi_first_entry_offset;
}

static unsigned long
nilfs_cpfile_checkpoints_in_block(const struct inode *cpfile,
				  __u64 curr,
				  __u64 max)
{
	return min_t(__u64,
		     nilfs_cpfile_checkpoints_per_block(cpfile) -
		     nilfs_cpfile_get_offset(cpfile, curr),
		     max - curr);
}

static inline int nilfs_cpfile_is_in_first(const struct inode *cpfile,
					   __u64 cno)
{
	return nilfs_cpfile_get_blkoff(cpfile, cno) == 0;
}

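/*
 * Note: the leading slots of block 0 are reserved through
 * mi_first_entry_offset (the cpfile header lives there), which appears
 * to be why the per-block valid-checkpoint counters below are only
 * maintained for blocks other than the first; hence the
 * nilfs_cpfile_is_in_first() checks in the callers.
 */
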
static unsigned int
nilfs_cpfile_block_add_valid_checkpoints(const struct inode *cpfile,
					 struct buffer_head *bh,
					 void *kaddr,
					 unsigned int n)
{
	struct nilfs_checkpoint *cp = kaddr + bh_offset(bh);
	unsigned int count;

	count = le32_to_cpu(cp->cp_checkpoints_count) + n;
	cp->cp_checkpoints_count = cpu_to_le32(count);
	return count;
}

static unsigned int
nilfs_cpfile_block_sub_valid_checkpoints(const struct inode *cpfile,
					 struct buffer_head *bh,
					 void *kaddr,
					 unsigned int n)
{
	struct nilfs_checkpoint *cp = kaddr + bh_offset(bh);
	unsigned int count;

	WARN_ON(le32_to_cpu(cp->cp_checkpoints_count) < n);
	count = le32_to_cpu(cp->cp_checkpoints_count) - n;
	cp->cp_checkpoints_count = cpu_to_le32(count);
	return count;
}

static inline struct nilfs_cpfile_header *
nilfs_cpfile_block_get_header(const struct inode *cpfile,
			      struct buffer_head *bh,
			      void *kaddr)
{
	return kaddr + bh_offset(bh);
}

static struct nilfs_checkpoint *
nilfs_cpfile_block_get_checkpoint(const struct inode *cpfile, __u64 cno,
				  struct buffer_head *bh,
				  void *kaddr)
{
	return kaddr + bh_offset(bh) + nilfs_cpfile_get_offset(cpfile, cno) *
		NILFS_MDT(cpfile)->mi_entry_size;
}

static void nilfs_cpfile_block_init(struct inode *cpfile,
				    struct buffer_head *bh,
				    void *kaddr)
{
	struct nilfs_checkpoint *cp = kaddr + bh_offset(bh);
	size_t cpsz = NILFS_MDT(cpfile)->mi_entry_size;
	int n = nilfs_cpfile_checkpoints_per_block(cpfile);

	while (n-- > 0) {
		nilfs_checkpoint_set_invalid(cp);
		cp = (void *)cp + cpsz;
	}
}

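/*
 * nilfs_cpfile_block_init() is used as the init callback of
 * nilfs_mdt_get_block() below, so every slot of a newly created
 * checkpoint block starts out marked invalid until a checkpoint is
 * actually stored in it.
 */
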
static inline int nilfs_cpfile_get_header_block(struct inode *cpfile,
						struct buffer_head **bhp)
{
	return nilfs_mdt_get_block(cpfile, 0, 0, NULL, bhp);
}

static inline int nilfs_cpfile_get_checkpoint_block(struct inode *cpfile,
						    __u64 cno,
						    int create,
						    struct buffer_head **bhp)
{
	return nilfs_mdt_get_block(cpfile,
				   nilfs_cpfile_get_blkoff(cpfile, cno),
				   create, nilfs_cpfile_block_init, bhp);
}

/**
 * nilfs_cpfile_find_checkpoint_block - find and get a buffer on cpfile
 * @cpfile: inode of cpfile
 * @start_cno: start checkpoint number (inclusive)
 * @end_cno: end checkpoint number (inclusive)
 * @cnop: place to store the next checkpoint number
 * @bhp: place to store a pointer to buffer_head struct
 *
 * Return Value: On success, it returns 0. On error, the following negative
 * error code is returned.
 *
 * %-ENOMEM - Insufficient memory available.
 *
 * %-EIO - I/O error
 *
 * %-ENOENT - no block exists in the range.
 */
static int nilfs_cpfile_find_checkpoint_block(struct inode *cpfile,
					      __u64 start_cno, __u64 end_cno,
					      __u64 *cnop,
					      struct buffer_head **bhp)
{
	unsigned long start, end, blkoff;
	int ret;

	if (unlikely(start_cno > end_cno))
		return -ENOENT;

	start = nilfs_cpfile_get_blkoff(cpfile, start_cno);
	end = nilfs_cpfile_get_blkoff(cpfile, end_cno);

	ret = nilfs_mdt_find_block(cpfile, start, end, &blkoff, bhp);
	if (!ret)
		*cnop = (blkoff == start) ? start_cno :
			nilfs_cpfile_first_checkpoint_in_block(cpfile, blkoff);
	return ret;
}

static inline int nilfs_cpfile_delete_checkpoint_block(struct inode *cpfile,
						       __u64 cno)
{
	return nilfs_mdt_delete_block(cpfile,
				      nilfs_cpfile_get_blkoff(cpfile, cno));
}

/**
 * nilfs_cpfile_get_checkpoint - get a checkpoint
 * @cpfile: inode of checkpoint file
 * @cno: checkpoint number
 * @create: create flag
 * @cpp: pointer to a checkpoint
 * @bhp: pointer to a buffer head
 *
 * Description: nilfs_cpfile_get_checkpoint() acquires the checkpoint
 * specified by @cno. A new checkpoint will be created if @cno is the current
 * checkpoint number and @create is nonzero.
 *
 * Return Value: On success, 0 is returned, and the checkpoint and the
 * buffer head of the buffer on which the checkpoint is located are stored in
 * the place pointed by @cpp and @bhp, respectively. On error, one of the
 * following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOENT - No such checkpoint.
 *
 * %-EINVAL - invalid checkpoint.
 */
int nilfs_cpfile_get_checkpoint(struct inode *cpfile,
				__u64 cno,
				int create,
				struct nilfs_checkpoint **cpp,
				struct buffer_head **bhp)
{
	struct buffer_head *header_bh, *cp_bh;
	struct nilfs_cpfile_header *header;
	struct nilfs_checkpoint *cp;
	void *kaddr;
	int ret;

	if (unlikely(cno < 1 || cno > nilfs_mdt_cno(cpfile) ||
		     (cno < nilfs_mdt_cno(cpfile) && create)))
		return -EINVAL;

	down_write(&NILFS_MDT(cpfile)->mi_sem);

	ret = nilfs_cpfile_get_header_block(cpfile, &header_bh);
	if (ret < 0)
		goto out_sem;
	ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, create, &cp_bh);
	if (ret < 0)
		goto out_header;
	kaddr = kmap(cp_bh->b_page);
	cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr);
	if (nilfs_checkpoint_invalid(cp)) {
		if (!create) {
			kunmap(cp_bh->b_page);
			brelse(cp_bh);
			ret = -ENOENT;
			goto out_header;
		}
		/* a newly-created checkpoint */
		nilfs_checkpoint_clear_invalid(cp);
		if (!nilfs_cpfile_is_in_first(cpfile, cno))
			nilfs_cpfile_block_add_valid_checkpoints(cpfile, cp_bh,
								 kaddr, 1);
		mark_buffer_dirty(cp_bh);

		kaddr = kmap_atomic(header_bh->b_page);
		header = nilfs_cpfile_block_get_header(cpfile, header_bh,
						       kaddr);
		le64_add_cpu(&header->ch_ncheckpoints, 1);
		kunmap_atomic(kaddr);
		mark_buffer_dirty(header_bh);
		nilfs_mdt_mark_dirty(cpfile);
	}

	if (cpp != NULL)
		*cpp = cp;
	*bhp = cp_bh;

 out_header:
	brelse(header_bh);

 out_sem:
	up_write(&NILFS_MDT(cpfile)->mi_sem);
	return ret;
}

/**
 * nilfs_cpfile_put_checkpoint - put a checkpoint
 * @cpfile: inode of checkpoint file
 * @cno: checkpoint number
 * @bh: buffer head
 *
 * Description: nilfs_cpfile_put_checkpoint() releases the checkpoint
 * specified by @cno. @bh must be the buffer head which has been returned by
 * a previous call to nilfs_cpfile_get_checkpoint() with @cno.
 */
void nilfs_cpfile_put_checkpoint(struct inode *cpfile, __u64 cno,
				 struct buffer_head *bh)
{
	kunmap(bh->b_page);
	brelse(bh);
}

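/*
 * Minimal usage sketch of the get/put pair above (illustrative only,
 * not a copy of an actual caller; error handling trimmed and
 * "nblk_inc" is a placeholder value):
 *
 *	struct nilfs_checkpoint *cp;
 *	struct buffer_head *bh;
 *	int err;
 *
 *	err = nilfs_cpfile_get_checkpoint(cpfile, cno, 1, &cp, &bh);
 *	if (!err) {
 *		cp->cp_nblk_inc = cpu_to_le64(nblk_inc);
 *		mark_buffer_dirty(bh);
 *		nilfs_cpfile_put_checkpoint(cpfile, cno, bh);
 *	}
 */
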
/**
 * nilfs_cpfile_delete_checkpoints - delete checkpoints
 * @cpfile: inode of checkpoint file
 * @start: start checkpoint number
 * @end: end checkpoint number
 *
 * Description: nilfs_cpfile_delete_checkpoints() deletes the checkpoints in
 * the period from @start to @end, excluding @end itself. The checkpoints
 * which have been already deleted are ignored.
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-EINVAL - invalid checkpoints.
 */
int nilfs_cpfile_delete_checkpoints(struct inode *cpfile,
				    __u64 start,
				    __u64 end)
{
	struct buffer_head *header_bh, *cp_bh;
	struct nilfs_cpfile_header *header;
	struct nilfs_checkpoint *cp;
	size_t cpsz = NILFS_MDT(cpfile)->mi_entry_size;
	__u64 cno;
	void *kaddr;
	unsigned long tnicps;
	int ret, ncps, nicps, nss, count, i;

	if (unlikely(start == 0 || start > end)) {
		nilfs_msg(cpfile->i_sb, KERN_ERR,
			  "cannot delete checkpoints: invalid range [%llu, %llu)",
			  (unsigned long long)start, (unsigned long long)end);
		return -EINVAL;
	}

	down_write(&NILFS_MDT(cpfile)->mi_sem);

	ret = nilfs_cpfile_get_header_block(cpfile, &header_bh);
	if (ret < 0)
		goto out_sem;
	tnicps = 0;
	nss = 0;

	for (cno = start; cno < end; cno += ncps) {
		ncps = nilfs_cpfile_checkpoints_in_block(cpfile, cno, end);
		ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &cp_bh);
		if (ret < 0) {
			if (ret != -ENOENT)
				break;
			/* skip hole */
			ret = 0;
			continue;
		}

		kaddr = kmap_atomic(cp_bh->b_page);
		cp = nilfs_cpfile_block_get_checkpoint(
			cpfile, cno, cp_bh, kaddr);
		nicps = 0;
		for (i = 0; i < ncps; i++, cp = (void *)cp + cpsz) {
			if (nilfs_checkpoint_snapshot(cp)) {
				nss++;
			} else if (!nilfs_checkpoint_invalid(cp)) {
				nilfs_checkpoint_set_invalid(cp);
				nicps++;
			}
		}
		if (nicps > 0) {
			tnicps += nicps;
			mark_buffer_dirty(cp_bh);
			nilfs_mdt_mark_dirty(cpfile);
			if (!nilfs_cpfile_is_in_first(cpfile, cno)) {
				count =
				  nilfs_cpfile_block_sub_valid_checkpoints(
						cpfile, cp_bh, kaddr, nicps);
				if (count == 0) {
					/* make hole */
					kunmap_atomic(kaddr);
					brelse(cp_bh);
					ret =
					  nilfs_cpfile_delete_checkpoint_block(
								   cpfile, cno);
					if (ret == 0)
						continue;
					nilfs_msg(cpfile->i_sb, KERN_ERR,
						  "error %d deleting checkpoint block",
						  ret);
					break;
				}
			}
		}

		kunmap_atomic(kaddr);
		brelse(cp_bh);
	}

	if (tnicps > 0) {
		kaddr = kmap_atomic(header_bh->b_page);
		header = nilfs_cpfile_block_get_header(cpfile, header_bh,
						       kaddr);
		le64_add_cpu(&header->ch_ncheckpoints, -(u64)tnicps);
		mark_buffer_dirty(header_bh);
		nilfs_mdt_mark_dirty(cpfile);
		kunmap_atomic(kaddr);
	}

	brelse(header_bh);
	if (nss > 0)
		ret = -EBUSY;

 out_sem:
	up_write(&NILFS_MDT(cpfile)->mi_sem);
	return ret;
}

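/*
 * The deletion loop above works one cpfile block at a time (ncps
 * checkpoints per pass), marks plain checkpoints invalid, leaves
 * snapshots alone (reported collectively as -EBUSY), and punches a
 * whole block out of the file once its valid-checkpoint count drops
 * to zero.
 */
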
static void nilfs_cpfile_checkpoint_to_cpinfo(struct inode *cpfile,
					      struct nilfs_checkpoint *cp,
					      struct nilfs_cpinfo *ci)
{
	ci->ci_flags = le32_to_cpu(cp->cp_flags);
	ci->ci_cno = le64_to_cpu(cp->cp_cno);
	ci->ci_create = le64_to_cpu(cp->cp_create);
	ci->ci_nblk_inc = le64_to_cpu(cp->cp_nblk_inc);
	ci->ci_inodes_count = le64_to_cpu(cp->cp_inodes_count);
	ci->ci_blocks_count = le64_to_cpu(cp->cp_blocks_count);
	ci->ci_next = le64_to_cpu(cp->cp_snapshot_list.ssl_next);
}

static ssize_t nilfs_cpfile_do_get_cpinfo(struct inode *cpfile, __u64 *cnop,
					  void *buf, unsigned int cisz,
					  size_t nci)
{
	struct nilfs_checkpoint *cp;
	struct nilfs_cpinfo *ci = buf;
	struct buffer_head *bh;
	size_t cpsz = NILFS_MDT(cpfile)->mi_entry_size;
	__u64 cur_cno = nilfs_mdt_cno(cpfile), cno = *cnop;
	void *kaddr;
	int n, ret;
	int ncps, i;

	if (cno == 0)
		return -ENOENT; /* checkpoint number 0 is invalid */
	down_read(&NILFS_MDT(cpfile)->mi_sem);

	for (n = 0; n < nci; cno += ncps) {
		ret = nilfs_cpfile_find_checkpoint_block(
			cpfile, cno, cur_cno - 1, &cno, &bh);
		if (ret < 0) {
			if (likely(ret == -ENOENT))
				break;
			goto out;
		}
		ncps = nilfs_cpfile_checkpoints_in_block(cpfile, cno, cur_cno);

		kaddr = kmap_atomic(bh->b_page);
		cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, bh, kaddr);
		for (i = 0; i < ncps && n < nci; i++, cp = (void *)cp + cpsz) {
			if (!nilfs_checkpoint_invalid(cp)) {
				nilfs_cpfile_checkpoint_to_cpinfo(cpfile, cp,
								  ci);
				ci = (void *)ci + cisz;
				n++;
			}
		}
		kunmap_atomic(kaddr);
		brelse(bh);
	}

	ret = n;
	if (n > 0) {
		ci = (void *)ci - cisz;
		*cnop = ci->ci_cno + 1;
	}

 out:
	up_read(&NILFS_MDT(cpfile)->mi_sem);
	return ret;
}

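/*
 * On return, *cnop has been advanced past the last checkpoint copied
 * out, so callers (e.g. the NILFS_IOCTL_GET_CPINFO path) can rescan
 * from the returned position and read all checkpoints in batches of
 * @nci.
 */
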
static ssize_t nilfs_cpfile_do_get_ssinfo(struct inode *cpfile, __u64 *cnop,
					  void *buf, unsigned int cisz,
					  size_t nci)
{
	struct buffer_head *bh;
	struct nilfs_cpfile_header *header;
	struct nilfs_checkpoint *cp;
	struct nilfs_cpinfo *ci = buf;
	__u64 curr = *cnop, next;
	unsigned long curr_blkoff, next_blkoff;
	void *kaddr;
	int n = 0, ret;

	down_read(&NILFS_MDT(cpfile)->mi_sem);

	if (curr == 0) {
		ret = nilfs_cpfile_get_header_block(cpfile, &bh);
		if (ret < 0)
			goto out;
		kaddr = kmap_atomic(bh->b_page);
		header = nilfs_cpfile_block_get_header(cpfile, bh, kaddr);
		curr = le64_to_cpu(header->ch_snapshot_list.ssl_next);
		kunmap_atomic(kaddr);
		brelse(bh);
		if (curr == 0) {
			ret = 0;
			goto out;
		}
	} else if (unlikely(curr == ~(__u64)0)) {
		ret = 0;
		goto out;
	}

	curr_blkoff = nilfs_cpfile_get_blkoff(cpfile, curr);
	ret = nilfs_cpfile_get_checkpoint_block(cpfile, curr, 0, &bh);
	if (unlikely(ret < 0)) {
		if (ret == -ENOENT)
			ret = 0; /* No snapshots (started from a hole block) */
		goto out;
	}
	kaddr = kmap_atomic(bh->b_page);
	while (n < nci) {
		cp = nilfs_cpfile_block_get_checkpoint(cpfile, curr, bh, kaddr);
		curr = ~(__u64)0; /* Terminator */
		if (unlikely(nilfs_checkpoint_invalid(cp) ||
			     !nilfs_checkpoint_snapshot(cp)))
			break;
		nilfs_cpfile_checkpoint_to_cpinfo(cpfile, cp, ci);
		ci = (void *)ci + cisz;
		n++;
		next = le64_to_cpu(cp->cp_snapshot_list.ssl_next);
		if (next == 0)
			break; /* reach end of the snapshot list */

		next_blkoff = nilfs_cpfile_get_blkoff(cpfile, next);
		if (curr_blkoff != next_blkoff) {
			kunmap_atomic(kaddr);
			brelse(bh);
			ret = nilfs_cpfile_get_checkpoint_block(cpfile, next,
								0, &bh);
			if (unlikely(ret < 0)) {
				WARN_ON(ret == -ENOENT);
				goto out;
			}
			kaddr = kmap_atomic(bh->b_page);
		}
		curr = next;
		curr_blkoff = next_blkoff;
	}
	kunmap_atomic(kaddr);
	brelse(bh);
	*cnop = curr;
	ret = n;

 out:
	up_read(&NILFS_MDT(cpfile)->mi_sem);
	return ret;
}

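/*
 * Snapshots form a doubly linked list anchored in the cpfile header;
 * this walker follows ssl_next and stores ~(__u64)0 in *cnop as a
 * terminator once the list is exhausted, so the next call returns 0
 * instead of rescanning from the start.
 */
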
/**
 * nilfs_cpfile_get_cpinfo - get information on checkpoints
 * @cpfile: inode of checkpoint file
 * @cnop: place to pass the next checkpoint number in and out
 * @mode: NILFS_CHECKPOINT or NILFS_SNAPSHOT
 * @buf: buffer for storing checkpoint information
 * @cisz: byte size of one checkpoint info item
 * @nci: maximum number of checkpoint info items to retrieve
 *
 * Return Value: On success, the number of checkpoint info items stored in
 * @buf is returned. On error, a negative error code is returned.
 */
ssize_t nilfs_cpfile_get_cpinfo(struct inode *cpfile, __u64 *cnop, int mode,
				void *buf, unsigned int cisz, size_t nci)
{
	switch (mode) {
	case NILFS_CHECKPOINT:
		return nilfs_cpfile_do_get_cpinfo(cpfile, cnop, buf, cisz, nci);
	case NILFS_SNAPSHOT:
		return nilfs_cpfile_do_get_ssinfo(cpfile, cnop, buf, cisz, nci);
	default:
		return -EINVAL;
	}
}

/**
 * nilfs_cpfile_delete_checkpoint - delete a checkpoint
 * @cpfile: inode of checkpoint file
 * @cno: checkpoint number to delete
 *
 * Return Value: 0 on success, %-ENOENT if no valid checkpoint with the
 * number @cno exists, %-EBUSY if it is a snapshot, or another negative
 * error code on failure.
 */
int nilfs_cpfile_delete_checkpoint(struct inode *cpfile, __u64 cno)
{
	struct nilfs_cpinfo ci;
	__u64 tcno = cno;
	ssize_t nci;

	nci = nilfs_cpfile_do_get_cpinfo(cpfile, &tcno, &ci, sizeof(ci), 1);
	if (nci < 0)
		return nci;
	else if (nci == 0 || ci.ci_cno != cno)
		return -ENOENT;
	else if (nilfs_cpinfo_snapshot(&ci))
		return -EBUSY;

	return nilfs_cpfile_delete_checkpoints(cpfile, cno, cno + 1);
}

static struct nilfs_snapshot_list *
nilfs_cpfile_block_get_snapshot_list(const struct inode *cpfile,
				     __u64 cno,
				     struct buffer_head *bh,
				     void *kaddr)
{
	struct nilfs_cpfile_header *header;
	struct nilfs_checkpoint *cp;
	struct nilfs_snapshot_list *list;

	if (cno != 0) {
		cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, bh, kaddr);
		list = &cp->cp_snapshot_list;
	} else {
		header = nilfs_cpfile_block_get_header(cpfile, bh, kaddr);
		list = &header->ch_snapshot_list;
	}
	return list;
}

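/*
 * A cno of 0 designates the list head embedded in the cpfile header
 * rather than a real checkpoint, which lets the snapshot list code
 * below treat the header and ordinary checkpoints uniformly.
 */
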
static int nilfs_cpfile_set_snapshot(struct inode *cpfile, __u64 cno)
{
	struct buffer_head *header_bh, *curr_bh, *prev_bh, *cp_bh;
	struct nilfs_cpfile_header *header;
	struct nilfs_checkpoint *cp;
	struct nilfs_snapshot_list *list;
	__u64 curr, prev;
	unsigned long curr_blkoff, prev_blkoff;
	void *kaddr;
	int ret;

	if (cno == 0)
		return -ENOENT; /* checkpoint number 0 is invalid */
	down_write(&NILFS_MDT(cpfile)->mi_sem);

	ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &cp_bh);
	if (ret < 0)
		goto out_sem;
	kaddr = kmap_atomic(cp_bh->b_page);
	cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr);
	if (nilfs_checkpoint_invalid(cp)) {
		ret = -ENOENT;
		kunmap_atomic(kaddr);
		goto out_cp;
	}
	if (nilfs_checkpoint_snapshot(cp)) {
		ret = 0;
		kunmap_atomic(kaddr);
		goto out_cp;
	}
	kunmap_atomic(kaddr);

	ret = nilfs_cpfile_get_header_block(cpfile, &header_bh);
	if (ret < 0)
		goto out_cp;
	kaddr = kmap_atomic(header_bh->b_page);
	header = nilfs_cpfile_block_get_header(cpfile, header_bh, kaddr);
	list = &header->ch_snapshot_list;
	curr_bh = header_bh;
	get_bh(curr_bh);
	curr = 0;
	curr_blkoff = 0;
	prev = le64_to_cpu(list->ssl_prev);
	while (prev > cno) {
		prev_blkoff = nilfs_cpfile_get_blkoff(cpfile, prev);
		curr = prev;
		if (curr_blkoff != prev_blkoff) {
			kunmap_atomic(kaddr);
			brelse(curr_bh);
			ret = nilfs_cpfile_get_checkpoint_block(cpfile, curr,
								0, &curr_bh);
			if (ret < 0)
				goto out_header;
			kaddr = kmap_atomic(curr_bh->b_page);
		}
		curr_blkoff = prev_blkoff;
		cp = nilfs_cpfile_block_get_checkpoint(
			cpfile, curr, curr_bh, kaddr);
		list = &cp->cp_snapshot_list;
		prev = le64_to_cpu(list->ssl_prev);
	}
	kunmap_atomic(kaddr);

	if (prev != 0) {
		ret = nilfs_cpfile_get_checkpoint_block(cpfile, prev, 0,
							&prev_bh);
		if (ret < 0)
			goto out_curr;
	} else {
		prev_bh = header_bh;
		get_bh(prev_bh);
	}

	kaddr = kmap_atomic(curr_bh->b_page);
	list = nilfs_cpfile_block_get_snapshot_list(
		cpfile, curr, curr_bh, kaddr);
	list->ssl_prev = cpu_to_le64(cno);
	kunmap_atomic(kaddr);

	kaddr = kmap_atomic(cp_bh->b_page);
	cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr);
	cp->cp_snapshot_list.ssl_next = cpu_to_le64(curr);
	cp->cp_snapshot_list.ssl_prev = cpu_to_le64(prev);
	nilfs_checkpoint_set_snapshot(cp);
	kunmap_atomic(kaddr);

	kaddr = kmap_atomic(prev_bh->b_page);
	list = nilfs_cpfile_block_get_snapshot_list(
		cpfile, prev, prev_bh, kaddr);
	list->ssl_next = cpu_to_le64(cno);
	kunmap_atomic(kaddr);

	kaddr = kmap_atomic(header_bh->b_page);
	header = nilfs_cpfile_block_get_header(cpfile, header_bh, kaddr);
	le64_add_cpu(&header->ch_nsnapshots, 1);
	kunmap_atomic(kaddr);

	mark_buffer_dirty(prev_bh);
	mark_buffer_dirty(curr_bh);
	mark_buffer_dirty(cp_bh);
	mark_buffer_dirty(header_bh);
	nilfs_mdt_mark_dirty(cpfile);

	brelse(prev_bh);

 out_curr:
	brelse(curr_bh);

 out_header:
	brelse(header_bh);

 out_cp:
	brelse(cp_bh);

 out_sem:
	up_write(&NILFS_MDT(cpfile)->mi_sem);
	return ret;
}

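/*
 * The snapshot list is kept sorted by checkpoint number (ascending
 * along ssl_next, with the header's ssl_prev pointing at the largest
 * snapshot), so nilfs_cpfile_set_snapshot() walks backwards via
 * ssl_prev from the header until it finds the insertion point for
 * @cno.
 */
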
static int nilfs_cpfile_clear_snapshot(struct inode *cpfile, __u64 cno)
{
	struct buffer_head *header_bh, *next_bh, *prev_bh, *cp_bh;
	struct nilfs_cpfile_header *header;
	struct nilfs_checkpoint *cp;
	struct nilfs_snapshot_list *list;
	__u64 next, prev;
	void *kaddr;
	int ret;

	if (cno == 0)
		return -ENOENT; /* checkpoint number 0 is invalid */
	down_write(&NILFS_MDT(cpfile)->mi_sem);

	ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &cp_bh);
	if (ret < 0)
		goto out_sem;
	kaddr = kmap_atomic(cp_bh->b_page);
	cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr);
	if (nilfs_checkpoint_invalid(cp)) {
		ret = -ENOENT;
		kunmap_atomic(kaddr);
		goto out_cp;
	}
	if (!nilfs_checkpoint_snapshot(cp)) {
		ret = 0;
		kunmap_atomic(kaddr);
		goto out_cp;
	}

	list = &cp->cp_snapshot_list;
	next = le64_to_cpu(list->ssl_next);
	prev = le64_to_cpu(list->ssl_prev);
	kunmap_atomic(kaddr);

	ret = nilfs_cpfile_get_header_block(cpfile, &header_bh);
	if (ret < 0)
		goto out_cp;
	if (next != 0) {
		ret = nilfs_cpfile_get_checkpoint_block(cpfile, next, 0,
							&next_bh);
		if (ret < 0)
			goto out_header;
	} else {
		next_bh = header_bh;
		get_bh(next_bh);
	}
	if (prev != 0) {
		ret = nilfs_cpfile_get_checkpoint_block(cpfile, prev, 0,
							&prev_bh);
		if (ret < 0)
			goto out_next;
	} else {
		prev_bh = header_bh;
		get_bh(prev_bh);
	}

	kaddr = kmap_atomic(next_bh->b_page);
	list = nilfs_cpfile_block_get_snapshot_list(
		cpfile, next, next_bh, kaddr);
	list->ssl_prev = cpu_to_le64(prev);
	kunmap_atomic(kaddr);

	kaddr = kmap_atomic(prev_bh->b_page);
	list = nilfs_cpfile_block_get_snapshot_list(
		cpfile, prev, prev_bh, kaddr);
	list->ssl_next = cpu_to_le64(next);
	kunmap_atomic(kaddr);

	kaddr = kmap_atomic(cp_bh->b_page);
	cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr);
	cp->cp_snapshot_list.ssl_next = cpu_to_le64(0);
	cp->cp_snapshot_list.ssl_prev = cpu_to_le64(0);
	nilfs_checkpoint_clear_snapshot(cp);
	kunmap_atomic(kaddr);

	kaddr = kmap_atomic(header_bh->b_page);
	header = nilfs_cpfile_block_get_header(cpfile, header_bh, kaddr);
	le64_add_cpu(&header->ch_nsnapshots, -1);
	kunmap_atomic(kaddr);

	mark_buffer_dirty(next_bh);
	mark_buffer_dirty(prev_bh);
	mark_buffer_dirty(cp_bh);
	mark_buffer_dirty(header_bh);
	nilfs_mdt_mark_dirty(cpfile);

	brelse(prev_bh);

 out_next:
	brelse(next_bh);

 out_header:
	brelse(header_bh);

 out_cp:
	brelse(cp_bh);

 out_sem:
	up_write(&NILFS_MDT(cpfile)->mi_sem);
	return ret;
}

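/*
 * nilfs_cpfile_clear_snapshot() is the inverse of the function above:
 * it unlinks @cno from the snapshot list (next or prev may be the
 * header when @cno sits at either end of the list), clears the
 * snapshot flag, and decrements ch_nsnapshots.
 */
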
/**
 * nilfs_cpfile_is_snapshot - determine if checkpoint is a snapshot
 * @cpfile: inode of checkpoint file
 * @cno: checkpoint number
 *
 * Return Value: On success, 1 is returned if the checkpoint specified by
 * @cno is a snapshot, or 0 if not. On error, one of the following negative
 * error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOENT - No such checkpoint.
 */
int nilfs_cpfile_is_snapshot(struct inode *cpfile, __u64 cno)
{
	struct buffer_head *bh;
	struct nilfs_checkpoint *cp;
	void *kaddr;
	int ret;

	/*
	 * CP number is invalid if it's zero or larger than the
	 * largest existing one.
	 */
	if (cno == 0 || cno >= nilfs_mdt_cno(cpfile))
		return -ENOENT;
	down_read(&NILFS_MDT(cpfile)->mi_sem);

	ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &bh);
	if (ret < 0)
		goto out;
	kaddr = kmap_atomic(bh->b_page);
	cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, bh, kaddr);
	if (nilfs_checkpoint_invalid(cp))
		ret = -ENOENT;
	else
		ret = nilfs_checkpoint_snapshot(cp);
	kunmap_atomic(kaddr);
	brelse(bh);

 out:
	up_read(&NILFS_MDT(cpfile)->mi_sem);
	return ret;
}

/**
 * nilfs_cpfile_change_cpmode - change checkpoint mode
 * @cpfile: inode of checkpoint file
 * @cno: checkpoint number
 * @mode: mode of checkpoint
 *
 * Description: nilfs_cpfile_change_cpmode() changes the mode of the
 * checkpoint specified by @cno. The mode @mode is NILFS_CHECKPOINT or
 * NILFS_SNAPSHOT.
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOENT - No such checkpoint.
 */
int nilfs_cpfile_change_cpmode(struct inode *cpfile, __u64 cno, int mode)
{
	int ret;

	switch (mode) {
	case NILFS_CHECKPOINT:
		if (nilfs_checkpoint_is_mounted(cpfile->i_sb, cno))
			/*
			 * Current implementation does not have to protect
			 * plain read-only mounts since they are exclusive
			 * with a read/write mount and are protected from the
			 * cleaner.
			 */
			ret = -EBUSY;
		else
			ret = nilfs_cpfile_clear_snapshot(cpfile, cno);
		return ret;
	case NILFS_SNAPSHOT:
		return nilfs_cpfile_set_snapshot(cpfile, cno);
	default:
		return -EINVAL;
	}
}

/**
 * nilfs_cpfile_get_stat - get checkpoint statistics
 * @cpfile: inode of checkpoint file
 * @cpstat: pointer to a structure of checkpoint statistics
 *
 * Description: nilfs_cpfile_get_stat() returns information about checkpoints.
 *
 * Return Value: On success, 0 is returned, and checkpoints information is
 * stored in the place pointed by @cpstat. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 */
int nilfs_cpfile_get_stat(struct inode *cpfile, struct nilfs_cpstat *cpstat)
{
	struct buffer_head *bh;
	struct nilfs_cpfile_header *header;
	void *kaddr;
	int ret;

	down_read(&NILFS_MDT(cpfile)->mi_sem);

	ret = nilfs_cpfile_get_header_block(cpfile, &bh);
	if (ret < 0)
		goto out_sem;
	kaddr = kmap_atomic(bh->b_page);
	header = nilfs_cpfile_block_get_header(cpfile, bh, kaddr);
	cpstat->cs_cno = nilfs_mdt_cno(cpfile);
	cpstat->cs_ncps = le64_to_cpu(header->ch_ncheckpoints);
	cpstat->cs_nsss = le64_to_cpu(header->ch_nsnapshots);
	kunmap_atomic(kaddr);
	brelse(bh);

 out_sem:
	up_read(&NILFS_MDT(cpfile)->mi_sem);
	return ret;
}

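/*
 * Minimal usage sketch (illustrative only; in the kernel these numbers
 * are normally exported through the NILFS_IOCTL_GET_CPSTAT path):
 *
 *	struct nilfs_cpstat cpstat;
 *
 *	if (nilfs_cpfile_get_stat(cpfile, &cpstat) == 0)
 *		pr_info("cno=%llu ncps=%llu nsss=%llu\n",
 *			(unsigned long long)cpstat.cs_cno,
 *			(unsigned long long)cpstat.cs_ncps,
 *			(unsigned long long)cpstat.cs_nsss);
 */
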
/**
 * nilfs_cpfile_read - read or get cpfile inode
 * @sb: super block instance
 * @cpsize: size of a checkpoint entry
 * @raw_inode: on-disk cpfile inode
 * @inodep: buffer to store the inode
 */
int nilfs_cpfile_read(struct super_block *sb, size_t cpsize,
		      struct nilfs_inode *raw_inode, struct inode **inodep)
{
	struct inode *cpfile;
	int err;

	if (cpsize > sb->s_blocksize) {
		nilfs_msg(sb, KERN_ERR,
			  "too large checkpoint size: %zu bytes", cpsize);
		return -EINVAL;
	} else if (cpsize < NILFS_MIN_CHECKPOINT_SIZE) {
		nilfs_msg(sb, KERN_ERR,
			  "too small checkpoint size: %zu bytes", cpsize);
		return -EINVAL;
	}

	cpfile = nilfs_iget_locked(sb, NULL, NILFS_CPFILE_INO);
	if (unlikely(!cpfile))
		return -ENOMEM;
	if (!(cpfile->i_state & I_NEW))
		goto out;

	err = nilfs_mdt_init(cpfile, NILFS_MDT_GFP, 0);
	if (err)
		goto failed;

	nilfs_mdt_set_entry_size(cpfile, cpsize,
				 sizeof(struct nilfs_cpfile_header));

	err = nilfs_read_inode_common(cpfile, raw_inode);
	if (err)
		goto failed;

	unlock_new_inode(cpfile);
 out:
	*inodep = cpfile;
	return 0;

 failed:
	iget_failed(cpfile);
	return err;
}