1 // SPDX-License-Identifier: GPL-2.0
3 * Write ahead logging implementation copyright Chris Mason 2000
5 * The background commits make this code very interrelated, and
6 * overly complex. I need to rethink things a bit. The major players:
8 * journal_begin -- call with the number of blocks you expect to log.
9 * If the current transaction is too
10 * old, it will block until the current transaction is
11 * finished, and then start a new one.
12 * Usually, your transaction will get joined in with
13 * previous ones for speed.
15 * journal_join -- same as journal_begin, but won't block on the current
16 * transaction regardless of age. Don't ever call
17 * this. Ever. There are only two places it should be
18 * called from, and they are both inside this file.
20 * journal_mark_dirty -- adds blocks into this transaction. clears any flags
21 * that might make them get sent to disk
22 * and then marks them BH_JDirty. Puts the buffer head
23 * into the current transaction hash.
25 * journal_end -- if the current transaction is batchable, it does nothing;
26 * otherwise, it could do an async/synchronous commit, or
27 * a full flush of all log and real blocks in the
30 * flush_old_commits -- if the current transaction is too old, it is ended and
31 * commit blocks are sent to disk. Forces commit blocks
32 * to disk for all backgrounded commits that have been
34 * -- Note, if you call this as an immediate flush
35 * from within kupdate, it will ignore the immediate flag
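*
* A typical caller elsewhere in reiserfs follows roughly this pattern
* (illustrative sketch only; error handling omitted and the exact
* prototypes live in reiserfs.h):
*
*	struct reiserfs_transaction_handle th;
*
*	journal_begin(&th, sb, nblocks);
*	reiserfs_prepare_for_journal(sb, bh, 1);
*	... modify bh ...
*	journal_mark_dirty(&th, bh);
*	journal_end(&th);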
38 #include <linux/time.h>
39 #include <linux/semaphore.h>
40 #include <linux/vmalloc.h>
42 #include <linux/kernel.h>
43 #include <linux/errno.h>
44 #include <linux/fcntl.h>
45 #include <linux/stat.h>
46 #include <linux/string.h>
47 #include <linux/buffer_head.h>
48 #include <linux/workqueue.h>
49 #include <linux/writeback.h>
50 #include <linux/blkdev.h>
51 #include <linux/backing-dev.h>
52 #include <linux/uaccess.h>
53 #include <linux/slab.h>
56 /* gets a struct reiserfs_journal_list * from a list head */
57 #define JOURNAL_LIST_ENTRY(h) (list_entry((h), struct reiserfs_journal_list, \
59 #define JOURNAL_WORK_ENTRY(h) (list_entry((h), struct reiserfs_journal_list, \
62 /* must be correct to keep the desc and commit structs at 4k */
63 #define JOURNAL_TRANS_HALF 1018
64 #define BUFNR 64 /* read ahead */
66 /* cnode state bits. Move these into reiserfs_fs.h */
68 /* this block was freed, and can't be written. */
70 /* this block was freed during this transaction, and can't be written */
71 #define BLOCK_FREED_HOLDER 3
73 /* used in flush_journal_list */
74 #define BLOCK_NEEDS_FLUSH 4
75 #define BLOCK_DIRTIED 5
77 /* journal list state bits */
78 #define LIST_TOUCHED 1
80 #define LIST_COMMIT_PENDING 4 /* someone will commit this list */
82 /* flags for do_journal_end */
83 #define FLUSH_ALL 1 /* flush commit and real blocks */
84 #define COMMIT_NOW 2 /* end and commit this transaction */
85 #define WAIT 4 /* wait for the log blocks to hit the disk */
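/* the flags above may be or'ed together in the flags argument of do_journal_end */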
87 static int do_journal_end(struct reiserfs_transaction_handle *, int flags);
88 static int flush_journal_list(struct super_block *s,
89 struct reiserfs_journal_list *jl, int flushall);
90 static int flush_commit_list(struct super_block *s,
91 struct reiserfs_journal_list *jl, int flushall);
92 static int can_dirty(struct reiserfs_journal_cnode *cn);
93 static int journal_join(struct reiserfs_transaction_handle *th,
94 struct super_block *sb);
95 static void release_journal_dev(struct super_block *super,
96 struct reiserfs_journal *journal);
97 static int dirty_one_transaction(struct super_block *s,
98 struct reiserfs_journal_list *jl);
99 static void flush_async_commits(struct work_struct *work);
100 static void queue_log_writer(struct super_block *s);
102 /* values for join in do_journal_begin_r */
104 JBEGIN_REG = 0, /* regular journal begin */
105 /* join the running transaction if at all possible */
107 /* called from cleanup code, ignores aborted flag */
111 static int do_journal_begin_r(struct reiserfs_transaction_handle *th,
112 struct super_block *sb,
113 unsigned long nblocks, int join);
115 static void init_journal_hash(struct super_block *sb)
117 struct reiserfs_journal *journal = SB_JOURNAL(sb);
118 memset(journal->j_hash_table, 0,
119 JOURNAL_HASH_SIZE * sizeof(struct reiserfs_journal_cnode *));
123 * clears BH_Dirty and sticks the buffer on the clean list. Called because
124 * I can't allow refile_buffer to make schedule happen after I've freed a
125 * block. Look at remove_from_transaction and journal_mark_freed for
128 static int reiserfs_clean_and_file_buffer(struct buffer_head *bh)
131 clear_buffer_dirty(bh);
132 clear_buffer_journal_test(bh);
137 static struct reiserfs_bitmap_node *allocate_bitmap_node(struct super_block
140 struct reiserfs_bitmap_node *bn;
143 bn = kmalloc(sizeof(struct reiserfs_bitmap_node), GFP_NOFS);
147 bn->data = kzalloc(sb->s_blocksize, GFP_NOFS);
153 INIT_LIST_HEAD(&bn->list);
157 static struct reiserfs_bitmap_node *get_bitmap_node(struct super_block *sb)
159 struct reiserfs_journal *journal = SB_JOURNAL(sb);
160 struct reiserfs_bitmap_node *bn = NULL;
161 struct list_head *entry = journal->j_bitmap_nodes.next;
163 journal->j_used_bitmap_nodes++;
166 if (entry != &journal->j_bitmap_nodes) {
167 bn = list_entry(entry, struct reiserfs_bitmap_node, list);
169 memset(bn->data, 0, sb->s_blocksize);
170 journal->j_free_bitmap_nodes--;
173 bn = allocate_bitmap_node(sb);
180 static inline void free_bitmap_node(struct super_block *sb,
181 struct reiserfs_bitmap_node *bn)
183 struct reiserfs_journal *journal = SB_JOURNAL(sb);
184 journal->j_used_bitmap_nodes--;
185 if (journal->j_free_bitmap_nodes > REISERFS_MAX_BITMAP_NODES) {
189 list_add(&bn->list, &journal->j_bitmap_nodes);
190 journal->j_free_bitmap_nodes++;
194 static void allocate_bitmap_nodes(struct super_block *sb)
197 struct reiserfs_journal *journal = SB_JOURNAL(sb);
198 struct reiserfs_bitmap_node *bn = NULL;
199 for (i = 0; i < REISERFS_MIN_BITMAP_NODES; i++) {
200 bn = allocate_bitmap_node(sb);
202 list_add(&bn->list, &journal->j_bitmap_nodes);
203 journal->j_free_bitmap_nodes++;
205 /* this is ok, we'll try again when more are needed */
211 static int set_bit_in_list_bitmap(struct super_block *sb,
213 struct reiserfs_list_bitmap *jb)
215 unsigned int bmap_nr = block / (sb->s_blocksize << 3);
216 unsigned int bit_nr = block % (sb->s_blocksize << 3);
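/* s_blocksize << 3 is the number of bits, and therefore blocks, covered by one bitmap node */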
218 if (!jb->bitmaps[bmap_nr]) {
219 jb->bitmaps[bmap_nr] = get_bitmap_node(sb);
221 set_bit(bit_nr, (unsigned long *)jb->bitmaps[bmap_nr]->data);
225 static void cleanup_bitmap_list(struct super_block *sb,
226 struct reiserfs_list_bitmap *jb)
229 if (jb->bitmaps == NULL)
232 for (i = 0; i < reiserfs_bmap_count(sb); i++) {
233 if (jb->bitmaps[i]) {
234 free_bitmap_node(sb, jb->bitmaps[i]);
235 jb->bitmaps[i] = NULL;
241 * only call this on FS unmount.
243 static int free_list_bitmaps(struct super_block *sb,
244 struct reiserfs_list_bitmap *jb_array)
247 struct reiserfs_list_bitmap *jb;
248 for (i = 0; i < JOURNAL_NUM_BITMAPS; i++) {
250 jb->journal_list = NULL;
251 cleanup_bitmap_list(sb, jb);
258 static int free_bitmap_nodes(struct super_block *sb)
260 struct reiserfs_journal *journal = SB_JOURNAL(sb);
261 struct list_head *next = journal->j_bitmap_nodes.next;
262 struct reiserfs_bitmap_node *bn;
264 while (next != &journal->j_bitmap_nodes) {
265 bn = list_entry(next, struct reiserfs_bitmap_node, list);
269 next = journal->j_bitmap_nodes.next;
270 journal->j_free_bitmap_nodes--;
277 * get memory for JOURNAL_NUM_BITMAPS worth of bitmaps.
278 * jb_array is the array to be filled in.
280 int reiserfs_allocate_list_bitmaps(struct super_block *sb,
281 struct reiserfs_list_bitmap *jb_array,
282 unsigned int bmap_nr)
286 struct reiserfs_list_bitmap *jb;
287 int mem = bmap_nr * sizeof(struct reiserfs_bitmap_node *);
289 for (i = 0; i < JOURNAL_NUM_BITMAPS; i++) {
291 jb->journal_list = NULL;
292 jb->bitmaps = vzalloc(mem);
294 reiserfs_warning(sb, "clm-2000", "unable to "
295 "allocate bitmaps for journal lists");
301 free_list_bitmaps(sb, jb_array);
308 * find an available list bitmap. If you can't find one, flush a commit list
311 static struct reiserfs_list_bitmap *get_list_bitmap(struct super_block *sb,
312 struct reiserfs_journal_list
316 struct reiserfs_journal *journal = SB_JOURNAL(sb);
317 struct reiserfs_list_bitmap *jb = NULL;
319 for (j = 0; j < (JOURNAL_NUM_BITMAPS * 3); j++) {
320 i = journal->j_list_bitmap_index;
321 journal->j_list_bitmap_index = (i + 1) % JOURNAL_NUM_BITMAPS;
322 jb = journal->j_list_bitmap + i;
323 if (journal->j_list_bitmap[i].journal_list) {
324 flush_commit_list(sb,
325 journal->j_list_bitmap[i].
327 if (!journal->j_list_bitmap[i].journal_list) {
334 /* double check to make sure it was flushed correctly */
335 if (jb->journal_list)
337 jb->journal_list = jl;
342 * allocates a new chunk of X nodes, and links them all together as a list.
343 * Uses the cnode->next and cnode->prev pointers
344 * returns NULL on failure
346 static struct reiserfs_journal_cnode *allocate_cnodes(int num_cnodes)
348 struct reiserfs_journal_cnode *head;
350 if (num_cnodes <= 0) {
353 head = vzalloc(num_cnodes * sizeof(struct reiserfs_journal_cnode));
358 head[0].next = head + 1;
359 for (i = 1; i < num_cnodes; i++) {
360 head[i].prev = head + (i - 1);
361 head[i].next = head + (i + 1); /* if last one, overwritten after the loop */
363 head[num_cnodes - 1].next = NULL;
367 /* pulls a cnode off the free list, or returns NULL on failure */
368 static struct reiserfs_journal_cnode *get_cnode(struct super_block *sb)
370 struct reiserfs_journal_cnode *cn;
371 struct reiserfs_journal *journal = SB_JOURNAL(sb);
373 reiserfs_check_lock_depth(sb, "get_cnode");
375 if (journal->j_cnode_free <= 0) {
378 journal->j_cnode_used++;
379 journal->j_cnode_free--;
380 cn = journal->j_cnode_free_list;
385 cn->next->prev = NULL;
387 journal->j_cnode_free_list = cn->next;
388 memset(cn, 0, sizeof(struct reiserfs_journal_cnode));
393 * returns a cnode to the free list
395 static void free_cnode(struct super_block *sb,
396 struct reiserfs_journal_cnode *cn)
398 struct reiserfs_journal *journal = SB_JOURNAL(sb);
400 reiserfs_check_lock_depth(sb, "free_cnode");
402 journal->j_cnode_used--;
403 journal->j_cnode_free++;
404 /* memset(cn, 0, sizeof(struct reiserfs_journal_cnode)) ; */
405 cn->next = journal->j_cnode_free_list;
406 if (journal->j_cnode_free_list) {
407 journal->j_cnode_free_list->prev = cn;
409 cn->prev = NULL; /* not needed with the memset, but I might kill the memset, and forget to do this */
410 journal->j_cnode_free_list = cn;
413 static void clear_prepared_bits(struct buffer_head *bh)
415 clear_buffer_journal_prepared(bh);
416 clear_buffer_journal_restore_dirty(bh);
420 * return a cnode with same dev, block number and size in table,
421 * or null if not found
423 static inline struct reiserfs_journal_cnode *get_journal_hash_dev(struct
427 reiserfs_journal_cnode
431 struct reiserfs_journal_cnode *cn;
432 cn = journal_hash(table, sb, bl);
434 if (cn->blocknr == bl && cn->sb == sb)
438 return NULL;
442 * this actually means 'can this block be reallocated yet?'. If you set
443 * search_all, a block can only be allocated if it is not in the current
444 * transaction, was not freed by the current transaction, and has no chance
445 * of ever being overwritten by a replay after crashing.
447 * If you don't set search_all, a block can only be allocated if it is not
448 * in the current transaction. Since deleting a block removes it from the
449 * current transaction, this case should never happen. If you don't set
450 * search_all, make sure you never write the block without logging it.
452 * next_zero_bit is a suggestion about the next block to try for find_forward.
453 * when bl is rejected because it is set in a journal list bitmap, we search
454 * for the next zero bit in the bitmap that rejected bl. Then, we return
455 * that through next_zero_bit for find_forward to try.
457 * Just because we return something in next_zero_bit does not mean we won't
458 * reject it on the next call to reiserfs_in_journal
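*
* Returns non-zero when the block is still accounted for by the journal and
* must not be reused yet, 0 when it is safe to reallocate.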
460 int reiserfs_in_journal(struct super_block *sb,
461 unsigned int bmap_nr, int bit_nr, int search_all,
462 b_blocknr_t * next_zero_bit)
464 struct reiserfs_journal *journal = SB_JOURNAL(sb);
465 struct reiserfs_journal_cnode *cn;
466 struct reiserfs_list_bitmap *jb;
470 *next_zero_bit = 0; /* always start this at zero. */
472 PROC_INFO_INC(sb, journal.in_journal);
474 * If we aren't doing a search_all, this is a metablock, and it
475 * will be logged before use. if we crash before the transaction
476 * that freed it commits, this transaction won't have committed
477 * either, and the block will never be written
480 for (i = 0; i < JOURNAL_NUM_BITMAPS; i++) {
481 PROC_INFO_INC(sb, journal.in_journal_bitmap);
482 jb = journal->j_list_bitmap + i;
483 if (jb->journal_list && jb->bitmaps[bmap_nr] &&
485 (unsigned long *)jb->bitmaps[bmap_nr]->
488 find_next_zero_bit((unsigned long *)
489 (jb->bitmaps[bmap_nr]->
491 sb->s_blocksize << 3,
498 bl = bmap_nr * (sb->s_blocksize << 3) + bit_nr;
499 /* is it in any old transactions? */
502 get_journal_hash_dev(sb, journal->j_list_hash_table, bl))) {
506 /* is it in the current transaction. This should never happen */
507 if ((cn = get_journal_hash_dev(sb, journal->j_hash_table, bl))) {
512 PROC_INFO_INC(sb, journal.in_journal_reusable);
517 /* insert cn into table */
518 static inline void insert_journal_hash(struct reiserfs_journal_cnode **table,
519 struct reiserfs_journal_cnode *cn)
521 struct reiserfs_journal_cnode *cn_orig;
523 cn_orig = journal_hash(table, cn->sb, cn->blocknr);
529 journal_hash(table, cn->sb, cn->blocknr) = cn;
532 /* lock the current transaction */
533 static inline void lock_journal(struct super_block *sb)
535 PROC_INFO_INC(sb, journal.lock_journal);
537 reiserfs_mutex_lock_safe(&SB_JOURNAL(sb)->j_mutex, sb);
540 /* unlock the current transaction */
541 static inline void unlock_journal(struct super_block *sb)
543 mutex_unlock(&SB_JOURNAL(sb)->j_mutex);
546 static inline void get_journal_list(struct reiserfs_journal_list *jl)
551 static inline void put_journal_list(struct super_block *s,
552 struct reiserfs_journal_list *jl)
554 if (jl->j_refcount < 1) {
555 reiserfs_panic(s, "journal-2", "trans id %u, refcount at %d",
556 jl->j_trans_id, jl->j_refcount);
558 if (--jl->j_refcount == 0)
563 * this used to be much more involved, and I'm keeping it just in case
564 * things get ugly again. it gets called by flush_commit_list, and
565 * cleans up any data stored about blocks freed during a transaction.
567 static void cleanup_freed_for_journal_list(struct super_block *sb,
568 struct reiserfs_journal_list *jl)
571 struct reiserfs_list_bitmap *jb = jl->j_list_bitmap;
573 cleanup_bitmap_list(sb, jb);
575 jl->j_list_bitmap->journal_list = NULL;
576 jl->j_list_bitmap = NULL;
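/*
* journal lists are flushed and freed oldest first, so if the oldest list
* still on j_journal_list is newer than trans_id, the list asked about
* must already be gone
*/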
579 static int journal_list_still_alive(struct super_block *s,
580 unsigned int trans_id)
582 struct reiserfs_journal *journal = SB_JOURNAL(s);
583 struct list_head *entry = &journal->j_journal_list;
584 struct reiserfs_journal_list *jl;
586 if (!list_empty(entry)) {
587 jl = JOURNAL_LIST_ENTRY(entry->next);
588 if (jl->j_trans_id <= trans_id) {
596 * If page->mapping was null, we failed to truncate this page for
597 * some reason. Most likely because it was truncated after being
598 * logged via data=journal.
600 * This does a check to see if the buffer belongs to one of these
601 * lost pages before doing the final put_bh. If page->mapping was
602 * null, it tries to free buffers on the page, which should make the
603 * final put_page drop the page from the lru.
605 static void release_buffer_page(struct buffer_head *bh)
607 struct page *page = bh->b_page;
608 if (!page->mapping && trylock_page(page)) {
612 try_to_free_buffers(page);
620 static void reiserfs_end_buffer_io_sync(struct buffer_head *bh, int uptodate)
622 if (buffer_journaled(bh)) {
623 reiserfs_warning(NULL, "clm-2084",
624 "pinned buffer %lu:%pg sent to disk",
625 bh->b_blocknr, bh->b_bdev);
628 set_buffer_uptodate(bh);
630 clear_buffer_uptodate(bh);
633 release_buffer_page(bh);
636 static void reiserfs_end_ordered_io(struct buffer_head *bh, int uptodate)
639 set_buffer_uptodate(bh);
641 clear_buffer_uptodate(bh);
646 static void submit_logged_buffer(struct buffer_head *bh)
649 bh->b_end_io = reiserfs_end_buffer_io_sync;
650 clear_buffer_journal_new(bh);
651 clear_buffer_dirty(bh);
652 if (!test_clear_buffer_journal_test(bh))
654 if (!buffer_uptodate(bh))
656 submit_bh(REQ_OP_WRITE, 0, bh);
659 static void submit_ordered_buffer(struct buffer_head *bh)
662 bh->b_end_io = reiserfs_end_ordered_io;
663 clear_buffer_dirty(bh);
664 if (!buffer_uptodate(bh))
666 submit_bh(REQ_OP_WRITE, 0, bh);
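/*
* buffers are submitted in small batches; a caller holding a spinlock
* passes it in so the lock can be dropped around the actual submission
* whenever a chunk fills up
*/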
669 #define CHUNK_SIZE 32
670 struct buffer_chunk {
671 struct buffer_head *bh[CHUNK_SIZE];
675 static void write_chunk(struct buffer_chunk *chunk)
678 for (i = 0; i < chunk->nr; i++) {
679 submit_logged_buffer(chunk->bh[i]);
684 static void write_ordered_chunk(struct buffer_chunk *chunk)
687 for (i = 0; i < chunk->nr; i++) {
688 submit_ordered_buffer(chunk->bh[i]);
693 static int add_to_chunk(struct buffer_chunk *chunk, struct buffer_head *bh,
694 spinlock_t * lock, void (fn) (struct buffer_chunk *))
697 BUG_ON(chunk->nr >= CHUNK_SIZE);
698 chunk->bh[chunk->nr++] = bh;
699 if (chunk->nr >= CHUNK_SIZE) {
712 static atomic_t nr_reiserfs_jh = ATOMIC_INIT(0);
713 static struct reiserfs_jh *alloc_jh(void)
715 struct reiserfs_jh *jh;
717 jh = kmalloc(sizeof(*jh), GFP_NOFS);
719 atomic_inc(&nr_reiserfs_jh);
727 * we want to free the jh when the buffer has been written
730 void reiserfs_free_jh(struct buffer_head *bh)
732 struct reiserfs_jh *jh;
736 bh->b_private = NULL;
738 list_del_init(&jh->list);
740 if (atomic_read(&nr_reiserfs_jh) <= 0)
742 atomic_dec(&nr_reiserfs_jh);
747 static inline int __add_jh(struct reiserfs_journal *j, struct buffer_head *bh,
750 struct reiserfs_jh *jh;
753 spin_lock(&j->j_dirty_buffers_lock);
754 if (!bh->b_private) {
755 spin_unlock(&j->j_dirty_buffers_lock);
759 list_del_init(&jh->list);
764 spin_lock(&j->j_dirty_buffers_lock);
766 * buffer must be locked for __add_jh, so there should never be
767 * two adds racing on the same buffer at the same time
769 BUG_ON(bh->b_private);
773 jh->jl = j->j_current_jl;
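/* tail conversion buffers and ordered data buffers sit on separate per-transaction lists */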
775 list_add_tail(&jh->list, &jh->jl->j_tail_bh_list);
777 list_add_tail(&jh->list, &jh->jl->j_bh_list);
779 spin_unlock(&j->j_dirty_buffers_lock);
783 int reiserfs_add_tail_list(struct inode *inode, struct buffer_head *bh)
785 return __add_jh(SB_JOURNAL(inode->i_sb), bh, 1);
787 int reiserfs_add_ordered_list(struct inode *inode, struct buffer_head *bh)
789 return __add_jh(SB_JOURNAL(inode->i_sb), bh, 0);
792 #define JH_ENTRY(l) list_entry((l), struct reiserfs_jh, list)
793 static int write_ordered_buffers(spinlock_t * lock,
794 struct reiserfs_journal *j,
795 struct reiserfs_journal_list *jl,
796 struct list_head *list)
798 struct buffer_head *bh;
799 struct reiserfs_jh *jh;
800 int ret = j->j_errno;
801 struct buffer_chunk chunk;
802 struct list_head tmp;
803 INIT_LIST_HEAD(&tmp);
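/*
* two passes: the first submits every dirty buffer on the list in
* CHUNK_SIZE batches and moves its journal head to a temporary list,
* the second waits for the writes and frees the journal heads
*/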
807 while (!list_empty(list)) {
808 jh = JH_ENTRY(list->next);
811 if (!trylock_buffer(bh)) {
812 if (!buffer_dirty(bh)) {
813 list_move(&jh->list, &tmp);
818 write_ordered_chunk(&chunk);
825 * in theory, dirty non-uptodate buffers should never get here,
826 * but the upper layer io error paths still have a few quirks.
827 * Handle them here as gracefully as we can
829 if (!buffer_uptodate(bh) && buffer_dirty(bh)) {
830 clear_buffer_dirty(bh);
833 if (buffer_dirty(bh)) {
834 list_move(&jh->list, &tmp);
835 add_to_chunk(&chunk, bh, lock, write_ordered_chunk);
837 reiserfs_free_jh(bh);
842 cond_resched_lock(lock);
846 write_ordered_chunk(&chunk);
849 while (!list_empty(&tmp)) {
850 jh = JH_ENTRY(tmp.prev);
853 reiserfs_free_jh(bh);
855 if (buffer_locked(bh)) {
860 if (!buffer_uptodate(bh)) {
864 * ugly interaction with invalidatepage here.
865 * reiserfs_invalidate_page will pin any buffer that has a
866 * valid journal head from an older transaction. If someone
867 * else sets our buffer dirty after we write it in the first
868 * loop, and then someone truncates the page away, nobody
869 * will ever write the buffer. We're safe if we write the
870 * page one last time after freeing the journal header.
872 if (buffer_dirty(bh) && unlikely(bh->b_page->mapping == NULL)) {
874 ll_rw_block(REQ_OP_WRITE, 0, 1, &bh);
878 cond_resched_lock(lock);
884 static int flush_older_commits(struct super_block *s,
885 struct reiserfs_journal_list *jl)
887 struct reiserfs_journal *journal = SB_JOURNAL(s);
888 struct reiserfs_journal_list *other_jl;
889 struct reiserfs_journal_list *first_jl;
890 struct list_head *entry;
891 unsigned int trans_id = jl->j_trans_id;
892 unsigned int other_trans_id;
893 unsigned int first_trans_id;
897 * first we walk backwards to find the oldest uncommitted transaction
900 entry = jl->j_list.prev;
902 other_jl = JOURNAL_LIST_ENTRY(entry);
903 if (entry == &journal->j_journal_list ||
904 atomic_read(&other_jl->j_older_commits_done))
908 entry = other_jl->j_list.prev;
911 /* if we didn't find any older uncommitted transactions, return now */
912 if (first_jl == jl) {
916 first_trans_id = first_jl->j_trans_id;
918 entry = &first_jl->j_list;
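/*
* now walk forward from the oldest uncommitted list, flushing the commit
* blocks of everything older than the list we were called with
*/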
920 other_jl = JOURNAL_LIST_ENTRY(entry);
921 other_trans_id = other_jl->j_trans_id;
923 if (other_trans_id < trans_id) {
924 if (atomic_read(&other_jl->j_commit_left) != 0) {
925 flush_commit_list(s, other_jl, 0);
927 /* list we were called with is gone, return */
928 if (!journal_list_still_alive(s, trans_id))
932 * the one we just flushed is gone, this means
933 * all older lists are also gone, so first_jl
934 * is no longer valid either. Go back to the
937 if (!journal_list_still_alive
938 (s, other_trans_id)) {
943 if (entry == &journal->j_journal_list)
952 static int reiserfs_async_progress_wait(struct super_block *s)
954 struct reiserfs_journal *j = SB_JOURNAL(s);
956 if (atomic_read(&j->j_async_throttle)) {
959 depth = reiserfs_write_unlock_nested(s);
960 congestion_wait(BLK_RW_ASYNC, HZ / 10);
961 reiserfs_write_lock_nested(s, depth);
968 * if this journal list still has commit blocks unflushed, send them to disk.
970 * log areas must be flushed in order (transaction 2 can't commit before
971 * transaction 1). Before the commit block can be written, every other log
972 * block must be safely on disk
974 static int flush_commit_list(struct super_block *s,
975 struct reiserfs_journal_list *jl, int flushall)
979 struct buffer_head *tbh = NULL;
980 unsigned int trans_id = jl->j_trans_id;
981 struct reiserfs_journal *journal = SB_JOURNAL(s);
986 reiserfs_check_lock_depth(s, "flush_commit_list");
988 if (atomic_read(&jl->j_older_commits_done)) {
993 * before we can put our commit blocks on disk, we have to make
994 * sure everyone older than us is on disk too
996 BUG_ON(jl->j_len <= 0);
997 BUG_ON(trans_id == journal->j_trans_id);
999 get_journal_list(jl);
1001 if (flush_older_commits(s, jl) == 1) {
1003 * list disappeared during flush_older_commits.
1010 /* make sure nobody is trying to flush this one at the same time */
1011 reiserfs_mutex_lock_safe(&jl->j_commit_mutex, s);
1013 if (!journal_list_still_alive(s, trans_id)) {
1014 mutex_unlock(&jl->j_commit_mutex);
1017 BUG_ON(jl->j_trans_id == 0);
1019 /* this commit is done, exit */
1020 if (atomic_read(&jl->j_commit_left) <= 0) {
1022 atomic_set(&jl->j_older_commits_done, 1);
1024 mutex_unlock(&jl->j_commit_mutex);
1028 if (!list_empty(&jl->j_bh_list)) {
1032 * We might sleep in numerous places inside
1033 * write_ordered_buffers. Relax the write lock.
1035 depth = reiserfs_write_unlock_nested(s);
1036 ret = write_ordered_buffers(&journal->j_dirty_buffers_lock,
1037 journal, jl, &jl->j_bh_list);
1038 if (ret < 0 && retval == 0)
1040 reiserfs_write_lock_nested(s, depth);
1042 BUG_ON(!list_empty(&jl->j_bh_list));
1044 * for the description block and all the log blocks, submit any buffers
1045 * that haven't already reached the disk. Try to write at least 256
1046 * log blocks. later on, we will only wait on blocks that correspond
1047 * to this transaction, but while we're unplugging we might as well
1048 * get a chunk of data on there.
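*
* The log is a circular area on disk, so block numbers wrap modulo
* SB_ONDISK_JOURNAL_SIZE(s).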
1050 atomic_inc(&journal->j_async_throttle);
1051 write_len = jl->j_len + 1;
1052 if (write_len < 256)
1054 for (i = 0 ; i < write_len ; i++) {
1055 bn = SB_ONDISK_JOURNAL_1st_BLOCK(s) + (jl->j_start + i) %
1056 SB_ONDISK_JOURNAL_SIZE(s);
1057 tbh = journal_find_get_block(s, bn);
1059 if (buffer_dirty(tbh)) {
1060 depth = reiserfs_write_unlock_nested(s);
1061 ll_rw_block(REQ_OP_WRITE, 0, 1, &tbh);
1062 reiserfs_write_lock_nested(s, depth);
1067 atomic_dec(&journal->j_async_throttle);
1069 for (i = 0; i < (jl->j_len + 1); i++) {
1070 bn = SB_ONDISK_JOURNAL_1st_BLOCK(s) +
1071 (jl->j_start + i) % SB_ONDISK_JOURNAL_SIZE(s);
1072 tbh = journal_find_get_block(s, bn);
1074 depth = reiserfs_write_unlock_nested(s);
1075 __wait_on_buffer(tbh);
1076 reiserfs_write_lock_nested(s, depth);
1078 * since we're using ll_rw_block above, it might have skipped
1079 * over a locked buffer. Double check here
1081 /* redundant, sync_dirty_buffer() checks */
1082 if (buffer_dirty(tbh)) {
1083 depth = reiserfs_write_unlock_nested(s);
1084 sync_dirty_buffer(tbh);
1085 reiserfs_write_lock_nested(s, depth);
1087 if (unlikely(!buffer_uptodate(tbh))) {
1088 #ifdef CONFIG_REISERFS_CHECK
1089 reiserfs_warning(s, "journal-601",
1090 "buffer write failed");
1094 /* once for journal_find_get_block */
1096 /* once due to original getblk in do_journal_end */
1098 atomic_dec(&jl->j_commit_left);
1101 BUG_ON(atomic_read(&jl->j_commit_left) != 1);
1104 * If there was a write error in the journal - we can't commit
1105 * this transaction - it will be invalid and, if successful,
1106 * will just end up propagating the write error out to
1109 if (likely(!retval && !reiserfs_is_journal_aborted (journal))) {
1110 if (buffer_dirty(jl->j_commit_bh))
1112 mark_buffer_dirty(jl->j_commit_bh);
1113 depth = reiserfs_write_unlock_nested(s);
1114 if (reiserfs_barrier_flush(s))
1115 __sync_dirty_buffer(jl->j_commit_bh,
1116 REQ_SYNC | REQ_PREFLUSH | REQ_FUA);
1118 sync_dirty_buffer(jl->j_commit_bh);
1119 reiserfs_write_lock_nested(s, depth);
1123 * If there was a write error in the journal - we can't commit this
1124 * transaction - it will be invalid and, if successful, will just end
1125 * up propagating the write error out to the filesystem.
1127 if (unlikely(!buffer_uptodate(jl->j_commit_bh))) {
1128 #ifdef CONFIG_REISERFS_CHECK
1129 reiserfs_warning(s, "journal-615", "buffer write failed");
1133 bforget(jl->j_commit_bh);
1134 if (journal->j_last_commit_id != 0 &&
1135 (jl->j_trans_id - journal->j_last_commit_id) != 1) {
1136 reiserfs_warning(s, "clm-2200", "last commit %lu, current %lu",
1137 journal->j_last_commit_id, jl->j_trans_id);
1139 journal->j_last_commit_id = jl->j_trans_id;
1142 * now, every commit block is on the disk. It is safe to allow
1143 * blocks freed during this transaction to be reallocated
1145 cleanup_freed_for_journal_list(s, jl);
1147 retval = retval ? retval : journal->j_errno;
1149 /* mark the metadata dirty */
1151 dirty_one_transaction(s, jl);
1152 atomic_dec(&jl->j_commit_left);
1155 atomic_set(&jl->j_older_commits_done, 1);
1157 mutex_unlock(&jl->j_commit_mutex);
1159 put_journal_list(s, jl);
1162 reiserfs_abort(s, retval, "Journal write error in %s",
1168 * flush_journal_list frequently needs to find a newer transaction for a
1169 * given block. This does that, or returns NULL if it can't find anything
1171 static struct reiserfs_journal_list *find_newer_jl_for_cn(struct
1172 reiserfs_journal_cnode
1175 struct super_block *sb = cn->sb;
1176 b_blocknr_t blocknr = cn->blocknr;
1180 if (cn->sb == sb && cn->blocknr == blocknr && cn->jlist) {
1188 static void remove_journal_hash(struct super_block *,
1189 struct reiserfs_journal_cnode **,
1190 struct reiserfs_journal_list *, unsigned long,
1194 * once all the real blocks have been flushed, it is safe to remove them
1195 * from the journal list for this transaction. Aside from freeing the
1196 * cnode, this also allows the block to be reallocated for data blocks
1197 * if it had been deleted.
1199 static void remove_all_from_journal_list(struct super_block *sb,
1200 struct reiserfs_journal_list *jl,
1203 struct reiserfs_journal *journal = SB_JOURNAL(sb);
1204 struct reiserfs_journal_cnode *cn, *last;
1205 cn = jl->j_realblock;
1208 * which is better, to lock once around the whole loop, or
1209 * to lock for each call to remove_journal_hash?
1212 if (cn->blocknr != 0) {
1214 reiserfs_warning(sb, "reiserfs-2201",
1215 "block %u, bh is %d, state %ld",
1216 cn->blocknr, cn->bh ? 1 : 0,
1220 remove_journal_hash(sb, journal->j_list_hash_table,
1221 jl, cn->blocknr, 1);
1225 free_cnode(sb, last);
1227 jl->j_realblock = NULL;
1231 * if this timestamp is greater than the timestamp we wrote last to the
1232 * header block, write it to the header block. once this is done, I can
1233 * safely say the log area for this transaction won't ever be replayed,
1234 * and I can start releasing blocks in this transaction for reuse as data
1235 * blocks. called by flush_journal_list, before it calls
1236 * remove_all_from_journal_list
1238 static int _update_journal_header_block(struct super_block *sb,
1239 unsigned long offset,
1240 unsigned int trans_id)
1242 struct reiserfs_journal_header *jh;
1243 struct reiserfs_journal *journal = SB_JOURNAL(sb);
1246 if (reiserfs_is_journal_aborted(journal))
1249 if (trans_id >= journal->j_last_flush_trans_id) {
1250 if (buffer_locked((journal->j_header_bh))) {
1251 depth = reiserfs_write_unlock_nested(sb);
1252 __wait_on_buffer(journal->j_header_bh);
1253 reiserfs_write_lock_nested(sb, depth);
1254 if (unlikely(!buffer_uptodate(journal->j_header_bh))) {
1255 #ifdef CONFIG_REISERFS_CHECK
1256 reiserfs_warning(sb, "journal-699",
1257 "buffer write failed");
1262 journal->j_last_flush_trans_id = trans_id;
1263 journal->j_first_unflushed_offset = offset;
1264 jh = (struct reiserfs_journal_header *)(journal->j_header_bh->
1266 jh->j_last_flush_trans_id = cpu_to_le32(trans_id);
1267 jh->j_first_unflushed_offset = cpu_to_le32(offset);
1268 jh->j_mount_id = cpu_to_le32(journal->j_mount_id);
1270 set_buffer_dirty(journal->j_header_bh);
1271 depth = reiserfs_write_unlock_nested(sb);
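/*
* with barriers enabled, write the header with a preflush + FUA so the
* blocks flushed before it are durable by the time the header claims
* they are; otherwise fall back to a plain synchronous write
*/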
1273 if (reiserfs_barrier_flush(sb))
1274 __sync_dirty_buffer(journal->j_header_bh,
1275 REQ_SYNC | REQ_PREFLUSH | REQ_FUA);
1277 sync_dirty_buffer(journal->j_header_bh);
1279 reiserfs_write_lock_nested(sb, depth);
1280 if (!buffer_uptodate(journal->j_header_bh)) {
1281 reiserfs_warning(sb, "journal-837",
1282 "IO error during journal replay");
1289 static int update_journal_header_block(struct super_block *sb,
1290 unsigned long offset,
1291 unsigned int trans_id)
1293 return _update_journal_header_block(sb, offset, trans_id);
1297 * flush any and all journal lists older than you are
1298 * can only be called from flush_journal_list
1300 static int flush_older_journal_lists(struct super_block *sb,
1301 struct reiserfs_journal_list *jl)
1303 struct list_head *entry;
1304 struct reiserfs_journal_list *other_jl;
1305 struct reiserfs_journal *journal = SB_JOURNAL(sb);
1306 unsigned int trans_id = jl->j_trans_id;
1309 * we know we are the only ones flushing things, no extra race
1310 * protection is required.
1313 entry = journal->j_journal_list.next;
1315 if (entry == &journal->j_journal_list)
1317 other_jl = JOURNAL_LIST_ENTRY(entry);
1318 if (other_jl->j_trans_id < trans_id) {
1319 BUG_ON(other_jl->j_refcount <= 0);
1320 /* do not flush all */
1321 flush_journal_list(sb, other_jl, 0);
1323 /* other_jl is now deleted from the list */
1329 static void del_from_work_list(struct super_block *s,
1330 struct reiserfs_journal_list *jl)
1332 struct reiserfs_journal *journal = SB_JOURNAL(s);
1333 if (!list_empty(&jl->j_working_list)) {
1334 list_del_init(&jl->j_working_list);
1335 journal->j_num_work_lists--;
1340 * flush a journal list, both commit and real blocks
1342 * always set flushall to 1, unless you are calling from inside
1343 * flush_journal_list
1345 * IMPORTANT. This can only be called while there are no journal writers,
1346 * and the journal is locked. That means it can only be called from
1347 * do_journal_end, or by journal_release
1349 static int flush_journal_list(struct super_block *s,
1350 struct reiserfs_journal_list *jl, int flushall)
1352 struct reiserfs_journal_list *pjl;
1353 struct reiserfs_journal_cnode *cn, *last;
1357 struct buffer_head *saved_bh;
1358 unsigned long j_len_saved = jl->j_len;
1359 struct reiserfs_journal *journal = SB_JOURNAL(s);
1363 BUG_ON(j_len_saved <= 0);
1365 if (atomic_read(&journal->j_wcount) != 0) {
1366 reiserfs_warning(s, "clm-2048", "called with wcount %d",
1367 atomic_read(&journal->j_wcount));
1370 /* if flushall == 0, the lock is already held */
1372 reiserfs_mutex_lock_safe(&journal->j_flush_mutex, s);
1373 } else if (mutex_trylock(&journal->j_flush_mutex)) {
1378 if (j_len_saved > journal->j_trans_max) {
1379 reiserfs_panic(s, "journal-715", "length is %lu, trans id %lu",
1380 j_len_saved, jl->j_trans_id);
1384 /* if all the work is already done, get out of here */
1385 if (atomic_read(&jl->j_nonzerolen) <= 0 &&
1386 atomic_read(&jl->j_commit_left) <= 0) {
1387 goto flush_older_and_return;
1391 * start by putting the commit list on disk. This will also flush
1392 * the commit lists of any older transactions
1394 flush_commit_list(s, jl, 1);
1396 if (!(jl->j_state & LIST_DIRTY)
1397 && !reiserfs_is_journal_aborted(journal))
1400 /* are we done now? */
1401 if (atomic_read(&jl->j_nonzerolen) <= 0 &&
1402 atomic_read(&jl->j_commit_left) <= 0) {
1403 goto flush_older_and_return;
1407 * loop through each cnode, see if we need to write it,
1408 * or wait on a more recent transaction, or just ignore it
1410 if (atomic_read(&journal->j_wcount) != 0) {
1411 reiserfs_panic(s, "journal-844", "journal list is flushing, "
1414 cn = jl->j_realblock;
1419 /* blocknr of 0 is no longer in the hash, ignore it */
1420 if (cn->blocknr == 0) {
1425 * This transaction failed commit.
1426 * Don't write out to the disk
1428 if (!(jl->j_state & LIST_DIRTY))
1431 pjl = find_newer_jl_for_cn(cn);
1433 * the order is important here. We check pjl to make sure we
1434 * don't clear BH_JDirty_wait if we aren't the one writing this
1437 if (!pjl && cn->bh) {
1441 * we do this to make sure nobody releases the
1442 * buffer while we are working with it
1446 if (buffer_journal_dirty(saved_bh)) {
1447 BUG_ON(!can_dirty(cn));
1450 } else if (can_dirty(cn)) {
1452 * everything with !pjl && jwait
1453 * should be writable
1460 * if someone has this block in a newer transaction, just make
1461 * sure they are committed, and don't try writing it to disk
1464 if (atomic_read(&pjl->j_commit_left))
1465 flush_commit_list(s, pjl, 1);
1470 * bh == NULL when the block got to disk on its own, OR,
1471 * the block got freed in a future transaction
1473 if (saved_bh == NULL) {
1478 * this should never happen. kupdate_one_transaction has
1479 * this list locked while it works, so we should never see a
1480 * buffer here that is not marked JDirty_wait
1482 if ((!was_jwait) && !buffer_locked(saved_bh)) {
1483 reiserfs_warning(s, "journal-813",
1484 "BAD! buffer %llu %cdirty %cjwait, "
1485 "not in a newer transaction",
1486 (unsigned long long)saved_bh->
1487 b_blocknr, was_dirty ? ' ' : '!',
1488 was_jwait ? ' ' : '!');
1492 * we inc again because saved_bh gets decremented
1496 set_bit(BLOCK_NEEDS_FLUSH, &cn->state);
1497 lock_buffer(saved_bh);
1498 BUG_ON(cn->blocknr != saved_bh->b_blocknr);
1499 if (buffer_dirty(saved_bh))
1500 submit_logged_buffer(saved_bh);
1502 unlock_buffer(saved_bh);
1505 reiserfs_warning(s, "clm-2082",
1506 "Unable to flush buffer %llu in %s",
1507 (unsigned long long)saved_bh->
1508 b_blocknr, __func__);
1515 * we incremented this to keep others from
1516 * taking the buffer head away
1519 if (atomic_read(&saved_bh->b_count) < 0) {
1520 reiserfs_warning(s, "journal-945",
1521 "saved_bh->b_count < 0");
1526 cn = jl->j_realblock;
1528 if (test_bit(BLOCK_NEEDS_FLUSH, &cn->state)) {
1530 reiserfs_panic(s, "journal-1011",
1534 depth = reiserfs_write_unlock_nested(s);
1535 __wait_on_buffer(cn->bh);
1536 reiserfs_write_lock_nested(s, depth);
1539 reiserfs_panic(s, "journal-1012",
1542 if (unlikely(!buffer_uptodate(cn->bh))) {
1543 #ifdef CONFIG_REISERFS_CHECK
1544 reiserfs_warning(s, "journal-949",
1545 "buffer write failed");
1550 * note, we must clear the JDirty_wait bit
1551 * after the up to date check, otherwise we
1552 * race against our flushpage routine
1554 BUG_ON(!test_clear_buffer_journal_dirty
1557 /* drop one ref for us */
1559 /* drop one ref for journal_mark_dirty */
1560 release_buffer_page(cn->bh);
1567 reiserfs_abort(s, -EIO,
1568 "Write error while pushing transaction to disk in %s",
1570 flush_older_and_return:
1573 * before we can update the journal header block, we _must_ flush all
1574 * real blocks from all older transactions to disk. This is because
1575 * once the header block is updated, this transaction will not be
1576 * replayed after a crash
1579 flush_older_journal_lists(s, jl);
1582 err = journal->j_errno;
1584 * before we can remove everything from the hash tables for this
1585 * transaction, we must make sure it can never be replayed
1587 * since we are only called from do_journal_end, we know for sure there
1588 * are no allocations going on while we are flushing journal lists. So,
1589 * we only need to update the journal header block for the last list
1592 if (!err && flushall) {
1594 update_journal_header_block(s,
1595 (jl->j_start + jl->j_len +
1596 2) % SB_ONDISK_JOURNAL_SIZE(s),
1599 reiserfs_abort(s, -EIO,
1600 "Write error while updating journal header in %s",
1603 remove_all_from_journal_list(s, jl, 0);
1604 list_del_init(&jl->j_list);
1605 journal->j_num_lists--;
1606 del_from_work_list(s, jl);
1608 if (journal->j_last_flush_id != 0 &&
1609 (jl->j_trans_id - journal->j_last_flush_id) != 1) {
1610 reiserfs_warning(s, "clm-2201", "last flush %lu, current %lu",
1611 journal->j_last_flush_id, jl->j_trans_id);
1613 journal->j_last_flush_id = jl->j_trans_id;
1616 * not strictly required since we are freeing the list, but it should
1617 * help find code using dead lists later on
1620 atomic_set(&jl->j_nonzerolen, 0);
1622 jl->j_realblock = NULL;
1623 jl->j_commit_bh = NULL;
1626 put_journal_list(s, jl);
1628 mutex_unlock(&journal->j_flush_mutex);
1632 static int write_one_transaction(struct super_block *s,
1633 struct reiserfs_journal_list *jl,
1634 struct buffer_chunk *chunk)
1636 struct reiserfs_journal_cnode *cn;
1639 jl->j_state |= LIST_TOUCHED;
1640 del_from_work_list(s, jl);
1641 if (jl->j_len == 0 || atomic_read(&jl->j_nonzerolen) == 0) {
1645 cn = jl->j_realblock;
1648 * if the blocknr == 0, this has been cleared from the hash,
1651 if (cn->blocknr == 0) {
1654 if (cn->bh && can_dirty(cn) && buffer_dirty(cn->bh)) {
1655 struct buffer_head *tmp_bh;
1657 * we can race against journal_mark_freed when we try
1658 * to lock_buffer(cn->bh), so we have to inc the buffer
1659 * count, and recheck things after locking
1663 lock_buffer(tmp_bh);
1664 if (cn->bh && can_dirty(cn) && buffer_dirty(tmp_bh)) {
1665 if (!buffer_journal_dirty(tmp_bh) ||
1666 buffer_journal_prepared(tmp_bh))
1668 add_to_chunk(chunk, tmp_bh, NULL, write_chunk);
1671 /* note, cn->bh might be null now */
1672 unlock_buffer(tmp_bh);
1683 /* used by flush_commit_list */
1684 static int dirty_one_transaction(struct super_block *s,
1685 struct reiserfs_journal_list *jl)
1687 struct reiserfs_journal_cnode *cn;
1688 struct reiserfs_journal_list *pjl;
1691 jl->j_state |= LIST_DIRTY;
1692 cn = jl->j_realblock;
1695 * look for a more recent transaction that logged this
1696 * buffer. Only the most recent transaction with a buffer in
1697 * it is allowed to send that buffer to disk
1699 pjl = find_newer_jl_for_cn(cn);
1700 if (!pjl && cn->blocknr && cn->bh
1701 && buffer_journal_dirty(cn->bh)) {
1702 BUG_ON(!can_dirty(cn));
1704 * if the buffer is prepared, it will either be logged
1705 * or restored. If restored, we need to make sure
1706 * it actually gets marked dirty
1708 clear_buffer_journal_new(cn->bh);
1709 if (buffer_journal_prepared(cn->bh)) {
1710 set_buffer_journal_restore_dirty(cn->bh);
1712 set_buffer_journal_test(cn->bh);
1713 mark_buffer_dirty(cn->bh);
1721 static int kupdate_transactions(struct super_block *s,
1722 struct reiserfs_journal_list *jl,
1723 struct reiserfs_journal_list **next_jl,
1724 unsigned int *next_trans_id,
1725 int num_blocks, int num_trans)
1729 int transactions_flushed = 0;
1730 unsigned int orig_trans_id = jl->j_trans_id;
1731 struct buffer_chunk chunk;
1732 struct list_head *entry;
1733 struct reiserfs_journal *journal = SB_JOURNAL(s);
1736 reiserfs_mutex_lock_safe(&journal->j_flush_mutex, s);
1737 if (!journal_list_still_alive(s, orig_trans_id)) {
1742 * we've got j_flush_mutex held, nobody is going to delete any
1743 * of these lists out from underneath us
1745 while ((num_trans && transactions_flushed < num_trans) ||
1746 (!num_trans && written < num_blocks)) {
1748 if (jl->j_len == 0 || (jl->j_state & LIST_TOUCHED) ||
1749 atomic_read(&jl->j_commit_left)
1750 || !(jl->j_state & LIST_DIRTY)) {
1751 del_from_work_list(s, jl);
1754 ret = write_one_transaction(s, jl, &chunk);
1758 transactions_flushed++;
1760 entry = jl->j_list.next;
1763 if (entry == &journal->j_journal_list) {
1766 jl = JOURNAL_LIST_ENTRY(entry);
1768 /* don't bother with older transactions */
1769 if (jl->j_trans_id <= orig_trans_id)
1773 write_chunk(&chunk);
1777 mutex_unlock(&journal->j_flush_mutex);
1782 * o_sync and fsync heavy applications tend to use up
1783 * all the journal list slots with tiny transactions. These
1784 * trigger lots and lots of calls to update the header block, which
1785 * adds seeks and slows things down.
1787 * This function tries to clear out a large chunk of the journal lists
1788 * at once, which makes everything faster since only the newest journal
1789 * list updates the header block
1791 static int flush_used_journal_lists(struct super_block *s,
1792 struct reiserfs_journal_list *jl)
1794 unsigned long len = 0;
1795 unsigned long cur_len;
1799 struct reiserfs_journal_list *tjl;
1800 struct reiserfs_journal_list *flush_jl;
1801 unsigned int trans_id;
1802 struct reiserfs_journal *journal = SB_JOURNAL(s);
1804 flush_jl = tjl = jl;
1806 /* in data logging mode, try harder to flush a lot of blocks */
1807 if (reiserfs_data_log(s))
1809 /* flush for 256 transactions or limit blocks, whichever comes first */
1810 for (i = 0; i < 256 && len < limit; i++) {
1811 if (atomic_read(&tjl->j_commit_left) ||
1812 tjl->j_trans_id < jl->j_trans_id) {
1815 cur_len = atomic_read(&tjl->j_nonzerolen);
1817 tjl->j_state &= ~LIST_TOUCHED;
1821 if (tjl->j_list.next == &journal->j_journal_list)
1823 tjl = JOURNAL_LIST_ENTRY(tjl->j_list.next);
1825 get_journal_list(jl);
1826 get_journal_list(flush_jl);
1829 * try to find a group of blocks we can flush across all the
1830 * transactions, but only bother if we've actually spanned
1831 * across multiple lists
1833 if (flush_jl != jl) {
1834 ret = kupdate_transactions(s, jl, &tjl, &trans_id, len, i);
1836 flush_journal_list(s, flush_jl, 1);
1837 put_journal_list(s, flush_jl);
1838 put_journal_list(s, jl);
1843 * removes any nodes in table that match the given block number and superblock.
1844 * only touches the hnext and hprev pointers.
1846 void remove_journal_hash(struct super_block *sb,
1847 struct reiserfs_journal_cnode **table,
1848 struct reiserfs_journal_list *jl,
1849 unsigned long block, int remove_freed)
1851 struct reiserfs_journal_cnode *cur;
1852 struct reiserfs_journal_cnode **head;
1854 head = &(journal_hash(table, sb, block));
1860 if (cur->blocknr == block && cur->sb == sb
1861 && (jl == NULL || jl == cur->jlist)
1862 && (!test_bit(BLOCK_FREED, &cur->state) || remove_freed)) {
1864 cur->hnext->hprev = cur->hprev;
1867 cur->hprev->hnext = cur->hnext;
1875 * anybody who clears the cur->bh will also
1876 * dec the nonzerolen
1878 if (cur->bh && cur->jlist)
1879 atomic_dec(&cur->jlist->j_nonzerolen);
1887 static void free_journal_ram(struct super_block *sb)
1889 struct reiserfs_journal *journal = SB_JOURNAL(sb);
1890 kfree(journal->j_current_jl);
1891 journal->j_num_lists--;
1893 vfree(journal->j_cnode_free_orig);
1894 free_list_bitmaps(sb, journal->j_list_bitmap);
1895 free_bitmap_nodes(sb); /* must be after free_list_bitmaps */
1896 if (journal->j_header_bh) {
1897 brelse(journal->j_header_bh);
1900 * j_header_bh is on the journal dev, make sure
1901 * not to release the journal dev until we brelse j_header_bh
1903 release_journal_dev(sb, journal);
1908 * call on unmount. Only set error to 1 if you haven't made your way out
1909 * of read_super() yet. Any other caller must keep error at 0.
1911 static int do_journal_release(struct reiserfs_transaction_handle *th,
1912 struct super_block *sb, int error)
1914 struct reiserfs_transaction_handle myth;
1916 struct reiserfs_journal *journal = SB_JOURNAL(sb);
1919 * we only want to flush out transactions if we were
1920 * called with error == 0
1922 if (!error && !sb_rdonly(sb)) {
1923 /* end the current trans */
1924 BUG_ON(!th->t_trans_id);
1925 do_journal_end(th, FLUSH_ALL);
1928 * make sure something gets logged to force
1929 * our way into the flush code
1931 if (!journal_join(&myth, sb)) {
1932 reiserfs_prepare_for_journal(sb,
1933 SB_BUFFER_WITH_SB(sb),
1935 journal_mark_dirty(&myth, SB_BUFFER_WITH_SB(sb));
1936 do_journal_end(&myth, FLUSH_ALL);
1941 /* this also catches errors during the do_journal_end above */
1942 if (!error && reiserfs_is_journal_aborted(journal)) {
1943 memset(&myth, 0, sizeof(myth));
1944 if (!journal_join_abort(&myth, sb)) {
1945 reiserfs_prepare_for_journal(sb,
1946 SB_BUFFER_WITH_SB(sb),
1948 journal_mark_dirty(&myth, SB_BUFFER_WITH_SB(sb));
1949 do_journal_end(&myth, FLUSH_ALL);
1955 * We must release the write lock here because
1956 * the workqueue job (flush_async_commit) needs this lock
1958 reiserfs_write_unlock(sb);
1961 * Cancel flushing of old commits. Note that neither of these works
1962 * will be requeued because superblock is being shutdown and doesn't
1963 * have MS_ACTIVE set.
1965 reiserfs_cancel_old_flush(sb);
1966 /* wait for all commits to finish */
1967 cancel_delayed_work_sync(&SB_JOURNAL(sb)->j_work);
1969 free_journal_ram(sb);
1971 reiserfs_write_lock(sb);
1976 /* call on unmount. flush all journal trans, release all alloc'd ram */
1977 int journal_release(struct reiserfs_transaction_handle *th,
1978 struct super_block *sb)
1980 return do_journal_release(th, sb, 0);
1983 /* only call from an error condition inside reiserfs_read_super! */
1984 int journal_release_error(struct reiserfs_transaction_handle *th,
1985 struct super_block *sb)
1987 return do_journal_release(th, sb, 1);
1991 * compares description block with commit block.
1992 * returns 1 if they differ, 0 if they are the same
1994 static int journal_compare_desc_commit(struct super_block *sb,
1995 struct reiserfs_journal_desc *desc,
1996 struct reiserfs_journal_commit *commit)
1998 if (get_commit_trans_id(commit) != get_desc_trans_id(desc) ||
1999 get_commit_trans_len(commit) != get_desc_trans_len(desc) ||
2000 get_commit_trans_len(commit) > SB_JOURNAL(sb)->j_trans_max ||
2001 get_commit_trans_len(commit) <= 0) {
2008 * returns 0 if it did not find a description block
2009 * returns -1 if it found a corrupt commit block
2010 * returns 1 if both desc and commit were valid
2011 * NOTE: only called during fs mount
2013 static int journal_transaction_is_valid(struct super_block *sb,
2014 struct buffer_head *d_bh,
2015 unsigned int *oldest_invalid_trans_id,
2016 unsigned long *newest_mount_id)
2018 struct reiserfs_journal_desc *desc;
2019 struct reiserfs_journal_commit *commit;
2020 struct buffer_head *c_bh;
2021 unsigned long offset;
2026 desc = (struct reiserfs_journal_desc *)d_bh->b_data;
2027 if (get_desc_trans_len(desc) > 0
2028 && !memcmp(get_journal_desc_magic(d_bh), JOURNAL_DESC_MAGIC, 8)) {
2029 if (oldest_invalid_trans_id && *oldest_invalid_trans_id
2030 && get_desc_trans_id(desc) > *oldest_invalid_trans_id) {
2031 reiserfs_debug(sb, REISERFS_DEBUG_CODE,
2032 "journal-986: transaction "
2033 "is valid returning because trans_id %d is greater than "
2034 "oldest_invalid %lu",
2035 get_desc_trans_id(desc),
2036 *oldest_invalid_trans_id);
2040 && *newest_mount_id > get_desc_mount_id(desc)) {
2041 reiserfs_debug(sb, REISERFS_DEBUG_CODE,
2042 "journal-1087: transaction "
2043 "is valid returning because mount_id %d is less than "
2044 "newest_mount_id %lu",
2045 get_desc_mount_id(desc),
2049 if (get_desc_trans_len(desc) > SB_JOURNAL(sb)->j_trans_max) {
2050 reiserfs_warning(sb, "journal-2018",
2051 "Bad transaction length %d "
2052 "encountered, ignoring transaction",
2053 get_desc_trans_len(desc));
2056 offset = d_bh->b_blocknr - SB_ONDISK_JOURNAL_1st_BLOCK(sb);
2059 * ok, we have a journal description block,
2060 * let's see if the transaction was valid
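* the commit block sits get_desc_trans_len(desc) + 1 blocks past the desc
* block, wrapping around the circular log area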
2064 SB_ONDISK_JOURNAL_1st_BLOCK(sb) +
2065 ((offset + get_desc_trans_len(desc) +
2066 1) % SB_ONDISK_JOURNAL_SIZE(sb)));
2069 commit = (struct reiserfs_journal_commit *)c_bh->b_data;
2070 if (journal_compare_desc_commit(sb, desc, commit)) {
2071 reiserfs_debug(sb, REISERFS_DEBUG_CODE,
2072 "journal_transaction_is_valid, commit offset %ld had bad "
2073 "time %d or length %d",
2075 SB_ONDISK_JOURNAL_1st_BLOCK(sb),
2076 get_commit_trans_id(commit),
2077 get_commit_trans_len(commit));
2079 if (oldest_invalid_trans_id) {
2080 *oldest_invalid_trans_id =
2081 get_desc_trans_id(desc);
2082 reiserfs_debug(sb, REISERFS_DEBUG_CODE,
2084 "transaction_is_valid setting oldest invalid trans_id "
2086 get_desc_trans_id(desc));
2091 reiserfs_debug(sb, REISERFS_DEBUG_CODE,
2092 "journal-1006: found valid "
2093 "transaction start offset %llu, len %d id %d",
2095 SB_ONDISK_JOURNAL_1st_BLOCK(sb),
2096 get_desc_trans_len(desc),
2097 get_desc_trans_id(desc));
2104 static void brelse_array(struct buffer_head **heads, int num)
2107 for (i = 0; i < num; i++) {
2113 * given the start, and values for the oldest acceptable transactions,
2114 * this either reads in and replays a transaction, or returns because the
2115 * transaction is invalid, or too old.
2116 * NOTE: only called during fs mount
2118 static int journal_read_transaction(struct super_block *sb,
2119 unsigned long cur_dblock,
2120 unsigned long oldest_start,
2121 unsigned int oldest_trans_id,
2122 unsigned long newest_mount_id)
2124 struct reiserfs_journal *journal = SB_JOURNAL(sb);
2125 struct reiserfs_journal_desc *desc;
2126 struct reiserfs_journal_commit *commit;
2127 unsigned int trans_id = 0;
2128 struct buffer_head *c_bh;
2129 struct buffer_head *d_bh;
2130 struct buffer_head **log_blocks = NULL;
2131 struct buffer_head **real_blocks = NULL;
2132 unsigned int trans_offset;
2136 d_bh = journal_bread(sb, cur_dblock);
2139 desc = (struct reiserfs_journal_desc *)d_bh->b_data;
2140 trans_offset = d_bh->b_blocknr - SB_ONDISK_JOURNAL_1st_BLOCK(sb);
2141 reiserfs_debug(sb, REISERFS_DEBUG_CODE, "journal-1037: "
2142 "journal_read_transaction, offset %llu, len %d mount_id %d",
2143 d_bh->b_blocknr - SB_ONDISK_JOURNAL_1st_BLOCK(sb),
2144 get_desc_trans_len(desc), get_desc_mount_id(desc));
2145 if (get_desc_trans_id(desc) < oldest_trans_id) {
2146 reiserfs_debug(sb, REISERFS_DEBUG_CODE, "journal-1039: "
2147 "journal_read_trans skipping because %lu is too old",
2149 SB_ONDISK_JOURNAL_1st_BLOCK(sb));
2153 if (get_desc_mount_id(desc) != newest_mount_id) {
2154 reiserfs_debug(sb, REISERFS_DEBUG_CODE, "journal-1146: "
2155 "journal_read_trans skipping because %d is != "
2156 "newest_mount_id %lu", get_desc_mount_id(desc),
2161 c_bh = journal_bread(sb, SB_ONDISK_JOURNAL_1st_BLOCK(sb) +
2162 ((trans_offset + get_desc_trans_len(desc) + 1) %
2163 SB_ONDISK_JOURNAL_SIZE(sb)));
2168 commit = (struct reiserfs_journal_commit *)c_bh->b_data;
2169 if (journal_compare_desc_commit(sb, desc, commit)) {
2170 reiserfs_debug(sb, REISERFS_DEBUG_CODE,
2171 "journal_read_transaction, "
2172 "commit offset %llu had bad time %d or length %d",
2174 SB_ONDISK_JOURNAL_1st_BLOCK(sb),
2175 get_commit_trans_id(commit),
2176 get_commit_trans_len(commit));
2182 if (bdev_read_only(sb->s_bdev)) {
2183 reiserfs_warning(sb, "clm-2076",
2184 "device is readonly, unable to replay log");
2190 trans_id = get_desc_trans_id(desc);
2192 * now we know we've got a good transaction, and it was
2193 * inside the valid time ranges
2195 log_blocks = kmalloc(get_desc_trans_len(desc) *
2196 sizeof(struct buffer_head *), GFP_NOFS);
2197 real_blocks = kmalloc(get_desc_trans_len(desc) *
2198 sizeof(struct buffer_head *), GFP_NOFS);
2199 if (!log_blocks || !real_blocks) {
2204 reiserfs_warning(sb, "journal-1169",
2205 "kmalloc failed, unable to mount FS");
2208 /* get all the buffer heads */
2209 trans_half = journal_trans_half(sb->s_blocksize);
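/*
* the real block numbers of the transaction are split: the first
* trans_half entries live in the desc block, the rest in the commit block
*/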
2210 for (i = 0; i < get_desc_trans_len(desc); i++) {
2213 SB_ONDISK_JOURNAL_1st_BLOCK(sb) +
2215 i) % SB_ONDISK_JOURNAL_SIZE(sb));
2216 if (i < trans_half) {
2219 le32_to_cpu(desc->j_realblock[i]));
2223 le32_to_cpu(commit->
2224 j_realblock[i - trans_half]));
2226 if (real_blocks[i]->b_blocknr > SB_BLOCK_COUNT(sb)) {
2227 reiserfs_warning(sb, "journal-1207",
2228 "REPLAY FAILURE fsck required! "
2229 "Block to replay is outside of "
2233 /* make sure we don't try to replay onto log or reserved area */
2234 if (is_block_in_log_or_reserved_area
2235 (sb, real_blocks[i]->b_blocknr)) {
2236 reiserfs_warning(sb, "journal-1204",
2237 "REPLAY FAILURE fsck required! "
2238 "Trying to replay onto a log block");
2240 brelse_array(log_blocks, i);
2241 brelse_array(real_blocks, i);
2249 /* read in the log blocks, memcpy to the corresponding real block */
2250 ll_rw_block(REQ_OP_READ, 0, get_desc_trans_len(desc), log_blocks);
2251 for (i = 0; i < get_desc_trans_len(desc); i++) {
2253 wait_on_buffer(log_blocks[i]);
2254 if (!buffer_uptodate(log_blocks[i])) {
2255 reiserfs_warning(sb, "journal-1212",
2256 "REPLAY FAILURE fsck required! "
2257 "buffer write failed");
2258 brelse_array(log_blocks + i,
2259 get_desc_trans_len(desc) - i);
2260 brelse_array(real_blocks, get_desc_trans_len(desc));
2267 memcpy(real_blocks[i]->b_data, log_blocks[i]->b_data,
2268 real_blocks[i]->b_size);
2269 set_buffer_uptodate(real_blocks[i]);
2270 brelse(log_blocks[i]);
2272 /* flush out the real blocks */
2273 for (i = 0; i < get_desc_trans_len(desc); i++) {
2274 set_buffer_dirty(real_blocks[i]);
2275 write_dirty_buffer(real_blocks[i], 0);
2277 for (i = 0; i < get_desc_trans_len(desc); i++) {
2278 wait_on_buffer(real_blocks[i]);
2279 if (!buffer_uptodate(real_blocks[i])) {
2280 reiserfs_warning(sb, "journal-1226",
2281 "REPLAY FAILURE, fsck required! "
2282 "buffer write failed");
2283 brelse_array(real_blocks + i,
2284 get_desc_trans_len(desc) - i);
2291 brelse(real_blocks[i]);
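/*
* advance past this transaction: the + 2 accounts for the desc and
* commit blocks surrounding the data blocks, wrapping around the log
*/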
2294 SB_ONDISK_JOURNAL_1st_BLOCK(sb) +
2295 ((trans_offset + get_desc_trans_len(desc) +
2296 2) % SB_ONDISK_JOURNAL_SIZE(sb));
2297 reiserfs_debug(sb, REISERFS_DEBUG_CODE,
2298 "journal-1095: setting journal " "start to offset %ld",
2299 cur_dblock - SB_ONDISK_JOURNAL_1st_BLOCK(sb));
2302 * init starting values for the first transaction, in case
2303 * this is the last transaction to be replayed.
2305 journal->j_start = cur_dblock - SB_ONDISK_JOURNAL_1st_BLOCK(sb);
2306 journal->j_last_flush_trans_id = trans_id;
2307 journal->j_trans_id = trans_id + 1;
2308 /* check for trans_id overflow */
2309 if (journal->j_trans_id == 0)
2310 journal->j_trans_id = 10;
2319 * This function reads blocks starting from block and to max_block of bufsize
2320 * size (but no more than BUFNR blocks at a time). This proved to improve
2321 * mounting speed on self-rebuilding raid5 arrays at least.
2322 * Right now it is only used from journal code. But later we might use it
2323 * from other places.
2324 * Note: Do not use journal_getblk/sb_getblk functions here!
2326 static struct buffer_head *reiserfs_breada(struct block_device *dev,
2327 b_blocknr_t block, int bufsize,
2328 b_blocknr_t max_block)
2330 struct buffer_head *bhlist[BUFNR];
2331 unsigned int blocks = BUFNR;
2332 struct buffer_head *bh;
2335 bh = __getblk(dev, block, bufsize);
2336 if (buffer_uptodate(bh))
2339 if (block + BUFNR > max_block) {
2340 blocks = max_block - block;
2344 for (i = 1; i < blocks; i++) {
2345 bh = __getblk(dev, block + i, bufsize);
2346 if (buffer_uptodate(bh)) {
2352 ll_rw_block(REQ_OP_READ, 0, j, bhlist);
2353 for (i = 1; i < j; i++)
2357 if (buffer_uptodate(bh))
2364 * read and replay the log
2365 * on a clean unmount, the journal header's next unflushed pointer will be
2366 * to an invalid transaction. This tests that before finding all the
2367 * transactions in the log, which makes normal mount times fast.
2369 * After a crash, this starts with the next unflushed transaction, and
2370 * replays until it finds one too old, or invalid.
2372 * On exit, it sets things up so the first transaction will work correctly.
2373 * NOTE: only called during fs mount
2375 static int journal_read(struct super_block *sb)
2377 struct reiserfs_journal *journal = SB_JOURNAL(sb);
2378 struct reiserfs_journal_desc *desc;
2379 unsigned int oldest_trans_id = 0;
2380 unsigned int oldest_invalid_trans_id = 0;
2382 unsigned long oldest_start = 0;
2383 unsigned long cur_dblock = 0;
2384 unsigned long newest_mount_id = 9;
2385 struct buffer_head *d_bh;
2386 struct reiserfs_journal_header *jh;
2387 int valid_journal_header = 0;
2388 int replay_count = 0;
2389 int continue_replay = 1;
2392 cur_dblock = SB_ONDISK_JOURNAL_1st_BLOCK(sb);
2393 reiserfs_info(sb, "checking transaction log (%pg)\n",
2395 start = get_seconds();
2398 * step 1, read in the journal header block. Check the transaction
2399 * it says is the first unflushed, and if that transaction is not
2400 * valid, replay is done
2402 journal->j_header_bh = journal_bread(sb,
2403 SB_ONDISK_JOURNAL_1st_BLOCK(sb)
2404 + SB_ONDISK_JOURNAL_SIZE(sb));
2405 if (!journal->j_header_bh) {
2408 jh = (struct reiserfs_journal_header *)(journal->j_header_bh->b_data);
2409 if (le32_to_cpu(jh->j_first_unflushed_offset) <
2410 SB_ONDISK_JOURNAL_SIZE(sb)
2411 && le32_to_cpu(jh->j_last_flush_trans_id) > 0) {
2413 SB_ONDISK_JOURNAL_1st_BLOCK(sb) +
2414 le32_to_cpu(jh->j_first_unflushed_offset);
2415 oldest_trans_id = le32_to_cpu(jh->j_last_flush_trans_id) + 1;
2416 newest_mount_id = le32_to_cpu(jh->j_mount_id);
2417 reiserfs_debug(sb, REISERFS_DEBUG_CODE,
2418 "journal-1153: found in "
2419 "header: first_unflushed_offset %d, last_flushed_trans_id "
2420 "%lu", le32_to_cpu(jh->j_first_unflushed_offset),
2421 le32_to_cpu(jh->j_last_flush_trans_id));
2422 valid_journal_header = 1;
2425 * now, we try to read the first unflushed offset. If it
2426 * is not valid, there is nothing more we can do, and it
2427 * makes no sense to read through the whole log.
2431 SB_ONDISK_JOURNAL_1st_BLOCK(sb) +
2432 le32_to_cpu(jh->j_first_unflushed_offset));
2433 ret = journal_transaction_is_valid(sb, d_bh, NULL, NULL);
2435 continue_replay = 0;
2438 goto start_log_replay;
2442 * ok, there are transactions that need to be replayed. start
2443 * with the first log block, find all the valid transactions, and
2444 * pick out the oldest.
2446 while (continue_replay
2448 (SB_ONDISK_JOURNAL_1st_BLOCK(sb) +
2449 SB_ONDISK_JOURNAL_SIZE(sb))) {
2451	 * Note that the blocksize of the primary fs
2452	 * device and of the journal device must be the same
2455 reiserfs_breada(journal->j_dev_bd, cur_dblock,
2457 SB_ONDISK_JOURNAL_1st_BLOCK(sb) +
2458 SB_ONDISK_JOURNAL_SIZE(sb));
2460 journal_transaction_is_valid(sb, d_bh,
2461 &oldest_invalid_trans_id,
2464 desc = (struct reiserfs_journal_desc *)d_bh->b_data;
2465 if (oldest_start == 0) { /* init all oldest_ values */
2466 oldest_trans_id = get_desc_trans_id(desc);
2467 oldest_start = d_bh->b_blocknr;
2468 newest_mount_id = get_desc_mount_id(desc);
2469 reiserfs_debug(sb, REISERFS_DEBUG_CODE,
2470 "journal-1179: Setting "
2471 "oldest_start to offset %llu, trans_id %lu",
2473 SB_ONDISK_JOURNAL_1st_BLOCK
2474 (sb), oldest_trans_id);
2475 } else if (oldest_trans_id > get_desc_trans_id(desc)) {
2476 /* one we just read was older */
2477 oldest_trans_id = get_desc_trans_id(desc);
2478 oldest_start = d_bh->b_blocknr;
2479 reiserfs_debug(sb, REISERFS_DEBUG_CODE,
2480 "journal-1180: Resetting "
2481 "oldest_start to offset %lu, trans_id %lu",
2483 SB_ONDISK_JOURNAL_1st_BLOCK
2484 (sb), oldest_trans_id);
2486 if (newest_mount_id < get_desc_mount_id(desc)) {
2487 newest_mount_id = get_desc_mount_id(desc);
2488 reiserfs_debug(sb, REISERFS_DEBUG_CODE,
2489 "journal-1299: Setting "
2490 "newest_mount_id to %d",
2491 get_desc_mount_id(desc));
2493 cur_dblock += get_desc_trans_len(desc) + 2;
2501 cur_dblock = oldest_start;
2502 if (oldest_trans_id) {
2503 reiserfs_debug(sb, REISERFS_DEBUG_CODE,
2504 "journal-1206: Starting replay "
2505 "from offset %llu, trans_id %lu",
2506 cur_dblock - SB_ONDISK_JOURNAL_1st_BLOCK(sb),
2511 while (continue_replay && oldest_trans_id > 0) {
2513 journal_read_transaction(sb, cur_dblock, oldest_start,
2514 oldest_trans_id, newest_mount_id);
2517 } else if (ret != 0) {
2521 SB_ONDISK_JOURNAL_1st_BLOCK(sb) + journal->j_start;
2523 if (cur_dblock == oldest_start)
2527 if (oldest_trans_id == 0) {
2528 reiserfs_debug(sb, REISERFS_DEBUG_CODE,
2529 "journal-1225: No valid " "transactions found");
2532 * j_start does not get set correctly if we don't replay any
2533 * transactions. if we had a valid journal_header, set j_start
2534 * to the first unflushed transaction value, copy the trans_id
2537 if (valid_journal_header && replay_count == 0) {
2538 journal->j_start = le32_to_cpu(jh->j_first_unflushed_offset);
2539 journal->j_trans_id =
2540 le32_to_cpu(jh->j_last_flush_trans_id) + 1;
2541 /* check for trans_id overflow */
2542 if (journal->j_trans_id == 0)
2543 journal->j_trans_id = 10;
2544 journal->j_last_flush_trans_id =
2545 le32_to_cpu(jh->j_last_flush_trans_id);
2546 journal->j_mount_id = le32_to_cpu(jh->j_mount_id) + 1;
2548 journal->j_mount_id = newest_mount_id + 1;
2550 reiserfs_debug(sb, REISERFS_DEBUG_CODE, "journal-1299: Setting "
2551 "newest_mount_id to %lu", journal->j_mount_id);
2552 journal->j_first_unflushed_offset = journal->j_start;
2553 if (replay_count > 0) {
2555 "replayed %d transactions in %lu seconds\n",
2556 replay_count, get_seconds() - start);
2558 /* needed to satisfy the locking in _update_journal_header_block */
2559 reiserfs_write_lock(sb);
2560 if (!bdev_read_only(sb->s_bdev) &&
2561 _update_journal_header_block(sb, journal->j_start,
2562 journal->j_last_flush_trans_id)) {
2563 reiserfs_write_unlock(sb);
2565 * replay failed, caller must call free_journal_ram and abort
2570 reiserfs_write_unlock(sb);
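/*
 * Editorial sketch (not part of the original file): the quick exit on a
 * cleanly unmounted filesystem boils down to the two header fields tested
 * in step 1 above -- the first unflushed offset must point inside the log
 * and at least one flush must have happened before the header is worth
 * following.  header_worth_following() is a hypothetical name.
 */
static int header_worth_following(struct super_block *sb,
				  struct reiserfs_journal_header *jh)
{
	return le32_to_cpu(jh->j_first_unflushed_offset) <
			SB_ONDISK_JOURNAL_SIZE(sb) &&
	       le32_to_cpu(jh->j_last_flush_trans_id) > 0;
}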
2574 static struct reiserfs_journal_list *alloc_journal_list(struct super_block *s)
2576 struct reiserfs_journal_list *jl;
2577 jl = kzalloc(sizeof(struct reiserfs_journal_list),
2578 GFP_NOFS | __GFP_NOFAIL);
2579 INIT_LIST_HEAD(&jl->j_list);
2580 INIT_LIST_HEAD(&jl->j_working_list);
2581 INIT_LIST_HEAD(&jl->j_tail_bh_list);
2582 INIT_LIST_HEAD(&jl->j_bh_list);
2583 mutex_init(&jl->j_commit_mutex);
2584 SB_JOURNAL(s)->j_num_lists++;
2585 get_journal_list(jl);
2589 static void journal_list_init(struct super_block *sb)
2591 SB_JOURNAL(sb)->j_current_jl = alloc_journal_list(sb);
2594 static void release_journal_dev(struct super_block *super,
2595 struct reiserfs_journal *journal)
2597 if (journal->j_dev_bd != NULL) {
2598 blkdev_put(journal->j_dev_bd, journal->j_dev_mode);
2599 journal->j_dev_bd = NULL;
2603 static int journal_init_dev(struct super_block *super,
2604 struct reiserfs_journal *journal,
2605 const char *jdev_name)
2609 fmode_t blkdev_mode = FMODE_READ | FMODE_WRITE | FMODE_EXCL;
2610 char b[BDEVNAME_SIZE];
2614 journal->j_dev_bd = NULL;
2615 jdev = SB_ONDISK_JOURNAL_DEVICE(super) ?
2616 new_decode_dev(SB_ONDISK_JOURNAL_DEVICE(super)) : super->s_dev;
2618 if (bdev_read_only(super->s_bdev))
2619 blkdev_mode = FMODE_READ;
2621 /* there is no "jdev" option and journal is on separate device */
2622 if ((!jdev_name || !jdev_name[0])) {
2623 if (jdev == super->s_dev)
2624 blkdev_mode &= ~FMODE_EXCL;
2625 journal->j_dev_bd = blkdev_get_by_dev(jdev, blkdev_mode,
2627 journal->j_dev_mode = blkdev_mode;
2628 if (IS_ERR(journal->j_dev_bd)) {
2629 result = PTR_ERR(journal->j_dev_bd);
2630 journal->j_dev_bd = NULL;
2631 reiserfs_warning(super, "sh-458",
2632 "cannot init journal device '%s': %i",
2633 __bdevname(jdev, b), result);
2635 } else if (jdev != super->s_dev)
2636 set_blocksize(journal->j_dev_bd, super->s_blocksize);
2641 journal->j_dev_mode = blkdev_mode;
2642 journal->j_dev_bd = blkdev_get_by_path(jdev_name, blkdev_mode, journal);
2643 if (IS_ERR(journal->j_dev_bd)) {
2644 result = PTR_ERR(journal->j_dev_bd);
2645 journal->j_dev_bd = NULL;
2646 reiserfs_warning(super, "sh-457",
2647 "journal_init_dev: Cannot open '%s': %i",
2652 set_blocksize(journal->j_dev_bd, super->s_blocksize);
2653 reiserfs_info(super,
2654 "journal_init_dev: journal device: %pg\n",
2660 * When creating/tuning a file system a user can assign some
2661 * journal params within boundaries which depend on the ratio
2662 * blocksize/standard_blocksize.
2664 * For blocks >= standard_blocksize the transaction size should
2665 * be not less than JOURNAL_TRANS_MIN_DEFAULT, and not more
2666 * than JOURNAL_TRANS_MAX_DEFAULT.
2668 * For blocks < standard_blocksize these boundaries should be
2669 * decreased proportionally.
2671 #define REISERFS_STANDARD_BLKSIZE (4096)
2673 static int check_advise_trans_params(struct super_block *sb,
2674 struct reiserfs_journal *journal)
2676 if (journal->j_trans_max) {
2677 /* Non-default journal params. Do sanity check for them. */
2679 if (sb->s_blocksize < REISERFS_STANDARD_BLKSIZE)
2680 ratio = REISERFS_STANDARD_BLKSIZE / sb->s_blocksize;
2682 if (journal->j_trans_max > JOURNAL_TRANS_MAX_DEFAULT / ratio ||
2683 journal->j_trans_max < JOURNAL_TRANS_MIN_DEFAULT / ratio ||
2684 SB_ONDISK_JOURNAL_SIZE(sb) / journal->j_trans_max <
2685 JOURNAL_MIN_RATIO) {
2686 reiserfs_warning(sb, "sh-462",
2687 "bad transaction max size (%u). "
2688 "FSCK?", journal->j_trans_max);
2691 if (journal->j_max_batch != (journal->j_trans_max) *
2692 JOURNAL_MAX_BATCH_DEFAULT/JOURNAL_TRANS_MAX_DEFAULT) {
2693 reiserfs_warning(sb, "sh-463",
2694 "bad transaction max batch (%u). "
2695 "FSCK?", journal->j_max_batch);
2700 * Default journal params.
2701	 * The file system was created by an old version
2702	 * of mkreiserfs, so some fields contain zeros,
2703	 * and we need to advise proper values for them
2705 if (sb->s_blocksize != REISERFS_STANDARD_BLKSIZE) {
2706 reiserfs_warning(sb, "sh-464", "bad blocksize (%u)",
2710 journal->j_trans_max = JOURNAL_TRANS_MAX_DEFAULT;
2711 journal->j_max_batch = JOURNAL_MAX_BATCH_DEFAULT;
2712 journal->j_max_commit_age = JOURNAL_MAX_COMMIT_AGE;
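/*
 * Editorial sketch (not part of the original file): for a filesystem with a
 * blocksize below REISERFS_STANDARD_BLKSIZE the admissible j_trans_max range
 * shrinks by the blocksize ratio; e.g. at 1k the ratio is 4096/1024 = 4, so
 * the range becomes [JOURNAL_TRANS_MIN_DEFAULT/4, JOURNAL_TRANS_MAX_DEFAULT/4],
 * and j_max_batch must still equal j_trans_max scaled by
 * JOURNAL_MAX_BATCH_DEFAULT/JOURNAL_TRANS_MAX_DEFAULT.  trans_max_bounds() is
 * a hypothetical helper restating the bounds checked above.
 */
static void trans_max_bounds(struct super_block *sb,
			     unsigned int *min_trans, unsigned int *max_trans)
{
	int ratio = 1;

	if (sb->s_blocksize < REISERFS_STANDARD_BLKSIZE)
		ratio = REISERFS_STANDARD_BLKSIZE / sb->s_blocksize;

	*min_trans = JOURNAL_TRANS_MIN_DEFAULT / ratio;
	*max_trans = JOURNAL_TRANS_MAX_DEFAULT / ratio;
}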
2717 /* must be called once on fs mount. calls journal_read for you */
2718 int journal_init(struct super_block *sb, const char *j_dev_name,
2719 int old_format, unsigned int commit_max_age)
2721 int num_cnodes = SB_ONDISK_JOURNAL_SIZE(sb) * 2;
2722 struct buffer_head *bhjh;
2723 struct reiserfs_super_block *rs;
2724 struct reiserfs_journal_header *jh;
2725 struct reiserfs_journal *journal;
2726 struct reiserfs_journal_list *jl;
2729 journal = SB_JOURNAL(sb) = vzalloc(sizeof(struct reiserfs_journal));
2731 reiserfs_warning(sb, "journal-1256",
2732 "unable to get memory for journal structure");
2735 INIT_LIST_HEAD(&journal->j_bitmap_nodes);
2736 INIT_LIST_HEAD(&journal->j_prealloc_list);
2737 INIT_LIST_HEAD(&journal->j_working_list);
2738 INIT_LIST_HEAD(&journal->j_journal_list);
2739 journal->j_persistent_trans = 0;
2740 if (reiserfs_allocate_list_bitmaps(sb, journal->j_list_bitmap,
2741 reiserfs_bmap_count(sb)))
2742 goto free_and_return;
2744 allocate_bitmap_nodes(sb);
2746 /* reserved for journal area support */
2747 SB_JOURNAL_1st_RESERVED_BLOCK(sb) = (old_format ?
2748 REISERFS_OLD_DISK_OFFSET_IN_BYTES
2750 reiserfs_bmap_count(sb) +
2752 REISERFS_DISK_OFFSET_IN_BYTES /
2753 sb->s_blocksize + 2);
2756	 * Sanity check to see whether the standard journal fits
2757	 * within the first bitmap (relevant for small blocksizes)
2759 if (!SB_ONDISK_JOURNAL_DEVICE(sb) &&
2760 (SB_JOURNAL_1st_RESERVED_BLOCK(sb) +
2761 SB_ONDISK_JOURNAL_SIZE(sb) > sb->s_blocksize * 8)) {
2762 reiserfs_warning(sb, "journal-1393",
2763 "journal does not fit for area addressed "
2764 "by first of bitmap blocks. It starts at "
2765 "%u and its size is %u. Block size %ld",
2766 SB_JOURNAL_1st_RESERVED_BLOCK(sb),
2767 SB_ONDISK_JOURNAL_SIZE(sb),
2769 goto free_and_return;
2773	 * Sanity check to see if the journal's first block is correct.
2774	 * If the journal's first block is invalid it can cause
2775	 * zeroing of important superblock members.
2777 if (!SB_ONDISK_JOURNAL_DEVICE(sb) &&
2778 SB_ONDISK_JOURNAL_1st_BLOCK(sb) < SB_JOURNAL_1st_RESERVED_BLOCK(sb)) {
2779 reiserfs_warning(sb, "journal-1393",
2780 "journal 1st super block is invalid: 1st reserved block %d, but actual 1st block is %d",
2781 SB_JOURNAL_1st_RESERVED_BLOCK(sb),
2782 SB_ONDISK_JOURNAL_1st_BLOCK(sb));
2783 goto free_and_return;
2786 if (journal_init_dev(sb, journal, j_dev_name) != 0) {
2787 reiserfs_warning(sb, "sh-462",
2788 "unable to initialize journal device");
2789 goto free_and_return;
2792 rs = SB_DISK_SUPER_BLOCK(sb);
2794 /* read journal header */
2795 bhjh = journal_bread(sb,
2796 SB_ONDISK_JOURNAL_1st_BLOCK(sb) +
2797 SB_ONDISK_JOURNAL_SIZE(sb));
2799 reiserfs_warning(sb, "sh-459",
2800 "unable to read journal header");
2801 goto free_and_return;
2803 jh = (struct reiserfs_journal_header *)(bhjh->b_data);
2805 /* make sure that journal matches to the super block */
2806 if (is_reiserfs_jr(rs)
2807 && (le32_to_cpu(jh->jh_journal.jp_journal_magic) !=
2808 sb_jp_journal_magic(rs))) {
2809 reiserfs_warning(sb, "sh-460",
2810 "journal header magic %x (device %pg) does "
2811 "not match to magic found in super block %x",
2812 jh->jh_journal.jp_journal_magic,
2814 sb_jp_journal_magic(rs));
2816 goto free_and_return;
2819 journal->j_trans_max = le32_to_cpu(jh->jh_journal.jp_journal_trans_max);
2820 journal->j_max_batch = le32_to_cpu(jh->jh_journal.jp_journal_max_batch);
2821 journal->j_max_commit_age =
2822 le32_to_cpu(jh->jh_journal.jp_journal_max_commit_age);
2823 journal->j_max_trans_age = JOURNAL_MAX_TRANS_AGE;
2825 if (check_advise_trans_params(sb, journal) != 0)
2826 goto free_and_return;
2827 journal->j_default_max_commit_age = journal->j_max_commit_age;
2829 if (commit_max_age != 0) {
2830 journal->j_max_commit_age = commit_max_age;
2831 journal->j_max_trans_age = commit_max_age;
2834 reiserfs_info(sb, "journal params: device %pg, size %u, "
2835 "journal first block %u, max trans len %u, max batch %u, "
2836 "max commit age %u, max trans age %u\n",
2838 SB_ONDISK_JOURNAL_SIZE(sb),
2839 SB_ONDISK_JOURNAL_1st_BLOCK(sb),
2840 journal->j_trans_max,
2841 journal->j_max_batch,
2842 journal->j_max_commit_age, journal->j_max_trans_age);
2846 journal->j_list_bitmap_index = 0;
2847 journal_list_init(sb);
2849 memset(journal->j_list_hash_table, 0,
2850 JOURNAL_HASH_SIZE * sizeof(struct reiserfs_journal_cnode *));
2852 INIT_LIST_HEAD(&journal->j_dirty_buffers);
2853 spin_lock_init(&journal->j_dirty_buffers_lock);
2855 journal->j_start = 0;
2857 journal->j_len_alloc = 0;
2858 atomic_set(&journal->j_wcount, 0);
2859 atomic_set(&journal->j_async_throttle, 0);
2860 journal->j_bcount = 0;
2861 journal->j_trans_start_time = 0;
2862 journal->j_last = NULL;
2863 journal->j_first = NULL;
2864 init_waitqueue_head(&journal->j_join_wait);
2865 mutex_init(&journal->j_mutex);
2866 mutex_init(&journal->j_flush_mutex);
2868 journal->j_trans_id = 10;
2869 journal->j_mount_id = 10;
2870 journal->j_state = 0;
2871 atomic_set(&journal->j_jlock, 0);
2872 journal->j_cnode_free_list = allocate_cnodes(num_cnodes);
2873 journal->j_cnode_free_orig = journal->j_cnode_free_list;
2874 journal->j_cnode_free = journal->j_cnode_free_list ? num_cnodes : 0;
2875 journal->j_cnode_used = 0;
2876 journal->j_must_wait = 0;
2878 if (journal->j_cnode_free == 0) {
2879 reiserfs_warning(sb, "journal-2004", "Journal cnode memory "
2880 "allocation failed (%ld bytes). Journal is "
2881 "too large for available memory. Usually "
2882 "this is due to a journal that is too large.",
2883 sizeof (struct reiserfs_journal_cnode) * num_cnodes);
2884 goto free_and_return;
2887 init_journal_hash(sb);
2888 jl = journal->j_current_jl;
2891 * get_list_bitmap() may call flush_commit_list() which
2892 * requires the lock. Calling flush_commit_list() shouldn't happen
2893 * this early but I like to be paranoid.
2895 reiserfs_write_lock(sb);
2896 jl->j_list_bitmap = get_list_bitmap(sb, jl);
2897 reiserfs_write_unlock(sb);
2898 if (!jl->j_list_bitmap) {
2899 reiserfs_warning(sb, "journal-2005",
2900 "get_list_bitmap failed for journal list 0");
2901 goto free_and_return;
2904 ret = journal_read(sb);
2906 reiserfs_warning(sb, "reiserfs-2006",
2907 "Replay Failure, unable to mount");
2908 goto free_and_return;
2911 INIT_DELAYED_WORK(&journal->j_work, flush_async_commits);
2912 journal->j_work_sb = sb;
2915 free_journal_ram(sb);
2920 * test for a polite end of the current transaction. Used by file_write,
2921 * and should be used by delete to make sure they don't write more than
2922 * can fit inside a single transaction
2924 int journal_transaction_should_end(struct reiserfs_transaction_handle *th,
2927 struct reiserfs_journal *journal = SB_JOURNAL(th->t_super);
2928 time_t now = get_seconds();
2929 /* cannot restart while nested */
2930 BUG_ON(!th->t_trans_id);
2931 if (th->t_refcount > 1)
2933 if (journal->j_must_wait > 0 ||
2934 (journal->j_len_alloc + new_alloc) >= journal->j_max_batch ||
2935 atomic_read(&journal->j_jlock) ||
2936 (now - journal->j_trans_start_time) > journal->j_max_trans_age ||
2937 journal->j_cnode_free < (journal->j_trans_max * 3)) {
2941 journal->j_len_alloc += new_alloc;
2942 th->t_blocks_allocated += new_alloc ;
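/*
 * Editorial sketch (not part of the original file): a long-running caller
 * (a big truncate or write, say) polls journal_transaction_should_end()
 * before logging its next batch and restarts its transaction when told to.
 * The restart below is a simplified stand-in for the fs-level restart
 * helpers and assumes the caller holds no buffers that must stay in the old
 * transaction; maybe_restart_transaction() is a hypothetical name.
 */
static int maybe_restart_transaction(struct reiserfs_transaction_handle *th,
				     struct super_block *sb, int blocks_needed)
{
	int err;

	/* returns 0 when there is still room (and reserves blocks_needed) */
	if (!journal_transaction_should_end(th, blocks_needed))
		return 0;

	err = journal_end(th);
	if (err)
		return err;
	/* begin a fresh transaction with the blocks we still need */
	return journal_begin(th, sb, blocks_needed);
}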
2946 /* this must be called inside a transaction */
2947 void reiserfs_block_writes(struct reiserfs_transaction_handle *th)
2949 struct reiserfs_journal *journal = SB_JOURNAL(th->t_super);
2950 BUG_ON(!th->t_trans_id);
2951 journal->j_must_wait = 1;
2952 set_bit(J_WRITERS_BLOCKED, &journal->j_state);
2956 /* this must be called without a transaction started */
2957 void reiserfs_allow_writes(struct super_block *s)
2959 struct reiserfs_journal *journal = SB_JOURNAL(s);
2960 clear_bit(J_WRITERS_BLOCKED, &journal->j_state);
2961 wake_up(&journal->j_join_wait);
2964 /* this must be called without a transaction started */
2965 void reiserfs_wait_on_write_block(struct super_block *s)
2967 struct reiserfs_journal *journal = SB_JOURNAL(s);
2968 wait_event(journal->j_join_wait,
2969 !test_bit(J_WRITERS_BLOCKED, &journal->j_state));
2972 static void queue_log_writer(struct super_block *s)
2974 wait_queue_entry_t wait;
2975 struct reiserfs_journal *journal = SB_JOURNAL(s);
2976 set_bit(J_WRITERS_QUEUED, &journal->j_state);
2979 * we don't want to use wait_event here because
2980 * we only want to wait once.
2982 init_waitqueue_entry(&wait, current);
2983 add_wait_queue(&journal->j_join_wait, &wait);
2984 set_current_state(TASK_UNINTERRUPTIBLE);
2985 if (test_bit(J_WRITERS_QUEUED, &journal->j_state)) {
2986 int depth = reiserfs_write_unlock_nested(s);
2988 reiserfs_write_lock_nested(s, depth);
2990 __set_current_state(TASK_RUNNING);
2991 remove_wait_queue(&journal->j_join_wait, &wait);
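/*
 * Editorial sketch (not part of the original file): queue_log_writer() above
 * open-codes the classic "sleep at most once" idiom -- register on the wait
 * queue, re-test the condition, schedule, unregister -- instead of
 * wait_event(), because a queued writer only wants to yield once rather than
 * loop until the bit clears.  The generic form, with the journal write-lock
 * juggling omitted, looks like this; sleep_once_on() is a hypothetical name.
 */
static void sleep_once_on(wait_queue_head_t *wq, unsigned long *state, int bit)
{
	wait_queue_entry_t wait;

	init_waitqueue_entry(&wait, current);
	add_wait_queue(wq, &wait);
	set_current_state(TASK_UNINTERRUPTIBLE);
	if (test_bit(bit, state))
		schedule();	/* woken by the matching wake_up() */
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(wq, &wait);
}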
2994 static void wake_queued_writers(struct super_block *s)
2996 struct reiserfs_journal *journal = SB_JOURNAL(s);
2997 if (test_and_clear_bit(J_WRITERS_QUEUED, &journal->j_state))
2998 wake_up(&journal->j_join_wait);
3001 static void let_transaction_grow(struct super_block *sb, unsigned int trans_id)
3003 struct reiserfs_journal *journal = SB_JOURNAL(sb);
3004 unsigned long bcount = journal->j_bcount;
3008 depth = reiserfs_write_unlock_nested(sb);
3009 schedule_timeout_uninterruptible(1);
3010 reiserfs_write_lock_nested(sb, depth);
3012 journal->j_current_jl->j_state |= LIST_COMMIT_PENDING;
3013 while ((atomic_read(&journal->j_wcount) > 0 ||
3014 atomic_read(&journal->j_jlock)) &&
3015 journal->j_trans_id == trans_id) {
3016 queue_log_writer(sb);
3018 if (journal->j_trans_id != trans_id)
3020 if (bcount == journal->j_bcount)
3022 bcount = journal->j_bcount;
3027 * join == true if you must join an existing transaction.
3028 * join == false if you can deal with waiting for others to finish
3030 * this will block until the transaction is joinable. send the number of
3031 * blocks you expect to use in nblocks.
3033 static int do_journal_begin_r(struct reiserfs_transaction_handle *th,
3034 struct super_block *sb, unsigned long nblocks,
3037 time_t now = get_seconds();
3038 unsigned int old_trans_id;
3039 struct reiserfs_journal *journal = SB_JOURNAL(sb);
3040 struct reiserfs_transaction_handle myth;
3041 int sched_count = 0;
3045 reiserfs_check_lock_depth(sb, "journal_begin");
3046 BUG_ON(nblocks > journal->j_trans_max);
3048 PROC_INFO_INC(sb, journal.journal_being);
3049 /* set here for journal_join */
3055 if (join != JBEGIN_ABORT && reiserfs_is_journal_aborted(journal)) {
3057 retval = journal->j_errno;
3060 journal->j_bcount++;
3062 if (test_bit(J_WRITERS_BLOCKED, &journal->j_state)) {
3064 depth = reiserfs_write_unlock_nested(sb);
3065 reiserfs_wait_on_write_block(sb);
3066 reiserfs_write_lock_nested(sb, depth);
3067 PROC_INFO_INC(sb, journal.journal_relock_writers);
3070 now = get_seconds();
3073	 * if there is no room in the journal OR
3074	 * if this transaction is too old, and we weren't called joinable,
3075	 * wait for it to finish before beginning. We don't sleep if there
3076	 * aren't other writers.
3079 if ((!join && journal->j_must_wait > 0) ||
3081 && (journal->j_len_alloc + nblocks + 2) >= journal->j_max_batch)
3082 || (!join && atomic_read(&journal->j_wcount) > 0
3083 && journal->j_trans_start_time > 0
3084 && (now - journal->j_trans_start_time) >
3085 journal->j_max_trans_age) || (!join
3086 && atomic_read(&journal->j_jlock))
3087 || (!join && journal->j_cnode_free < (journal->j_trans_max * 3))) {
3089 old_trans_id = journal->j_trans_id;
3090 /* allow others to finish this transaction */
3093 if (!join && (journal->j_len_alloc + nblocks + 2) >=
3094 journal->j_max_batch &&
3095 ((journal->j_len + nblocks + 2) * 100) <
3096 (journal->j_len_alloc * 75)) {
3097 if (atomic_read(&journal->j_wcount) > 10) {
3099 queue_log_writer(sb);
3104 * don't mess with joining the transaction if all we
3105 * have to do is wait for someone else to do a commit
3107 if (atomic_read(&journal->j_jlock)) {
3108 while (journal->j_trans_id == old_trans_id &&
3109 atomic_read(&journal->j_jlock)) {
3110 queue_log_writer(sb);
3114 retval = journal_join(&myth, sb);
3118 /* someone might have ended the transaction while we joined */
3119 if (old_trans_id != journal->j_trans_id) {
3120 retval = do_journal_end(&myth, 0);
3122 retval = do_journal_end(&myth, COMMIT_NOW);
3128 PROC_INFO_INC(sb, journal.journal_relock_wcount);
3131 /* we are the first writer, set trans_id */
3132 if (journal->j_trans_start_time == 0) {
3133 journal->j_trans_start_time = get_seconds();
3135 atomic_inc(&journal->j_wcount);
3136 journal->j_len_alloc += nblocks;
3137 th->t_blocks_logged = 0;
3138 th->t_blocks_allocated = nblocks;
3139 th->t_trans_id = journal->j_trans_id;
3141 INIT_LIST_HEAD(&th->t_list);
3145 memset(th, 0, sizeof(*th));
3147 * Re-set th->t_super, so we can properly keep track of how many
3148 * persistent transactions there are. We need to do this so if this
3149 * call is part of a failed restart_transaction, we can free it later
3155 struct reiserfs_transaction_handle *reiserfs_persistent_transaction(struct
3161 struct reiserfs_transaction_handle *th;
3164	 * if we're nesting into an existing transaction, it will be
3165	 * persistent on its own
3167 if (reiserfs_transaction_running(s)) {
3168 th = current->journal_info;
3170 BUG_ON(th->t_refcount < 2);
3174 th = kmalloc(sizeof(struct reiserfs_transaction_handle), GFP_NOFS);
3177 ret = journal_begin(th, s, nblocks);
3183 SB_JOURNAL(s)->j_persistent_trans++;
3187 int reiserfs_end_persistent_transaction(struct reiserfs_transaction_handle *th)
3189 struct super_block *s = th->t_super;
3192 ret = journal_end(th);
3195 if (th->t_refcount == 0) {
3196 SB_JOURNAL(s)->j_persistent_trans--;
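/*
 * Editorial sketch (not part of the original file): a persistent handle lets
 * a caller that may or may not already be inside a transaction obtain a
 * handle it can stash and release later.  A typical pairing, with error
 * handling trimmed and illustrative return codes, might look like this;
 * do_some_metadata_update() is a hypothetical caller.
 */
static int do_some_metadata_update(struct super_block *s)
{
	struct reiserfs_transaction_handle *th;
	int err = 0;

	th = reiserfs_persistent_transaction(s, 1);
	if (!th)
		return -ENOMEM;
	/* ... log blocks with journal_mark_dirty(th, bh) ... */
	if (reiserfs_end_persistent_transaction(th))
		err = -EIO;
	return err;
}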
3202 static int journal_join(struct reiserfs_transaction_handle *th,
3203 struct super_block *sb)
3205 struct reiserfs_transaction_handle *cur_th = current->journal_info;
3208 * this keeps do_journal_end from NULLing out the
3209 * current->journal_info pointer
3211 th->t_handle_save = cur_th;
3212 BUG_ON(cur_th && cur_th->t_refcount > 1);
3213 return do_journal_begin_r(th, sb, 1, JBEGIN_JOIN);
3216 int journal_join_abort(struct reiserfs_transaction_handle *th,
3217 struct super_block *sb)
3219 struct reiserfs_transaction_handle *cur_th = current->journal_info;
3222 * this keeps do_journal_end from NULLing out the
3223 * current->journal_info pointer
3225 th->t_handle_save = cur_th;
3226 BUG_ON(cur_th && cur_th->t_refcount > 1);
3227 return do_journal_begin_r(th, sb, 1, JBEGIN_ABORT);
3230 int journal_begin(struct reiserfs_transaction_handle *th,
3231 struct super_block *sb, unsigned long nblocks)
3233 struct reiserfs_transaction_handle *cur_th = current->journal_info;
3236 th->t_handle_save = NULL;
3238 /* we are nesting into the current transaction */
3239 if (cur_th->t_super == sb) {
3240 BUG_ON(!cur_th->t_refcount);
3241 cur_th->t_refcount++;
3242 memcpy(th, cur_th, sizeof(*th));
3243 if (th->t_refcount <= 1)
3244 reiserfs_warning(sb, "reiserfs-2005",
3245 "BAD: refcount <= 1, but "
3246 "journal_info != 0");
3250 * we've ended up with a handle from a different
3251 * filesystem. save it and restore on journal_end.
3252 * This should never really happen...
3254 reiserfs_warning(sb, "clm-2100",
3255 "nesting info a different FS");
3256 th->t_handle_save = current->journal_info;
3257 current->journal_info = th;
3260 current->journal_info = th;
3262 ret = do_journal_begin_r(th, sb, nblocks, JBEGIN_REG);
3263 BUG_ON(current->journal_info != th);
3266 * I guess this boils down to being the reciprocal of clm-2100 above.
3267 * If do_journal_begin_r fails, we need to put it back, since
3268 * journal_end won't be called to do it. */
3270 current->journal_info = th->t_handle_save;
3272 BUG_ON(!th->t_refcount);
3278 * puts bh into the current transaction. If it was already there, reorders it:
3279 * removes the old pointers from the hash, and puts new ones in (to make
3280 * sure replay happens in the right order).
3282 * if it was dirty, cleans and files onto the clean list. I can't let it
3283 * be dirty again until the transaction is committed.
3285 * if j_len is bigger than j_len_alloc, it pushes j_len_alloc to 10 + j_len.
3287 int journal_mark_dirty(struct reiserfs_transaction_handle *th,
3288 struct buffer_head *bh)
3290 struct super_block *sb = th->t_super;
3291 struct reiserfs_journal *journal = SB_JOURNAL(sb);
3292 struct reiserfs_journal_cnode *cn = NULL;
3293 int count_already_incd = 0;
3295 BUG_ON(!th->t_trans_id);
3297 PROC_INFO_INC(sb, journal.mark_dirty);
3298 if (th->t_trans_id != journal->j_trans_id) {
3299 reiserfs_panic(th->t_super, "journal-1577",
3300 "handle trans id %ld != current trans id %ld",
3301 th->t_trans_id, journal->j_trans_id);
3304 prepared = test_clear_buffer_journal_prepared(bh);
3305 clear_buffer_journal_restore_dirty(bh);
3306 /* already in this transaction, we are done */
3307 if (buffer_journaled(bh)) {
3308 PROC_INFO_INC(sb, journal.mark_dirty_already);
3313 * this must be turned into a panic instead of a warning. We can't
3314 * allow a dirty or journal_dirty or locked buffer to be logged, as
3315 * some changes could get to disk too early. NOT GOOD.
3317 if (!prepared || buffer_dirty(bh)) {
3318 reiserfs_warning(sb, "journal-1777",
3319 "buffer %llu bad state "
3320 "%cPREPARED %cLOCKED %cDIRTY %cJDIRTY_WAIT",
3321 (unsigned long long)bh->b_blocknr,
3322 prepared ? ' ' : '!',
3323 buffer_locked(bh) ? ' ' : '!',
3324 buffer_dirty(bh) ? ' ' : '!',
3325 buffer_journal_dirty(bh) ? ' ' : '!');
3328 if (atomic_read(&journal->j_wcount) <= 0) {
3329 reiserfs_warning(sb, "journal-1409",
3330 "returning because j_wcount was %d",
3331 atomic_read(&journal->j_wcount));
3335 * this error means I've screwed up, and we've overflowed
3336 * the transaction. Nothing can be done here, except make the
3337 * FS readonly or panic.
3339 if (journal->j_len >= journal->j_trans_max) {
3340 reiserfs_panic(th->t_super, "journal-1413",
3341 "j_len (%lu) is too big",
3345 if (buffer_journal_dirty(bh)) {
3346 count_already_incd = 1;
3347 PROC_INFO_INC(sb, journal.mark_dirty_notjournal);
3348 clear_buffer_journal_dirty(bh);
3351 if (journal->j_len > journal->j_len_alloc) {
3352 journal->j_len_alloc = journal->j_len + JOURNAL_PER_BALANCE_CNT;
3355 set_buffer_journaled(bh);
3357 /* now put this guy on the end */
3361 reiserfs_panic(sb, "journal-4", "get_cnode failed!");
3364 if (th->t_blocks_logged == th->t_blocks_allocated) {
3365 th->t_blocks_allocated += JOURNAL_PER_BALANCE_CNT;
3366 journal->j_len_alloc += JOURNAL_PER_BALANCE_CNT;
3368 th->t_blocks_logged++;
3372 cn->blocknr = bh->b_blocknr;
3375 insert_journal_hash(journal->j_hash_table, cn);
3376 if (!count_already_incd) {
3381 cn->prev = journal->j_last;
3383 if (journal->j_last) {
3384 journal->j_last->next = cn;
3385 journal->j_last = cn;
3387 journal->j_first = cn;
3388 journal->j_last = cn;
3390 reiserfs_schedule_old_flush(sb);
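/*
 * Editorial sketch (not part of the original file): the canonical way a
 * buffer enters a transaction, mirroring how this file logs the super block
 * elsewhere -- prepare the buffer (which locks out writeback), modify it,
 * then mark it dirty in the running transaction.  touch_one_block() is a
 * hypothetical caller.
 */
static int touch_one_block(struct reiserfs_transaction_handle *th,
			   struct super_block *sb, struct buffer_head *bh)
{
	reiserfs_prepare_for_journal(sb, bh, 1 /* wait for the buffer lock */);
	/* ... modify bh->b_data here ... */
	return journal_mark_dirty(th, bh);
}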
3394 int journal_end(struct reiserfs_transaction_handle *th)
3396 struct super_block *sb = th->t_super;
3397 if (!current->journal_info && th->t_refcount > 1)
3398 reiserfs_warning(sb, "REISER-NESTING",
3399 "th NULL, refcount %d", th->t_refcount);
3401 if (!th->t_trans_id) {
3407 if (th->t_refcount > 0) {
3408 struct reiserfs_transaction_handle *cur_th =
3409 current->journal_info;
3412 * we aren't allowed to close a nested transaction on a
3413 * different filesystem from the one in the task struct
3415 BUG_ON(cur_th->t_super != th->t_super);
3418 memcpy(current->journal_info, th, sizeof(*th));
3423 return do_journal_end(th, 0);
3428 * removes from the current transaction, relsing and decrementing any counters.
3429 * also files the removed buffer directly onto the clean list
3431 * called by journal_mark_freed when a block has been deleted
3433 * returns 1 if it cleaned and relsed the buffer. 0 otherwise
3435 static int remove_from_transaction(struct super_block *sb,
3436 b_blocknr_t blocknr, int already_cleaned)
3438 struct buffer_head *bh;
3439 struct reiserfs_journal_cnode *cn;
3440 struct reiserfs_journal *journal = SB_JOURNAL(sb);
3443 cn = get_journal_hash_dev(sb, journal->j_hash_table, blocknr);
3444 if (!cn || !cn->bh) {
3449 cn->prev->next = cn->next;
3452 cn->next->prev = cn->prev;
3454 if (cn == journal->j_first) {
3455 journal->j_first = cn->next;
3457 if (cn == journal->j_last) {
3458 journal->j_last = cn->prev;
3461 remove_journal_hash(sb, journal->j_hash_table, NULL,
3463 clear_buffer_journaled(bh); /* don't log this one */
3465 if (!already_cleaned) {
3466 clear_buffer_journal_dirty(bh);
3467 clear_buffer_dirty(bh);
3468 clear_buffer_journal_test(bh);
3470 if (atomic_read(&bh->b_count) < 0) {
3471 reiserfs_warning(sb, "journal-1752",
3477 journal->j_len_alloc--;
3483 * for any cnode in a journal list, it can only be dirtied if all the
3484 * transactions that include it are committed to disk.
3485 * this checks through each transaction, and returns 1 if you are allowed
3486 * to dirty, and 0 if you aren't
3488 * it is called by dirty_journal_list, which is called after
3489 * flush_commit_list has gotten all the log blocks for a given
3490 * transaction on disk
3493 static int can_dirty(struct reiserfs_journal_cnode *cn)
3495 struct super_block *sb = cn->sb;
3496 b_blocknr_t blocknr = cn->blocknr;
3497 struct reiserfs_journal_cnode *cur = cn->hprev;
3501 * first test hprev. These are all newer than cn, so any node here
3502 * with the same block number and dev means this node can't be sent
3503 * to disk right now.
3505 while (cur && can_dirty) {
3506 if (cur->jlist && cur->bh && cur->blocknr && cur->sb == sb &&
3507 cur->blocknr == blocknr) {
3513 * then test hnext. These are all older than cn. As long as they
3514 * are committed to the log, it is safe to write cn to disk
3517 while (cur && can_dirty) {
3518 if (cur->jlist && cur->jlist->j_len > 0 &&
3519 atomic_read(&cur->jlist->j_commit_left) > 0 && cur->bh &&
3520 cur->blocknr && cur->sb == sb && cur->blocknr == blocknr) {
3529 * syncs the commit blocks, but does not force the real buffers to disk
3530 * will wait until the current transaction is done/committed before returning
3532 int journal_end_sync(struct reiserfs_transaction_handle *th)
3534 struct super_block *sb = th->t_super;
3535 struct reiserfs_journal *journal = SB_JOURNAL(sb);
3537 BUG_ON(!th->t_trans_id);
3538	/* syncing while nested is very, very bad */
3539 BUG_ON(th->t_refcount > 1);
3540 if (journal->j_len == 0) {
3541 reiserfs_prepare_for_journal(sb, SB_BUFFER_WITH_SB(sb),
3543 journal_mark_dirty(th, SB_BUFFER_WITH_SB(sb));
3545 return do_journal_end(th, COMMIT_NOW | WAIT);
3548 /* writeback the pending async commits to disk */
3549 static void flush_async_commits(struct work_struct *work)
3551 struct reiserfs_journal *journal =
3552 container_of(work, struct reiserfs_journal, j_work.work);
3553 struct super_block *sb = journal->j_work_sb;
3554 struct reiserfs_journal_list *jl;
3555 struct list_head *entry;
3557 reiserfs_write_lock(sb);
3558 if (!list_empty(&journal->j_journal_list)) {
3559 /* last entry is the youngest, commit it and you get everything */
3560 entry = journal->j_journal_list.prev;
3561 jl = JOURNAL_LIST_ENTRY(entry);
3562 flush_commit_list(sb, jl, 1);
3564 reiserfs_write_unlock(sb);
3568 * flushes any old transactions to disk
3569 * ends the current transaction if it is too old
3571 void reiserfs_flush_old_commits(struct super_block *sb)
3574 struct reiserfs_transaction_handle th;
3575 struct reiserfs_journal *journal = SB_JOURNAL(sb);
3577 now = get_seconds();
3579 * safety check so we don't flush while we are replaying the log during
3582 if (list_empty(&journal->j_journal_list))
3586 * check the current transaction. If there are no writers, and it is
3587 * too old, finish it, and force the commit blocks to disk
3589 if (atomic_read(&journal->j_wcount) <= 0 &&
3590 journal->j_trans_start_time > 0 &&
3591 journal->j_len > 0 &&
3592 (now - journal->j_trans_start_time) > journal->j_max_trans_age) {
3593 if (!journal_join(&th, sb)) {
3594 reiserfs_prepare_for_journal(sb,
3595 SB_BUFFER_WITH_SB(sb),
3597 journal_mark_dirty(&th, SB_BUFFER_WITH_SB(sb));
3600 * we're only being called from kreiserfsd, it makes
3601 * no sense to do an async commit so that kreiserfsd
3604 do_journal_end(&th, COMMIT_NOW | WAIT);
3610 * returns 0 if do_journal_end should return right away, returns 1 if
3611 * do_journal_end should finish the commit
3613 * if the current transaction is too old, but still has writers, this will
3614 * wait on j_join_wait until all the writers are done. By the time it
3615 * wakes up, the transaction it was called on has already ended, so it just
3616 * flushes the commit list and returns 0.
3618 * Won't batch when flush or commit_now is set. Also won't batch when
3619 * others are waiting on j_join_wait.
3621 * Note, we can't allow the journal_end to proceed while there are still
3622 * writers in the log.
3624 static int check_journal_end(struct reiserfs_transaction_handle *th, int flags)
3628 int flush = flags & FLUSH_ALL;
3629 int commit_now = flags & COMMIT_NOW;
3630 int wait_on_commit = flags & WAIT;
3631 struct reiserfs_journal_list *jl;
3632 struct super_block *sb = th->t_super;
3633 struct reiserfs_journal *journal = SB_JOURNAL(sb);
3635 BUG_ON(!th->t_trans_id);
3637 if (th->t_trans_id != journal->j_trans_id) {
3638 reiserfs_panic(th->t_super, "journal-1577",
3639 "handle trans id %ld != current trans id %ld",
3640 th->t_trans_id, journal->j_trans_id);
3643 journal->j_len_alloc -= (th->t_blocks_allocated - th->t_blocks_logged);
3644 /* <= 0 is allowed. unmounting might not call begin */
3645 if (atomic_read(&journal->j_wcount) > 0)
3646 atomic_dec(&journal->j_wcount);
3649	 * BUG: deal with the case where j_len is 0, but blocks that were
3650	 * previously freed still need to be released. That will be dealt with
3651	 * by the next transaction that actually writes something, but it
3652	 * should really be taken care of in this trans
3654 BUG_ON(journal->j_len == 0);
3657	 * if wcount > 0, and we are called with flush or commit_now,
3658 * we wait on j_join_wait. We will wake up when the last writer has
3659 * finished the transaction, and started it on its way to the disk.
3660 * Then, we flush the commit or journal list, and just return 0
3661 * because the rest of journal end was already done for this
3664 if (atomic_read(&journal->j_wcount) > 0) {
3665 if (flush || commit_now) {
3668 jl = journal->j_current_jl;
3669 trans_id = jl->j_trans_id;
3671 jl->j_state |= LIST_COMMIT_PENDING;
3672 atomic_set(&journal->j_jlock, 1);
3674 journal->j_next_full_flush = 1;
3679 * sleep while the current transaction is
3682 while (journal->j_trans_id == trans_id) {
3683 if (atomic_read(&journal->j_jlock)) {
3684 queue_log_writer(sb);
3687 if (journal->j_trans_id == trans_id) {
3688 atomic_set(&journal->j_jlock,
3694 BUG_ON(journal->j_trans_id == trans_id);
3697 && journal_list_still_alive(sb, trans_id)
3698 && wait_on_commit) {
3699 flush_commit_list(sb, jl, 1);
3707 /* deal with old transactions where we are the last writers */
3708 now = get_seconds();
3709 if ((now - journal->j_trans_start_time) > journal->j_max_trans_age) {
3711 journal->j_next_async_flush = 1;
3713 /* don't batch when someone is waiting on j_join_wait */
3714 /* don't batch when syncing the commit or flushing the whole trans */
3715 if (!(journal->j_must_wait > 0) && !(atomic_read(&journal->j_jlock))
3716 && !flush && !commit_now && (journal->j_len < journal->j_max_batch)
3717 && journal->j_len_alloc < journal->j_max_batch
3718 && journal->j_cnode_free > (journal->j_trans_max * 3)) {
3719 journal->j_bcount++;
3724 if (journal->j_start > SB_ONDISK_JOURNAL_SIZE(sb)) {
3725 reiserfs_panic(sb, "journal-003",
3726 "j_start (%ld) is too high",
3733 * Does all the work that makes deleting blocks safe.
3734 * when deleting a block marked BH_JNew, just remove it from the current
3735 * transaction, clean its buffer_head and move on.
3738 * set a bit for the block in the journal bitmap. That will prevent it from
3739 * being allocated for unformatted nodes before this transaction has finished.
3741 * mark any cnodes for this block as BLOCK_FREED, and clear their bh pointers.
3742 * That will prevent any old transactions with this block from trying to flush
3743 * to the real location. Since we aren't removing the cnode from the
3744 * journal_list_hash, the block can't be reallocated yet.
3746 * Then remove it from the current transaction, decrementing any counters and
3747 * filing it on the clean list.
3749 int journal_mark_freed(struct reiserfs_transaction_handle *th,
3750 struct super_block *sb, b_blocknr_t blocknr)
3752 struct reiserfs_journal *journal = SB_JOURNAL(sb);
3753 struct reiserfs_journal_cnode *cn = NULL;
3754 struct buffer_head *bh = NULL;
3755 struct reiserfs_list_bitmap *jb = NULL;
3757 BUG_ON(!th->t_trans_id);
3759 cn = get_journal_hash_dev(sb, journal->j_hash_table, blocknr);
3764 /* if it is journal new, we just remove it from this transaction */
3765 if (bh && buffer_journal_new(bh)) {
3766 clear_buffer_journal_new(bh);
3767 clear_prepared_bits(bh);
3768 reiserfs_clean_and_file_buffer(bh);
3769 cleaned = remove_from_transaction(sb, blocknr, cleaned);
3772 * set the bit for this block in the journal bitmap
3773 * for this transaction
3775 jb = journal->j_current_jl->j_list_bitmap;
3777 reiserfs_panic(sb, "journal-1702",
3778 "journal_list_bitmap is NULL");
3780 set_bit_in_list_bitmap(sb, blocknr, jb);
3782 /* Note, the entire while loop is not allowed to schedule. */
3785 clear_prepared_bits(bh);
3786 reiserfs_clean_and_file_buffer(bh);
3788 cleaned = remove_from_transaction(sb, blocknr, cleaned);
3791 * find all older transactions with this block,
3792 * make sure they don't try to write it out
3794 cn = get_journal_hash_dev(sb, journal->j_list_hash_table,
3797 if (sb == cn->sb && blocknr == cn->blocknr) {
3798 set_bit(BLOCK_FREED, &cn->state);
3801 * remove_from_transaction will brelse
3802 * the buffer if it was in the current
3806 clear_buffer_journal_dirty(cn->
3808 clear_buffer_dirty(cn->bh);
3809 clear_buffer_journal_test(cn->
3814 (&cn->bh->b_count) < 0) {
3815 reiserfs_warning(sb,
3817 "cn->bh->b_count < 0");
3821 * since we are clearing the bh,
3822 * we MUST dec nonzerolen
3825 atomic_dec(&cn->jlist->
3836 release_buffer_page(bh); /* get_hash grabs the buffer */
3840 void reiserfs_update_inode_transaction(struct inode *inode)
3842 struct reiserfs_journal *journal = SB_JOURNAL(inode->i_sb);
3843 REISERFS_I(inode)->i_jl = journal->j_current_jl;
3844 REISERFS_I(inode)->i_trans_id = journal->j_trans_id;
3848 * returns -1 on error, 0 if no commits/barriers were done and 1
3849 * if a transaction was actually committed and the barrier was done
3851 static int __commit_trans_jl(struct inode *inode, unsigned long id,
3852 struct reiserfs_journal_list *jl)
3854 struct reiserfs_transaction_handle th;
3855 struct super_block *sb = inode->i_sb;
3856 struct reiserfs_journal *journal = SB_JOURNAL(sb);
3860 * is it from the current transaction,
3861 * or from an unknown transaction?
3863 if (id == journal->j_trans_id) {
3864 jl = journal->j_current_jl;
3866 * try to let other writers come in and
3867 * grow this transaction
3869 let_transaction_grow(sb, id);
3870 if (journal->j_trans_id != id) {
3871 goto flush_commit_only;
3874 ret = journal_begin(&th, sb, 1);
3878 /* someone might have ended this transaction while we joined */
3879 if (journal->j_trans_id != id) {
3880 reiserfs_prepare_for_journal(sb, SB_BUFFER_WITH_SB(sb),
3882 journal_mark_dirty(&th, SB_BUFFER_WITH_SB(sb));
3883 ret = journal_end(&th);
3884 goto flush_commit_only;
3887 ret = journal_end_sync(&th);
3893 * this gets tricky, we have to make sure the journal list in
3894 * the inode still exists. We know the list is still around
3895 * if we've got a larger transaction id than the oldest list
3898 if (journal_list_still_alive(inode->i_sb, id)) {
3900 * we only set ret to 1 when we know for sure
3901 * the barrier hasn't been started yet on the commit
3904 if (atomic_read(&jl->j_commit_left) > 1)
3906 flush_commit_list(sb, jl, 1);
3907 if (journal->j_errno)
3908 ret = journal->j_errno;
3911 /* otherwise the list is gone, and long since committed */
3915 int reiserfs_commit_for_inode(struct inode *inode)
3917 unsigned int id = REISERFS_I(inode)->i_trans_id;
3918 struct reiserfs_journal_list *jl = REISERFS_I(inode)->i_jl;
3921 * for the whole inode, assume unset id means it was
3922 * changed in the current transaction. More conservative
3925 reiserfs_update_inode_transaction(inode);
3926 id = REISERFS_I(inode)->i_trans_id;
3927 /* jl will be updated in __commit_trans_jl */
3930 return __commit_trans_jl(inode, id, jl);
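/*
 * Editorial sketch (not part of the original file): the two helpers above
 * form a producer/consumer pair -- a write path records which transaction
 * touched the inode, and fsync later commits exactly that transaction (or
 * nothing, if it has long since hit disk).  Shown back to back only for
 * illustration; sync_one_inode() is a hypothetical name.
 */
static int sync_one_inode(struct inode *inode)
{
	/* at modification time: remember j_current_jl / j_trans_id */
	reiserfs_update_inode_transaction(inode);

	/* later, from an fsync-like path: commit what was recorded */
	return reiserfs_commit_for_inode(inode);
}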
3933 void reiserfs_restore_prepared_buffer(struct super_block *sb,
3934 struct buffer_head *bh)
3936 struct reiserfs_journal *journal = SB_JOURNAL(sb);
3937 PROC_INFO_INC(sb, journal.restore_prepared);
3941 if (test_clear_buffer_journal_restore_dirty(bh) &&
3942 buffer_journal_dirty(bh)) {
3943 struct reiserfs_journal_cnode *cn;
3944 reiserfs_write_lock(sb);
3945 cn = get_journal_hash_dev(sb,
3946 journal->j_list_hash_table,
3948 if (cn && can_dirty(cn)) {
3949 set_buffer_journal_test(bh);
3950 mark_buffer_dirty(bh);
3952 reiserfs_write_unlock(sb);
3954 clear_buffer_journal_prepared(bh);
3957 extern struct tree_balance *cur_tb;
3959 * before we can change a metadata block, we have to make sure it won't
3960 * be written to disk while we are altering it. So, we must:
3964 int reiserfs_prepare_for_journal(struct super_block *sb,
3965 struct buffer_head *bh, int wait)
3967 PROC_INFO_INC(sb, journal.prepare);
3969 if (!trylock_buffer(bh)) {
3974 set_buffer_journal_prepared(bh);
3975 if (test_clear_buffer_dirty(bh) && buffer_journal_dirty(bh)) {
3976 clear_buffer_journal_test(bh);
3977 set_buffer_journal_restore_dirty(bh);
3984 * long and ugly. If flush, will not return until all commit
3985 * blocks and all real buffers in the trans are on disk.
3986 * If no_async, won't return until all commit blocks are on disk.
3988 * keep reading, there are comments as you go along
3990 * If the journal is aborted, we just clean up. Things like flushing
3991 * journal lists, etc just won't happen.
3993 static int do_journal_end(struct reiserfs_transaction_handle *th, int flags)
3995 struct super_block *sb = th->t_super;
3996 struct reiserfs_journal *journal = SB_JOURNAL(sb);
3997 struct reiserfs_journal_cnode *cn, *next, *jl_cn;
3998 struct reiserfs_journal_cnode *last_cn = NULL;
3999 struct reiserfs_journal_desc *desc;
4000 struct reiserfs_journal_commit *commit;
4001 struct buffer_head *c_bh; /* commit bh */
4002 struct buffer_head *d_bh; /* desc bh */
4003 int cur_write_start = 0; /* start index of current log write */
4008 struct reiserfs_journal_list *jl, *temp_jl;
4009 struct list_head *entry, *safe;
4010 unsigned long jindex;
4011 unsigned int commit_trans_id;
4015 BUG_ON(th->t_refcount > 1);
4016 BUG_ON(!th->t_trans_id);
4017 BUG_ON(!th->t_super);
4020	 * protect flush_older_commits from making mistakes if the
4021 * transaction ID counter gets overflowed.
4023 if (th->t_trans_id == ~0U)
4024 flags |= FLUSH_ALL | COMMIT_NOW | WAIT;
4025 flush = flags & FLUSH_ALL;
4026 wait_on_commit = flags & WAIT;
4028 current->journal_info = th->t_handle_save;
4029 reiserfs_check_lock_depth(sb, "journal end");
4030 if (journal->j_len == 0) {
4031 reiserfs_prepare_for_journal(sb, SB_BUFFER_WITH_SB(sb),
4033 journal_mark_dirty(th, SB_BUFFER_WITH_SB(sb));
4037 if (journal->j_next_full_flush) {
4041 if (journal->j_next_async_flush) {
4042 flags |= COMMIT_NOW | WAIT;
4047	 * check_journal_end locks the journal, and unlocks if it does
4048	 * not return 1. It tells us whether we should continue with the
4049	 * journal_end, or just return.
4051 if (!check_journal_end(th, flags)) {
4052 reiserfs_schedule_old_flush(sb);
4053 wake_queued_writers(sb);
4054 reiserfs_async_progress_wait(sb);
4058 /* check_journal_end might set these, check again */
4059 if (journal->j_next_full_flush) {
4064 * j must wait means we have to flush the log blocks, and the
4065 * real blocks for this transaction
4067 if (journal->j_must_wait > 0) {
4070 #ifdef REISERFS_PREALLOCATE
4072 * quota ops might need to nest, setup the journal_info pointer
4073 * for them and raise the refcount so that it is > 0.
4075 current->journal_info = th;
4078		/* it should not add new blocks to the transaction */
4079 reiserfs_discard_all_prealloc(th);
4082 current->journal_info = th->t_handle_save;
4085 /* setup description block */
4088 SB_ONDISK_JOURNAL_1st_BLOCK(sb) +
4090 set_buffer_uptodate(d_bh);
4091 desc = (struct reiserfs_journal_desc *)(d_bh)->b_data;
4092 memset(d_bh->b_data, 0, d_bh->b_size);
4093 memcpy(get_journal_desc_magic(d_bh), JOURNAL_DESC_MAGIC, 8);
4094 set_desc_trans_id(desc, journal->j_trans_id);
4097 * setup commit block. Don't write (keep it clean too) this one
4098 * until after everyone else is written
4100 c_bh = journal_getblk(sb, SB_ONDISK_JOURNAL_1st_BLOCK(sb) +
4101 ((journal->j_start + journal->j_len +
4102 1) % SB_ONDISK_JOURNAL_SIZE(sb)));
4103 commit = (struct reiserfs_journal_commit *)c_bh->b_data;
4104 memset(c_bh->b_data, 0, c_bh->b_size);
4105 set_commit_trans_id(commit, journal->j_trans_id);
4106 set_buffer_uptodate(c_bh);
4108 /* init this journal list */
4109 jl = journal->j_current_jl;
4112 * we lock the commit before doing anything because
4113 * we want to make sure nobody tries to run flush_commit_list until
4114 * the new transaction is fully setup, and we've already flushed the
4117 reiserfs_mutex_lock_safe(&jl->j_commit_mutex, sb);
4119 /* save the transaction id in case we need to commit it later */
4120 commit_trans_id = jl->j_trans_id;
4122 atomic_set(&jl->j_older_commits_done, 0);
4123 jl->j_trans_id = journal->j_trans_id;
4124 jl->j_timestamp = journal->j_trans_start_time;
4125 jl->j_commit_bh = c_bh;
4126 jl->j_start = journal->j_start;
4127 jl->j_len = journal->j_len;
4128 atomic_set(&jl->j_nonzerolen, journal->j_len);
4129 atomic_set(&jl->j_commit_left, journal->j_len + 2);
4130 jl->j_realblock = NULL;
4133 * The ENTIRE FOR LOOP MUST not cause schedule to occur.
4134 * for each real block, add it to the journal list hash,
4135 * copy into real block index array in the commit or desc block
4137 trans_half = journal_trans_half(sb->s_blocksize);
4138 for (i = 0, cn = journal->j_first; cn; cn = cn->next, i++) {
4139 if (buffer_journaled(cn->bh)) {
4140 jl_cn = get_cnode(sb);
4142 reiserfs_panic(sb, "journal-1676",
4143 "get_cnode returned NULL");
4146 jl->j_realblock = jl_cn;
4148 jl_cn->prev = last_cn;
4151 last_cn->next = jl_cn;
4155 * make sure the block we are trying to log
4156 * is not a block of journal or reserved area
4158 if (is_block_in_log_or_reserved_area
4159 (sb, cn->bh->b_blocknr)) {
4160 reiserfs_panic(sb, "journal-2332",
4161 "Trying to log block %lu, "
4162 "which is a log block",
4165 jl_cn->blocknr = cn->bh->b_blocknr;
4170 insert_journal_hash(journal->j_list_hash_table, jl_cn);
4171 if (i < trans_half) {
4172 desc->j_realblock[i] =
4173 cpu_to_le32(cn->bh->b_blocknr);
4175 commit->j_realblock[i - trans_half] =
4176 cpu_to_le32(cn->bh->b_blocknr);
4182 set_desc_trans_len(desc, journal->j_len);
4183 set_desc_mount_id(desc, journal->j_mount_id);
4184 set_desc_trans_id(desc, journal->j_trans_id);
4185 set_commit_trans_len(commit, journal->j_len);
4188 * special check in case all buffers in the journal
4189 * were marked for not logging
4191 BUG_ON(journal->j_len == 0);
4194 * we're about to dirty all the log blocks, mark the description block
4195 * dirty now too. Don't mark the commit block dirty until all the
4196 * others are on disk
4198 mark_buffer_dirty(d_bh);
4201 * first data block is j_start + 1, so add one to
4202 * cur_write_start wherever you use it
4204 cur_write_start = journal->j_start;
4205 cn = journal->j_first;
4206 jindex = 1; /* start at one so we don't get the desc again */
4208 clear_buffer_journal_new(cn->bh);
4209 /* copy all the real blocks into log area. dirty log blocks */
4210 if (buffer_journaled(cn->bh)) {
4211 struct buffer_head *tmp_bh;
4216 SB_ONDISK_JOURNAL_1st_BLOCK(sb) +
4219 SB_ONDISK_JOURNAL_SIZE(sb)));
4220 set_buffer_uptodate(tmp_bh);
4221 page = cn->bh->b_page;
4223 memcpy(tmp_bh->b_data,
4224 addr + offset_in_page(cn->bh->b_data),
4227 mark_buffer_dirty(tmp_bh);
4229 set_buffer_journal_dirty(cn->bh);
4230 clear_buffer_journaled(cn->bh);
4233 * JDirty cleared sometime during transaction.
4234 * don't log this one
4236 reiserfs_warning(sb, "journal-2048",
4237 "BAD, buffer in journal hash, "
4244 reiserfs_cond_resched(sb);
4248 * we are done with both the c_bh and d_bh, but
4249 * c_bh must be written after all other commit blocks,
4250 * so we dirty/relse c_bh in flush_commit_list, with commit_left <= 1.
4253 journal->j_current_jl = alloc_journal_list(sb);
4255 /* now it is safe to insert this transaction on the main list */
4256 list_add_tail(&jl->j_list, &journal->j_journal_list);
4257 list_add_tail(&jl->j_working_list, &journal->j_working_list);
4258 journal->j_num_work_lists++;
4260 /* reset journal values for the next transaction */
4261 old_start = journal->j_start;
4263 (journal->j_start + journal->j_len +
4264 2) % SB_ONDISK_JOURNAL_SIZE(sb);
4265 atomic_set(&journal->j_wcount, 0);
4266 journal->j_bcount = 0;
4267 journal->j_last = NULL;
4268 journal->j_first = NULL;
4270 journal->j_trans_start_time = 0;
4271 /* check for trans_id overflow */
4272 if (++journal->j_trans_id == 0)
4273 journal->j_trans_id = 10;
4274 journal->j_current_jl->j_trans_id = journal->j_trans_id;
4275 journal->j_must_wait = 0;
4276 journal->j_len_alloc = 0;
4277 journal->j_next_full_flush = 0;
4278 journal->j_next_async_flush = 0;
4279 init_journal_hash(sb);
4282 * make sure reiserfs_add_jh sees the new current_jl before we
4283 * write out the tails
4288 * tail conversion targets have to hit the disk before we end the
4289 * transaction. Otherwise a later transaction might repack the tail
4290 * before this transaction commits, leaving the data block unflushed
4291	 * and clean. If we crash before the later transaction commits, the
4292 * data block is lost.
4294 if (!list_empty(&jl->j_tail_bh_list)) {
4295 depth = reiserfs_write_unlock_nested(sb);
4296 write_ordered_buffers(&journal->j_dirty_buffers_lock,
4297 journal, jl, &jl->j_tail_bh_list);
4298 reiserfs_write_lock_nested(sb, depth);
4300 BUG_ON(!list_empty(&jl->j_tail_bh_list));
4301 mutex_unlock(&jl->j_commit_mutex);
4304 * honor the flush wishes from the caller, simple commits can
4305 * be done outside the journal lock, they are done below
4307 * if we don't flush the commit list right now, we put it into
4308 * the work queue so the people waiting on the async progress work
4309 * queue don't wait for this proc to flush journal lists and such.
4312 flush_commit_list(sb, jl, 1);
4313 flush_journal_list(sb, jl, 1);
4314 } else if (!(jl->j_state & LIST_COMMIT_PENDING)) {
4316 * Avoid queueing work when sb is being shut down. Transaction
4317 * will be flushed on journal shutdown.
4319 if (sb->s_flags & MS_ACTIVE)
4320 queue_delayed_work(REISERFS_SB(sb)->commit_wq,
4321 &journal->j_work, HZ / 10);
4325 * if the next transaction has any chance of wrapping, flush
4326 * transactions that might get overwritten. If any journal lists
4327 * are very old flush them as well.
4330 list_for_each_safe(entry, safe, &journal->j_journal_list) {
4331 temp_jl = JOURNAL_LIST_ENTRY(entry);
4332 if (journal->j_start <= temp_jl->j_start) {
4333 if ((journal->j_start + journal->j_trans_max + 1) >=
4335 flush_used_journal_lists(sb, temp_jl);
4337 } else if ((journal->j_start +
4338 journal->j_trans_max + 1) <
4339 SB_ONDISK_JOURNAL_SIZE(sb)) {
4341 * if we don't cross into the next
4342 * transaction and we don't wrap, there is
4343 * no way we can overlap any later transactions
4348 } else if ((journal->j_start +
4349 journal->j_trans_max + 1) >
4350 SB_ONDISK_JOURNAL_SIZE(sb)) {
4351 if (((journal->j_start + journal->j_trans_max + 1) %
4352 SB_ONDISK_JOURNAL_SIZE(sb)) >=
4354 flush_used_journal_lists(sb, temp_jl);
4358			 * we don't overlap anything from our start
4359 * to the end of the log, and our wrapped
4360 * portion doesn't overlap anything at
4361 * the start of the log. We can break
4368 journal->j_current_jl->j_list_bitmap =
4369 get_list_bitmap(sb, journal->j_current_jl);
4371 if (!(journal->j_current_jl->j_list_bitmap)) {
4372 reiserfs_panic(sb, "journal-1996",
4373 "could not get a list bitmap");
4376 atomic_set(&journal->j_jlock, 0);
4378	/* wake up anybody waiting to join. */
4379 clear_bit(J_WRITERS_QUEUED, &journal->j_state);
4380 wake_up(&journal->j_join_wait);
4382 if (!flush && wait_on_commit &&
4383 journal_list_still_alive(sb, commit_trans_id)) {
4384 flush_commit_list(sb, jl, 1);
4387 reiserfs_check_lock_depth(sb, "journal end2");
4389 memset(th, 0, sizeof(*th));
4391 * Re-set th->t_super, so we can properly keep track of how many
4392 * persistent transactions there are. We need to do this so if this
4393 * call is part of a failed restart_transaction, we can free it later
4397 return journal->j_errno;
4400 /* Set the file system read only and refuse new transactions */
4401 void reiserfs_abort_journal(struct super_block *sb, int errno)
4403 struct reiserfs_journal *journal = SB_JOURNAL(sb);
4404 if (test_bit(J_ABORTED, &journal->j_state))
4407 if (!journal->j_errno)
4408 journal->j_errno = errno;
4410 sb->s_flags |= MS_RDONLY;
4411 set_bit(J_ABORTED, &journal->j_state);
4413 #ifdef CONFIG_REISERFS_CHECK