GNU Linux-libre 5.10.153-gnu1
[releases.git] fs/jbd2/commit.c
1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * linux/fs/jbd2/commit.c
4  *
5  * Written by Stephen C. Tweedie <sct@redhat.com>, 1998
6  *
7  * Copyright 1998 Red Hat corp --- All Rights Reserved
8  *
9  * Journal commit routines for the generic filesystem journaling code;
10  * part of the ext2fs journaling system.
11  */
12
13 #include <linux/time.h>
14 #include <linux/fs.h>
15 #include <linux/jbd2.h>
16 #include <linux/errno.h>
17 #include <linux/slab.h>
18 #include <linux/mm.h>
19 #include <linux/pagemap.h>
20 #include <linux/jiffies.h>
21 #include <linux/crc32.h>
22 #include <linux/writeback.h>
23 #include <linux/backing-dev.h>
24 #include <linux/bio.h>
25 #include <linux/blkdev.h>
26 #include <linux/bitops.h>
27 #include <trace/events/jbd2.h>
28
29 /*
30  * IO end handler for temporary buffer_heads handling writes to the journal.
31  */
32 static void journal_end_buffer_io_sync(struct buffer_head *bh, int uptodate)
33 {
34         struct buffer_head *orig_bh = bh->b_private;
35
36         BUFFER_TRACE(bh, "");
37         if (uptodate)
38                 set_buffer_uptodate(bh);
39         else
40                 clear_buffer_uptodate(bh);
41         if (orig_bh) {
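                /*
                 * The temporary buffer just written was shadowing a metadata
                 * buffer. Clear the shadow bit with release semantics and
                 * wake up anyone sleeping on BH_Shadow, waiting to get write
                 * access to the original buffer again.
                 */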
42                 clear_bit_unlock(BH_Shadow, &orig_bh->b_state);
43                 smp_mb__after_atomic();
44                 wake_up_bit(&orig_bh->b_state, BH_Shadow);
45         }
46         unlock_buffer(bh);
47 }
48
49 /*
50  * When an ext4 file is truncated, it is possible that some pages are not
51  * successfully freed, because they are attached to a committing transaction.
52  * After the transaction commits, these pages are left on the LRU, with no
53  * ->mapping, and with attached buffers.  These pages are trivially reclaimable
54  * by the VM, but their apparent absence upsets the VM accounting, and it makes
55  * the numbers in /proc/meminfo look odd.
56  *
57  * So here, we have a buffer which has just come off the forget list.  Look to
58  * see if we can strip all buffers from the backing page.
59  *
60  * Called under lock_journal(), and possibly under journal_datalist_lock.  The
61  * caller provided us with a ref against the buffer, and we drop that here.
62  */
63 static void release_buffer_page(struct buffer_head *bh)
64 {
65         struct page *page;
66
67         if (buffer_dirty(bh))
68                 goto nope;
69         if (atomic_read(&bh->b_count) != 1)
70                 goto nope;
71         page = bh->b_page;
72         if (!page)
73                 goto nope;
74         if (page->mapping)
75                 goto nope;
76
77         /* OK, it's a truncated page */
78         if (!trylock_page(page))
79                 goto nope;
80
81         get_page(page);
82         __brelse(bh);
83         try_to_free_buffers(page);
84         unlock_page(page);
85         put_page(page);
86         return;
87
88 nope:
89         __brelse(bh);
90 }
91
92 static void jbd2_commit_block_csum_set(journal_t *j, struct buffer_head *bh)
93 {
94         struct commit_header *h;
95         __u32 csum;
96
97         if (!jbd2_journal_has_csum_v2or3(j))
98                 return;
99
100         h = (struct commit_header *)(bh->b_data);
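        /*
         * The checksum is computed over the whole commit block with the
         * checksum fields themselves zeroed, so clear them first.
         */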
101         h->h_chksum_type = 0;
102         h->h_chksum_size = 0;
103         h->h_chksum[0] = 0;
104         csum = jbd2_chksum(j, j->j_csum_seed, bh->b_data, j->j_blocksize);
105         h->h_chksum[0] = cpu_to_be32(csum);
106 }
107
108 /*
109  * Done it all: now submit the commit record.  We should have
110  * cleaned up our previous buffers by now, so if we are in abort
111  * mode we can now just skip the rest of the journal write
112  * entirely.
113  *
114  * Returns 1 if the journal needs to be aborted or 0 on success
115  */
116 static int journal_submit_commit_record(journal_t *journal,
117                                         transaction_t *commit_transaction,
118                                         struct buffer_head **cbh,
119                                         __u32 crc32_sum)
120 {
121         struct commit_header *tmp;
122         struct buffer_head *bh;
123         int ret;
124         struct timespec64 now;
125
126         *cbh = NULL;
127
128         if (is_journal_aborted(journal))
129                 return 0;
130
131         bh = jbd2_journal_get_descriptor_buffer(commit_transaction,
132                                                 JBD2_COMMIT_BLOCK);
133         if (!bh)
134                 return 1;
135
136         tmp = (struct commit_header *)bh->b_data;
137         ktime_get_coarse_real_ts64(&now);
138         tmp->h_commit_sec = cpu_to_be64(now.tv_sec);
139         tmp->h_commit_nsec = cpu_to_be32(now.tv_nsec);
140
141         if (jbd2_has_feature_checksum(journal)) {
142                 tmp->h_chksum_type      = JBD2_CRC32_CHKSUM;
143                 tmp->h_chksum_size      = JBD2_CRC32_CHKSUM_SIZE;
144                 tmp->h_chksum[0]        = cpu_to_be32(crc32_sum);
145         }
146         jbd2_commit_block_csum_set(journal, bh);
147
148         BUFFER_TRACE(bh, "submit commit block");
149         lock_buffer(bh);
150         clear_buffer_dirty(bh);
151         set_buffer_uptodate(bh);
152         bh->b_end_io = journal_end_buffer_io_sync;
153
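        /*
         * For a synchronous (barrier) commit the commit block must not reach
         * the media before the preceding journal blocks, so it is issued
         * with a preceding cache flush and FUA.  With async commit the
         * checksum over the whole transaction makes that ordering
         * unnecessary, so a plain REQ_SYNC write is enough.
         */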
154         if (journal->j_flags & JBD2_BARRIER &&
155             !jbd2_has_feature_async_commit(journal))
156                 ret = submit_bh(REQ_OP_WRITE,
157                         REQ_SYNC | REQ_PREFLUSH | REQ_FUA, bh);
158         else
159                 ret = submit_bh(REQ_OP_WRITE, REQ_SYNC, bh);
160
161         *cbh = bh;
162         return ret;
163 }
164
165 /*
166  * This function, together with journal_submit_commit_record(),
167  * allows the commit record to be written asynchronously.
168  */
169 static int journal_wait_on_commit_record(journal_t *journal,
170                                          struct buffer_head *bh)
171 {
172         int ret = 0;
173
174         clear_buffer_dirty(bh);
175         wait_on_buffer(bh);
176
177         if (unlikely(!buffer_uptodate(bh)))
178                 ret = -EIO;
179         put_bh(bh);            /* One for getblk() */
180
181         return ret;
182 }
183
184 /*
185  * Write the filemap data using the writepage() address_space_operation.
186  * We don't do block allocation here even for delalloc. We don't
187  * use writepages() because with delayed allocation we may be doing
188  * block allocation in writepages().
189  */
190 int jbd2_journal_submit_inode_data_buffers(struct jbd2_inode *jinode)
191 {
192         struct address_space *mapping = jinode->i_vfs_inode->i_mapping;
193         struct writeback_control wbc = {
194                 .sync_mode =  WB_SYNC_ALL,
195                 .nr_to_write = mapping->nrpages * 2,
196                 .range_start = jinode->i_dirty_start,
197                 .range_end = jinode->i_dirty_end,
198         };
199
200         /*
201          * Submit the inode data buffers. We use writepage
202          * instead of writepages because writepages can do
203          * block allocation with delalloc, and we need to write
204          * only already-allocated blocks here.
205          */
206         return generic_writepages(mapping, &wbc);
207 }
208
209 /* Send all the data buffers related to an inode */
210 int jbd2_submit_inode_data(struct jbd2_inode *jinode)
211 {
212
213         if (!jinode || !(jinode->i_flags & JI_WRITE_DATA))
214                 return 0;
215
216         trace_jbd2_submit_inode_data(jinode->i_vfs_inode);
217         return jbd2_journal_submit_inode_data_buffers(jinode);
218
219 }
220 EXPORT_SYMBOL(jbd2_submit_inode_data);
221
222 int jbd2_wait_inode_data(journal_t *journal, struct jbd2_inode *jinode)
223 {
224         if (!jinode || !(jinode->i_flags & JI_WAIT_DATA) ||
225                 !jinode->i_vfs_inode || !jinode->i_vfs_inode->i_mapping)
226                 return 0;
227         return filemap_fdatawait_range_keep_errors(
228                 jinode->i_vfs_inode->i_mapping, jinode->i_dirty_start,
229                 jinode->i_dirty_end);
230 }
231 EXPORT_SYMBOL(jbd2_wait_inode_data);
232
233 /*
234  * Submit all the data buffers of the inodes associated with the transaction
235  * to disk.
236  *
237  * We are in a committing transaction. Therefore no new inode can be added to
238  * our inode list. We use the JI_COMMIT_RUNNING flag to protect the inode we
239  * currently operate on from being released while we write out its pages.
240  */
241 static int journal_submit_data_buffers(journal_t *journal,
242                 transaction_t *commit_transaction)
243 {
244         struct jbd2_inode *jinode;
245         int err, ret = 0;
246
247         spin_lock(&journal->j_list_lock);
248         list_for_each_entry(jinode, &commit_transaction->t_inode_list, i_list) {
249                 if (!(jinode->i_flags & JI_WRITE_DATA))
250                         continue;
251                 jinode->i_flags |= JI_COMMIT_RUNNING;
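                /*
                 * JI_COMMIT_RUNNING protects the inode from being released
                 * while we write out its pages, so it is safe to drop
                 * j_list_lock around the (possibly blocking) submission
                 * below.
                 */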
252                 spin_unlock(&journal->j_list_lock);
253                 /* submit the inode data buffers. */
254                 trace_jbd2_submit_inode_data(jinode->i_vfs_inode);
255                 if (journal->j_submit_inode_data_buffers) {
256                         err = journal->j_submit_inode_data_buffers(jinode);
257                         if (!ret)
258                                 ret = err;
259                 }
260                 spin_lock(&journal->j_list_lock);
261                 J_ASSERT(jinode->i_transaction == commit_transaction);
262                 jinode->i_flags &= ~JI_COMMIT_RUNNING;
263                 smp_mb();
264                 wake_up_bit(&jinode->i_flags, __JI_COMMIT_RUNNING);
265         }
266         spin_unlock(&journal->j_list_lock);
267         return ret;
268 }
269
270 int jbd2_journal_finish_inode_data_buffers(struct jbd2_inode *jinode)
271 {
272         struct address_space *mapping = jinode->i_vfs_inode->i_mapping;
273
274         return filemap_fdatawait_range_keep_errors(mapping,
275                                                    jinode->i_dirty_start,
276                                                    jinode->i_dirty_end);
277 }
278
279 /*
280  * Wait for data submitted for writeout, refile inodes to proper
281  * transaction if needed.
282  *
283  */
284 static int journal_finish_inode_data_buffers(journal_t *journal,
285                 transaction_t *commit_transaction)
286 {
287         struct jbd2_inode *jinode, *next_i;
288         int err, ret = 0;
289
290         /* For locking, see the comment in journal_submit_data_buffers() */
291         spin_lock(&journal->j_list_lock);
292         list_for_each_entry(jinode, &commit_transaction->t_inode_list, i_list) {
293                 if (!(jinode->i_flags & JI_WAIT_DATA))
294                         continue;
295                 jinode->i_flags |= JI_COMMIT_RUNNING;
296                 spin_unlock(&journal->j_list_lock);
297                 /* wait for the inode data buffers writeout. */
298                 if (journal->j_finish_inode_data_buffers) {
299                         err = journal->j_finish_inode_data_buffers(jinode);
300                         if (!ret)
301                                 ret = err;
302                 }
303                 spin_lock(&journal->j_list_lock);
304                 jinode->i_flags &= ~JI_COMMIT_RUNNING;
305                 smp_mb();
306                 wake_up_bit(&jinode->i_flags, __JI_COMMIT_RUNNING);
307         }
308
309         /* Now refile inode to proper lists */
310         list_for_each_entry_safe(jinode, next_i,
311                                  &commit_transaction->t_inode_list, i_list) {
312                 list_del(&jinode->i_list);
313                 if (jinode->i_next_transaction) {
314                         jinode->i_transaction = jinode->i_next_transaction;
315                         jinode->i_next_transaction = NULL;
316                         list_add(&jinode->i_list,
317                                 &jinode->i_transaction->t_inode_list);
318                 } else {
319                         jinode->i_transaction = NULL;
320                         jinode->i_dirty_start = 0;
321                         jinode->i_dirty_end = 0;
322                 }
323         }
324         spin_unlock(&journal->j_list_lock);
325
326         return ret;
327 }
328
329 static __u32 jbd2_checksum_data(__u32 crc32_sum, struct buffer_head *bh)
330 {
331         struct page *page = bh->b_page;
332         char *addr;
333         __u32 checksum;
334
335         addr = kmap_atomic(page);
336         checksum = crc32_be(crc32_sum,
337                 (void *)(addr + offset_in_page(bh->b_data)), bh->b_size);
338         kunmap_atomic(addr);
339
340         return checksum;
341 }
342
343 static void write_tag_block(journal_t *j, journal_block_tag_t *tag,
344                                    unsigned long long block)
345 {
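        /*
         * The low 32 bits of the block number always go in t_blocknr; the
         * high bits are stored only when the 64bit journal feature is set.
         */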
346         tag->t_blocknr = cpu_to_be32(block & (u32)~0);
347         if (jbd2_has_feature_64bit(j))
348                 tag->t_blocknr_high = cpu_to_be32((block >> 31) >> 1);
349 }
350
351 static void jbd2_block_tag_csum_set(journal_t *j, journal_block_tag_t *tag,
352                                     struct buffer_head *bh, __u32 sequence)
353 {
354         journal_block_tag3_t *tag3 = (journal_block_tag3_t *)tag;
355         struct page *page = bh->b_page;
356         __u8 *addr;
357         __u32 csum32;
358         __be32 seq;
359
360         if (!jbd2_journal_has_csum_v2or3(j))
361                 return;
362
363         seq = cpu_to_be32(sequence);
364         addr = kmap_atomic(page);
365         csum32 = jbd2_chksum(j, j->j_csum_seed, (__u8 *)&seq, sizeof(seq));
366         csum32 = jbd2_chksum(j, csum32, addr + offset_in_page(bh->b_data),
367                              bh->b_size);
368         kunmap_atomic(addr);
369
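        /*
         * csum3 tags carry the full 32-bit checksum; the older csum2 tag
         * format only has room for the low 16 bits.
         */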
370         if (jbd2_has_feature_csum3(j))
371                 tag3->t_checksum = cpu_to_be32(csum32);
372         else
373                 tag->t_checksum = cpu_to_be16(csum32);
374 }
375 /*
376  * jbd2_journal_commit_transaction
377  *
378  * The primary function for committing a transaction to the log.  This
379  * function is called by the journal thread to begin a complete commit.
380  */
381 void jbd2_journal_commit_transaction(journal_t *journal)
382 {
383         struct transaction_stats_s stats;
384         transaction_t *commit_transaction;
385         struct journal_head *jh;
386         struct buffer_head *descriptor;
387         struct buffer_head **wbuf = journal->j_wbuf;
388         int bufs;
389         int flags;
390         int err;
391         unsigned long long blocknr;
392         ktime_t start_time;
393         u64 commit_time;
394         char *tagp = NULL;
395         journal_block_tag_t *tag = NULL;
396         int space_left = 0;
397         int first_tag = 0;
398         int tag_flag;
399         int i;
400         int tag_bytes = journal_tag_bytes(journal);
401         struct buffer_head *cbh = NULL; /* For transactional checksums */
402         __u32 crc32_sum = ~0;
403         struct blk_plug plug;
404         /* Tail of the journal */
405         unsigned long first_block;
406         tid_t first_tid;
407         int update_tail;
408         int csum_size = 0;
409         LIST_HEAD(io_bufs);
410         LIST_HEAD(log_bufs);
411
412         if (jbd2_journal_has_csum_v2or3(journal))
413                 csum_size = sizeof(struct jbd2_journal_block_tail);
414
415         /*
416          * First job: lock down the current transaction and wait for
417          * all outstanding updates to complete.
418          */
419
420         /* Do we need to erase the effects of a prior jbd2_journal_flush? */
421         if (journal->j_flags & JBD2_FLUSHED) {
422                 jbd_debug(3, "super block updated\n");
423                 mutex_lock_io(&journal->j_checkpoint_mutex);
424                 /*
425                  * We hold j_checkpoint_mutex so tail cannot change under us.
426                  * We don't need any special data guarantees for writing sb
427                  * since journal is empty and it is ok for write to be
428                  * flushed only with transaction commit.
429                  */
430                 jbd2_journal_update_sb_log_tail(journal,
431                                                 journal->j_tail_sequence,
432                                                 journal->j_tail,
433                                                 REQ_SYNC);
434                 mutex_unlock(&journal->j_checkpoint_mutex);
435         } else {
436                 jbd_debug(3, "superblock not updated\n");
437         }
438
439         J_ASSERT(journal->j_running_transaction != NULL);
440         J_ASSERT(journal->j_committing_transaction == NULL);
441
442         write_lock(&journal->j_state_lock);
443         journal->j_flags |= JBD2_FULL_COMMIT_ONGOING;
444         while (journal->j_flags & JBD2_FAST_COMMIT_ONGOING) {
445                 DEFINE_WAIT(wait);
446
447                 prepare_to_wait(&journal->j_fc_wait, &wait,
448                                 TASK_UNINTERRUPTIBLE);
449                 write_unlock(&journal->j_state_lock);
450                 schedule();
451                 write_lock(&journal->j_state_lock);
452                 finish_wait(&journal->j_fc_wait, &wait);
453                 /*
454                  * TODO: by blocking fast commits here, we are increasing
455                  * fsync() latency slightly. Strictly speaking, we don't need
456                  * to block fast commits until the transaction enters T_FLUSH
457                  * state. So an optimization is possible where we block new fast
458                  * commits here and wait for existing ones to complete
459                  * just before we enter T_FLUSH. That way, the existing fast
460                  * commits and this full commit can proceed in parallel.
461                  */
462         }
463         write_unlock(&journal->j_state_lock);
464
465         commit_transaction = journal->j_running_transaction;
466
467         trace_jbd2_start_commit(journal, commit_transaction);
468         jbd_debug(1, "JBD2: starting commit of transaction %d\n",
469                         commit_transaction->t_tid);
470
471         write_lock(&journal->j_state_lock);
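        /*
         * This full commit supersedes any fast commit blocks written so far,
         * so rewind the fast commit area.
         */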
472         journal->j_fc_off = 0;
473         J_ASSERT(commit_transaction->t_state == T_RUNNING);
474         commit_transaction->t_state = T_LOCKED;
475
476         trace_jbd2_commit_locking(journal, commit_transaction);
477         stats.run.rs_wait = commit_transaction->t_max_wait;
478         stats.run.rs_request_delay = 0;
479         stats.run.rs_locked = jiffies;
480         if (commit_transaction->t_requested)
481                 stats.run.rs_request_delay =
482                         jbd2_time_diff(commit_transaction->t_requested,
483                                        stats.run.rs_locked);
484         stats.run.rs_running = jbd2_time_diff(commit_transaction->t_start,
485                                               stats.run.rs_locked);
486
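        /*
         * Wait until all handles attached to this transaction have finished.
         * The prepare_to_wait()/recheck pattern drops both locks before
         * sleeping so that the final handle can complete and wake us on
         * j_wait_updates.
         */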
487         spin_lock(&commit_transaction->t_handle_lock);
488         while (atomic_read(&commit_transaction->t_updates)) {
489                 DEFINE_WAIT(wait);
490
491                 prepare_to_wait(&journal->j_wait_updates, &wait,
492                                         TASK_UNINTERRUPTIBLE);
493                 if (atomic_read(&commit_transaction->t_updates)) {
494                         spin_unlock(&commit_transaction->t_handle_lock);
495                         write_unlock(&journal->j_state_lock);
496                         schedule();
497                         write_lock(&journal->j_state_lock);
498                         spin_lock(&commit_transaction->t_handle_lock);
499                 }
500                 finish_wait(&journal->j_wait_updates, &wait);
501         }
502         spin_unlock(&commit_transaction->t_handle_lock);
503         commit_transaction->t_state = T_SWITCH;
504
505         J_ASSERT (atomic_read(&commit_transaction->t_outstanding_credits) <=
506                         journal->j_max_transaction_buffers);
507
508         /*
509          * First thing we are allowed to do is to discard any remaining
510          * BJ_Reserved buffers.  Note, it is _not_ permissible to assume
511          * that there are no such buffers: if a large filesystem
512          * operation like a truncate needs to split itself over multiple
513          * transactions, then it may try to do a jbd2_journal_restart() while
514          * there are still BJ_Reserved buffers outstanding.  These must
515          * be released cleanly from the current transaction.
516          *
517          * In this case, the filesystem must still reserve write access
518          * again before modifying the buffer in the new transaction, but
519          * we do not require it to remember exactly which old buffers it
520          * has reserved.  This is consistent with the existing behaviour
521          * that multiple jbd2_journal_get_write_access() calls to the same
522          * buffer are perfectly permissible.
523          * We use journal->j_state_lock here to serialize processing of
524          * t_reserved_list with eviction of buffers from journal_unmap_buffer().
525          */
526         while (commit_transaction->t_reserved_list) {
527                 jh = commit_transaction->t_reserved_list;
528                 JBUFFER_TRACE(jh, "reserved, unused: refile");
529                 /*
530                  * A jbd2_journal_get_undo_access()+jbd2_journal_release_buffer() may
531                  * leave undo-committed data.
532                  */
533                 if (jh->b_committed_data) {
534                         struct buffer_head *bh = jh2bh(jh);
535
536                         spin_lock(&jh->b_state_lock);
537                         jbd2_free(jh->b_committed_data, bh->b_size);
538                         jh->b_committed_data = NULL;
539                         spin_unlock(&jh->b_state_lock);
540                 }
541                 jbd2_journal_refile_buffer(journal, jh);
542         }
543
544         write_unlock(&journal->j_state_lock);
545         /*
546          * Now try to drop any written-back buffers from the journal's
547          * checkpoint lists.  We do this *before* commit because it potentially
548          * frees some memory
549          */
550         spin_lock(&journal->j_list_lock);
551         __jbd2_journal_clean_checkpoint_list(journal, false);
552         spin_unlock(&journal->j_list_lock);
553
554         jbd_debug(3, "JBD2: commit phase 1\n");
555
556         /*
557          * Clear the revoked flag to reflect that there are no revoked
558          * buffers in the next transaction which is about to be started.
559          */
560         jbd2_clear_buffer_revoked_flags(journal);
561
562         /*
563          * Switch to a new revoke table.
564          */
565         jbd2_journal_switch_revoke_table(journal);
566
567         write_lock(&journal->j_state_lock);
568         /*
569          * Reserved credits cannot be claimed anymore, free them
570          */
571         atomic_sub(atomic_read(&journal->j_reserved_credits),
572                    &commit_transaction->t_outstanding_credits);
573
574         trace_jbd2_commit_flushing(journal, commit_transaction);
575         stats.run.rs_flushing = jiffies;
576         stats.run.rs_locked = jbd2_time_diff(stats.run.rs_locked,
577                                              stats.run.rs_flushing);
578
579         commit_transaction->t_state = T_FLUSH;
580         journal->j_committing_transaction = commit_transaction;
581         journal->j_running_transaction = NULL;
582         start_time = ktime_get();
583         commit_transaction->t_log_start = journal->j_head;
584         wake_up_all(&journal->j_wait_transaction_locked);
585         write_unlock(&journal->j_state_lock);
586
587         jbd_debug(3, "JBD2: commit phase 2a\n");
588
589         /*
590          * Now start flushing things to disk, in the order they appear
591          * on the transaction lists.  Data blocks go first.
592          */
593         err = journal_submit_data_buffers(journal, commit_transaction);
594         if (err)
595                 jbd2_journal_abort(journal, err);
596
597         blk_start_plug(&plug);
598         jbd2_journal_write_revoke_records(commit_transaction, &log_bufs);
599
600         jbd_debug(3, "JBD2: commit phase 2b\n");
601
602         /*
603          * Way to go: we have now written out all of the data for a
604          * transaction!  Now comes the tricky part: we need to write out
605          * metadata.  Loop over the transaction's entire buffer list:
606          */
607         write_lock(&journal->j_state_lock);
608         commit_transaction->t_state = T_COMMIT;
609         write_unlock(&journal->j_state_lock);
610
611         trace_jbd2_commit_logging(journal, commit_transaction);
612         stats.run.rs_logging = jiffies;
613         stats.run.rs_flushing = jbd2_time_diff(stats.run.rs_flushing,
614                                                stats.run.rs_logging);
615         stats.run.rs_blocks = commit_transaction->t_nr_buffers;
616         stats.run.rs_blocks_logged = 0;
617
618         J_ASSERT(commit_transaction->t_nr_buffers <=
619                  atomic_read(&commit_transaction->t_outstanding_credits));
620
621         err = 0;
622         bufs = 0;
623         descriptor = NULL;
624         while (commit_transaction->t_buffers) {
625
626                 /* Find the next buffer to be journaled... */
627
628                 jh = commit_transaction->t_buffers;
629
630                 /* If we're in abort mode, we just un-journal the buffer and
631                    release it. */
632
633                 if (is_journal_aborted(journal)) {
634                         clear_buffer_jbddirty(jh2bh(jh));
635                         JBUFFER_TRACE(jh, "journal is aborting: refile");
636                         jbd2_buffer_abort_trigger(jh,
637                                                   jh->b_frozen_data ?
638                                                   jh->b_frozen_triggers :
639                                                   jh->b_triggers);
640                         jbd2_journal_refile_buffer(journal, jh);
641                         /* If that was the last one, we need to clean up
642                          * any descriptor buffers which may have been
643                          * already allocated, even if we are now
644                          * aborting. */
645                         if (!commit_transaction->t_buffers)
646                                 goto start_journal_io;
647                         continue;
648                 }
649
650                 /* Make sure we have a descriptor block in which to
651                    record the metadata buffer. */
652
653                 if (!descriptor) {
654                         J_ASSERT (bufs == 0);
655
656                         jbd_debug(4, "JBD2: get descriptor\n");
657
658                         descriptor = jbd2_journal_get_descriptor_buffer(
659                                                         commit_transaction,
660                                                         JBD2_DESCRIPTOR_BLOCK);
661                         if (!descriptor) {
662                                 jbd2_journal_abort(journal, -EIO);
663                                 continue;
664                         }
665
666                         jbd_debug(4, "JBD2: got buffer %llu (%p)\n",
667                                 (unsigned long long)descriptor->b_blocknr,
668                                 descriptor->b_data);
669                         tagp = &descriptor->b_data[sizeof(journal_header_t)];
670                         space_left = descriptor->b_size -
671                                                 sizeof(journal_header_t);
672                         first_tag = 1;
673                         set_buffer_jwrite(descriptor);
674                         set_buffer_dirty(descriptor);
675                         wbuf[bufs++] = descriptor;
676
677                         /* Record it so that we can wait for IO
678                            completion later */
679                         BUFFER_TRACE(descriptor, "ph3: file as descriptor");
680                         jbd2_file_log_bh(&log_bufs, descriptor);
681                 }
682
683                 /* Where is the buffer to be written? */
684
685                 err = jbd2_journal_next_log_block(journal, &blocknr);
686                 /* If the block mapping failed, just abandon the buffer
687                    and repeat this loop: we'll fall into the
688                    refile-on-abort condition above. */
689                 if (err) {
690                         jbd2_journal_abort(journal, err);
691                         continue;
692                 }
693
694                 /*
695                  * start_this_handle() uses t_outstanding_credits to determine
696                  * the free space in the log.
697                  */
698                 atomic_dec(&commit_transaction->t_outstanding_credits);
699
700                 /* Bump b_count to prevent truncate from stumbling over
701                    the shadowed buffer!  @@@ This can go if we ever get
702                    rid of the shadow pairing of buffers. */
703                 atomic_inc(&jh2bh(jh)->b_count);
704
705                 /*
706                  * Make a temporary IO buffer with which to write it out
707                  * (this will requeue the metadata buffer to BJ_Shadow).
708                  */
709                 set_bit(BH_JWrite, &jh2bh(jh)->b_state);
710                 JBUFFER_TRACE(jh, "ph3: write metadata");
711                 flags = jbd2_journal_write_metadata_buffer(commit_transaction,
712                                                 jh, &wbuf[bufs], blocknr);
713                 if (flags < 0) {
714                         jbd2_journal_abort(journal, flags);
715                         continue;
716                 }
717                 jbd2_file_log_bh(&io_bufs, wbuf[bufs]);
718
719                 /* Record the new block's tag in the current descriptor
720                    buffer */
721
722                 tag_flag = 0;
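                /*
                 * A return value of 1 from jbd2_journal_write_metadata_buffer()
                 * means the buffer had to be escaped: its first word matched
                 * the JBD2 magic number and was zeroed in the journal copy,
                 * so tag it for recovery to restore.
                 */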
723                 if (flags & 1)
724                         tag_flag |= JBD2_FLAG_ESCAPE;
725                 if (!first_tag)
726                         tag_flag |= JBD2_FLAG_SAME_UUID;
727
728                 tag = (journal_block_tag_t *) tagp;
729                 write_tag_block(journal, tag, jh2bh(jh)->b_blocknr);
730                 tag->t_flags = cpu_to_be16(tag_flag);
731                 jbd2_block_tag_csum_set(journal, tag, wbuf[bufs],
732                                         commit_transaction->t_tid);
733                 tagp += tag_bytes;
734                 space_left -= tag_bytes;
735                 bufs++;
736
737                 if (first_tag) {
738                         memcpy (tagp, journal->j_uuid, 16);
739                         tagp += 16;
740                         space_left -= 16;
741                         first_tag = 0;
742                 }
743
744                 /* If there's no more to do, or if the descriptor is full,
745                    let the IO rip! */
746
747                 if (bufs == journal->j_wbufsize ||
748                     commit_transaction->t_buffers == NULL ||
749                     space_left < tag_bytes + 16 + csum_size) {
750
751                         jbd_debug(4, "JBD2: Submit %d IOs\n", bufs);
752
753                         /* Write an end-of-descriptor marker before
754                            submitting the IOs.  "tag" still points to
755                            the last tag we set up. */
756
757                         tag->t_flags |= cpu_to_be16(JBD2_FLAG_LAST_TAG);
758 start_journal_io:
759                         if (descriptor)
760                                 jbd2_descriptor_block_csum_set(journal,
761                                                         descriptor);
762
763                         for (i = 0; i < bufs; i++) {
764                                 struct buffer_head *bh = wbuf[i];
765                                 /*
766                                  * Compute checksum.
767                                  */
768                                 if (jbd2_has_feature_checksum(journal)) {
769                                         crc32_sum =
770                                             jbd2_checksum_data(crc32_sum, bh);
771                                 }
772
773                                 lock_buffer(bh);
774                                 clear_buffer_dirty(bh);
775                                 set_buffer_uptodate(bh);
776                                 bh->b_end_io = journal_end_buffer_io_sync;
777                                 submit_bh(REQ_OP_WRITE, REQ_SYNC, bh);
778                         }
779                         cond_resched();
780
781                         /* Force a new descriptor to be generated next
782                            time round the loop. */
783                         descriptor = NULL;
784                         bufs = 0;
785                 }
786         }
787
788         err = journal_finish_inode_data_buffers(journal, commit_transaction);
789         if (err) {
790                 printk(KERN_WARNING
791                         "JBD2: Detected IO errors while flushing file data "
792                        "on %s\n", journal->j_devname);
793                 if (journal->j_flags & JBD2_ABORT_ON_SYNCDATA_ERR)
794                         jbd2_journal_abort(journal, err);
795                 err = 0;
796         }
797
798         /*
799          * Get current oldest transaction in the log before we issue flush
800          * to the filesystem device. After the flush we can be sure that
801          * blocks of all older transactions are checkpointed to persistent
802          * storage and we will be safe to update journal start in the
803          * superblock with the numbers we get here.
804          */
805         update_tail =
806                 jbd2_journal_get_log_tail(journal, &first_tid, &first_block);
807
808         write_lock(&journal->j_state_lock);
809         if (update_tail) {
810                 long freed = first_block - journal->j_tail;
811
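                /* The journal is circular: if the new tail is behind the old
                 * one we have wrapped, so add the size of the journal area. */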
812                 if (first_block < journal->j_tail)
813                         freed += journal->j_last - journal->j_first;
814                 /* Update tail only if we free significant amount of space */
815                 if (freed < jbd2_journal_get_max_txn_bufs(journal))
816                         update_tail = 0;
817         }
818         J_ASSERT(commit_transaction->t_state == T_COMMIT);
819         commit_transaction->t_state = T_COMMIT_DFLUSH;
820         write_unlock(&journal->j_state_lock);
821
822         /* 
823          * If the journal is not located on the file system device,
824          * then we must flush the file system device before we issue
825          * the commit record
826          */
827         if (commit_transaction->t_need_data_flush &&
828             (journal->j_fs_dev != journal->j_dev) &&
829             (journal->j_flags & JBD2_BARRIER))
830                 blkdev_issue_flush(journal->j_fs_dev, GFP_NOFS);
831
832         /* Done it all: now write the commit record asynchronously. */
833         if (jbd2_has_feature_async_commit(journal)) {
834                 err = journal_submit_commit_record(journal, commit_transaction,
835                                                  &cbh, crc32_sum);
836                 if (err)
837                         jbd2_journal_abort(journal, err);
838         }
839
840         blk_finish_plug(&plug);
841
842         /* Lo and behold: we have just managed to send a transaction to
843            the log.  Before we can commit it, wait for the IO so far to
844            complete.  Control buffers being written are on the
845            transaction's t_log_list queue, and metadata buffers are on
846            the io_bufs list.
847
848            Wait for the buffers in reverse order.  That way we are
849            less likely to be woken up until all IOs have completed, and
850            so we incur less scheduling load.
851         */
852
853         jbd_debug(3, "JBD2: commit phase 3\n");
854
855         while (!list_empty(&io_bufs)) {
856                 struct buffer_head *bh = list_entry(io_bufs.prev,
857                                                     struct buffer_head,
858                                                     b_assoc_buffers);
859
860                 wait_on_buffer(bh);
861                 cond_resched();
862
863                 if (unlikely(!buffer_uptodate(bh)))
864                         err = -EIO;
865                 jbd2_unfile_log_bh(bh);
866                 stats.run.rs_blocks_logged++;
867
868                 /*
869                  * The list contains temporary buffer heads created by
870                  * jbd2_journal_write_metadata_buffer().
871                  */
872                 BUFFER_TRACE(bh, "dumping temporary bh");
873                 __brelse(bh);
874                 J_ASSERT_BH(bh, atomic_read(&bh->b_count) == 0);
875                 free_buffer_head(bh);
876
877                 /* We also have to refile the corresponding shadowed buffer */
878                 jh = commit_transaction->t_shadow_list->b_tprev;
879                 bh = jh2bh(jh);
880                 clear_buffer_jwrite(bh);
881                 J_ASSERT_BH(bh, buffer_jbddirty(bh));
882                 J_ASSERT_BH(bh, !buffer_shadow(bh));
883
884                 /* The metadata is now released for reuse, but we need
885                    to remember it against this transaction so that when
886                    we finally commit, we can do any checkpointing
887                    required. */
888                 JBUFFER_TRACE(jh, "file as BJ_Forget");
889                 jbd2_journal_file_buffer(jh, commit_transaction, BJ_Forget);
890                 JBUFFER_TRACE(jh, "brelse shadowed buffer");
891                 __brelse(bh);
892         }
893
894         J_ASSERT (commit_transaction->t_shadow_list == NULL);
895
896         jbd_debug(3, "JBD2: commit phase 4\n");
897
898         /* Here we wait for the revoke record and descriptor record buffers */
899         while (!list_empty(&log_bufs)) {
900                 struct buffer_head *bh;
901
902                 bh = list_entry(log_bufs.prev, struct buffer_head, b_assoc_buffers);
903                 wait_on_buffer(bh);
904                 cond_resched();
905
906                 if (unlikely(!buffer_uptodate(bh)))
907                         err = -EIO;
908
909                 BUFFER_TRACE(bh, "ph5: control buffer writeout done: unfile");
910                 clear_buffer_jwrite(bh);
911                 jbd2_unfile_log_bh(bh);
912                 stats.run.rs_blocks_logged++;
913                 __brelse(bh);           /* One for getblk */
914                 /* AKPM: bforget here */
915         }
916
917         if (err)
918                 jbd2_journal_abort(journal, err);
919
920         jbd_debug(3, "JBD2: commit phase 5\n");
921         write_lock(&journal->j_state_lock);
922         J_ASSERT(commit_transaction->t_state == T_COMMIT_DFLUSH);
923         commit_transaction->t_state = T_COMMIT_JFLUSH;
924         write_unlock(&journal->j_state_lock);
925
926         if (!jbd2_has_feature_async_commit(journal)) {
927                 err = journal_submit_commit_record(journal, commit_transaction,
928                                                 &cbh, crc32_sum);
929                 if (err)
930                         jbd2_journal_abort(journal, err);
931         }
932         if (cbh)
933                 err = journal_wait_on_commit_record(journal, cbh);
934         stats.run.rs_blocks_logged++;
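        /*
         * An async commit record was written without flush/FUA, so flush the
         * journal device's write cache now to make the whole transaction,
         * including the commit block, durable.
         */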
935         if (jbd2_has_feature_async_commit(journal) &&
936             journal->j_flags & JBD2_BARRIER) {
937                 blkdev_issue_flush(journal->j_dev, GFP_NOFS);
938         }
939
940         if (err)
941                 jbd2_journal_abort(journal, err);
942
943         WARN_ON_ONCE(
944                 atomic_read(&commit_transaction->t_outstanding_credits) < 0);
945
946         /*
947          * Now the disk caches for the filesystem device are flushed, so it is
948          * safe to erase checkpointed transactions from the log by updating the
949          * journal superblock.
950          */
951         if (update_tail)
952                 jbd2_update_log_tail(journal, first_tid, first_block);
953
954         /* End of a transaction!  Finally, we can do checkpoint
955            processing: any buffers committed as a result of this
956            transaction can be removed from any checkpoint list it was on
957            before. */
958
959         jbd_debug(3, "JBD2: commit phase 6\n");
960
961         J_ASSERT(list_empty(&commit_transaction->t_inode_list));
962         J_ASSERT(commit_transaction->t_buffers == NULL);
963         J_ASSERT(commit_transaction->t_checkpoint_list == NULL);
964         J_ASSERT(commit_transaction->t_shadow_list == NULL);
965
966 restart_loop:
967         /*
968          * As there are other places (journal_unmap_buffer()) adding buffers
969          * to this list we have to be careful and hold the j_list_lock.
970          */
971         spin_lock(&journal->j_list_lock);
972         while (commit_transaction->t_forget) {
973                 transaction_t *cp_transaction;
974                 struct buffer_head *bh;
975                 int try_to_free = 0;
976                 bool drop_ref;
977
978                 jh = commit_transaction->t_forget;
979                 spin_unlock(&journal->j_list_lock);
980                 bh = jh2bh(jh);
981                 /*
982                  * Get a reference so that bh cannot be freed before we are
983                  * done with it.
984                  */
985                 get_bh(bh);
986                 spin_lock(&jh->b_state_lock);
987                 J_ASSERT_JH(jh, jh->b_transaction == commit_transaction);
988
989                 /*
990                  * If there is undo-protected committed data against
991                  * this buffer, then we can remove it now.  If it is a
992                  * buffer needing such protection, the old frozen_data
993                  * field now points to a committed version of the
994                  * buffer, so rotate that field to the new committed
995                  * data.
996                  *
997                  * Otherwise, we can just throw away the frozen data now.
998                  *
999                  * We also know that the frozen data has already fired
1000                  * its triggers if they exist, so we can clear that too.
1001                  */
1002                 if (jh->b_committed_data) {
1003                         jbd2_free(jh->b_committed_data, bh->b_size);
1004                         jh->b_committed_data = NULL;
1005                         if (jh->b_frozen_data) {
1006                                 jh->b_committed_data = jh->b_frozen_data;
1007                                 jh->b_frozen_data = NULL;
1008                                 jh->b_frozen_triggers = NULL;
1009                         }
1010                 } else if (jh->b_frozen_data) {
1011                         jbd2_free(jh->b_frozen_data, bh->b_size);
1012                         jh->b_frozen_data = NULL;
1013                         jh->b_frozen_triggers = NULL;
1014                 }
1015
1016                 spin_lock(&journal->j_list_lock);
1017                 cp_transaction = jh->b_cp_transaction;
1018                 if (cp_transaction) {
1019                         JBUFFER_TRACE(jh, "remove from old cp transaction");
1020                         cp_transaction->t_chp_stats.cs_dropped++;
1021                         __jbd2_journal_remove_checkpoint(jh);
1022                 }
1023
1024                 /* Only re-checkpoint the buffer_head if it is marked
1025                  * dirty.  If the buffer was added to the BJ_Forget list
1026                  * by jbd2_journal_forget, it may no longer be dirty and
1027                  * there's no point in keeping a checkpoint record for
1028                  * it. */
1029
1030                 /*
1031                  * If a buffer has been freed while still being journaled by a
1032                  * previous transaction, refile the buffer to BJ_Forget of the
1033                  * running transaction. If the just committed transaction
1034                  * contains an "add to orphan" operation, we can completely
1035                  * invalidate the buffer now. We are rather thorough in that
1036                  * since the buffer may still be accessible when blocksize <
1037                  * pagesize and it is attached to the last partial page.
1038                  */
1039                 if (buffer_freed(bh) && !jh->b_next_transaction) {
1040                         struct address_space *mapping;
1041
1042                         clear_buffer_freed(bh);
1043                         clear_buffer_jbddirty(bh);
1044
1045                         /*
1046                          * Block device buffers need to stay mapped all the
1047                          * time, so it is enough to clear buffer_jbddirty and
1048                          * buffer_freed bits. For the file mapping buffers (i.e.
1049                          * journalled data) we need to unmap buffer and clear
1050                          * more bits. We also need to be careful about the check
1051                          * because the data page mapping can get cleared under
1052                          * our hands. Note that if mapping == NULL, we don't
1053                          * need to make buffer unmapped because the page is
1054                          * already detached from the mapping and buffers cannot
1055                          * get reused.
1056                          */
1057                         mapping = READ_ONCE(bh->b_page->mapping);
1058                         if (mapping && !sb_is_blkdev_sb(mapping->host->i_sb)) {
1059                                 clear_buffer_mapped(bh);
1060                                 clear_buffer_new(bh);
1061                                 clear_buffer_req(bh);
1062                                 bh->b_bdev = NULL;
1063                         }
1064                 }
1065
1066                 if (buffer_jbddirty(bh)) {
1067                         JBUFFER_TRACE(jh, "add to new checkpointing trans");
1068                         __jbd2_journal_insert_checkpoint(jh, commit_transaction);
1069                         if (is_journal_aborted(journal))
1070                                 clear_buffer_jbddirty(bh);
1071                 } else {
1072                         J_ASSERT_BH(bh, !buffer_dirty(bh));
1073                         /*
1074                          * A buffer on the BJ_Forget list that is not jbddirty means
1075                          * it has been freed by this transaction and hence it
1076                          * could not have been reallocated until this
1077                          * transaction has committed. *BUT* it could be
1078                          * reallocated once we have written all the data to
1079                          * disk and before we process the buffer on BJ_Forget
1080                          * list.
1081                          */
1082                         if (!jh->b_next_transaction)
1083                                 try_to_free = 1;
1084                 }
1085                 JBUFFER_TRACE(jh, "refile or unfile buffer");
1086                 drop_ref = __jbd2_journal_refile_buffer(jh);
1087                 spin_unlock(&jh->b_state_lock);
1088                 if (drop_ref)
1089                         jbd2_journal_put_journal_head(jh);
1090                 if (try_to_free)
1091                         release_buffer_page(bh);        /* Drops bh reference */
1092                 else
1093                         __brelse(bh);
1094                 cond_resched_lock(&journal->j_list_lock);
1095         }
1096         spin_unlock(&journal->j_list_lock);
1097         /*
1098          * This is a bit sleazy.  We use j_list_lock to protect transition
1099          * of a transaction into T_FINISHED state and calling
1100          * __jbd2_journal_drop_transaction(). Otherwise we could race with
1101          * other checkpointing code processing the transaction...
1102          */
1103         write_lock(&journal->j_state_lock);
1104         spin_lock(&journal->j_list_lock);
1105         /*
1106          * Now recheck if some buffers did not get attached to the transaction
1107          * while the lock was dropped...
1108          */
1109         if (commit_transaction->t_forget) {
1110                 spin_unlock(&journal->j_list_lock);
1111                 write_unlock(&journal->j_state_lock);
1112                 goto restart_loop;
1113         }
1114
1115         /* Add the transaction to the checkpoint list
1116          * __journal_remove_checkpoint() can not destroy transaction
1117          * under us because it is not marked as T_FINISHED yet */
1118         if (journal->j_checkpoint_transactions == NULL) {
1119                 journal->j_checkpoint_transactions = commit_transaction;
1120                 commit_transaction->t_cpnext = commit_transaction;
1121                 commit_transaction->t_cpprev = commit_transaction;
1122         } else {
1123                 commit_transaction->t_cpnext =
1124                         journal->j_checkpoint_transactions;
1125                 commit_transaction->t_cpprev =
1126                         commit_transaction->t_cpnext->t_cpprev;
1127                 commit_transaction->t_cpnext->t_cpprev =
1128                         commit_transaction;
1129                 commit_transaction->t_cpprev->t_cpnext =
1130                                 commit_transaction;
1131         }
1132         spin_unlock(&journal->j_list_lock);
1133
1134         /* Done with this transaction! */
1135
1136         jbd_debug(3, "JBD2: commit phase 7\n");
1137
1138         J_ASSERT(commit_transaction->t_state == T_COMMIT_JFLUSH);
1139
1140         commit_transaction->t_start = jiffies;
1141         stats.run.rs_logging = jbd2_time_diff(stats.run.rs_logging,
1142                                               commit_transaction->t_start);
1143
1144         /*
1145          * File the transaction statistics
1146          */
1147         stats.ts_tid = commit_transaction->t_tid;
1148         stats.run.rs_handle_count =
1149                 atomic_read(&commit_transaction->t_handle_count);
1150         trace_jbd2_run_stats(journal->j_fs_dev->bd_dev,
1151                              commit_transaction->t_tid, &stats.run);
1152         stats.ts_requested = (commit_transaction->t_requested) ? 1 : 0;
1153
1154         commit_transaction->t_state = T_COMMIT_CALLBACK;
1155         J_ASSERT(commit_transaction == journal->j_committing_transaction);
1156         journal->j_commit_sequence = commit_transaction->t_tid;
1157         journal->j_committing_transaction = NULL;
1158         commit_time = ktime_to_ns(ktime_sub(ktime_get(), start_time));
1159
1160         /*
1161          * Weight the existing average higher than the latest commit time so
1162          * we don't react too strongly to vast changes in the commit time.
1163          */
1164         if (likely(journal->j_average_commit_time))
1165                 journal->j_average_commit_time = (commit_time +
1166                                 journal->j_average_commit_time*3) / 4;
1167         else
1168                 journal->j_average_commit_time = commit_time;
1169
1170         write_unlock(&journal->j_state_lock);
1171
1172         if (journal->j_commit_callback)
1173                 journal->j_commit_callback(journal, commit_transaction);
1174         if (journal->j_fc_cleanup_callback)
1175                 journal->j_fc_cleanup_callback(journal, 1);
1176
1177         trace_jbd2_end_commit(journal, commit_transaction);
1178         jbd_debug(1, "JBD2: commit %d complete, head %d\n",
1179                   journal->j_commit_sequence, journal->j_tail_sequence);
1180
1181         write_lock(&journal->j_state_lock);
1182         journal->j_flags &= ~JBD2_FULL_COMMIT_ONGOING;
1183         journal->j_flags &= ~JBD2_FAST_COMMIT_ONGOING;
1184         spin_lock(&journal->j_list_lock);
1185         commit_transaction->t_state = T_FINISHED;
1186         /* Check if the transaction can be dropped now that we are finished */
1187         if (commit_transaction->t_checkpoint_list == NULL &&
1188             commit_transaction->t_checkpoint_io_list == NULL) {
1189                 __jbd2_journal_drop_transaction(journal, commit_transaction);
1190                 jbd2_journal_free_transaction(commit_transaction);
1191         }
1192         spin_unlock(&journal->j_list_lock);
1193         write_unlock(&journal->j_state_lock);
1194         wake_up(&journal->j_wait_done_commit);
1195         wake_up(&journal->j_fc_wait);
1196
1197         /*
1198          * Calculate overall stats
1199          */
1200         spin_lock(&journal->j_history_lock);
1201         journal->j_stats.ts_tid++;
1202         journal->j_stats.ts_requested += stats.ts_requested;
1203         journal->j_stats.run.rs_wait += stats.run.rs_wait;
1204         journal->j_stats.run.rs_request_delay += stats.run.rs_request_delay;
1205         journal->j_stats.run.rs_running += stats.run.rs_running;
1206         journal->j_stats.run.rs_locked += stats.run.rs_locked;
1207         journal->j_stats.run.rs_flushing += stats.run.rs_flushing;
1208         journal->j_stats.run.rs_logging += stats.run.rs_logging;
1209         journal->j_stats.run.rs_handle_count += stats.run.rs_handle_count;
1210         journal->j_stats.run.rs_blocks += stats.run.rs_blocks;
1211         journal->j_stats.run.rs_blocks_logged += stats.run.rs_blocks_logged;
1212         spin_unlock(&journal->j_history_lock);
1213 }