// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 */
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/list_sort.h>
#include <linux/iversion.h>
#include "ctree.h"
#include "tree-log.h"
#include "disk-io.h"
#include "locking.h"
#include "print-tree.h"
#include "backref.h"
#include "compression.h"
#include "qgroup.h"
#include "inode-map.h"
/* magic values for the inode_only field in btrfs_log_inode:
 *
 * LOG_INODE_ALL means to log everything
 * LOG_INODE_EXISTS means to log just enough to recreate the inode
 * during log replay
 */
#define LOG_INODE_ALL 0
#define LOG_INODE_EXISTS 1
#define LOG_OTHER_INODE 2
/*
 * directory trouble cases
 *
 * 1) on rename or unlink, if the inode being unlinked isn't in the fsync
 * log, we must force a full commit before doing an fsync of the directory
 * where the unlink was done.
 * ---> record transid of last unlink/rename per directory
 *
 * mkdir foo/some_dir
 * normal commit
 * rename foo/some_dir foo2/some_dir
 * mkdir foo/some_dir
 * fsync foo/some_dir/some_file
 *
 * The fsync above will unlink the original some_dir without recording
 * it in its new location (foo2).  After a crash, some_dir will be gone
 * unless the fsync of some_file forces a full commit
 *
 * 2) we must log any new names for any file or dir that is in the fsync
 * log. ---> check inode while renaming/linking.
 *
 * 2a) we must log any new names for any file or dir during rename
 * when the directory they are being removed from was logged.
 * ---> check inode and old parent dir during rename
 *
 * 2a is actually the more important variant.  Without the extra logging
 * a crash might unlink the old name without recreating the new one
 *
 * 3) after a crash, we must go through any directories with a link count
 * of zero and redo the rm -rf
 *
 * mkdir f1/foo
 * normal commit
 * rm -rf f1/foo
 * fsync(f1)
 *
 * The directory f1 was fully removed from the FS, but fsync was never
 * called on f1, only its parent dir.  After a crash the rm -rf must
 * be replayed.  This must be able to recurse down the entire
 * directory tree.  The inode link count fixup code takes care of the
 * ugly details.
 */
/*
 * stages for the tree walking.  The first
 * stage (0) is to only pin down the blocks we find,
 * the second stage (1) is to make sure that all the inodes
 * we find in the log are created in the subvolume.
 *
 * The last stage is to deal with directories and links and extents
 * and all the other fun semantics
 */
#define LOG_WALK_PIN_ONLY	0
#define LOG_WALK_REPLAY_INODES	1
#define LOG_WALK_REPLAY_DIR_INDEX	2
#define LOG_WALK_REPLAY_ALL	3
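/*
 * A hedged sketch of how these stages are sequenced.  The loop shape and
 * the walk_log_tree() name are assumptions for illustration (the real
 * driver, btrfs_recover_log_trees(), is outside this excerpt), but the
 * ordering matches the stage numbering above; pinning uses
 * process_one_buffer() while the replay stages use replay_one_buffer():
 *
 *	struct walk_control wc = {
 *		.process_func = process_one_buffer,
 *		.stage = LOG_WALK_PIN_ONLY,
 *	};
 *
 *	walk_log_tree(trans, log_root, &wc);
 *	wc.process_func = replay_one_buffer;
 *	for (wc.stage = LOG_WALK_REPLAY_INODES;
 *	     wc.stage <= LOG_WALK_REPLAY_ALL; wc.stage++)
 *		walk_log_tree(trans, log_root, &wc);
 */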
static int btrfs_log_inode(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root, struct btrfs_inode *inode,
			   int inode_only,
			   u64 start,
			   u64 end,
			   struct btrfs_log_ctx *ctx);
static int link_to_fixup_dir(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct btrfs_path *path, u64 objectid);
static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct btrfs_root *log,
				       struct btrfs_path *path,
				       u64 dirid, int del_all);
/*
 * tree logging is a special write ahead log used to make sure that
 * fsyncs and O_SYNCs can happen without doing full tree commits.
 *
 * Full tree commits are expensive because they require commonly
 * modified blocks to be recowed, creating many dirty pages in the
 * extent tree and a 4x-6x higher write load than ext3.
 *
 * Instead of doing a tree commit on every fsync, we use the
 * key ranges and transaction ids to find items for a given file or directory
 * that have changed in this transaction.  Those items are copied into
 * a special tree (one per subvolume root), that tree is written to disk
 * and then the fsync is considered complete.
 *
 * After a crash, items are copied out of the log-tree back into the
 * subvolume tree.  Any file data extents found are recorded in the extent
 * allocation tree, and the log-tree is freed.
 *
 * The log tree is read three times: once to pin down all the extents it is
 * using in ram, once to create all the inodes logged in the tree
 * and once to do all the other items.
 */
/*
 * start a sub transaction and setup the log tree
 * this increments the log tree writer count to make the people
 * syncing the tree wait for us to finish
 */
static int start_log_trans(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct btrfs_log_ctx *ctx)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret = 0;

	mutex_lock(&root->log_mutex);

	if (root->log_root) {
		if (btrfs_need_log_full_commit(fs_info, trans)) {
			ret = -EAGAIN;
			goto out;
		}

		if (!root->log_start_pid) {
			clear_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
			root->log_start_pid = current->pid;
		} else if (root->log_start_pid != current->pid) {
			set_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
		}
	} else {
		mutex_lock(&fs_info->tree_log_mutex);
		if (!fs_info->log_root_tree)
			ret = btrfs_init_log_root_tree(trans, fs_info);
		mutex_unlock(&fs_info->tree_log_mutex);
		if (ret)
			goto out;

		ret = btrfs_add_log_tree(trans, root);
		if (ret)
			goto out;

		clear_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
		root->log_start_pid = current->pid;
	}

	atomic_inc(&root->log_batch);
	atomic_inc(&root->log_writers);
	if (ctx) {
		int index = root->log_transid % 2;

		list_add_tail(&ctx->list, &root->log_ctxs[index]);
		ctx->log_transid = root->log_transid;
	}

out:
	mutex_unlock(&root->log_mutex);
	return ret;
}
/*
 * returns 0 if there was a log transaction running and we were able
 * to join, or returns -ENOENT if there was no transaction in progress
 */
static int join_running_log_trans(struct btrfs_root *root)
{
	int ret = -ENOENT;

	mutex_lock(&root->log_mutex);
	if (root->log_root) {
		ret = 0;
		atomic_inc(&root->log_writers);
	}
	mutex_unlock(&root->log_mutex);
	return ret;
}
/*
 * This either makes the current running log transaction wait
 * until you call btrfs_end_log_trans() or it makes any future
 * log transactions wait until you call btrfs_end_log_trans()
 */
int btrfs_pin_log_trans(struct btrfs_root *root)
{
	int ret = -ENOENT;

	mutex_lock(&root->log_mutex);
	atomic_inc(&root->log_writers);
	mutex_unlock(&root->log_mutex);
	return ret;
}
/*
 * indicate we're done making changes to the log tree
 * and wake up anyone waiting to do a sync
 */
void btrfs_end_log_trans(struct btrfs_root *root)
{
	if (atomic_dec_and_test(&root->log_writers)) {
		/* atomic_dec_and_test implies a barrier */
		cond_wake_up_nomb(&root->log_writer_wait);
	}
}
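/*
 * A hedged usage sketch of the pin/end pairing above; the caller shown
 * here is invented for illustration, but per the comments above every
 * btrfs_pin_log_trans() is expected to be balanced by a
 * btrfs_end_log_trans() so that anyone syncing the log can make progress:
 *
 *	btrfs_pin_log_trans(root);
 *	... mutate items the log must observe (e.g. a rename) ...
 *	btrfs_end_log_trans(root);
 */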
/*
 * the walk control struct is used to pass state down the chain when
 * processing the log tree.  The stage field tells us which part
 * of the log tree processing we are currently doing.  The others
 * are state fields used for that specific part
 */
struct walk_control {
	/* should we free the extent on disk when done?  This is used
	 * at transaction commit time while freeing a log tree
	 */
	int free;

	/* should we write out the extent buffer?  This is used
	 * while flushing the log tree to disk during a sync
	 */
	int write;

	/* should we wait for the extent buffer io to finish?  Also used
	 * while flushing the log tree to disk for a sync
	 */
	int wait;

	/* pin only walk, we record which extents on disk belong to the
	 * log trees
	 */
	int pin;

	/* what stage of the replay code we're currently in */
	int stage;

	/*
	 * Ignore any items from the inode currently being processed. Needs
	 * to be set every time we find a BTRFS_INODE_ITEM_KEY and we are in
	 * the LOG_WALK_REPLAY_INODES stage.
	 */
	bool ignore_cur_inode;

	/* the root we are currently replaying */
	struct btrfs_root *replay_dest;

	/* the trans handle for the current replay */
	struct btrfs_trans_handle *trans;

	/* the function that gets used to process blocks we find in the
	 * tree.  Note the extent_buffer might not be up to date when it is
	 * passed in, and it must be checked or read if you need the data
	 * inside it
	 */
	int (*process_func)(struct btrfs_root *log, struct extent_buffer *eb,
			    struct walk_control *wc, u64 gen, int level);
};
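/*
 * A hedged sketch of a process_func honoring the contract above: the
 * extent_buffer may not be up to date when passed in, so it must be read
 * before its contents are used.  The function name and the need_data()
 * predicate are assumptions made only for the sketch:
 *
 *	static int my_process(struct btrfs_root *log, struct extent_buffer *eb,
 *			      struct walk_control *wc, u64 gen, int level)
 *	{
 *		int ret = 0;
 *
 *		if (need_data(wc))
 *			ret = btrfs_read_buffer(eb, gen, level, NULL);
 *		return ret;
 *	}
 */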
/*
 * process_func used to pin down extents, write them or wait on them
 */
static int process_one_buffer(struct btrfs_root *log,
			      struct extent_buffer *eb,
			      struct walk_control *wc, u64 gen, int level)
{
	struct btrfs_fs_info *fs_info = log->fs_info;
	int ret = 0;

	/*
	 * If this fs is mixed then we need to be able to process the leaves to
	 * pin down any logged extents, so we have to read the block.
	 */
	if (btrfs_fs_incompat(fs_info, MIXED_GROUPS)) {
		ret = btrfs_read_buffer(eb, gen, level, NULL);
		if (ret)
			return ret;
	}

	if (wc->pin)
		ret = btrfs_pin_extent_for_log_replay(fs_info, eb->start,
						      eb->len);

	if (!ret && btrfs_buffer_uptodate(eb, gen, 0)) {
		if (wc->pin && btrfs_header_level(eb) == 0)
			ret = btrfs_exclude_logged_extents(fs_info, eb);
		if (wc->write)
			btrfs_write_tree_block(eb);
		if (wc->wait)
			btrfs_wait_tree_block_writeback(eb);
	}
	return ret;
}
/*
 * Item overwrite used by replay and tree logging.  eb, slot and key all refer
 * to the src data we are copying out.
 *
 * root is the tree we are copying into, and path is a scratch
 * path for use in this function (it should be released on entry and
 * will be released on exit).
 *
 * If the key is already in the destination tree the existing item is
 * overwritten.  If the existing item isn't big enough, it is extended.
 * If it is too large, it is truncated.
 *
 * If the key isn't in the destination yet, a new item is inserted.
 */
static noinline int overwrite_item(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct btrfs_path *path,
				   struct extent_buffer *eb, int slot,
				   struct btrfs_key *key)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret;
	u32 item_size;
	u64 saved_i_size = 0;
	int save_old_i_size = 0;
	unsigned long src_ptr;
	unsigned long dst_ptr;
	int overwrite_root = 0;
	bool inode_item = key->type == BTRFS_INODE_ITEM_KEY;

	if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID)
		overwrite_root = 1;
	item_size = btrfs_item_size_nr(eb, slot);
	src_ptr = btrfs_item_ptr_offset(eb, slot);

	/* look for the key in the destination tree */
	ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
	if (ret < 0)
		return ret;

	if (ret == 0) {
		char *src_copy;
		char *dst_copy;
		u32 dst_size = btrfs_item_size_nr(path->nodes[0],
						  path->slots[0]);
		if (dst_size != item_size)
			goto insert;

		if (item_size == 0) {
			btrfs_release_path(path);
			return 0;
		}
		dst_copy = kmalloc(item_size, GFP_NOFS);
		src_copy = kmalloc(item_size, GFP_NOFS);
		if (!dst_copy || !src_copy) {
			btrfs_release_path(path);
			kfree(dst_copy);
			kfree(src_copy);
			return -ENOMEM;
		}

		read_extent_buffer(eb, src_copy, src_ptr, item_size);

		dst_ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
		read_extent_buffer(path->nodes[0], dst_copy, dst_ptr,
				   item_size);
		ret = memcmp(dst_copy, src_copy, item_size);

		kfree(dst_copy);
		kfree(src_copy);
		/*
		 * they have the same contents, just return, this saves
		 * us from cowing blocks in the destination tree and doing
		 * extra writes that may not have been done by a previous
		 * sync
		 */
		if (ret == 0) {
			btrfs_release_path(path);
			return 0;
		}
		/*
		 * We need to load the old nbytes into the inode so when we
		 * replay the extents we've logged we get the right nbytes.
		 */
		if (inode_item) {
			struct btrfs_inode_item *item;
			u64 nbytes;
			u32 mode;

			item = btrfs_item_ptr(path->nodes[0], path->slots[0],
					      struct btrfs_inode_item);
			nbytes = btrfs_inode_nbytes(path->nodes[0], item);
			item = btrfs_item_ptr(eb, slot,
					      struct btrfs_inode_item);
			btrfs_set_inode_nbytes(eb, item, nbytes);

			/*
			 * If this is a directory we need to reset the i_size to
			 * 0 so that we can set it up properly when replaying
			 * the rest of the items in this log.
			 */
			mode = btrfs_inode_mode(eb, item);
			if (S_ISDIR(mode))
				btrfs_set_inode_size(eb, item, 0);
		}
	} else if (inode_item) {
		struct btrfs_inode_item *item;
		u32 mode;

		/*
		 * New inode, set nbytes to 0 so that the nbytes comes out
		 * properly when we replay the extents.
		 */
		item = btrfs_item_ptr(eb, slot, struct btrfs_inode_item);
		btrfs_set_inode_nbytes(eb, item, 0);

		/*
		 * If this is a directory we need to reset the i_size to 0 so
		 * that we can set it up properly when replaying the rest of
		 * the items in this log.
		 */
		mode = btrfs_inode_mode(eb, item);
		if (S_ISDIR(mode))
			btrfs_set_inode_size(eb, item, 0);
	}
insert:
	btrfs_release_path(path);
	/* try to insert the key into the destination tree */
	path->skip_release_on_error = 1;
	ret = btrfs_insert_empty_item(trans, root, path,
				      key, item_size);
	path->skip_release_on_error = 0;
	/* make sure any existing item is the correct size */
	if (ret == -EEXIST || ret == -EOVERFLOW) {
		u32 found_size;
		found_size = btrfs_item_size_nr(path->nodes[0],
						path->slots[0]);
		if (found_size > item_size)
			btrfs_truncate_item(fs_info, path, item_size, 1);
		else if (found_size < item_size)
			btrfs_extend_item(fs_info, path,
					  item_size - found_size);
	} else if (ret) {
		return ret;
	}
	dst_ptr = btrfs_item_ptr_offset(path->nodes[0],
					path->slots[0]);

	/* don't overwrite an existing inode if the generation number
	 * was logged as zero.  This is done when the tree logging code
	 * is just logging an inode to make sure it exists after recovery.
	 *
	 * Also, don't overwrite i_size on directories during replay.
	 * log replay inserts and removes directory items based on the
	 * state of the tree found in the subvolume, and i_size is modified
	 * as it goes
	 */
	if (key->type == BTRFS_INODE_ITEM_KEY && ret == -EEXIST) {
		struct btrfs_inode_item *src_item;
		struct btrfs_inode_item *dst_item;

		src_item = (struct btrfs_inode_item *)src_ptr;
		dst_item = (struct btrfs_inode_item *)dst_ptr;

		if (btrfs_inode_generation(eb, src_item) == 0) {
			struct extent_buffer *dst_eb = path->nodes[0];
			const u64 ino_size = btrfs_inode_size(eb, src_item);

			/*
			 * For regular files an ino_size == 0 is used only when
			 * logging that an inode exists, as part of a directory
			 * fsync, and the inode wasn't fsynced before. In this
			 * case don't set the size of the inode in the fs/subvol
			 * tree, otherwise we would be throwing valid data away.
			 */
			if (S_ISREG(btrfs_inode_mode(eb, src_item)) &&
			    S_ISREG(btrfs_inode_mode(dst_eb, dst_item)) &&
			    ino_size != 0) {
				struct btrfs_map_token token;

				btrfs_init_map_token(&token);
				btrfs_set_token_inode_size(dst_eb, dst_item,
							   ino_size, &token);
			}
			goto no_copy;
		}

		if (overwrite_root &&
		    S_ISDIR(btrfs_inode_mode(eb, src_item)) &&
		    S_ISDIR(btrfs_inode_mode(path->nodes[0], dst_item))) {
			save_old_i_size = 1;
			saved_i_size = btrfs_inode_size(path->nodes[0],
							dst_item);
		}
	}

	copy_extent_buffer(path->nodes[0], eb, dst_ptr,
			   src_ptr, item_size);
	if (save_old_i_size) {
		struct btrfs_inode_item *dst_item;
		dst_item = (struct btrfs_inode_item *)dst_ptr;
		btrfs_set_inode_size(path->nodes[0], dst_item, saved_i_size);
	}

	/* make sure the generation is filled in */
	if (key->type == BTRFS_INODE_ITEM_KEY) {
		struct btrfs_inode_item *dst_item;
		dst_item = (struct btrfs_inode_item *)dst_ptr;
		if (btrfs_inode_generation(path->nodes[0], dst_item) == 0) {
			btrfs_set_inode_generation(path->nodes[0], dst_item,
						   trans->transid);
		}
	}
no_copy:
	btrfs_mark_buffer_dirty(path->nodes[0]);
	btrfs_release_path(path);
	return 0;
}
/*
 * simple helper to read an inode off the disk from a given root
 * This can only be called for subvolume roots and not for the log
 */
static noinline struct inode *read_one_inode(struct btrfs_root *root,
					     u64 objectid)
{
	struct btrfs_key key;
	struct inode *inode;

	key.objectid = objectid;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;
	inode = btrfs_iget(root->fs_info->sb, &key, root, NULL);
	if (IS_ERR(inode))
		inode = NULL;
	return inode;
}
/* replays a single extent in 'eb' at 'slot' with 'key' into the
 * subvolume 'root'.  path is released on entry and should be released
 * on exit.
 *
 * extents in the log tree have not been allocated out of the extent
 * tree yet.  So, this completes the allocation, taking a reference
 * as required if the extent already exists or creating a new extent
 * if it isn't in the extent allocation tree yet.
 *
 * The extent is inserted into the file, dropping any existing extents
 * from the file that overlap the new one.
 */
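/*
 * A hedged recap of the extent_end computation performed below for the
 * two extent flavors handled by this function; the alignment to the
 * sectorsize only applies to inline extents:
 *
 *	REG/PREALLOC: extent_end = key->offset +
 *				   btrfs_file_extent_num_bytes(eb, item);
 *	INLINE:       extent_end = ALIGN(key->offset +
 *				   btrfs_file_extent_ram_bytes(eb, item),
 *				   fs_info->sectorsize);
 */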
static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
				      struct extent_buffer *eb, int slot,
				      struct btrfs_key *key)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int found_type;
	u64 extent_end;
	u64 start = key->offset;
	u64 nbytes = 0;
	struct btrfs_file_extent_item *item;
	struct inode *inode = NULL;
	unsigned long size;
	int ret = 0;

	item = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
	found_type = btrfs_file_extent_type(eb, item);

	if (found_type == BTRFS_FILE_EXTENT_REG ||
	    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
		nbytes = btrfs_file_extent_num_bytes(eb, item);
		extent_end = start + nbytes;

		/*
		 * We don't add to the inode's nbytes if we are prealloc or a
		 * hole.
		 */
		if (btrfs_file_extent_disk_bytenr(eb, item) == 0)
			nbytes = 0;
	} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
		size = btrfs_file_extent_ram_bytes(eb, item);
		nbytes = btrfs_file_extent_ram_bytes(eb, item);
		extent_end = ALIGN(start + size,
				   fs_info->sectorsize);
	} else {
		ret = 0;
		goto out;
	}
	inode = read_one_inode(root, key->objectid);
	if (!inode) {
		ret = -EIO;
		goto out;
	}

	/*
	 * first check to see if we already have this extent in the
	 * file.  This must be done before the btrfs_drop_extents run
	 * so we don't try to drop this extent.
	 */
	ret = btrfs_lookup_file_extent(trans, root, path,
				       btrfs_ino(BTRFS_I(inode)), start, 0);

	if (ret == 0 &&
	    (found_type == BTRFS_FILE_EXTENT_REG ||
	     found_type == BTRFS_FILE_EXTENT_PREALLOC)) {
		struct btrfs_file_extent_item cmp1;
		struct btrfs_file_extent_item cmp2;
		struct btrfs_file_extent_item *existing;
		struct extent_buffer *leaf;

		leaf = path->nodes[0];
		existing = btrfs_item_ptr(leaf, path->slots[0],
					  struct btrfs_file_extent_item);

		read_extent_buffer(eb, &cmp1, (unsigned long)item,
				   sizeof(cmp1));
		read_extent_buffer(leaf, &cmp2, (unsigned long)existing,
				   sizeof(cmp2));

		/*
		 * we already have a pointer to this exact extent,
		 * we don't have to do anything
		 */
		if (memcmp(&cmp1, &cmp2, sizeof(cmp1)) == 0) {
			btrfs_release_path(path);
			goto out;
		}
	}
	btrfs_release_path(path);

	/* drop any overlapping extents */
	ret = btrfs_drop_extents(trans, root, inode, start, extent_end, 1);
	if (ret)
		goto out;
	if (found_type == BTRFS_FILE_EXTENT_REG ||
	    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
		u64 offset;
		unsigned long dest_offset;
		struct btrfs_key ins;

		if (btrfs_file_extent_disk_bytenr(eb, item) == 0 &&
		    btrfs_fs_incompat(fs_info, NO_HOLES))
			goto update_inode;

		ret = btrfs_insert_empty_item(trans, root, path, key,
					      sizeof(*item));
		if (ret)
			goto out;
		dest_offset = btrfs_item_ptr_offset(path->nodes[0],
						    path->slots[0]);
		copy_extent_buffer(path->nodes[0], eb, dest_offset,
				   (unsigned long)item, sizeof(*item));

		ins.objectid = btrfs_file_extent_disk_bytenr(eb, item);
		ins.offset = btrfs_file_extent_disk_num_bytes(eb, item);
		ins.type = BTRFS_EXTENT_ITEM_KEY;
		offset = key->offset - btrfs_file_extent_offset(eb, item);

		/*
		 * Manually record dirty extent, as here we did a shallow
		 * file extent item copy and skipped the normal backref
		 * update, modifying the extent tree all by ourselves.
		 * So we need to manually record the dirty extent for
		 * qgroup, as the owner of the file extent changed from
		 * the log tree (doesn't affect qgroup) to the fs/file
		 * tree (affects qgroup).
		 */
		ret = btrfs_qgroup_trace_extent(trans,
				btrfs_file_extent_disk_bytenr(eb, item),
				btrfs_file_extent_disk_num_bytes(eb, item),
				GFP_NOFS);
		if (ret < 0)
			goto out;
		if (ins.objectid > 0) {
			u64 csum_start;
			u64 csum_end;
			LIST_HEAD(ordered_sums);
			/*
			 * is this extent already allocated in the extent
			 * allocation tree?  If so, just add a reference
			 */
			ret = btrfs_lookup_data_extent(fs_info, ins.objectid,
						       ins.offset);
			if (ret == 0) {
				ret = btrfs_inc_extent_ref(trans, root,
						ins.objectid, ins.offset,
						0, root->root_key.objectid,
						key->objectid, offset);
				if (ret)
					goto out;
			} else {
				/*
				 * insert the extent pointer in the extent
				 * allocation tree
				 */
				ret = btrfs_alloc_logged_file_extent(trans,
						root->root_key.objectid,
						key->objectid, offset, &ins);
				if (ret)
					goto out;
			}
			btrfs_release_path(path);

			if (btrfs_file_extent_compression(eb, item)) {
				csum_start = ins.objectid;
				csum_end = csum_start + ins.offset;
			} else {
				csum_start = ins.objectid +
					btrfs_file_extent_offset(eb, item);
				csum_end = csum_start +
					btrfs_file_extent_num_bytes(eb, item);
			}
			ret = btrfs_lookup_csums_range(root->log_root,
						csum_start, csum_end - 1,
						&ordered_sums, 0);
			if (ret)
				goto out;
			/*
			 * Now delete all existing csums in the csum root that
			 * cover our range. We do this because we can have an
			 * extent that is completely referenced by one file
			 * extent item and partially referenced by another
			 * file extent item (like after using the clone or
			 * extent_same ioctls). In this case if we end up doing
			 * the replay of the one that partially references the
			 * extent first, and we do not do the csum deletion
			 * below, we can get 2 csum items in the csum tree that
			 * overlap each other. For example, imagine our log has
			 * the two following file extent items:
			 *
			 * key (257 EXTENT_DATA 409600)
			 *     extent data disk byte 12845056 nr 102400
			 *     extent data offset 20480 nr 20480 ram 102400
			 *
			 * key (257 EXTENT_DATA 819200)
			 *     extent data disk byte 12845056 nr 102400
			 *     extent data offset 0 nr 102400 ram 102400
			 *
			 * Where the second one fully references the 100K extent
			 * that starts at disk byte 12845056, and the log tree
			 * has a single csum item that covers the entire range
			 * of the extent:
			 *
			 * key (EXTENT_CSUM EXTENT_CSUM 12845056) itemsize 100
			 *
			 * After the first file extent item is replayed, the
			 * csum tree gets the following csum item:
			 *
			 * key (EXTENT_CSUM EXTENT_CSUM 12865536) itemsize 20
			 *
			 * Which covers the 20K sub-range starting at offset 20K
			 * of our extent. Now when we replay the second file
			 * extent item, if we do not delete existing csum items
			 * that cover any of its blocks, we end up getting two
			 * csum items in our csum tree that overlap each other:
			 *
			 * key (EXTENT_CSUM EXTENT_CSUM 12845056) itemsize 100
			 * key (EXTENT_CSUM EXTENT_CSUM 12865536) itemsize 20
			 *
			 * Which is a problem, because after this anyone trying
			 * to look up the checksum of any block of our extent
			 * starting at an offset of 40K or higher, will end up
			 * looking at the second csum item only, which does not
			 * contain the checksum for any block starting at
			 * offset 40K or higher of our extent.
			 */
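			/*
			 * A quick recap of the numbers in the example above:
			 * the first replayed extent item references the 20K
			 * sub-range at file extent offset 20480, so its csum
			 * item starts at disk byte 12845056 + 20480 = 12865536,
			 * which is exactly the overlapping key shown above.
			 */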
			while (!list_empty(&ordered_sums)) {
				struct btrfs_ordered_sum *sums;
				sums = list_entry(ordered_sums.next,
						struct btrfs_ordered_sum,
						list);
				if (!ret)
					ret = btrfs_del_csums(trans, fs_info,
							      sums->bytenr,
							      sums->len);
				if (!ret)
					ret = btrfs_csum_file_blocks(trans,
						fs_info->csum_root, sums);
				list_del(&sums->list);
				kfree(sums);
			}
			if (ret)
				goto out;
		} else {
			btrfs_release_path(path);
		}
	} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
		/* inline extents are easy, we just overwrite them */
		ret = overwrite_item(trans, root, path, eb, slot, key);
		if (ret)
			goto out;
	}

	inode_add_bytes(inode, nbytes);
update_inode:
	ret = btrfs_update_inode(trans, root, inode);
out:
	if (inode)
		iput(inode);
	return ret;
}
/*
 * when cleaning up conflicts between the directory names in the
 * subvolume, directory names in the log and directory names in the
 * inode back references, we may have to unlink inodes from directories.
 *
 * This is a helper function to do the unlink of a specific directory
 * entry
 */
static noinline int drop_one_dir_item(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
				      struct btrfs_inode *dir,
				      struct btrfs_dir_item *di)
{
	struct inode *inode;
	char *name;
	int name_len;
	struct extent_buffer *leaf;
	struct btrfs_key location;
	int ret;

	leaf = path->nodes[0];

	btrfs_dir_item_key_to_cpu(leaf, di, &location);
	name_len = btrfs_dir_name_len(leaf, di);
	name = kmalloc(name_len, GFP_NOFS);
	if (!name)
		return -ENOMEM;

	read_extent_buffer(leaf, name, (unsigned long)(di + 1), name_len);
	btrfs_release_path(path);

	inode = read_one_inode(root, location.objectid);
	if (!inode) {
		ret = -EIO;
		goto out;
	}

	ret = link_to_fixup_dir(trans, root, path, location.objectid);
	if (ret)
		goto out;

	ret = btrfs_unlink_inode(trans, root, dir, BTRFS_I(inode), name,
				 name_len);
	if (!ret)
		ret = btrfs_run_delayed_items(trans);
out:
	kfree(name);
	iput(inode);
	return ret;
}
/*
 * See if a given name and sequence number found in an inode back reference are
 * already in a directory and correctly point to this inode.
 *
 * Returns: < 0 on error, 0 if the directory entry does not exist and 1 if it
 * exists.
 */
static noinline int inode_in_dir(struct btrfs_root *root,
				 struct btrfs_path *path,
				 u64 dirid, u64 objectid, u64 index,
				 const char *name, int name_len)
{
	struct btrfs_dir_item *di;
	struct btrfs_key location;
	int ret = 0;

	di = btrfs_lookup_dir_index_item(NULL, root, path, dirid,
					 index, name, name_len, 0);
	if (IS_ERR(di)) {
		if (PTR_ERR(di) != -ENOENT)
			ret = PTR_ERR(di);
		goto out;
	} else if (di) {
		btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
		if (location.objectid != objectid)
			goto out;
	} else {
		goto out;
	}

	btrfs_release_path(path);
	di = btrfs_lookup_dir_item(NULL, root, path, dirid, name, name_len, 0);
	if (IS_ERR(di)) {
		if (PTR_ERR(di) != -ENOENT)
			ret = PTR_ERR(di);
		goto out;
	} else if (di) {
		btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
		if (location.objectid == objectid)
			ret = 1;
	}
out:
	btrfs_release_path(path);
	return ret;
}
/*
 * helper function to check a log tree for a named back reference in
 * an inode.  This is used to decide if a back reference that is
 * found in the subvolume conflicts with what we find in the log.
 *
 * inode backreferences may have multiple refs in a single item,
 * during replay we process one reference at a time, and we don't
 * want to delete valid links to a file from the subvolume if that
 * link is also in the log.
 */
static noinline int backref_in_log(struct btrfs_root *log,
				   struct btrfs_key *key,
				   u64 ref_objectid,
				   const char *name, int namelen)
{
	struct btrfs_path *path;
	struct btrfs_inode_ref *ref;
	unsigned long ptr;
	unsigned long ptr_end;
	unsigned long name_ptr;
	int found_name_len;
	int item_size;
	int ret;
	int match = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(NULL, log, key, path, 0, 0);
	if (ret != 0)
		goto out;

	ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);

	if (key->type == BTRFS_INODE_EXTREF_KEY) {
		if (btrfs_find_name_in_ext_backref(path->nodes[0],
						   path->slots[0],
						   ref_objectid,
						   name, namelen, NULL))
			match = 1;

		goto out;
	}

	item_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]);
	ptr_end = ptr + item_size;
	while (ptr < ptr_end) {
		ref = (struct btrfs_inode_ref *)ptr;
		found_name_len = btrfs_inode_ref_name_len(path->nodes[0], ref);
		if (found_name_len == namelen) {
			name_ptr = (unsigned long)(ref + 1);
			ret = memcmp_extent_buffer(path->nodes[0], name,
						   name_ptr, namelen);
			if (ret == 0) {
				match = 1;
				goto out;
			}
		}
		ptr = (unsigned long)(ref + 1) + found_name_len;
	}
out:
	btrfs_free_path(path);
	return match;
}
static inline int __add_inode_ref(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_path *path,
				  struct btrfs_root *log_root,
				  struct btrfs_inode *dir,
				  struct btrfs_inode *inode,
				  u64 inode_objectid, u64 parent_objectid,
				  u64 ref_index, char *name, int namelen,
				  int *search_done)
{
	int ret;
	char *victim_name;
	int victim_name_len;
	struct extent_buffer *leaf;
	struct btrfs_dir_item *di;
	struct btrfs_key search_key;
	struct btrfs_inode_extref *extref;

again:
	/* Search old style refs */
	search_key.objectid = inode_objectid;
	search_key.type = BTRFS_INODE_REF_KEY;
	search_key.offset = parent_objectid;
	ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
	if (ret == 0) {
		struct btrfs_inode_ref *victim_ref;
		unsigned long ptr;
		unsigned long ptr_end;

		leaf = path->nodes[0];

		/* are we trying to overwrite a back ref for the root directory
		 * if so, just jump out, we're done
		 */
		if (search_key.objectid == search_key.offset)
			return 1;

		/* check all the names in this back reference to see
		 * if they are in the log.  if so, we allow them to stay
		 * otherwise they must be unlinked as a conflict
		 */
		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
		ptr_end = ptr + btrfs_item_size_nr(leaf, path->slots[0]);
		while (ptr < ptr_end) {
			victim_ref = (struct btrfs_inode_ref *)ptr;
			victim_name_len = btrfs_inode_ref_name_len(leaf,
								   victim_ref);
			victim_name = kmalloc(victim_name_len, GFP_NOFS);
			if (!victim_name)
				return -ENOMEM;

			read_extent_buffer(leaf, victim_name,
					   (unsigned long)(victim_ref + 1),
					   victim_name_len);

			if (!backref_in_log(log_root, &search_key,
					    parent_objectid, victim_name,
					    victim_name_len)) {
				inc_nlink(&inode->vfs_inode);
				btrfs_release_path(path);

				ret = btrfs_unlink_inode(trans, root, dir, inode,
						victim_name, victim_name_len);
				kfree(victim_name);
				if (ret)
					return ret;
				ret = btrfs_run_delayed_items(trans);
				if (ret)
					return ret;
				*search_done = 1;
				goto again;
			}
			kfree(victim_name);

			ptr = (unsigned long)(victim_ref + 1) + victim_name_len;
		}

		/*
		 * NOTE: we have searched the root tree and checked the
		 * corresponding ref, so it does not need to be checked again.
		 */
		*search_done = 1;
	}
	btrfs_release_path(path);
	/* Same search but for extended refs */
	extref = btrfs_lookup_inode_extref(NULL, root, path, name, namelen,
					   inode_objectid, parent_objectid, 0,
					   0);
	if (!IS_ERR_OR_NULL(extref)) {
		u32 item_size;
		u32 cur_offset = 0;
		unsigned long base;
		struct inode *victim_parent;

		leaf = path->nodes[0];

		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		base = btrfs_item_ptr_offset(leaf, path->slots[0]);

		while (cur_offset < item_size) {
			extref = (struct btrfs_inode_extref *)(base + cur_offset);

			victim_name_len = btrfs_inode_extref_name_len(leaf, extref);

			if (btrfs_inode_extref_parent(leaf, extref) != parent_objectid)
				goto next;

			victim_name = kmalloc(victim_name_len, GFP_NOFS);
			if (!victim_name)
				return -ENOMEM;
			read_extent_buffer(leaf, victim_name, (unsigned long)&extref->name,
					   victim_name_len);

			search_key.objectid = inode_objectid;
			search_key.type = BTRFS_INODE_EXTREF_KEY;
			search_key.offset = btrfs_extref_hash(parent_objectid,
							      victim_name,
							      victim_name_len);
			ret = 0;
			if (!backref_in_log(log_root, &search_key,
					    parent_objectid, victim_name,
					    victim_name_len)) {
				ret = -ENOENT;
				victim_parent = read_one_inode(root,
						parent_objectid);
				if (victim_parent) {
					inc_nlink(&inode->vfs_inode);
					btrfs_release_path(path);

					ret = btrfs_unlink_inode(trans, root,
							BTRFS_I(victim_parent),
							inode,
							victim_name,
							victim_name_len);
					if (!ret)
						ret = btrfs_run_delayed_items(
								  trans);
				}
				iput(victim_parent);
				kfree(victim_name);
				if (ret)
					return ret;
				*search_done = 1;
				goto again;
			}
			kfree(victim_name);
next:
			cur_offset += victim_name_len + sizeof(*extref);
		}
		*search_done = 1;
	}
	btrfs_release_path(path);

	/* look for a conflicting sequence number */
	di = btrfs_lookup_dir_index_item(trans, root, path, btrfs_ino(dir),
					 ref_index, name, namelen, 0);
	if (IS_ERR(di)) {
		if (PTR_ERR(di) != -ENOENT)
			return PTR_ERR(di);
	} else if (di) {
		ret = drop_one_dir_item(trans, root, path, dir, di);
		if (ret)
			return ret;
	}
	btrfs_release_path(path);

	/* look for a conflicting name */
	di = btrfs_lookup_dir_item(trans, root, path, btrfs_ino(dir),
				   name, namelen, 0);
	if (IS_ERR(di)) {
		return PTR_ERR(di);
	} else if (di) {
		ret = drop_one_dir_item(trans, root, path, dir, di);
		if (ret)
			return ret;
	}
	btrfs_release_path(path);

	return 0;
}
static int extref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr,
			     u32 *namelen, char **name, u64 *index,
			     u64 *parent_objectid)
{
	struct btrfs_inode_extref *extref;

	extref = (struct btrfs_inode_extref *)ref_ptr;

	*namelen = btrfs_inode_extref_name_len(eb, extref);
	*name = kmalloc(*namelen, GFP_NOFS);
	if (*name == NULL)
		return -ENOMEM;

	read_extent_buffer(eb, *name, (unsigned long)&extref->name,
			   *namelen);

	if (index)
		*index = btrfs_inode_extref_index(eb, extref);
	if (parent_objectid)
		*parent_objectid = btrfs_inode_extref_parent(eb, extref);

	return 0;
}
static int ref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr,
			  u32 *namelen, char **name, u64 *index)
{
	struct btrfs_inode_ref *ref;

	ref = (struct btrfs_inode_ref *)ref_ptr;

	*namelen = btrfs_inode_ref_name_len(eb, ref);
	*name = kmalloc(*namelen, GFP_NOFS);
	if (*name == NULL)
		return -ENOMEM;

	read_extent_buffer(eb, *name, (unsigned long)(ref + 1), *namelen);

	if (index)
		*index = btrfs_inode_ref_index(eb, ref);

	return 0;
}
/*
 * Take an inode reference item from the log tree and iterate all names from
 * the inode reference item in the subvolume tree with the same key (if it
 * exists).  For any name that is not in the inode reference item from the log
 * tree, do a proper unlink of that name (that is, remove its entry from the
 * inode reference item and both dir index keys).
 */
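/*
 * A hedged worked example of the reconciliation described above, with
 * invented names and an invented inode number: if the subvolume's ref
 * item for inode 257 holds the names "a" and "b" but the log's ref item
 * for the same key only holds "a", then "b" is unlinked from the parent
 * directory here, before the log item later overwrites the subvolume
 * item, so no dir index entry is left behind without a matching ref.
 */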
static int unlink_old_inode_refs(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_inode *inode,
				 struct extent_buffer *log_eb,
				 int log_slot,
				 struct btrfs_key *key)
{
	int ret;
	unsigned long ref_ptr;
	unsigned long ref_end;
	struct extent_buffer *eb;

again:
	btrfs_release_path(path);
	ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
	if (ret > 0) {
		ret = 0;
		goto out;
	}
	if (ret < 0)
		goto out;

	eb = path->nodes[0];
	ref_ptr = btrfs_item_ptr_offset(eb, path->slots[0]);
	ref_end = ref_ptr + btrfs_item_size_nr(eb, path->slots[0]);
	while (ref_ptr < ref_end) {
		char *name = NULL;
		int namelen;
		u64 parent_id;

		if (key->type == BTRFS_INODE_EXTREF_KEY) {
			ret = extref_get_fields(eb, ref_ptr, &namelen, &name,
						NULL, &parent_id);
		} else {
			parent_id = key->offset;
			ret = ref_get_fields(eb, ref_ptr, &namelen, &name,
					     NULL);
		}
		if (ret)
			goto out;

		if (key->type == BTRFS_INODE_EXTREF_KEY)
			ret = btrfs_find_name_in_ext_backref(log_eb, log_slot,
							     parent_id, name,
							     namelen, NULL);
		else
			ret = btrfs_find_name_in_backref(log_eb, log_slot, name,
							 namelen, NULL);

		if (!ret) {
			struct inode *dir;

			btrfs_release_path(path);
			dir = read_one_inode(root, parent_id);
			if (!dir) {
				ret = -ENOENT;
				kfree(name);
				goto out;
			}
			ret = btrfs_unlink_inode(trans, root, BTRFS_I(dir),
						 inode, name, namelen);
			kfree(name);
			iput(dir);
			if (ret)
				goto out;
			/*
			 * Whenever we need to check if a name exists or not, we
			 * check the subvolume tree. So after an unlink we must
			 * run delayed items, so that future checks for a name
			 * during log replay see that the name does not exist
			 * anymore.
			 */
			ret = btrfs_run_delayed_items(trans);
			if (ret)
				goto out;
			goto again;
		}

		kfree(name);
		ref_ptr += namelen;
		if (key->type == BTRFS_INODE_EXTREF_KEY)
			ref_ptr += sizeof(struct btrfs_inode_extref);
		else
			ref_ptr += sizeof(struct btrfs_inode_ref);
	}
	ret = 0;
out:
	btrfs_release_path(path);
	return ret;
}
static int btrfs_inode_ref_exists(struct inode *inode, struct inode *dir,
				  const u8 ref_type, const char *name,
				  const int namelen)
{
	struct btrfs_key key;
	struct btrfs_path *path;
	const u64 parent_id = btrfs_ino(BTRFS_I(dir));
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = btrfs_ino(BTRFS_I(inode));
	key.type = ref_type;
	if (key.type == BTRFS_INODE_REF_KEY)
		key.offset = parent_id;
	else
		key.offset = btrfs_extref_hash(parent_id, name, namelen);

	ret = btrfs_search_slot(NULL, BTRFS_I(inode)->root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = 0;
		goto out;
	}
	if (key.type == BTRFS_INODE_EXTREF_KEY)
		ret = btrfs_find_name_in_ext_backref(path->nodes[0],
				path->slots[0], parent_id,
				name, namelen, NULL);
	else
		ret = btrfs_find_name_in_backref(path->nodes[0], path->slots[0],
						 name, namelen, NULL);
out:
	btrfs_free_path(path);
	return ret;
}
/*
 * replay one inode back reference item found in the log tree.
 * eb, slot and key refer to the buffer and key found in the log tree.
 * root is the destination we are replaying into, and path is for temp
 * use by this function (it should be released on return).
 */
static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_root *log,
				  struct btrfs_path *path,
				  struct extent_buffer *eb, int slot,
				  struct btrfs_key *key)
{
	struct inode *dir = NULL;
	struct inode *inode = NULL;
	unsigned long ref_ptr;
	unsigned long ref_end;
	char *name = NULL;
	int namelen;
	int ret;
	int search_done = 0;
	int log_ref_ver = 0;
	u64 parent_objectid;
	u64 inode_objectid;
	u64 ref_index = 0;
	int ref_struct_size;

	ref_ptr = btrfs_item_ptr_offset(eb, slot);
	ref_end = ref_ptr + btrfs_item_size_nr(eb, slot);

	if (key->type == BTRFS_INODE_EXTREF_KEY) {
		struct btrfs_inode_extref *r;

		ref_struct_size = sizeof(struct btrfs_inode_extref);
		log_ref_ver = 1;
		r = (struct btrfs_inode_extref *)ref_ptr;
		parent_objectid = btrfs_inode_extref_parent(eb, r);
	} else {
		ref_struct_size = sizeof(struct btrfs_inode_ref);
		parent_objectid = key->offset;
	}
	inode_objectid = key->objectid;

	/*
	 * it is possible that we didn't log all the parent directories
	 * for a given inode.  If we don't find the dir, just don't
	 * copy the back ref in.  The link count fixup code will take
	 * care of the rest
	 */
	dir = read_one_inode(root, parent_objectid);
	if (!dir) {
		ret = -ENOENT;
		goto out;
	}

	inode = read_one_inode(root, inode_objectid);
	if (!inode) {
		ret = -EIO;
		goto out;
	}
	while (ref_ptr < ref_end) {
		if (log_ref_ver) {
			ret = extref_get_fields(eb, ref_ptr, &namelen, &name,
						&ref_index, &parent_objectid);
			/*
			 * parent object can change from one array
			 * item to another.
			 */
			if (!dir)
				dir = read_one_inode(root, parent_objectid);
			if (!dir) {
				ret = -ENOENT;
				goto out;
			}
		} else {
			ret = ref_get_fields(eb, ref_ptr, &namelen, &name,
					     &ref_index);
		}
		if (ret)
			goto out;

		ret = inode_in_dir(root, path, btrfs_ino(BTRFS_I(dir)),
				   btrfs_ino(BTRFS_I(inode)), ref_index,
				   name, namelen);
		if (ret < 0) {
			goto out;
		} else if (ret == 0) {
			/*
			 * look for a conflicting back reference in the
			 * metadata. if we find one we have to unlink that name
			 * of the file before we add our new link.  Later on, we
			 * overwrite any existing back reference, and we don't
			 * want to create dangling pointers in the directory.
			 */
			if (!search_done) {
				ret = __add_inode_ref(trans, root, path, log,
						      BTRFS_I(dir),
						      BTRFS_I(inode),
						      inode_objectid,
						      parent_objectid,
						      ref_index, name, namelen,
						      &search_done);
				if (ret) {
					if (ret == 1)
						ret = 0;
					goto out;
				}
			}

			/*
			 * If a reference item already exists for this inode
			 * with the same parent and name, but different index,
			 * drop it and the corresponding directory index entries
			 * from the parent before adding the new reference item
			 * and dir index entries, otherwise we would fail with
			 * -EEXIST returned from btrfs_add_link() below.
			 */
			ret = btrfs_inode_ref_exists(inode, dir, key->type,
						     name, namelen);
			if (ret > 0) {
				ret = btrfs_unlink_inode(trans, root,
							 BTRFS_I(dir),
							 BTRFS_I(inode),
							 name, namelen);
				/*
				 * If we dropped the link count to 0, bump it so
				 * that later the iput() on the inode will not
				 * free it. We will fixup the link count later.
				 */
				if (!ret && inode->i_nlink == 0)
					inc_nlink(inode);
				/*
				 * Whenever we need to check if a name exists or
				 * not, we check the subvolume tree. So after an
				 * unlink we must run delayed items, so that
				 * future checks for a name during log replay
				 * see that the name does not exist anymore.
				 */
				if (!ret)
					ret = btrfs_run_delayed_items(trans);
			}
			if (ret < 0)
				goto out;

			/* insert our name */
			ret = btrfs_add_link(trans, BTRFS_I(dir),
					     BTRFS_I(inode),
					     name, namelen, 0, ref_index);
			if (ret)
				goto out;

			btrfs_update_inode(trans, root, inode);
		}
		/* Else, ret == 1, we already have a perfect match, we're done. */

		ref_ptr = (unsigned long)(ref_ptr + ref_struct_size) + namelen;
		kfree(name);
		name = NULL;
		if (log_ref_ver) {
			iput(dir);
			dir = NULL;
		}
	}
	/*
	 * Before we overwrite the inode reference item in the subvolume tree
	 * with the item from the log tree, we must unlink all names from the
	 * parent directory that are in the subvolume's tree inode reference
	 * item, otherwise we end up with an inconsistent subvolume tree where
	 * dir index entries exist for a name but there is no inode reference
	 * item with the same name.
	 */
	ret = unlink_old_inode_refs(trans, root, path, BTRFS_I(inode), eb, slot,
				    key);
	if (ret)
		goto out;

	/* finally write the back reference in the inode */
	ret = overwrite_item(trans, root, path, eb, slot, key);
out:
	btrfs_release_path(path);
	kfree(name);
	iput(dir);
	iput(inode);
	return ret;
}
static int insert_orphan_item(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root, u64 ino)
{
	int ret;

	ret = btrfs_insert_orphan_item(trans, root, ino);
	if (ret == -EEXIST)
		ret = 0;

	return ret;
}
static int count_inode_extrefs(struct btrfs_root *root,
			       struct btrfs_inode *inode, struct btrfs_path *path)
{
	int ret = 0;
	int name_len;
	unsigned int nlink = 0;
	u32 item_size;
	u32 cur_offset = 0;
	u64 inode_objectid = btrfs_ino(inode);
	u64 offset = 0;
	unsigned long ptr;
	struct btrfs_inode_extref *extref;
	struct extent_buffer *leaf;

	while (1) {
		ret = btrfs_find_one_extref(root, inode_objectid, offset, path,
					    &extref, &offset);
		if (ret)
			break;

		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
		cur_offset = 0;

		while (cur_offset < item_size) {
			extref = (struct btrfs_inode_extref *) (ptr + cur_offset);
			name_len = btrfs_inode_extref_name_len(leaf, extref);

			nlink++;

			cur_offset += name_len + sizeof(*extref);
		}

		offset++;
		btrfs_release_path(path);
	}
	btrfs_release_path(path);

	if (ret < 0 && ret != -ENOENT)
		return ret;
	return nlink;
}
static int count_inode_refs(struct btrfs_root *root,
			    struct btrfs_inode *inode, struct btrfs_path *path)
{
	int ret;
	struct btrfs_key key;
	unsigned int nlink = 0;
	unsigned long ptr;
	unsigned long ptr_end;
	int name_len;
	u64 ino = btrfs_ino(inode);

	key.objectid = ino;
	key.type = BTRFS_INODE_REF_KEY;
	key.offset = (u64)-1;

	while (1) {
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			break;
		if (ret > 0) {
			if (path->slots[0] == 0)
				break;
			path->slots[0]--;
		}
process_slot:
		btrfs_item_key_to_cpu(path->nodes[0], &key,
				      path->slots[0]);
		if (key.objectid != ino ||
		    key.type != BTRFS_INODE_REF_KEY)
			break;
		ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
		ptr_end = ptr + btrfs_item_size_nr(path->nodes[0],
						   path->slots[0]);
		while (ptr < ptr_end) {
			struct btrfs_inode_ref *ref;

			ref = (struct btrfs_inode_ref *)ptr;
			name_len = btrfs_inode_ref_name_len(path->nodes[0],
							    ref);
			ptr = (unsigned long)(ref + 1) + name_len;
			nlink++;
		}

		if (key.offset == 0)
			break;
		if (path->slots[0] > 0) {
			path->slots[0]--;
			goto process_slot;
		}
		key.offset--;
		btrfs_release_path(path);
	}
	btrfs_release_path(path);

	return nlink;
}
/*
 * There are a few corners where the link count of the file can't
 * be properly maintained during replay.  So, instead of adding
 * lots of complexity to the log code, we just scan the backrefs
 * for any file that has been through replay.
 *
 * The scan will update the link count on the inode to reflect the
 * number of back refs found.  If it goes down to zero, the iput
 * will free the inode.
 */
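/*
 * A hedged recap of the count performed below: the fixed-up link count
 * is the sum of both back reference flavors, then applied to the inode.
 * This mirrors the helpers used in the body; the condensed shape here is
 * only illustrative:
 *
 *	nlink  = count_inode_refs(root, BTRFS_I(inode), path);
 *	nlink += count_inode_extrefs(root, BTRFS_I(inode), path);
 *	if (nlink != inode->i_nlink)
 *		set_nlink(inode, nlink);
 */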
static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct inode *inode)
{
	struct btrfs_path *path;
	int ret;
	u64 nlink = 0;
	u64 ino = btrfs_ino(BTRFS_I(inode));

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = count_inode_refs(root, BTRFS_I(inode), path);
	if (ret < 0)
		goto out;

	nlink = ret;

	ret = count_inode_extrefs(root, BTRFS_I(inode), path);
	if (ret < 0)
		goto out;

	nlink += ret;

	ret = 0;

	if (nlink != inode->i_nlink) {
		set_nlink(inode, nlink);
		btrfs_update_inode(trans, root, inode);
	}
	BTRFS_I(inode)->index_cnt = (u64)-1;

	if (inode->i_nlink == 0) {
		if (S_ISDIR(inode->i_mode)) {
			ret = replay_dir_deletes(trans, root, NULL, path,
						 ino, 1);
			if (ret)
				goto out;
		}
		ret = insert_orphan_item(trans, root, ino);
	}

out:
	btrfs_free_path(path);
	return ret;
}
static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans,
					    struct btrfs_root *root,
					    struct btrfs_path *path)
{
	int ret;
	struct btrfs_key key;
	struct inode *inode;

	key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID;
	key.type = BTRFS_ORPHAN_ITEM_KEY;
	key.offset = (u64)-1;
	while (1) {
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret < 0)
			break;

		if (ret == 1) {
			if (path->slots[0] == 0)
				break;
			path->slots[0]--;
		}

		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
		if (key.objectid != BTRFS_TREE_LOG_FIXUP_OBJECTID ||
		    key.type != BTRFS_ORPHAN_ITEM_KEY)
			break;

		ret = btrfs_del_item(trans, root, path);
		if (ret)
			goto out;

		btrfs_release_path(path);
		inode = read_one_inode(root, key.offset);
		if (!inode)
			return -EIO;

		ret = fixup_inode_link_count(trans, root, inode);
		iput(inode);
		if (ret)
			goto out;

		/*
		 * fixup on a directory may create new entries,
		 * make sure we always look for the highest possible
		 * offset
		 */
		key.offset = (u64)-1;
	}
	ret = 0;
out:
	btrfs_release_path(path);
	return ret;
}
/*
 * record a given inode in the fixup dir so we can check its link
 * count when replay is done.  The link count is incremented here
 * so the inode won't go away until we check it
 */
static noinline int link_to_fixup_dir(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
				      u64 objectid)
{
	struct btrfs_key key;
	int ret = 0;
	struct inode *inode;

	inode = read_one_inode(root, objectid);
	if (!inode)
		return -EIO;

	key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID;
	key.type = BTRFS_ORPHAN_ITEM_KEY;
	key.offset = objectid;

	ret = btrfs_insert_empty_item(trans, root, path, &key, 0);

	btrfs_release_path(path);
	if (ret == 0) {
		if (!inode->i_nlink)
			set_nlink(inode, 1);
		else
			inc_nlink(inode);
		ret = btrfs_update_inode(trans, root, inode);
	} else if (ret == -EEXIST) {
		ret = 0;
	}
	iput(inode);

	return ret;
}
/*
 * when replaying the log for a directory, we only insert names
 * for inodes that actually exist.  This means an fsync on a directory
 * does not implicitly fsync all the new files in it
 */
static noinline int insert_one_name(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    u64 dirid, u64 index,
				    char *name, int name_len,
				    struct btrfs_key *location)
{
	struct inode *inode;
	struct inode *dir;
	int ret;

	inode = read_one_inode(root, location->objectid);
	if (!inode)
		return -ENOENT;

	dir = read_one_inode(root, dirid);
	if (!dir) {
		iput(inode);
		return -EIO;
	}

	ret = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode), name,
			     name_len, 1, index);

	/* FIXME, put inode into FIXUP list */

	iput(inode);
	iput(dir);
	return ret;
}
/*
 * Return true if an inode reference exists in the log for the given name,
 * inode and parent inode.
 */
static bool name_in_log_ref(struct btrfs_root *log_root,
			    const char *name, const int name_len,
			    const u64 dirid, const u64 ino)
{
	struct btrfs_key search_key;

	search_key.objectid = ino;
	search_key.type = BTRFS_INODE_REF_KEY;
	search_key.offset = dirid;
	if (backref_in_log(log_root, &search_key, dirid, name, name_len))
		return true;

	search_key.type = BTRFS_INODE_EXTREF_KEY;
	search_key.offset = btrfs_extref_hash(dirid, name, name_len);
	if (backref_in_log(log_root, &search_key, dirid, name, name_len))
		return true;

	return false;
}
/*
 * take a single entry in a log directory item and replay it into
 * the subvolume.
 *
 * if a conflicting item exists in the subdirectory already,
 * the inode it points to is unlinked and put into the link count
 * fix up tree.
 *
 * If a name from the log points to a file or directory that does
 * not exist in the FS, it is skipped.  fsyncs on directories
 * do not force down inodes inside that directory, just changes to the
 * names or unlinks in a directory.
 *
 * Returns < 0 on error, 0 if the name wasn't replayed (dentry points to a
 * non-existing inode) and 1 if the name was replayed.
 */
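/*
 * A hedged sketch of how the 0/1 return value above is consumed by
 * replay_one_dir_item() further below: a return of 1 (name replayed) for
 * a non-directory entry triggers a link count fixup entry.  The shape is
 * condensed from that caller for illustration:
 *
 *	ret = replay_one_name(trans, root, path, eb, di, key);
 *	if (ret == 1 && btrfs_dir_type(eb, di) != BTRFS_FT_DIR)
 *		ret = link_to_fixup_dir(trans, root, fixup_path,
 *					di_key.objectid);
 */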
static noinline int replay_one_name(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct extent_buffer *eb,
				    struct btrfs_dir_item *di,
				    struct btrfs_key *key)
{
	char *name;
	int name_len;
	struct btrfs_dir_item *dst_di;
	struct btrfs_key found_key;
	struct btrfs_key log_key;
	struct inode *dir;
	u8 log_type;
	int exists;
	int ret = 0;
	bool update_size = (key->type == BTRFS_DIR_INDEX_KEY);
	bool name_added = false;

	dir = read_one_inode(root, key->objectid);
	if (!dir)
		return -EIO;

	name_len = btrfs_dir_name_len(eb, di);
	name = kmalloc(name_len, GFP_NOFS);
	if (!name) {
		ret = -ENOMEM;
		goto out;
	}

	log_type = btrfs_dir_type(eb, di);
	read_extent_buffer(eb, name, (unsigned long)(di + 1),
			   name_len);

	btrfs_dir_item_key_to_cpu(eb, di, &log_key);
	ret = btrfs_lookup_inode(trans, root, path, &log_key, 0);
	btrfs_release_path(path);
	if (ret < 0)
		goto out;
	exists = (ret == 0);
	ret = 0;

	if (key->type == BTRFS_DIR_ITEM_KEY) {
		dst_di = btrfs_lookup_dir_item(trans, root, path, key->objectid,
					       name, name_len, 1);
	} else if (key->type == BTRFS_DIR_INDEX_KEY) {
		dst_di = btrfs_lookup_dir_index_item(trans, root, path,
						     key->objectid,
						     key->offset, name,
						     name_len, 1);
	} else {
		/* Corruption */
		ret = -EINVAL;
		goto out;
	}
	if (dst_di == ERR_PTR(-ENOENT))
		dst_di = NULL;

	if (IS_ERR(dst_di)) {
		ret = PTR_ERR(dst_di);
		goto out;
	} else if (!dst_di) {
		/* we need a sequence number to insert, so we only
		 * do inserts for the BTRFS_DIR_INDEX_KEY types
		 */
		if (key->type != BTRFS_DIR_INDEX_KEY)
			goto out;
		goto insert;
	}

	btrfs_dir_item_key_to_cpu(path->nodes[0], dst_di, &found_key);
	/* the existing item matches the logged item */
	if (found_key.objectid == log_key.objectid &&
	    found_key.type == log_key.type &&
	    found_key.offset == log_key.offset &&
	    btrfs_dir_type(path->nodes[0], dst_di) == log_type) {
		update_size = false;
		goto out;
	}

	/*
	 * don't drop the conflicting directory entry if the inode
	 * for the new entry doesn't exist
	 */
	if (!exists)
		goto out;

	ret = drop_one_dir_item(trans, root, path, BTRFS_I(dir), dst_di);
	if (ret)
		goto out;

	if (key->type == BTRFS_DIR_INDEX_KEY)
		goto insert;
out:
	btrfs_release_path(path);
	if (!ret && update_size) {
		btrfs_i_size_write(BTRFS_I(dir), dir->i_size + name_len * 2);
		ret = btrfs_update_inode(trans, root, dir);
	}
	kfree(name);
	iput(dir);
	if (!ret && name_added)
		ret = 1;
	return ret;

insert:
	if (name_in_log_ref(root->log_root, name, name_len,
			    key->objectid, log_key.objectid)) {
		/* The dentry will be added later. */
		ret = 0;
		update_size = false;
		goto out;
	}
	btrfs_release_path(path);
	ret = insert_one_name(trans, root, key->objectid, key->offset,
			      name, name_len, &log_key);
	if (ret && ret != -ENOENT && ret != -EEXIST)
		goto out;
	if (!ret)
		name_added = true;
	update_size = false;
	ret = 0;
	goto out;
}
/*
 * find all the names in a directory item and reconcile them into
 * the subvolume.  Only BTRFS_DIR_ITEM_KEY types will have more than
 * one name in a directory item, but the same code gets used for
 * both directory index types
 */
static noinline int replay_one_dir_item(struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_path *path,
					struct extent_buffer *eb, int slot,
					struct btrfs_key *key)
{
	int ret = 0;
	u32 item_size = btrfs_item_size_nr(eb, slot);
	struct btrfs_dir_item *di;
	int name_len;
	unsigned long ptr;
	unsigned long ptr_end;
	struct btrfs_path *fixup_path = NULL;

	ptr = btrfs_item_ptr_offset(eb, slot);
	ptr_end = ptr + item_size;
	while (ptr < ptr_end) {
		di = (struct btrfs_dir_item *)ptr;
		name_len = btrfs_dir_name_len(eb, di);
		ret = replay_one_name(trans, root, path, eb, di, key);
		if (ret < 0)
			break;
		ptr = (unsigned long)(di + 1);
		ptr += name_len;

		/*
		 * If this entry refers to a non-directory (directories can not
		 * have a link count > 1) and it was added in the transaction
		 * that was not committed, make sure we fixup the link count of
		 * the inode the entry points to. Otherwise something like
		 * the following would result in a directory pointing to an
		 * inode with a wrong link count that does not account for this
		 * dir entry:
		 *
		 * mkdir testdir
		 * touch testdir/foo
		 * touch testdir/bar
		 * sync
		 *
		 * ln testdir/bar testdir/bar_link
		 * ln testdir/foo testdir/foo_link
		 * xfs_io -c "fsync" testdir/bar
		 *
		 * <power failure>
		 *
		 * mount fs, log replay happens
		 *
		 * File foo would remain with a link count of 1 when it has two
		 * entries pointing to it in the directory testdir. This would
		 * make it impossible to ever delete the parent directory, as
		 * it would result in stale dentries that can never be deleted.
		 */
		if (ret == 1 && btrfs_dir_type(eb, di) != BTRFS_FT_DIR) {
			struct btrfs_key di_key;

			if (!fixup_path) {
				fixup_path = btrfs_alloc_path();
				if (!fixup_path) {
					ret = -ENOMEM;
					break;
				}
			}

			btrfs_dir_item_key_to_cpu(eb, di, &di_key);
			ret = link_to_fixup_dir(trans, root, fixup_path,
						di_key.objectid);
			if (ret)
				break;
		}
		ret = 0;
	}
	btrfs_free_path(fixup_path);
	return ret;
}
/*
 * directory replay has two parts.  There are the standard directory
 * items in the log copied from the subvolume, and range items
 * created in the log while the subvolume was logged.
 *
 * The range items tell us which parts of the key space the log
 * is authoritative for.  During replay, if a key in the subvolume
 * directory is in a logged range item, but not actually in the log,
 * that means it was deleted from the directory before the fsync
 * and should be removed.
 */
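/*
 * A hedged worked example of the authority check described above, with
 * invented numbers: if the log holds a dir range item covering offsets
 * [100, 200] for a directory, and the subvolume contains a dir index key
 * at offset 150 that has no matching item in the log, that entry was
 * deleted before the fsync and must be removed during replay.  An entry
 * at offset 250 is outside the logged range, so the log says nothing
 * about it and it is left alone.
 */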
static noinline int find_dir_range(struct btrfs_root *root,
				   struct btrfs_path *path,
				   u64 dirid, int key_type,
				   u64 *start_ret, u64 *end_ret)
{
	struct btrfs_key key;
	u64 found_end;
	struct btrfs_dir_log_item *item;
	int ret;
	int nritems;

	if (*start_ret == (u64)-1)
		return 1;

	key.objectid = dirid;
	key.type = key_type;
	key.offset = *start_ret;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		if (path->slots[0] == 0)
			goto out;
		path->slots[0]--;
	}
	if (ret != 0)
		goto next;

	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);

	if (key.type != key_type || key.objectid != dirid) {
		ret = 1;
		goto next;
	}
	item = btrfs_item_ptr(path->nodes[0], path->slots[0],
			      struct btrfs_dir_log_item);
	found_end = btrfs_dir_log_end(path->nodes[0], item);

	if (*start_ret >= key.offset && *start_ret <= found_end) {
		ret = 0;
		*start_ret = key.offset;
		*end_ret = found_end;
		goto out;
	}
	ret = 1;
next:
	/* check the next slot in the tree to see if it is a valid item */
	nritems = btrfs_header_nritems(path->nodes[0]);
	path->slots[0]++;
	if (path->slots[0] >= nritems) {
		ret = btrfs_next_leaf(root, path);
		if (ret)
			goto out;
	}

	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);

	if (key.type != key_type || key.objectid != dirid) {
		ret = 1;
		goto out;
	}
	item = btrfs_item_ptr(path->nodes[0], path->slots[0],
			      struct btrfs_dir_log_item);
	found_end = btrfs_dir_log_end(path->nodes[0], item);
	*start_ret = key.offset;
	*end_ret = found_end;
	ret = 0;
out:
	btrfs_release_path(path);
	return ret;
}
/*
 * this looks for a given directory item in the log.  If the directory
 * item is not in the log, the item is removed and the inode it points
 * to is unlinked.
 */
static noinline int check_item_in_log(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_root *log,
				      struct btrfs_path *path,
				      struct btrfs_path *log_path,
				      struct inode *dir,
				      struct btrfs_key *dir_key)
{
	int ret;
	struct extent_buffer *eb;
	int slot;
	u32 item_size;
	struct btrfs_dir_item *di;
	struct btrfs_dir_item *log_di;
	int name_len;
	unsigned long ptr;
	unsigned long ptr_end;
	char *name;
	struct inode *inode;
	struct btrfs_key location;

again:
	eb = path->nodes[0];
	slot = path->slots[0];
	item_size = btrfs_item_size_nr(eb, slot);
	ptr = btrfs_item_ptr_offset(eb, slot);
	ptr_end = ptr + item_size;
	while (ptr < ptr_end) {
		di = (struct btrfs_dir_item *)ptr;
		name_len = btrfs_dir_name_len(eb, di);
		name = kmalloc(name_len, GFP_NOFS);
		if (!name) {
			ret = -ENOMEM;
			goto out;
		}
		read_extent_buffer(eb, name, (unsigned long)(di + 1),
				   name_len);
		log_di = NULL;
		if (log && dir_key->type == BTRFS_DIR_ITEM_KEY) {
			log_di = btrfs_lookup_dir_item(trans, log, log_path,
						       dir_key->objectid,
						       name, name_len, 0);
		} else if (log && dir_key->type == BTRFS_DIR_INDEX_KEY) {
			log_di = btrfs_lookup_dir_index_item(trans, log,
						     log_path,
						     dir_key->objectid,
						     dir_key->offset,
						     name, name_len, 0);
		}
		if (!log_di || log_di == ERR_PTR(-ENOENT)) {
			btrfs_dir_item_key_to_cpu(eb, di, &location);
			btrfs_release_path(path);
			btrfs_release_path(log_path);
			inode = read_one_inode(root, location.objectid);
			if (!inode) {
				kfree(name);
				return -EIO;
			}

			ret = link_to_fixup_dir(trans, root,
						path, location.objectid);
			if (ret) {
				kfree(name);
				iput(inode);
				goto out;
			}

			inc_nlink(inode);
			ret = btrfs_unlink_inode(trans, root, BTRFS_I(dir),
						 BTRFS_I(inode), name, name_len);
			if (!ret)
				ret = btrfs_run_delayed_items(trans);
			kfree(name);
			iput(inode);
			if (ret)
				goto out;

			/* there might still be more names under this key,
			 * check and repeat if required
			 */
			ret = btrfs_search_slot(NULL, root, dir_key, path,
						0, 0);
			if (ret == 0)
				goto again;
			ret = 0;
			goto out;
		} else if (IS_ERR(log_di)) {
			kfree(name);
			return PTR_ERR(log_di);
		}
		btrfs_release_path(log_path);
		kfree(name);

		ptr = (unsigned long)(di + 1);
		ptr += name_len;
	}
	ret = 0;
out:
	btrfs_release_path(path);
	btrfs_release_path(log_path);
	return ret;
}
static int replay_xattr_deletes(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_root *log,
				struct btrfs_path *path,
				const u64 ino)
{
	struct btrfs_key search_key;
	struct btrfs_path *log_path;
	int i;
	int nritems;
	int ret;

	log_path = btrfs_alloc_path();
	if (!log_path)
		return -ENOMEM;

	search_key.objectid = ino;
	search_key.type = BTRFS_XATTR_ITEM_KEY;
	search_key.offset = 0;
again:
	ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
	if (ret < 0)
		goto out;
process_leaf:
	nritems = btrfs_header_nritems(path->nodes[0]);
	for (i = path->slots[0]; i < nritems; i++) {
		struct btrfs_key key;
		struct btrfs_dir_item *di;
		struct btrfs_dir_item *log_di;
		u32 total_size;
		u32 cur;

		btrfs_item_key_to_cpu(path->nodes[0], &key, i);
		if (key.objectid != ino || key.type != BTRFS_XATTR_ITEM_KEY) {
			ret = 0;
			goto out;
		}

		di = btrfs_item_ptr(path->nodes[0], i, struct btrfs_dir_item);
		total_size = btrfs_item_size_nr(path->nodes[0], i);
		cur = 0;
		while (cur < total_size) {
			u16 name_len = btrfs_dir_name_len(path->nodes[0], di);
			u16 data_len = btrfs_dir_data_len(path->nodes[0], di);
			u32 this_len = sizeof(*di) + name_len + data_len;
			char *name;

			name = kmalloc(name_len, GFP_NOFS);
			if (!name) {
				ret = -ENOMEM;
				goto out;
			}
			read_extent_buffer(path->nodes[0], name,
					   (unsigned long)(di + 1), name_len);

			log_di = btrfs_lookup_xattr(NULL, log, log_path, ino,
						    name, name_len, 0);
			btrfs_release_path(log_path);
			if (!log_di) {
				/* Doesn't exist in log tree, so delete it. */
				btrfs_release_path(path);
				di = btrfs_lookup_xattr(trans, root, path, ino,
							name, name_len, -1);
				kfree(name);
				if (IS_ERR(di)) {
					ret = PTR_ERR(di);
					goto out;
				}
				ASSERT(di);
				ret = btrfs_delete_one_dir_name(trans, root,
								path, di);
				if (ret)
					goto out;
				btrfs_release_path(path);
				search_key = key;
				goto again;
			}
			kfree(name);
			if (IS_ERR(log_di)) {
				ret = PTR_ERR(log_di);
				goto out;
			}
			cur += this_len;
			di = (struct btrfs_dir_item *)((char *)di + this_len);
		}
	}
	ret = btrfs_next_leaf(root, path);
	if (ret > 0)
		ret = 0;
	else if (ret == 0)
		goto process_leaf;
out:
	btrfs_free_path(log_path);
	btrfs_release_path(path);
	return ret;
}
/*
 * deletion replay happens before we copy any new directory items
 * out of the log or out of backreferences from inodes.  It
 * scans the log to find ranges of keys that the log is authoritative for,
 * and then scans the directory to find items in those ranges that are
 * not present in the log.
 *
 * Anything we don't find in the log is unlinked and removed from the
 * directory.
 */
static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct btrfs_root *log,
				       struct btrfs_path *path,
				       u64 dirid, int del_all)
{
	u64 range_start;
	u64 range_end;
	int key_type = BTRFS_DIR_LOG_ITEM_KEY;
	int ret = 0;
	struct btrfs_key dir_key;
	struct btrfs_key found_key;
	struct btrfs_path *log_path;
	struct inode *dir;

	dir_key.objectid = dirid;
	dir_key.type = BTRFS_DIR_ITEM_KEY;
	log_path = btrfs_alloc_path();
	if (!log_path)
		return -ENOMEM;

	dir = read_one_inode(root, dirid);
	/* it isn't an error if the inode isn't there, that can happen
	 * because we replay the deletes before we copy in the inode item
	 * from the log
	 */
	if (!dir) {
		btrfs_free_path(log_path);
		return 0;
	}
again:
	range_start = 0;
	range_end = 0;
	while (1) {
		if (del_all)
			range_end = (u64)-1;
		else {
			ret = find_dir_range(log, path, dirid, key_type,
					     &range_start, &range_end);
			if (ret != 0)
				break;
		}

		dir_key.offset = range_start;
		while (1) {
			int nritems;
			ret = btrfs_search_slot(NULL, root, &dir_key, path,
						0, 0);
			if (ret < 0)
				goto out;

			nritems = btrfs_header_nritems(path->nodes[0]);
			if (path->slots[0] >= nritems) {
				ret = btrfs_next_leaf(root, path);
				if (ret == 1)
					break;
				else if (ret < 0)
					goto out;
			}
			btrfs_item_key_to_cpu(path->nodes[0], &found_key,
					      path->slots[0]);
			if (found_key.objectid != dirid ||
			    found_key.type != dir_key.type)
				goto next_type;

			if (found_key.offset > range_end)
				break;

			ret = check_item_in_log(trans, root, log, path,
						log_path, dir,
						&found_key);
			if (ret)
				goto out;
			if (found_key.offset == (u64)-1)
				break;
			dir_key.offset = found_key.offset + 1;
		}
		btrfs_release_path(path);
		if (range_end == (u64)-1)
			break;
		range_start = range_end + 1;
	}

next_type:
	ret = 0;
	if (key_type == BTRFS_DIR_LOG_ITEM_KEY) {
		key_type = BTRFS_DIR_LOG_INDEX_KEY;
		dir_key.type = BTRFS_DIR_INDEX_KEY;
		btrfs_release_path(path);
		goto again;
	}
out:
	btrfs_release_path(path);
	btrfs_free_path(log_path);
	iput(dir);
	return ret;
}
/*
 * the process_func used to replay items from the log tree.  This
 * gets called in two different stages.  The first stage just looks
 * for inodes and makes sure they are all copied into the subvolume.
 *
 * The second stage copies all the other item types from the log into
 * the subvolume.  The two stage approach is slower, but gets rid of
 * lots of complexity around inodes referencing other inodes that exist
 * only in the log (references come from either directory items or inode
 * back refs).
 */
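/*
 * A hedged recap of the stage gating implemented in the body below; the
 * condensed if-chain is illustrative only:
 *
 *	if (key.type == BTRFS_INODE_ITEM_KEY &&
 *	    wc->stage == LOG_WALK_REPLAY_INODES)
 *		... copy the inode item ...
 *	if (key.type == BTRFS_DIR_INDEX_KEY &&
 *	    wc->stage == LOG_WALK_REPLAY_DIR_INDEX)
 *		... replay dir index entries ...
 *	if (wc->stage < LOG_WALK_REPLAY_ALL)
 *		continue;   ... everything else waits for the last stage ...
 */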
static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb,
			     struct walk_control *wc, u64 gen, int level)
{
	int nritems;
	struct btrfs_path *path;
	struct btrfs_root *root = wc->replay_dest;
	struct btrfs_key key;
	int i;
	int ret;

	ret = btrfs_read_buffer(eb, gen, level, NULL);
	if (ret)
		return ret;

	level = btrfs_header_level(eb);

	if (level != 0)
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	nritems = btrfs_header_nritems(eb);
	for (i = 0; i < nritems; i++) {
		btrfs_item_key_to_cpu(eb, &key, i);

		/* inode keys are done during the first stage */
		if (key.type == BTRFS_INODE_ITEM_KEY &&
		    wc->stage == LOG_WALK_REPLAY_INODES) {
			struct btrfs_inode_item *inode_item;
			u32 mode;

			inode_item = btrfs_item_ptr(eb, i,
					    struct btrfs_inode_item);
			/*
			 * If we have a tmpfile (O_TMPFILE) that got fsync'ed
			 * and never got linked before the fsync, skip it, as
			 * replaying it is pointless since it would be deleted
			 * later. We skip logging tmpfiles, but it's always
			 * possible we are replaying a log created with a kernel
			 * that used to log tmpfiles.
			 */
			if (btrfs_inode_nlink(eb, inode_item) == 0) {
				wc->ignore_cur_inode = true;
				continue;
			} else {
				wc->ignore_cur_inode = false;
			}
			ret = replay_xattr_deletes(wc->trans, root, log,
						   path, key.objectid);
			if (ret)
				break;
			mode = btrfs_inode_mode(eb, inode_item);
			if (S_ISDIR(mode)) {
				ret = replay_dir_deletes(wc->trans,
					 root, log, path, key.objectid, 0);
				if (ret)
					break;
			}
			ret = overwrite_item(wc->trans, root, path,
					     eb, i, &key);
			if (ret)
				break;

			/*
			 * Before replaying extents, truncate the inode to its
			 * size. We need to do it now and not after log replay
			 * because before an fsync we can have prealloc extents
			 * added beyond the inode's i_size. If we did it after,
			 * through orphan cleanup for example, we would drop
			 * those prealloc extents just after replaying them.
			 */
			if (S_ISREG(mode)) {
				struct inode *inode;
				u64 from;

				inode = read_one_inode(root, key.objectid);
				if (!inode) {
					ret = -EIO;
					break;
				}
				from = ALIGN(i_size_read(inode),
					     root->fs_info->sectorsize);
				ret = btrfs_drop_extents(wc->trans, root, inode,
							 from, (u64)-1, 1);
				if (!ret) {
					/* Update the inode's nbytes. */
					ret = btrfs_update_inode(wc->trans,
								 root, inode);
				}
				iput(inode);
				if (ret)
					break;
			}

			ret = link_to_fixup_dir(wc->trans, root,
						path, key.objectid);
			if (ret)
				break;
		}

		if (wc->ignore_cur_inode)
			continue;

		if (key.type == BTRFS_DIR_INDEX_KEY &&
		    wc->stage == LOG_WALK_REPLAY_DIR_INDEX) {
			ret = replay_one_dir_item(wc->trans, root, path,
						  eb, i, &key);
			if (ret)
				break;
		}

		if (wc->stage < LOG_WALK_REPLAY_ALL)
			continue;

		/* these keys are simply copied */
		if (key.type == BTRFS_XATTR_ITEM_KEY) {
			ret = overwrite_item(wc->trans, root, path,
					     eb, i, &key);
			if (ret)
				break;
		} else if (key.type == BTRFS_INODE_REF_KEY ||
			   key.type == BTRFS_INODE_EXTREF_KEY) {
			ret = add_inode_ref(wc->trans, root, log, path,
					    eb, i, &key);
			if (ret && ret != -ENOENT)
				break;
			ret = 0;
		} else if (key.type == BTRFS_EXTENT_DATA_KEY) {
			ret = replay_one_extent(wc->trans, root, path,
						eb, i, &key);
			if (ret)
				break;
		} else if (key.type == BTRFS_DIR_ITEM_KEY) {
			ret = replay_one_dir_item(wc->trans, root, path,
						  eb, i, &key);
			if (ret)
				break;
		}
	}
	btrfs_free_path(path);
	return ret;
}
2650 static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
2651 struct btrfs_root *root,
2652 struct btrfs_path *path, int *level,
2653 struct walk_control *wc)
2655 struct btrfs_fs_info *fs_info = root->fs_info;
2659 struct extent_buffer *next;
2660 struct extent_buffer *cur;
2661 struct extent_buffer *parent;
2665 WARN_ON(*level < 0);
2666 WARN_ON(*level >= BTRFS_MAX_LEVEL);
2668 while (*level > 0) {
2669 struct btrfs_key first_key;
2671 WARN_ON(*level < 0);
2672 WARN_ON(*level >= BTRFS_MAX_LEVEL);
2673 cur = path->nodes[*level];
2675 WARN_ON(btrfs_header_level(cur) != *level);
2677 if (path->slots[*level] >=
2678 btrfs_header_nritems(cur))
2681 bytenr = btrfs_node_blockptr(cur, path->slots[*level]);
2682 ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]);
2683 btrfs_node_key_to_cpu(cur, &first_key, path->slots[*level]);
2684 blocksize = fs_info->nodesize;
2686 parent = path->nodes[*level];
2687 root_owner = btrfs_header_owner(parent);
2689 next = btrfs_find_create_tree_block(fs_info, bytenr);
2691 return PTR_ERR(next);
2694 ret = wc->process_func(root, next, wc, ptr_gen,
2697 free_extent_buffer(next);
2701 path->slots[*level]++;
2703 ret = btrfs_read_buffer(next, ptr_gen,
2704 *level - 1, &first_key);
2706 free_extent_buffer(next);
2711 btrfs_tree_lock(next);
2712 btrfs_set_lock_blocking(next);
2713 clean_tree_block(fs_info, next);
2714 btrfs_wait_tree_block_writeback(next);
2715 btrfs_tree_unlock(next);
2717 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags))
2718 clear_extent_buffer_dirty(next);
2721 WARN_ON(root_owner !=
2722 BTRFS_TREE_LOG_OBJECTID);
2723 ret = btrfs_free_and_pin_reserved_extent(
2727 free_extent_buffer(next);
2731 free_extent_buffer(next);
2734 ret = btrfs_read_buffer(next, ptr_gen, *level - 1, &first_key);
2736 free_extent_buffer(next);
2740 WARN_ON(*level <= 0);
2741 if (path->nodes[*level-1])
2742 free_extent_buffer(path->nodes[*level-1]);
2743 path->nodes[*level-1] = next;
2744 *level = btrfs_header_level(next);
2745 path->slots[*level] = 0;
2748 WARN_ON(*level < 0);
2749 WARN_ON(*level >= BTRFS_MAX_LEVEL);
2751 path->slots[*level] = btrfs_header_nritems(path->nodes[*level]);
2757 static noinline int walk_up_log_tree(struct btrfs_trans_handle *trans,
2758 struct btrfs_root *root,
2759 struct btrfs_path *path, int *level,
2760 struct walk_control *wc)
2762 struct btrfs_fs_info *fs_info = root->fs_info;
2768 for (i = *level; i < BTRFS_MAX_LEVEL - 1 && path->nodes[i]; i++) {
2769 slot = path->slots[i];
2770 if (slot + 1 < btrfs_header_nritems(path->nodes[i])) {
2773 WARN_ON(*level == 0);
2776 struct extent_buffer *parent;
2777 if (path->nodes[*level] == root->node)
2778 parent = path->nodes[*level];
2780 parent = path->nodes[*level + 1];
2782 root_owner = btrfs_header_owner(parent);
2783 ret = wc->process_func(root, path->nodes[*level], wc,
2784 btrfs_header_generation(path->nodes[*level]),
2790 struct extent_buffer *next;
2792 next = path->nodes[*level];
2795 btrfs_tree_lock(next);
2796 btrfs_set_lock_blocking(next);
2797 clean_tree_block(fs_info, next);
2798 btrfs_wait_tree_block_writeback(next);
2799 btrfs_tree_unlock(next);
2801 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags))
2802 clear_extent_buffer_dirty(next);
2805 WARN_ON(root_owner != BTRFS_TREE_LOG_OBJECTID);
2806 ret = btrfs_free_and_pin_reserved_extent(
2808 path->nodes[*level]->start,
2809 path->nodes[*level]->len);
2813 free_extent_buffer(path->nodes[*level]);
2814 path->nodes[*level] = NULL;
2822 * drop the reference count on the tree rooted at 'snap'. This traverses
2823 * the tree, freeing any blocks that have a ref count of zero after
 * being decremented.
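 *
 * A minimal usage sketch (the .free field is assumed here; compare
 * free_log_tree() below, which builds the same kind of control):
 *
 *	struct walk_control wc = {
 *		.free = 1,
 *		.process_func = process_one_buffer
 *	};
 *
 *	ret = walk_log_tree(trans, log, &wc);
 */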
2826 static int walk_log_tree(struct btrfs_trans_handle *trans,
2827 struct btrfs_root *log, struct walk_control *wc)
2829 struct btrfs_fs_info *fs_info = log->fs_info;
2833 struct btrfs_path *path;
2836 path = btrfs_alloc_path();
2840 level = btrfs_header_level(log->node);
2842 path->nodes[level] = log->node;
2843 extent_buffer_get(log->node);
2844 path->slots[level] = 0;
2847 wret = walk_down_log_tree(trans, log, path, &level, wc);
2855 wret = walk_up_log_tree(trans, log, path, &level, wc);
2864 /* was the root node processed? if not, catch it here */
2865 if (path->nodes[orig_level]) {
2866 ret = wc->process_func(log, path->nodes[orig_level], wc,
2867 btrfs_header_generation(path->nodes[orig_level]),
2872 struct extent_buffer *next;
2874 next = path->nodes[orig_level];
2877 btrfs_tree_lock(next);
2878 btrfs_set_lock_blocking(next);
2879 clean_tree_block(fs_info, next);
2880 btrfs_wait_tree_block_writeback(next);
2881 btrfs_tree_unlock(next);
2883 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags))
2884 clear_extent_buffer_dirty(next);
2887 WARN_ON(log->root_key.objectid !=
2888 BTRFS_TREE_LOG_OBJECTID);
2889 ret = btrfs_free_and_pin_reserved_extent(fs_info,
2890 next->start, next->len);
2897 btrfs_free_path(path);
2902 * helper function to update the item for a given subvolumes log root
2903 * in the tree of log roots
2905 static int update_log_root(struct btrfs_trans_handle *trans,
2906 struct btrfs_root *log,
2907 struct btrfs_root_item *root_item)
2909 struct btrfs_fs_info *fs_info = log->fs_info;
2912 if (log->log_transid == 1) {
2913 /* insert root item on the first sync */
2914 ret = btrfs_insert_root(trans, fs_info->log_root_tree,
2915 &log->root_key, root_item);
2917 ret = btrfs_update_root(trans, fs_info->log_root_tree,
2918 &log->root_key, root_item);
2923 static void wait_log_commit(struct btrfs_root *root, int transid)
2926 int index = transid % 2;
2929 * we only allow two pending log transactions at a time,
2930 * so we know that if ours is more than 2 older than the
2931 * current transaction, we're done
2934 prepare_to_wait(&root->log_commit_wait[index],
2935 &wait, TASK_UNINTERRUPTIBLE);
2937 if (!(root->log_transid_committed < transid &&
2938 atomic_read(&root->log_commit[index])))
2941 mutex_unlock(&root->log_mutex);
2943 mutex_lock(&root->log_mutex);
2945 finish_wait(&root->log_commit_wait[index], &wait);
2948 static void wait_for_writer(struct btrfs_root *root)
2953 prepare_to_wait(&root->log_writer_wait, &wait,
2954 TASK_UNINTERRUPTIBLE);
2955 if (!atomic_read(&root->log_writers))
2958 mutex_unlock(&root->log_mutex);
2960 mutex_lock(&root->log_mutex);
2962 finish_wait(&root->log_writer_wait, &wait);
2965 static inline void btrfs_remove_log_ctx(struct btrfs_root *root,
2966 struct btrfs_log_ctx *ctx)
2971 mutex_lock(&root->log_mutex);
2972 list_del_init(&ctx->list);
2973 mutex_unlock(&root->log_mutex);
2977 * Invoked in log mutex context, or from a context where it is certain
2978 * that no other task can access the list.
 */
2980 static inline void btrfs_remove_all_log_ctxs(struct btrfs_root *root,
2981 int index, int error)
2983 struct btrfs_log_ctx *ctx;
2984 struct btrfs_log_ctx *safe;
2986 list_for_each_entry_safe(ctx, safe, &root->log_ctxs[index], list) {
2987 list_del_init(&ctx->list);
2988 ctx->log_ret = error;
2991 INIT_LIST_HEAD(&root->log_ctxs[index]);
2995 * btrfs_sync_log sends a given tree log down to the disk and
2996 * updates the super blocks to record it. When this call is done,
2997 * you know that any inodes previously logged are safely on disk only
 * if it returns 0.
3000 * Any other return value means you need to call btrfs_commit_transaction.
3001 * Some of the edge cases for fsyncing directories that have had unlinks
3002 * or renames done in the past mean that sometimes the only safe
3003 * fsync is to commit the whole FS. When btrfs_sync_log returns -EAGAIN,
3004 * that has happened.
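 *
 * A simplified caller sketch (the fsync path does roughly this; error
 * handling and the full-commit fallbacks are trimmed):
 *
 *	ret = btrfs_sync_log(trans, root, &ctx);
 *	if (ret == 0)
 *		ret = btrfs_end_transaction(trans);
 *	else
 *		ret = btrfs_commit_transaction(trans);
 */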
3006 int btrfs_sync_log(struct btrfs_trans_handle *trans,
3007 struct btrfs_root *root, struct btrfs_log_ctx *ctx)
3013 struct btrfs_fs_info *fs_info = root->fs_info;
3014 struct btrfs_root *log = root->log_root;
3015 struct btrfs_root *log_root_tree = fs_info->log_root_tree;
3016 struct btrfs_root_item new_root_item;
3017 int log_transid = 0;
3018 struct btrfs_log_ctx root_log_ctx;
3019 struct blk_plug plug;
3021 mutex_lock(&root->log_mutex);
3022 log_transid = ctx->log_transid;
3023 if (root->log_transid_committed >= log_transid) {
3024 mutex_unlock(&root->log_mutex);
3025 return ctx->log_ret;
3028 index1 = log_transid % 2;
3029 if (atomic_read(&root->log_commit[index1])) {
3030 wait_log_commit(root, log_transid);
3031 mutex_unlock(&root->log_mutex);
3032 return ctx->log_ret;
3034 ASSERT(log_transid == root->log_transid);
3035 atomic_set(&root->log_commit[index1], 1);
3037 /* wait for previous tree log sync to complete */
3038 if (atomic_read(&root->log_commit[(index1 + 1) % 2]))
3039 wait_log_commit(root, log_transid - 1);
3042 int batch = atomic_read(&root->log_batch);
3043 /* when we're on an ssd, just kick the log commit out */
3044 if (!btrfs_test_opt(fs_info, SSD) &&
3045 test_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state)) {
3046 mutex_unlock(&root->log_mutex);
3047 schedule_timeout_uninterruptible(1);
3048 mutex_lock(&root->log_mutex);
3050 wait_for_writer(root);
3051 if (batch == atomic_read(&root->log_batch))
3055 /* bail out if we need to do a full commit */
3056 if (btrfs_need_log_full_commit(fs_info, trans)) {
3058 mutex_unlock(&root->log_mutex);
3062 if (log_transid % 2 == 0)
3063 mark = EXTENT_DIRTY;
3067 /* we start IO on all the marked extents here, but we don't actually
3068 * wait for them until later.
3070 blk_start_plug(&plug);
3071 ret = btrfs_write_marked_extents(fs_info, &log->dirty_log_pages, mark);
3073 blk_finish_plug(&plug);
3074 btrfs_abort_transaction(trans, ret);
3075 btrfs_set_log_full_commit(fs_info, trans);
3076 mutex_unlock(&root->log_mutex);
3081 * We _must_ update under the root->log_mutex in order to make sure we
3082 * have a consistent view of the log root we are trying to commit at
 * this moment.
3085 * We _must_ copy this into a local copy, because we are not holding the
3086 * log_root_tree->log_mutex yet. This is important because when we
3087 * commit the log_root_tree we must have a consistent view of the
3088 * log_root_tree when we update the super block to point at the
3089 * log_root_tree bytenr. If we update the log_root_tree here we'll race
3090 * with the commit and possibly point at the new block which we may
 * not have written out.
 */
3093 btrfs_set_root_node(&log->root_item, log->node);
3094 memcpy(&new_root_item, &log->root_item, sizeof(new_root_item));
3096 root->log_transid++;
3097 log->log_transid = root->log_transid;
3098 root->log_start_pid = 0;
3100 * IO has been started, blocks of the log tree have WRITTEN flag set
3101 * in their headers. New modifications of the log will be written to
3102 * new positions, so it's safe to allow log writers to go in.
 */
3104 mutex_unlock(&root->log_mutex);
3106 btrfs_init_log_ctx(&root_log_ctx, NULL);
3108 mutex_lock(&log_root_tree->log_mutex);
3109 atomic_inc(&log_root_tree->log_batch);
3110 atomic_inc(&log_root_tree->log_writers);
3112 index2 = log_root_tree->log_transid % 2;
3113 list_add_tail(&root_log_ctx.list, &log_root_tree->log_ctxs[index2]);
3114 root_log_ctx.log_transid = log_root_tree->log_transid;
3116 mutex_unlock(&log_root_tree->log_mutex);
3118 mutex_lock(&log_root_tree->log_mutex);
3121 * Now we are safe to update the log_root_tree because we're under the
3122 * log_mutex, and we're a current writer so we're holding the commit
3123 * open until we drop the log_mutex.
3125 ret = update_log_root(trans, log, &new_root_item);
3127 if (atomic_dec_and_test(&log_root_tree->log_writers)) {
3128 /* atomic_dec_and_test implies a barrier */
3129 cond_wake_up_nomb(&log_root_tree->log_writer_wait);
3133 if (!list_empty(&root_log_ctx.list))
3134 list_del_init(&root_log_ctx.list);
3136 blk_finish_plug(&plug);
3137 btrfs_set_log_full_commit(fs_info, trans);
3139 if (ret != -ENOSPC) {
3140 btrfs_abort_transaction(trans, ret);
3141 mutex_unlock(&log_root_tree->log_mutex);
3144 btrfs_wait_tree_log_extents(log, mark);
3145 mutex_unlock(&log_root_tree->log_mutex);
3150 if (log_root_tree->log_transid_committed >= root_log_ctx.log_transid) {
3151 blk_finish_plug(&plug);
3152 list_del_init(&root_log_ctx.list);
3153 mutex_unlock(&log_root_tree->log_mutex);
3154 ret = root_log_ctx.log_ret;
3158 index2 = root_log_ctx.log_transid % 2;
3159 if (atomic_read(&log_root_tree->log_commit[index2])) {
3160 blk_finish_plug(&plug);
3161 ret = btrfs_wait_tree_log_extents(log, mark);
3162 wait_log_commit(log_root_tree,
3163 root_log_ctx.log_transid);
3164 mutex_unlock(&log_root_tree->log_mutex);
3166 ret = root_log_ctx.log_ret;
3169 ASSERT(root_log_ctx.log_transid == log_root_tree->log_transid);
3170 atomic_set(&log_root_tree->log_commit[index2], 1);
3172 if (atomic_read(&log_root_tree->log_commit[(index2 + 1) % 2])) {
3173 wait_log_commit(log_root_tree,
3174 root_log_ctx.log_transid - 1);
3177 wait_for_writer(log_root_tree);
3180 * now that we've moved on to the tree of log tree roots,
3181 * check the full commit flag again
3183 if (btrfs_need_log_full_commit(fs_info, trans)) {
3184 blk_finish_plug(&plug);
3185 btrfs_wait_tree_log_extents(log, mark);
3186 mutex_unlock(&log_root_tree->log_mutex);
3188 goto out_wake_log_root;
3191 ret = btrfs_write_marked_extents(fs_info,
3192 &log_root_tree->dirty_log_pages,
3193 EXTENT_DIRTY | EXTENT_NEW);
3194 blk_finish_plug(&plug);
3196 btrfs_set_log_full_commit(fs_info, trans);
3197 btrfs_abort_transaction(trans, ret);
3198 mutex_unlock(&log_root_tree->log_mutex);
3199 goto out_wake_log_root;
3201 ret = btrfs_wait_tree_log_extents(log, mark);
3203 ret = btrfs_wait_tree_log_extents(log_root_tree,
3204 EXTENT_NEW | EXTENT_DIRTY);
3206 btrfs_set_log_full_commit(fs_info, trans);
3207 mutex_unlock(&log_root_tree->log_mutex);
3208 goto out_wake_log_root;
3211 btrfs_set_super_log_root(fs_info->super_for_commit,
3212 log_root_tree->node->start);
3213 btrfs_set_super_log_root_level(fs_info->super_for_commit,
3214 btrfs_header_level(log_root_tree->node));
3216 log_root_tree->log_transid++;
3217 mutex_unlock(&log_root_tree->log_mutex);
3220 * nobody else is going to jump in and write the ctree
3221 * super here because the log_commit atomic below is protecting
3222 * us. We must be called with a transaction handle pinning
3223 * the running transaction open, so a full commit can't hop
3224 * in and cause problems either.
3226 ret = write_all_supers(fs_info, 1);
3228 btrfs_set_log_full_commit(fs_info, trans);
3229 btrfs_abort_transaction(trans, ret);
3230 goto out_wake_log_root;
3233 mutex_lock(&root->log_mutex);
3234 if (root->last_log_commit < log_transid)
3235 root->last_log_commit = log_transid;
3236 mutex_unlock(&root->log_mutex);
3239 mutex_lock(&log_root_tree->log_mutex);
3240 btrfs_remove_all_log_ctxs(log_root_tree, index2, ret);
3242 log_root_tree->log_transid_committed++;
3243 atomic_set(&log_root_tree->log_commit[index2], 0);
3244 mutex_unlock(&log_root_tree->log_mutex);
3247 * The barrier before waitqueue_active (in cond_wake_up) is needed so
3248 * all the updates above are seen by the woken threads. It might not be
3249 * necessary, but proving that seems to be hard.
3251 cond_wake_up(&log_root_tree->log_commit_wait[index2]);
3253 mutex_lock(&root->log_mutex);
3254 btrfs_remove_all_log_ctxs(root, index1, ret);
3255 root->log_transid_committed++;
3256 atomic_set(&root->log_commit[index1], 0);
3257 mutex_unlock(&root->log_mutex);
3260 * The barrier before waitqueue_active (in cond_wake_up) is needed so
3261 * all the updates above are seen by the woken threads. It might not be
3262 * necessary, but proving that seems to be hard.
3264 cond_wake_up(&root->log_commit_wait[index1]);
3268 static void free_log_tree(struct btrfs_trans_handle *trans,
3269 struct btrfs_root *log)
3274 struct walk_control wc = {
3276 .process_func = process_one_buffer
3279 ret = walk_log_tree(trans, log, &wc);
3282 btrfs_abort_transaction(trans, ret);
3284 btrfs_handle_fs_error(log->fs_info, ret, NULL);
3288 ret = find_first_extent_bit(&log->dirty_log_pages,
3290 EXTENT_DIRTY | EXTENT_NEW | EXTENT_NEED_WAIT,
3295 clear_extent_bits(&log->dirty_log_pages, start, end,
3296 EXTENT_DIRTY | EXTENT_NEW | EXTENT_NEED_WAIT);
3299 free_extent_buffer(log->node);
3304 * free all the extents used by the tree log. This should be called
3305 * at commit time of the full transaction
3307 int btrfs_free_log(struct btrfs_trans_handle *trans, struct btrfs_root *root)
3309 if (root->log_root) {
3310 free_log_tree(trans, root->log_root);
3311 root->log_root = NULL;
3316 int btrfs_free_log_root_tree(struct btrfs_trans_handle *trans,
3317 struct btrfs_fs_info *fs_info)
3319 if (fs_info->log_root_tree) {
3320 free_log_tree(trans, fs_info->log_root_tree);
3321 fs_info->log_root_tree = NULL;
3327 * Check if an inode was logged in the current transaction. We can't always rely
3328 * on an inode's logged_trans value, because it's an in-memory only field and
3329 * therefore not persisted. This means that its value is lost if the inode gets
3330 * evicted and loaded again from disk (in which case it has a value of 0, and
3331 * certainly it is smaller than any possible transaction ID). When that happens
3332 * the full_sync flag is set in the inode's runtime flags, so in that case we
3333 * assume eviction happened and ignore the logged_trans value, assuming the
3334 * worst case, that the inode was logged before in the current transaction.
3336 static bool inode_logged(struct btrfs_trans_handle *trans,
3337 struct btrfs_inode *inode)
3339 if (inode->logged_trans == trans->transid)
3342 if (inode->last_trans == trans->transid &&
3343 test_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags) &&
3344 !test_bit(BTRFS_FS_LOG_RECOVERING, &trans->fs_info->flags))
3351 * If both a file and directory are logged, and unlinks or renames are
3352 * mixed in, we have a few interesting corners:
3354 * create file X in dir Y
3355 * link file X to X.link in dir Y
3357 * unlink file X but leave X.link
3360 * After a crash we would expect only X.link to exist. But file X
3361 * didn't get fsync'd again so the log has back refs for X and X.link.
3363 * We solve this by removing directory entries and inode backrefs from the
3364 * log when a file that was logged in the current transaction is
3365 * unlinked. Any later fsync will include the updated log entries, and
3366 * we'll be able to reconstruct the proper directory items from backrefs.
3368 * This optimization allows us to avoid relogging the entire inode
3369 * or the entire directory.
 */
3371 int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans,
3372 struct btrfs_root *root,
3373 const char *name, int name_len,
3374 struct btrfs_inode *dir, u64 index)
3376 struct btrfs_root *log;
3377 struct btrfs_dir_item *di;
3378 struct btrfs_path *path;
3382 u64 dir_ino = btrfs_ino(dir);
3384 if (!inode_logged(trans, dir))
3387 ret = join_running_log_trans(root);
3391 mutex_lock(&dir->log_mutex);
3393 log = root->log_root;
3394 path = btrfs_alloc_path();
3400 di = btrfs_lookup_dir_item(trans, log, path, dir_ino,
3401 name, name_len, -1);
3407 ret = btrfs_delete_one_dir_name(trans, log, path, di);
3408 bytes_del += name_len;
3414 btrfs_release_path(path);
3415 di = btrfs_lookup_dir_index_item(trans, log, path, dir_ino,
3416 index, name, name_len, -1);
3422 ret = btrfs_delete_one_dir_name(trans, log, path, di);
3423 bytes_del += name_len;
3430 /* update the directory size in the log to reflect the names
 * we have removed
 */
3434 struct btrfs_key key;
3436 key.objectid = dir_ino;
3438 key.type = BTRFS_INODE_ITEM_KEY;
3439 btrfs_release_path(path);
3441 ret = btrfs_search_slot(trans, log, &key, path, 0, 1);
3447 struct btrfs_inode_item *item;
3450 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
3451 struct btrfs_inode_item);
3452 i_size = btrfs_inode_size(path->nodes[0], item);
3453 if (i_size > bytes_del)
3454 i_size -= bytes_del;
3457 btrfs_set_inode_size(path->nodes[0], item, i_size);
3458 btrfs_mark_buffer_dirty(path->nodes[0]);
3461 btrfs_release_path(path);
3464 btrfs_free_path(path);
3466 mutex_unlock(&dir->log_mutex);
3467 if (err == -ENOSPC) {
3468 btrfs_set_log_full_commit(root->fs_info, trans);
3470 } else if (err < 0 && err != -ENOENT) {
3471 /* ENOENT can be returned if the entry hasn't been fsynced yet */
3472 btrfs_abort_transaction(trans, err);
3475 btrfs_end_log_trans(root);
3480 /* see comments for btrfs_del_dir_entries_in_log */
3481 int btrfs_del_inode_ref_in_log(struct btrfs_trans_handle *trans,
3482 struct btrfs_root *root,
3483 const char *name, int name_len,
3484 struct btrfs_inode *inode, u64 dirid)
3486 struct btrfs_fs_info *fs_info = root->fs_info;
3487 struct btrfs_root *log;
3491 if (!inode_logged(trans, inode))
3494 ret = join_running_log_trans(root);
3497 log = root->log_root;
3498 mutex_lock(&inode->log_mutex);
3500 ret = btrfs_del_inode_ref(trans, log, name, name_len, btrfs_ino(inode),
3502 mutex_unlock(&inode->log_mutex);
3503 if (ret == -ENOSPC) {
3504 btrfs_set_log_full_commit(fs_info, trans);
3506 } else if (ret < 0 && ret != -ENOENT)
3507 btrfs_abort_transaction(trans, ret);
3508 btrfs_end_log_trans(root);
3514 * creates a range item in the log for 'dirid'. first_offset and
3515 * last_offset tell us which parts of the key space the log should
3516 * be considered authoritative for.
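 *
 * For example (offsets invented): after logging dir items at offsets
 * 3..17 for dirid 257, a call like
 *
 *	ret = insert_dir_log_key(trans, log, path, BTRFS_DIR_ITEM_KEY,
 *				 257, 3, 17);
 *
 * leaves a BTRFS_DIR_LOG_ITEM_KEY at (257, 3) whose dir_log_end is 17,
 * telling replay that the log fully describes that slice of the key
 * space.
 */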
3518 static noinline int insert_dir_log_key(struct btrfs_trans_handle *trans,
3519 struct btrfs_root *log,
3520 struct btrfs_path *path,
3521 int key_type, u64 dirid,
3522 u64 first_offset, u64 last_offset)
3525 struct btrfs_key key;
3526 struct btrfs_dir_log_item *item;
3528 key.objectid = dirid;
3529 key.offset = first_offset;
3530 if (key_type == BTRFS_DIR_ITEM_KEY)
3531 key.type = BTRFS_DIR_LOG_ITEM_KEY;
3533 key.type = BTRFS_DIR_LOG_INDEX_KEY;
3534 ret = btrfs_insert_empty_item(trans, log, path, &key, sizeof(*item));
3538 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
3539 struct btrfs_dir_log_item);
3540 btrfs_set_dir_log_end(path->nodes[0], item, last_offset);
3541 btrfs_mark_buffer_dirty(path->nodes[0]);
3542 btrfs_release_path(path);
3547 * log all the items included in the current transaction for a given
3548 * directory. This also creates the range items in the log tree required
3549 * to replay anything deleted before the fsync
3551 static noinline int log_dir_items(struct btrfs_trans_handle *trans,
3552 struct btrfs_root *root, struct btrfs_inode *inode,
3553 struct btrfs_path *path,
3554 struct btrfs_path *dst_path, int key_type,
3555 struct btrfs_log_ctx *ctx,
3556 u64 min_offset, u64 *last_offset_ret)
3558 struct btrfs_key min_key;
3559 struct btrfs_root *log = root->log_root;
3560 struct extent_buffer *src;
3565 u64 first_offset = min_offset;
3566 u64 last_offset = (u64)-1;
3567 u64 ino = btrfs_ino(inode);
3569 log = root->log_root;
3571 min_key.objectid = ino;
3572 min_key.type = key_type;
3573 min_key.offset = min_offset;
3575 ret = btrfs_search_forward(root, &min_key, path, trans->transid);
3578 * we didn't find anything from this transaction, see if there
3579 * is anything at all
3581 if (ret != 0 || min_key.objectid != ino || min_key.type != key_type) {
3582 min_key.objectid = ino;
3583 min_key.type = key_type;
3584 min_key.offset = (u64)-1;
3585 btrfs_release_path(path);
3586 ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0);
3588 btrfs_release_path(path);
3591 ret = btrfs_previous_item(root, path, ino, key_type);
3593 /* if ret == 0 there are items for this type,
3594 * create a range to tell us the last key of this type.
3595 * otherwise, there are no items in this directory after
3596 * *min_offset, and we create a range to indicate that.
3599 struct btrfs_key tmp;
3600 btrfs_item_key_to_cpu(path->nodes[0], &tmp,
3602 if (key_type == tmp.type)
3603 first_offset = max(min_offset, tmp.offset) + 1;
3608 /* go backward to find any previous key */
3609 ret = btrfs_previous_item(root, path, ino, key_type);
3611 struct btrfs_key tmp;
3612 btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]);
3613 if (key_type == tmp.type) {
3614 first_offset = tmp.offset;
3615 ret = overwrite_item(trans, log, dst_path,
3616 path->nodes[0], path->slots[0],
3624 btrfs_release_path(path);
3627 * Find the first key from this transaction again. See the note for
3628 * log_new_dir_dentries: if we're logging a directory recursively we
3629 * won't be holding its i_mutex, which means we can modify the directory
3630 * while we're logging it. If we remove an entry between our first
3631 * search and this search we'll not find the key again and can just
 * bail out.
 */
3635 ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0);
3640 * we have a block from this transaction, log every item in it
3641 * from our directory
3644 struct btrfs_key tmp;
3645 src = path->nodes[0];
3646 nritems = btrfs_header_nritems(src);
3647 for (i = path->slots[0]; i < nritems; i++) {
3648 struct btrfs_dir_item *di;
3650 btrfs_item_key_to_cpu(src, &min_key, i);
3652 if (min_key.objectid != ino || min_key.type != key_type)
3655 if (need_resched()) {
3656 btrfs_release_path(path);
3661 ret = overwrite_item(trans, log, dst_path, src, i,
3669 * We must make sure that when we log a directory entry,
3670 * the corresponding inode, after log replay, has a
3671 * matching link count. For example:
3677 * xfs_io -c "fsync" mydir
3679 * <mount fs and log replay>
3681 * Would result in an fsync log that, when replayed, leaves our
3682 * file inode with a link count of 1 but with two directory
3683 * entries pointing to the same inode.
3684 * After removing one of the names, it would not be
3685 * possible to remove the other name, which always
3686 * resulted in stale file handle errors, and it would
3687 * not be possible to rmdir the parent directory, since
3688 * its i_size could never decrement to the value
3689 * BTRFS_EMPTY_DIR_SIZE, resulting in -ENOTEMPTY errors.
3691 di = btrfs_item_ptr(src, i, struct btrfs_dir_item);
3692 btrfs_dir_item_key_to_cpu(src, di, &tmp);
3694 (btrfs_dir_transid(src, di) == trans->transid ||
3695 btrfs_dir_type(src, di) == BTRFS_FT_DIR) &&
3696 tmp.type != BTRFS_ROOT_ITEM_KEY)
3697 ctx->log_new_dentries = true;
3699 path->slots[0] = nritems;
3702 * look ahead to the next item and see if it is also
3703 * from this directory and from this transaction
3705 ret = btrfs_next_leaf(root, path);
3708 last_offset = (u64)-1;
3713 btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]);
3714 if (tmp.objectid != ino || tmp.type != key_type) {
3715 last_offset = (u64)-1;
3718 if (btrfs_header_generation(path->nodes[0]) != trans->transid) {
3719 ret = overwrite_item(trans, log, dst_path,
3720 path->nodes[0], path->slots[0],
3725 last_offset = tmp.offset;
3730 btrfs_release_path(path);
3731 btrfs_release_path(dst_path);
3734 *last_offset_ret = last_offset;
3736 * insert the log range keys to indicate where the log
 * is valid
 */
3739 ret = insert_dir_log_key(trans, log, path, key_type,
3740 ino, first_offset, last_offset);
3748 * logging directories is very similar to logging inodes. We find all the items
3749 * from the current transaction and write them to the log.
3751 * The recovery code scans the directory in the subvolume, and if it finds a
3752 * key in the range logged that is not present in the log tree, then it means
3753 * that dir entry was unlinked during the transaction.
3755 * In order for that scan to work, we must include one key smaller than
3756 * the smallest logged by this transaction and one key larger than the largest
3757 * key logged by this transaction.
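 *
 * Concretely (offsets invented for illustration): if this transaction
 * only touched dir entries at offsets 10..20, the range items written
 * by log_dir_items() still extend below 10 and above 20, so the
 * recovery scan can tell that an offset inside the range but absent
 * from the log was deleted, rather than simply never logged.
 */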
3759 static noinline int log_directory_changes(struct btrfs_trans_handle *trans,
3760 struct btrfs_root *root, struct btrfs_inode *inode,
3761 struct btrfs_path *path,
3762 struct btrfs_path *dst_path,
3763 struct btrfs_log_ctx *ctx)
3768 int key_type = BTRFS_DIR_ITEM_KEY;
3774 ret = log_dir_items(trans, root, inode, path, dst_path, key_type,
3775 ctx, min_key, &max_key);
3778 if (max_key == (u64)-1)
3780 min_key = max_key + 1;
3783 if (key_type == BTRFS_DIR_ITEM_KEY) {
3784 key_type = BTRFS_DIR_INDEX_KEY;
3791 * a helper function to drop items from the log before we relog an
3792 * inode. max_key_type indicates the highest item type to remove.
3793 * This cannot be run for file data extents because it does not
3794 * free the extents they point to.
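 *
 * For example, the relogging paths below use
 *
 *	ret = drop_objectid_items(trans, log, path, ino,
 *				  BTRFS_XATTR_ITEM_KEY);
 *
 * which removes the inode item, inode refs and xattrs from the log but
 * keeps any BTRFS_EXTENT_DATA_KEY items, precisely because of the
 * restriction above.
 */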
3796 static int drop_objectid_items(struct btrfs_trans_handle *trans,
3797 struct btrfs_root *log,
3798 struct btrfs_path *path,
3799 u64 objectid, int max_key_type)
3802 struct btrfs_key key;
3803 struct btrfs_key found_key;
3806 key.objectid = objectid;
3807 key.type = max_key_type;
3808 key.offset = (u64)-1;
3811 ret = btrfs_search_slot(trans, log, &key, path, -1, 1);
3812 BUG_ON(ret == 0); /* Logic error */
3816 if (path->slots[0] == 0)
3820 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
3823 if (found_key.objectid != objectid)
3826 found_key.offset = 0;
3828 ret = btrfs_bin_search(path->nodes[0], &found_key, 0,
3831 ret = btrfs_del_items(trans, log, path, start_slot,
3832 path->slots[0] - start_slot + 1);
3834 * If start slot isn't 0 then we don't need to re-search, we've
3835 * found the last guy with the objectid in this tree.
3837 if (ret || start_slot != 0)
3839 btrfs_release_path(path);
3841 btrfs_release_path(path);
3847 static void fill_inode_item(struct btrfs_trans_handle *trans,
3848 struct extent_buffer *leaf,
3849 struct btrfs_inode_item *item,
3850 struct inode *inode, int log_inode_only,
3853 struct btrfs_map_token token;
3855 btrfs_init_map_token(&token);
3857 if (log_inode_only) {
3858 /* set the generation to zero so the recovery code
3859 * can tell the difference between logging
3860 * just to say 'this inode exists' and logging
3861 * to say 'update this inode with these values'
 */
3863 btrfs_set_token_inode_generation(leaf, item, 0, &token);
3864 btrfs_set_token_inode_size(leaf, item, logged_isize, &token);
3866 btrfs_set_token_inode_generation(leaf, item,
3867 BTRFS_I(inode)->generation,
3869 btrfs_set_token_inode_size(leaf, item, inode->i_size, &token);
3872 btrfs_set_token_inode_uid(leaf, item, i_uid_read(inode), &token);
3873 btrfs_set_token_inode_gid(leaf, item, i_gid_read(inode), &token);
3874 btrfs_set_token_inode_mode(leaf, item, inode->i_mode, &token);
3875 btrfs_set_token_inode_nlink(leaf, item, inode->i_nlink, &token);
3877 btrfs_set_token_timespec_sec(leaf, &item->atime,
3878 inode->i_atime.tv_sec, &token);
3879 btrfs_set_token_timespec_nsec(leaf, &item->atime,
3880 inode->i_atime.tv_nsec, &token);
3882 btrfs_set_token_timespec_sec(leaf, &item->mtime,
3883 inode->i_mtime.tv_sec, &token);
3884 btrfs_set_token_timespec_nsec(leaf, &item->mtime,
3885 inode->i_mtime.tv_nsec, &token);
3887 btrfs_set_token_timespec_sec(leaf, &item->ctime,
3888 inode->i_ctime.tv_sec, &token);
3889 btrfs_set_token_timespec_nsec(leaf, &item->ctime,
3890 inode->i_ctime.tv_nsec, &token);
3892 btrfs_set_token_inode_nbytes(leaf, item, inode_get_bytes(inode),
3895 btrfs_set_token_inode_sequence(leaf, item,
3896 inode_peek_iversion(inode), &token);
3897 btrfs_set_token_inode_transid(leaf, item, trans->transid, &token);
3898 btrfs_set_token_inode_rdev(leaf, item, inode->i_rdev, &token);
3899 btrfs_set_token_inode_flags(leaf, item, BTRFS_I(inode)->flags, &token);
3900 btrfs_set_token_inode_block_group(leaf, item, 0, &token);
3903 static int log_inode_item(struct btrfs_trans_handle *trans,
3904 struct btrfs_root *log, struct btrfs_path *path,
3905 struct btrfs_inode *inode)
3907 struct btrfs_inode_item *inode_item;
3910 ret = btrfs_insert_empty_item(trans, log, path,
3911 &inode->location, sizeof(*inode_item));
3912 if (ret && ret != -EEXIST)
3914 inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
3915 struct btrfs_inode_item);
3916 fill_inode_item(trans, path->nodes[0], inode_item, &inode->vfs_inode,
3918 btrfs_release_path(path);
3922 static int log_csums(struct btrfs_trans_handle *trans,
3923 struct btrfs_root *log_root,
3924 struct btrfs_ordered_sum *sums)
3929 * Due to extent cloning, we might have logged a csum item that covers a
3930 * subrange of a cloned extent, and later we can end up logging a csum
3931 * item for a larger subrange of the same extent or the entire range.
3932 * This would leave csum items in the log tree that cover the same range
3933 * and break the searches for checksums in the log tree, resulting in
3934 * some checksums missing in the fs/subvolume tree. So just delete (or
3935 * trim and adjust) any existing csum items in the log for this range.
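	 *
	 * For instance (ranges invented): a reflink may first cause the
	 * csums for [1M, 1M + 4K) of an extent to be logged, and a later
	 * clone may log [1M, 1M + 128K) of the same extent; without the
	 * deletion below, both items would describe the first 4K.
	 */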
3937 ret = btrfs_del_csums(trans, log_root, sums->bytenr, sums->len);
3941 return btrfs_csum_file_blocks(trans, log_root, sums);
3944 static noinline int copy_items(struct btrfs_trans_handle *trans,
3945 struct btrfs_inode *inode,
3946 struct btrfs_path *dst_path,
3947 struct btrfs_path *src_path,
3948 int start_slot, int nr, int inode_only,
3951 struct btrfs_fs_info *fs_info = trans->fs_info;
3952 unsigned long src_offset;
3953 unsigned long dst_offset;
3954 struct btrfs_root *log = inode->root->log_root;
3955 struct btrfs_file_extent_item *extent;
3956 struct btrfs_inode_item *inode_item;
3957 struct extent_buffer *src = src_path->nodes[0];
3959 struct btrfs_key *ins_keys;
3963 struct list_head ordered_sums;
3964 int skip_csum = inode->flags & BTRFS_INODE_NODATASUM;
3966 INIT_LIST_HEAD(&ordered_sums);
3968 ins_data = kmalloc(nr * sizeof(struct btrfs_key) +
3969 nr * sizeof(u32), GFP_NOFS);
3973 ins_sizes = (u32 *)ins_data;
3974 ins_keys = (struct btrfs_key *)(ins_data + nr * sizeof(u32));
3976 for (i = 0; i < nr; i++) {
3977 ins_sizes[i] = btrfs_item_size_nr(src, i + start_slot);
3978 btrfs_item_key_to_cpu(src, ins_keys + i, i + start_slot);
3980 ret = btrfs_insert_empty_items(trans, log, dst_path,
3981 ins_keys, ins_sizes, nr);
3987 for (i = 0; i < nr; i++, dst_path->slots[0]++) {
3988 dst_offset = btrfs_item_ptr_offset(dst_path->nodes[0],
3989 dst_path->slots[0]);
3991 src_offset = btrfs_item_ptr_offset(src, start_slot + i);
3993 if (ins_keys[i].type == BTRFS_INODE_ITEM_KEY) {
3994 inode_item = btrfs_item_ptr(dst_path->nodes[0],
3996 struct btrfs_inode_item);
3997 fill_inode_item(trans, dst_path->nodes[0], inode_item,
3999 inode_only == LOG_INODE_EXISTS,
4002 copy_extent_buffer(dst_path->nodes[0], src, dst_offset,
4003 src_offset, ins_sizes[i]);
4006 /* take a reference on file data extents so that truncates
4007 * or deletes of this inode don't have to relog the inode
 * again
 */
4010 if (ins_keys[i].type == BTRFS_EXTENT_DATA_KEY &&
4013 extent = btrfs_item_ptr(src, start_slot + i,
4014 struct btrfs_file_extent_item);
4016 if (btrfs_file_extent_generation(src, extent) < trans->transid)
4019 found_type = btrfs_file_extent_type(src, extent);
4020 if (found_type == BTRFS_FILE_EXTENT_REG) {
4022 ds = btrfs_file_extent_disk_bytenr(src,
4024 /* ds == 0 is a hole */
4028 dl = btrfs_file_extent_disk_num_bytes(src,
4030 cs = btrfs_file_extent_offset(src, extent);
4031 cl = btrfs_file_extent_num_bytes(src,
4033 if (btrfs_file_extent_compression(src,
4039 ret = btrfs_lookup_csums_range(
4041 ds + cs, ds + cs + cl - 1,
4049 btrfs_mark_buffer_dirty(dst_path->nodes[0]);
4050 btrfs_release_path(dst_path);
4054 * we have to do this after the loop above: adding the csum items
4055 * also modifies the log tree, and we can't do that while we are
 * still copying items into it.
 */
4057 while (!list_empty(&ordered_sums)) {
4058 struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next,
4059 struct btrfs_ordered_sum,
4062 ret = log_csums(trans, log, sums);
4063 list_del(&sums->list);
4070 static int extent_cmp(void *priv, struct list_head *a, struct list_head *b)
4072 struct extent_map *em1, *em2;
4074 em1 = list_entry(a, struct extent_map, list);
4075 em2 = list_entry(b, struct extent_map, list);
4077 if (em1->start < em2->start)
4079 else if (em1->start > em2->start)
4084 static int log_extent_csums(struct btrfs_trans_handle *trans,
4085 struct btrfs_inode *inode,
4086 struct btrfs_root *log_root,
4087 const struct extent_map *em)
4091 LIST_HEAD(ordered_sums);
4094 if (inode->flags & BTRFS_INODE_NODATASUM ||
4095 test_bit(EXTENT_FLAG_PREALLOC, &em->flags) ||
4096 em->block_start == EXTENT_MAP_HOLE)
4099 /* If we're compressed we have to save the entire range of csums. */
4100 if (em->compress_type) {
4102 csum_len = max(em->block_len, em->orig_block_len);
4104 csum_offset = em->mod_start - em->start;
4105 csum_len = em->mod_len;
4108 /* block start is already adjusted for the file extent offset. */
4109 ret = btrfs_lookup_csums_range(trans->fs_info->csum_root,
4110 em->block_start + csum_offset,
4111 em->block_start + csum_offset +
4112 csum_len - 1, &ordered_sums, 0);
4116 while (!list_empty(&ordered_sums)) {
4117 struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next,
4118 struct btrfs_ordered_sum,
4121 ret = log_csums(trans, log_root, sums);
4122 list_del(&sums->list);
4129 static int log_one_extent(struct btrfs_trans_handle *trans,
4130 struct btrfs_inode *inode, struct btrfs_root *root,
4131 const struct extent_map *em,
4132 struct btrfs_path *path,
4133 struct btrfs_log_ctx *ctx)
4135 struct btrfs_root *log = root->log_root;
4136 struct btrfs_file_extent_item *fi;
4137 struct extent_buffer *leaf;
4138 struct btrfs_map_token token;
4139 struct btrfs_key key;
4140 u64 extent_offset = em->start - em->orig_start;
4143 int extent_inserted = 0;
4145 ret = log_extent_csums(trans, inode, log, em);
4149 btrfs_init_map_token(&token);
4151 ret = __btrfs_drop_extents(trans, log, &inode->vfs_inode, path, em->start,
4152 em->start + em->len, NULL, 0, 1,
4153 sizeof(*fi), &extent_inserted);
4157 if (!extent_inserted) {
4158 key.objectid = btrfs_ino(inode);
4159 key.type = BTRFS_EXTENT_DATA_KEY;
4160 key.offset = em->start;
4162 ret = btrfs_insert_empty_item(trans, log, path, &key,
4167 leaf = path->nodes[0];
4168 fi = btrfs_item_ptr(leaf, path->slots[0],
4169 struct btrfs_file_extent_item);
4171 btrfs_set_token_file_extent_generation(leaf, fi, trans->transid,
4173 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
4174 btrfs_set_token_file_extent_type(leaf, fi,
4175 BTRFS_FILE_EXTENT_PREALLOC,
4178 btrfs_set_token_file_extent_type(leaf, fi,
4179 BTRFS_FILE_EXTENT_REG,
4182 block_len = max(em->block_len, em->orig_block_len);
4183 if (em->compress_type != BTRFS_COMPRESS_NONE) {
4184 btrfs_set_token_file_extent_disk_bytenr(leaf, fi,
4187 btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, block_len,
4189 } else if (em->block_start < EXTENT_MAP_LAST_BYTE) {
4190 btrfs_set_token_file_extent_disk_bytenr(leaf, fi,
4192 extent_offset, &token);
4193 btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, block_len,
4196 btrfs_set_token_file_extent_disk_bytenr(leaf, fi, 0, &token);
4197 btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, 0,
4201 btrfs_set_token_file_extent_offset(leaf, fi, extent_offset, &token);
4202 btrfs_set_token_file_extent_num_bytes(leaf, fi, em->len, &token);
4203 btrfs_set_token_file_extent_ram_bytes(leaf, fi, em->ram_bytes, &token);
4204 btrfs_set_token_file_extent_compression(leaf, fi, em->compress_type,
4206 btrfs_set_token_file_extent_encryption(leaf, fi, 0, &token);
4207 btrfs_set_token_file_extent_other_encoding(leaf, fi, 0, &token);
4208 btrfs_mark_buffer_dirty(leaf);
4210 btrfs_release_path(path);
4216 * Log all prealloc extents beyond the inode's i_size to make sure we do not
4217 * lose them after doing a fast fsync and replaying the log. We scan the
4218 * subvolume's root instead of iterating the inode's extent map tree because
4219 * otherwise we can log incorrect extent items based on extent map conversion.
4220 * That can happen due to the fact that extent maps are merged when they
4221 * are not in the extent map tree's list of modified extents.
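 *
 * For example (a hypothetical sequence):
 *
 *	xfs_io -f -c "pwrite 0 4K" -c "falloc -k 4K 1M" \
 *	       -c "fsync" file
 *	<power failure>
 *
 * Without this pass, the preallocated range beyond the 4K i_size could
 * be lost after the log is replayed.
 */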
4223 static int btrfs_log_prealloc_extents(struct btrfs_trans_handle *trans,
4224 struct btrfs_inode *inode,
4225 struct btrfs_path *path)
4227 struct btrfs_root *root = inode->root;
4228 struct btrfs_key key;
4229 const u64 i_size = i_size_read(&inode->vfs_inode);
4230 const u64 ino = btrfs_ino(inode);
4231 struct btrfs_path *dst_path = NULL;
4232 bool dropped_extents = false;
4233 u64 truncate_offset = i_size;
4234 struct extent_buffer *leaf;
4240 if (!(inode->flags & BTRFS_INODE_PREALLOC))
4244 key.type = BTRFS_EXTENT_DATA_KEY;
4245 key.offset = i_size;
4246 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4251 * We must check if there is a prealloc extent that starts before the
4252 * i_size and crosses the i_size boundary. This is to ensure later we
4253 * truncate down to the end of that extent and not to the i_size, as
4254 * otherwise we end up losing part of the prealloc extent after a log
4255 * replay and with an implicit hole if there is another prealloc extent
4256 * that starts at an offset beyond i_size.
4258 ret = btrfs_previous_item(root, path, ino, BTRFS_EXTENT_DATA_KEY);
4263 struct btrfs_file_extent_item *ei;
4265 leaf = path->nodes[0];
4266 slot = path->slots[0];
4267 ei = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
4269 if (btrfs_file_extent_type(leaf, ei) ==
4270 BTRFS_FILE_EXTENT_PREALLOC) {
4273 btrfs_item_key_to_cpu(leaf, &key, slot);
4274 extent_end = key.offset +
4275 btrfs_file_extent_num_bytes(leaf, ei);
4277 if (extent_end > i_size)
4278 truncate_offset = extent_end;
4285 leaf = path->nodes[0];
4286 slot = path->slots[0];
4288 if (slot >= btrfs_header_nritems(leaf)) {
4290 ret = copy_items(trans, inode, dst_path, path,
4291 start_slot, ins_nr, 1, 0);
4296 ret = btrfs_next_leaf(root, path);
4306 btrfs_item_key_to_cpu(leaf, &key, slot);
4307 if (key.objectid > ino)
4309 if (WARN_ON_ONCE(key.objectid < ino) ||
4310 key.type < BTRFS_EXTENT_DATA_KEY ||
4311 key.offset < i_size) {
4315 if (!dropped_extents) {
4317 * Avoid logging extent items that were logged in past fsync calls
4318 * and that would lead to duplicate keys in the log tree.
 */
4321 ret = btrfs_truncate_inode_items(trans,
4325 BTRFS_EXTENT_DATA_KEY);
4326 } while (ret == -EAGAIN);
4329 dropped_extents = true;
4336 dst_path = btrfs_alloc_path();
4344 ret = copy_items(trans, inode, dst_path, path,
4345 start_slot, ins_nr, 1, 0);
4350 btrfs_release_path(path);
4351 btrfs_free_path(dst_path);
4355 static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans,
4356 struct btrfs_root *root,
4357 struct btrfs_inode *inode,
4358 struct btrfs_path *path,
4359 struct btrfs_log_ctx *ctx,
4363 struct extent_map *em, *n;
4364 struct list_head extents;
4365 struct extent_map_tree *tree = &inode->extent_tree;
4366 u64 logged_start, logged_end;
4371 INIT_LIST_HEAD(&extents);
4373 write_lock(&tree->lock);
4374 test_gen = root->fs_info->last_trans_committed;
4375 logged_start = start;
4378 list_for_each_entry_safe(em, n, &tree->modified_extents, list) {
4380 * Skip extents outside our logging range. It's important to do
4381 * it for correctness because if we don't ignore them, we may
4382 * log them before their ordered extent completes, and therefore
4383 * we could log them without logging their respective checksums
4384 * (the checksum items are added to the csum tree at the very
4385 * end of btrfs_finish_ordered_io()). Also leave such extents
4386 * outside of our range in the list, since we may have another
4387 * ranged fsync in the near future that needs them. If an extent
4388 * outside our range corresponds to a hole, log it to avoid
4389 * leaving gaps between extents (fsck will complain when we are
4390 * not using the NO_HOLES feature).
4392 if ((em->start > end || em->start + em->len <= start) &&
4393 em->block_start != EXTENT_MAP_HOLE)
4396 list_del_init(&em->list);
4398 * Just an arbitrary number; this can be really CPU intensive
4399 * once we start getting a lot of extents, and really once we
4400 * have a bunch of extents we just want to commit since it will
 * be faster.
 */
4403 if (++num > 32768) {
4404 list_del_init(&tree->modified_extents);
4409 if (em->generation <= test_gen)
4412 /* We log prealloc extents beyond eof later. */
4413 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) &&
4414 em->start >= i_size_read(&inode->vfs_inode))
4417 if (em->start < logged_start)
4418 logged_start = em->start;
4419 if ((em->start + em->len - 1) > logged_end)
4420 logged_end = em->start + em->len - 1;
4422 /* Need a ref to keep it from getting evicted from cache */
4423 refcount_inc(&em->refs);
4424 set_bit(EXTENT_FLAG_LOGGING, &em->flags);
4425 list_add_tail(&em->list, &extents);
4429 list_sort(NULL, &extents, extent_cmp);
4431 while (!list_empty(&extents)) {
4432 em = list_entry(extents.next, struct extent_map, list);
4434 list_del_init(&em->list);
4437 * If we had an error we just need to delete everybody from our
 * private list.
 */
4441 clear_em_logging(tree, em);
4442 free_extent_map(em);
4446 write_unlock(&tree->lock);
4448 ret = log_one_extent(trans, inode, root, em, path, ctx);
4449 write_lock(&tree->lock);
4450 clear_em_logging(tree, em);
4451 free_extent_map(em);
4453 WARN_ON(!list_empty(&extents));
4454 write_unlock(&tree->lock);
4456 btrfs_release_path(path);
4458 ret = btrfs_log_prealloc_extents(trans, inode, path);
4463 static int logged_inode_size(struct btrfs_root *log, struct btrfs_inode *inode,
4464 struct btrfs_path *path, u64 *size_ret)
4466 struct btrfs_key key;
4469 key.objectid = btrfs_ino(inode);
4470 key.type = BTRFS_INODE_ITEM_KEY;
4473 ret = btrfs_search_slot(NULL, log, &key, path, 0, 0);
4476 } else if (ret > 0) {
4479 struct btrfs_inode_item *item;
4481 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
4482 struct btrfs_inode_item);
4483 *size_ret = btrfs_inode_size(path->nodes[0], item);
4485 * If the in-memory inode's i_size is smaller than the inode
4486 * size stored in the btree, return the inode's i_size, so
4487 * that we get a correct inode size after replaying the log
4488 * when before a power failure we had a shrinking truncate
4489 * followed by addition of a new name (rename / new hard link).
4490 * Otherwise return the inode size from the btree, to avoid
4491 * data loss when replaying a log due to previously doing a
4492 * write that expands the inode's size and logging a new name
4493 * immediately after.
4495 if (*size_ret > inode->vfs_inode.i_size)
4496 *size_ret = inode->vfs_inode.i_size;
4499 btrfs_release_path(path);
4504 * At the moment we always log all xattrs. This is to figure out at log replay
4505 * time which xattrs must have their deletion replayed. If an xattr is missing
4506 * in the log tree and exists in the fs/subvol tree, we delete it. This is
4507 * because if an xattr is deleted, the inode is fsynced and a power failure
4508 * happens, causing the log to be replayed the next time the fs is mounted,
4509 * we want the xattr to not exist anymore (same behaviour as other filesystems
4510 * with a journal, ext3/4, xfs, f2fs, etc).
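 *
 * For example (a hypothetical sequence):
 *
 *	setfattr -n user.foo -v bar file
 *	xfs_io -c fsync file
 *	setfattr -x user.foo file
 *	xfs_io -c fsync file
 *	<power failure>
 *
 * After log replay user.foo must be gone, which is only detectable
 * because every xattr still present was logged by the second fsync.
 */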
4512 static int btrfs_log_all_xattrs(struct btrfs_trans_handle *trans,
4513 struct btrfs_root *root,
4514 struct btrfs_inode *inode,
4515 struct btrfs_path *path,
4516 struct btrfs_path *dst_path)
4519 struct btrfs_key key;
4520 const u64 ino = btrfs_ino(inode);
4525 key.type = BTRFS_XATTR_ITEM_KEY;
4528 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4533 int slot = path->slots[0];
4534 struct extent_buffer *leaf = path->nodes[0];
4535 int nritems = btrfs_header_nritems(leaf);
4537 if (slot >= nritems) {
4539 ret = copy_items(trans, inode, dst_path, path,
4540 start_slot, ins_nr, 1, 0);
4545 ret = btrfs_next_leaf(root, path);
4553 btrfs_item_key_to_cpu(leaf, &key, slot);
4554 if (key.objectid != ino || key.type != BTRFS_XATTR_ITEM_KEY)
4564 ret = copy_items(trans, inode, dst_path, path,
4565 start_slot, ins_nr, 1, 0);
4574 * When using the NO_HOLES feature, if we punched a hole that causes the
4575 * deletion of entire leafs or all the extent items of the first leaf (the one
4576 * that contains the inode item and references) we may end up not processing
4577 * any extents, because there are no leafs with a generation matching the
4578 * current transaction that have extent items for our inode. So we need to find
4579 * if any holes exist and then log them. We also need to log holes after any
4580 * truncate operation that changes the inode's size.
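 *
 * The hole record itself is just a file extent item with a zero
 * disk_bytenr; a sketch of the call used below (the trailing
 * compression/encryption arguments are assumed to be zero):
 *
 *	ret = btrfs_insert_file_extent(trans, root->log_root, ino,
 *				       prev_extent_end, 0, 0, hole_len,
 *				       0, hole_len, 0, 0, 0);
 *
 * which replay interprets as an implicit hole covering
 * [prev_extent_end, prev_extent_end + hole_len).
 */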
4582 static int btrfs_log_holes(struct btrfs_trans_handle *trans,
4583 struct btrfs_root *root,
4584 struct btrfs_inode *inode,
4585 struct btrfs_path *path)
4587 struct btrfs_fs_info *fs_info = root->fs_info;
4588 struct btrfs_key key;
4589 const u64 ino = btrfs_ino(inode);
4590 const u64 i_size = i_size_read(&inode->vfs_inode);
4591 u64 prev_extent_end = 0;
4594 if (!btrfs_fs_incompat(fs_info, NO_HOLES) || i_size == 0)
4598 key.type = BTRFS_EXTENT_DATA_KEY;
4601 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4606 struct btrfs_file_extent_item *extent;
4607 struct extent_buffer *leaf = path->nodes[0];
4610 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
4611 ret = btrfs_next_leaf(root, path);
4618 leaf = path->nodes[0];
4621 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
4622 if (key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY)
4625 /* We have a hole, log it. */
4626 if (prev_extent_end < key.offset) {
4627 const u64 hole_len = key.offset - prev_extent_end;
4630 * Release the path to avoid deadlocks with other code
4631 * paths that search the root while holding locks on
4632 * leafs from the log root.
4634 btrfs_release_path(path);
4635 ret = btrfs_insert_file_extent(trans, root->log_root,
4636 ino, prev_extent_end, 0,
4637 0, hole_len, 0, hole_len,
4643 * Search for the same key again in the root. Since it's
4644 * an extent item and we are holding the inode lock, the
4645 * key must still exist. If it doesn't, just emit a warning
4646 * and return an error to fall back to a transaction
 * commit.
 */
4649 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4652 if (WARN_ON(ret > 0))
4654 leaf = path->nodes[0];
4657 extent = btrfs_item_ptr(leaf, path->slots[0],
4658 struct btrfs_file_extent_item);
4659 if (btrfs_file_extent_type(leaf, extent) ==
4660 BTRFS_FILE_EXTENT_INLINE) {
4661 len = btrfs_file_extent_ram_bytes(leaf, extent);
4662 prev_extent_end = ALIGN(key.offset + len,
4663 fs_info->sectorsize);
4665 len = btrfs_file_extent_num_bytes(leaf, extent);
4666 prev_extent_end = key.offset + len;
4673 if (prev_extent_end < i_size) {
4676 btrfs_release_path(path);
4677 hole_len = ALIGN(i_size - prev_extent_end, fs_info->sectorsize);
4678 ret = btrfs_insert_file_extent(trans, root->log_root,
4679 ino, prev_extent_end, 0, 0,
4680 hole_len, 0, hole_len,
4690 * When we are logging a new inode X, check whether it has a reference that
4691 * matches a reference from some other inode Y created in a past transaction
4692 * and renamed in the current transaction. If we don't do this, then at
4693 * log replay time we can lose inode Y (and all its files if it's a directory):
4696 * echo "hello world" > /mnt/x/foobar
4699 * mkdir /mnt/x # or touch /mnt/x
4700 * xfs_io -c fsync /mnt/x
4702 * mount fs, trigger log replay
4704 * After the log replay procedure, we would lose the first directory and all its
4705 * files (file foobar).
4706 * For the case where inode Y is not a directory we simply end up losing it:
4708 * echo "123" > /mnt/foo
4710 * mv /mnt/foo /mnt/bar
4711 * echo "abc" > /mnt/foo
4712 * xfs_io -c fsync /mnt/foo
4715 * We also need this for cases where a snapshot entry is replaced by some other
4716 * entry (file or directory), otherwise we end up with an unreplayable log due to
4717 * attempts to delete the snapshot entry (entry of type BTRFS_ROOT_ITEM_KEY) as
4718 * if it were a regular entry:
4721 * btrfs subvolume snapshot /mnt /mnt/x/snap
4722 * btrfs subvolume delete /mnt/x/snap
4725 * fsync /mnt/x or fsync some new file inside it
4728 * The snapshot delete, rmdir of x, mkdir of a new x and the fsync all happen in
4729 * the same transaction.
4731 static int btrfs_check_ref_name_override(struct extent_buffer *eb,
4733 const struct btrfs_key *key,
4734 struct btrfs_inode *inode,
4738 struct btrfs_path *search_path;
4741 u32 item_size = btrfs_item_size_nr(eb, slot);
4743 unsigned long ptr = btrfs_item_ptr_offset(eb, slot);
4745 search_path = btrfs_alloc_path();
4748 search_path->search_commit_root = 1;
4749 search_path->skip_locking = 1;
4751 while (cur_offset < item_size) {
4755 unsigned long name_ptr;
4756 struct btrfs_dir_item *di;
4758 if (key->type == BTRFS_INODE_REF_KEY) {
4759 struct btrfs_inode_ref *iref;
4761 iref = (struct btrfs_inode_ref *)(ptr + cur_offset);
4762 parent = key->offset;
4763 this_name_len = btrfs_inode_ref_name_len(eb, iref);
4764 name_ptr = (unsigned long)(iref + 1);
4765 this_len = sizeof(*iref) + this_name_len;
4767 struct btrfs_inode_extref *extref;
4769 extref = (struct btrfs_inode_extref *)(ptr +
4771 parent = btrfs_inode_extref_parent(eb, extref);
4772 this_name_len = btrfs_inode_extref_name_len(eb, extref);
4773 name_ptr = (unsigned long)&extref->name;
4774 this_len = sizeof(*extref) + this_name_len;
4777 if (this_name_len > name_len) {
4780 new_name = krealloc(name, this_name_len, GFP_NOFS);
4785 name_len = this_name_len;
4789 read_extent_buffer(eb, name, name_ptr, this_name_len);
4790 di = btrfs_lookup_dir_item(NULL, inode->root, search_path,
4791 parent, name, this_name_len, 0);
4792 if (di && !IS_ERR(di)) {
4793 struct btrfs_key di_key;
4795 btrfs_dir_item_key_to_cpu(search_path->nodes[0],
4797 if (di_key.type == BTRFS_INODE_ITEM_KEY) {
4799 *other_ino = di_key.objectid;
4804 } else if (IS_ERR(di)) {
4808 btrfs_release_path(search_path);
4810 cur_offset += this_len;
4814 btrfs_free_path(search_path);
4819 /* log a single inode in the tree log.
4820 * At least one parent directory for this inode must exist in the tree
4821 * or be logged already.
4823 * Any items from this inode changed by the current transaction are copied
4824 * to the log tree. An extra reference is taken on any extents in this
4825 * file, allowing us to avoid a whole pile of corner cases around logging
4826 * blocks that have been removed from the tree.
4828 * See LOG_INODE_ALL and related defines for a description of what inode_only
 * does.
4831 * This handles both files and directories.
static int btrfs_log_inode(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root, struct btrfs_inode *inode,
			   int inode_only,
			   const loff_t start,
			   const loff_t end,
			   struct btrfs_log_ctx *ctx)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_path *path;
	struct btrfs_path *dst_path;
	struct btrfs_key min_key;
	struct btrfs_key max_key;
	struct btrfs_root *log = root->log_root;
	int err = 0;
	int ret;
	int nritems;
	int ins_start_slot = 0;
	int ins_nr;
	bool fast_search = false;
	u64 ino = btrfs_ino(inode);
	struct extent_map_tree *em_tree = &inode->extent_tree;
	u64 logged_isize = 0;
	bool need_log_inode_item = true;
	bool xattrs_logged = false;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	dst_path = btrfs_alloc_path();
	if (!dst_path) {
		btrfs_free_path(path);
		return -ENOMEM;
	}

	min_key.objectid = ino;
	min_key.type = BTRFS_INODE_ITEM_KEY;
	min_key.offset = 0;

	max_key.objectid = ino;

	/* today the code can only do partial logging of directories */
	if (S_ISDIR(inode->vfs_inode.i_mode) ||
	    (!test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
		       &inode->runtime_flags) &&
	     inode_only >= LOG_INODE_EXISTS))
		max_key.type = BTRFS_XATTR_ITEM_KEY;
	else
		max_key.type = (u8)-1;
	max_key.offset = (u64)-1;
	/*
	 * Only run delayed items if we are a dir or a new file.
	 * Otherwise commit the delayed inode only, which is needed in
	 * order for the log replay code to mark inodes for link count
	 * fixup (create temporary BTRFS_TREE_LOG_FIXUP_OBJECTID items).
	 */
	if (S_ISDIR(inode->vfs_inode.i_mode) ||
	    inode->generation > fs_info->last_trans_committed)
		ret = btrfs_commit_inode_delayed_items(trans, inode);
	else
		ret = btrfs_commit_inode_delayed_inode(inode);

	if (ret) {
		btrfs_free_path(path);
		btrfs_free_path(dst_path);
		return ret;
	}
	if (inode_only == LOG_OTHER_INODE) {
		inode_only = LOG_INODE_EXISTS;
		mutex_lock_nested(&inode->log_mutex, SINGLE_DEPTH_NESTING);
	} else {
		mutex_lock(&inode->log_mutex);
	}
	/*
	 * For symlinks, we must always log their content, which is stored in an
	 * inline extent, otherwise we could end up with an empty symlink after
	 * log replay, which is invalid on Linux (symlink(2) returns -ENOENT if
	 * one attempts to create an empty symlink).
	 * We don't need to worry about flushing delalloc, because the inline
	 * extent is created at the same time the symlink is created (we never
	 * have delalloc for symlinks).
	 */
	if (S_ISLNK(inode->vfs_inode.i_mode))
		inode_only = LOG_INODE_ALL;
	/*
	 * a brute force approach to making sure we get the most up-to-date
	 * copies of everything.
	 */
	if (S_ISDIR(inode->vfs_inode.i_mode)) {
		int max_key_type = BTRFS_DIR_LOG_INDEX_KEY;

		if (inode_only == LOG_INODE_EXISTS)
			max_key_type = BTRFS_XATTR_ITEM_KEY;
		ret = drop_objectid_items(trans, log, path, ino, max_key_type);
	} else {
		if (inode_only == LOG_INODE_EXISTS) {
			/*
			 * Make sure the new inode item we write to the log has
			 * the same isize as the current one (if it exists).
			 * This is necessary to prevent data loss after log
			 * replay, and also to prevent doing a wrong expanding
			 * truncate - e.g. create file, write 4K into offset
			 * 0, fsync, write 4K into offset 4096, add hard link,
			 * fsync some other file (to sync log), power fail - if
			 * we use the inode's current i_size, after log replay
			 * we get an 8K file, with the last 4K extent as a hole
			 * (zeroes), as if an expanding truncate happened,
			 * instead of getting a file of 4K only.
			 */
			err = logged_inode_size(log, inode, path, &logged_isize);
			if (err)
				goto out_unlock;
		}
		if (test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
			     &inode->runtime_flags)) {
			if (inode_only == LOG_INODE_EXISTS) {
				max_key.type = BTRFS_XATTR_ITEM_KEY;
				ret = drop_objectid_items(trans, log, path, ino,
							  max_key.type);
			} else {
				clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
					  &inode->runtime_flags);
				clear_bit(BTRFS_INODE_COPY_EVERYTHING,
					  &inode->runtime_flags);
				while (1) {
					ret = btrfs_truncate_inode_items(trans,
						log, &inode->vfs_inode, 0, 0);
					if (ret != -EAGAIN)
						break;
				}
			}
		} else if (test_and_clear_bit(BTRFS_INODE_COPY_EVERYTHING,
					      &inode->runtime_flags) ||
			   inode_only == LOG_INODE_EXISTS) {
			if (inode_only == LOG_INODE_ALL)
				fast_search = true;
			max_key.type = BTRFS_XATTR_ITEM_KEY;
			ret = drop_objectid_items(trans, log, path, ino,
						  max_key.type);
		} else {
			if (inode_only == LOG_INODE_ALL)
				fast_search = true;
			goto log_extents;
		}
	}
	if (ret) {
		err = ret;
		goto out_unlock;
	}

	while (1) {
		ins_nr = 0;
		ret = btrfs_search_forward(root, &min_key,
					   path, trans->transid);
		if (ret < 0) {
			err = ret;
			goto out_unlock;
		}
		if (ret != 0)
			break;
again:
		/* note, ins_nr might be > 0 here, cleanup outside the loop */
		if (min_key.objectid != ino)
			break;
		if (min_key.type > max_key.type)
			break;

		if (min_key.type == BTRFS_INODE_ITEM_KEY)
			need_log_inode_item = false;

		if ((min_key.type == BTRFS_INODE_REF_KEY ||
		     min_key.type == BTRFS_INODE_EXTREF_KEY) &&
		    inode->generation == trans->transid) {
			u64 other_ino = 0;

			ret = btrfs_check_ref_name_override(path->nodes[0],
					path->slots[0], &min_key, inode,
					&other_ino);
			if (ret < 0) {
				err = ret;
				goto out_unlock;
			} else if (ret > 0 && ctx &&
				   other_ino != btrfs_ino(BTRFS_I(ctx->inode))) {
				struct btrfs_key inode_key;
				struct inode *other_inode;

				if (ins_nr > 0) {
					ins_nr++;
				} else {
					ins_nr = 1;
					ins_start_slot = path->slots[0];
				}
				ret = copy_items(trans, inode, dst_path, path,
						 ins_start_slot,
						 ins_nr, inode_only,
						 logged_isize);
				if (ret < 0) {
					err = ret;
					goto out_unlock;
				}
				ins_nr = 0;
				btrfs_release_path(path);
				inode_key.objectid = other_ino;
				inode_key.type = BTRFS_INODE_ITEM_KEY;
				inode_key.offset = 0;
				other_inode = btrfs_iget(fs_info->sb,
							 &inode_key, root,
							 NULL);
				/*
				 * If the other inode that had a conflicting dir
				 * entry was deleted in the current transaction,
				 * we don't need to do more work nor fallback to
				 * a transaction commit.
				 */
				if (other_inode == ERR_PTR(-ENOENT)) {
					goto next_key;
				} else if (IS_ERR(other_inode)) {
					err = PTR_ERR(other_inode);
					goto out_unlock;
				}
				/*
				 * We are safe logging the other inode without
				 * acquiring its i_mutex as long as we log with
				 * the LOG_INODE_EXISTS mode. We're safe against
				 * concurrent renames of the other inode as well
				 * because during a rename we pin the log and
				 * update the log with the new name before we
				 * unpin it.
				 */
				err = btrfs_log_inode(trans, root,
						      BTRFS_I(other_inode),
						      LOG_OTHER_INODE, 0,
						      LLONG_MAX, ctx);
				btrfs_add_delayed_iput(other_inode);
				if (err)
					goto out_unlock;
				else
					goto next_key;
			}
		}

		/* Skip xattrs, we log them later with btrfs_log_all_xattrs() */
		if (min_key.type == BTRFS_XATTR_ITEM_KEY) {
			if (ins_nr == 0)
				goto next_slot;
			ret = copy_items(trans, inode, dst_path, path,
					 ins_start_slot,
					 ins_nr, inode_only, logged_isize);
			if (ret < 0) {
				err = ret;
				goto out_unlock;
			}
			ins_nr = 0;
			goto next_slot;
		}

		if (ins_nr && ins_start_slot + ins_nr == path->slots[0]) {
			ins_nr++;
			goto next_slot;
		} else if (!ins_nr) {
			ins_start_slot = path->slots[0];
			ins_nr = 1;
			goto next_slot;
		}

		ret = copy_items(trans, inode, dst_path, path,
				 ins_start_slot, ins_nr, inode_only,
				 logged_isize);
		if (ret < 0) {
			err = ret;
			goto out_unlock;
		}
		ins_nr = 1;
		ins_start_slot = path->slots[0];
next_slot:
		nritems = btrfs_header_nritems(path->nodes[0]);
		path->slots[0]++;
		if (path->slots[0] < nritems) {
			btrfs_item_key_to_cpu(path->nodes[0], &min_key,
					      path->slots[0]);
			goto again;
		}
		if (ins_nr) {
			ret = copy_items(trans, inode, dst_path, path,
					 ins_start_slot,
					 ins_nr, inode_only, logged_isize);
			if (ret < 0) {
				err = ret;
				goto out_unlock;
			}
			ins_nr = 0;
		}
		btrfs_release_path(path);
next_key:
		if (min_key.offset < (u64)-1) {
			min_key.offset++;
		} else if (min_key.type < max_key.type) {
			min_key.type++;
			min_key.offset = 0;
		} else {
			break;
		}
	}
	if (ins_nr) {
		ret = copy_items(trans, inode, dst_path, path,
				 ins_start_slot, ins_nr, inode_only,
				 logged_isize);
		if (ret < 0) {
			err = ret;
			goto out_unlock;
		}
		ins_nr = 0;
	}
	btrfs_release_path(path);
	btrfs_release_path(dst_path);
	err = btrfs_log_all_xattrs(trans, root, inode, path, dst_path);
	if (err)
		goto out_unlock;
	xattrs_logged = true;
	if (max_key.type >= BTRFS_EXTENT_DATA_KEY && !fast_search) {
		btrfs_release_path(path);
		btrfs_release_path(dst_path);
		err = btrfs_log_holes(trans, root, inode, path);
		if (err)
			goto out_unlock;
	}
log_extents:
	btrfs_release_path(path);
	btrfs_release_path(dst_path);
	if (need_log_inode_item) {
		err = log_inode_item(trans, log, dst_path, inode);
		if (!err && !xattrs_logged) {
			err = btrfs_log_all_xattrs(trans, root, inode, path,
						   dst_path);
			btrfs_release_path(path);
		}
		if (err)
			goto out_unlock;
	}
	if (fast_search) {
		ret = btrfs_log_changed_extents(trans, root, inode, dst_path,
						ctx, start, end);
		if (ret) {
			err = ret;
			goto out_unlock;
		}
	} else if (inode_only == LOG_INODE_ALL) {
		struct extent_map *em, *n;

		write_lock(&em_tree->lock);
		/*
		 * We can't just remove every em if we're called for a ranged
		 * fsync - that is, one that doesn't cover the whole possible
		 * file range (0 to LLONG_MAX). This is because we can have
		 * em's that fall outside the range we're logging and therefore
		 * their ordered operations haven't completed yet
		 * (btrfs_finish_ordered_io() not invoked yet). This means we
		 * didn't get their respective file extent item in the fs/subvol
		 * tree yet, and need to let the next fast fsync (one which
		 * consults the list of modified extent maps) find the em so
		 * that it logs a matching file extent item and waits for the
		 * respective ordered operation to complete (if it's still
		 * running).
		 *
		 * Removing every em outside the range we're logging would make
		 * the next fast fsync not log their matching file extent items,
		 * therefore making us lose data after a log replay.
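		 *
		 * Rough illustration of the case being guarded against
		 * (hypothetical timeline, assuming a ranged fsync):
		 *
		 *	write 4K at offset 0    -> em A added to modified list
		 *	write 4K at offset 8K   -> em B added to modified list
		 *	fsync range [8K, 12K)   -> logs em B; must keep em A,
		 *	                           its ordered IO may not have
		 *	                           finished yet
		 *	fsync range [0, 4K)     -> the next fast fsync still
		 *	                           finds em A and logs it
		 */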
		list_for_each_entry_safe(em, n, &em_tree->modified_extents,
					 list) {
			const u64 mod_end = em->mod_start + em->mod_len - 1;

			if (em->mod_start >= start && mod_end <= end)
				list_del_init(&em->list);
		}
		write_unlock(&em_tree->lock);
	}
	if (inode_only == LOG_INODE_ALL && S_ISDIR(inode->vfs_inode.i_mode)) {
		ret = log_directory_changes(trans, root, inode, path, dst_path,
					    ctx);
		if (ret) {
			err = ret;
			goto out_unlock;
		}
	}

	/*
	 * Don't update last_log_commit if we logged that an inode exists after
	 * it was loaded to memory (full_sync bit set).
	 * This is to prevent data loss when we do a write to the inode, then
	 * the inode gets evicted after all delalloc was flushed, then we log
	 * it exists (due to a rename for example) and then fsync it. This last
	 * fsync would do nothing (not logging the extents previously written).
	 */
	spin_lock(&inode->lock);
	inode->logged_trans = trans->transid;
	if (inode_only != LOG_INODE_EXISTS ||
	    !test_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags))
		inode->last_log_commit = inode->last_sub_trans;
	spin_unlock(&inode->lock);
out_unlock:
	mutex_unlock(&inode->log_mutex);

	btrfs_free_path(path);
	btrfs_free_path(dst_path);
	return err;
}
/*
 * Check if we must fall back to a transaction commit when logging an inode.
 * This must be called after logging the inode and is used only when fsyncing
 * an inode requires logging some other inode - in which case we can't lock
 * the i_mutex of each of those other inodes, as that
 * can lead to deadlocks with concurrent fsync against other inodes (as we can
 * log inodes up or down in the hierarchy) or rename operations for example. So
 * we take the log_mutex of the inode after we have logged it and then check for
 * its last_unlink_trans value - this is safe because any task setting
 * last_unlink_trans must take the log_mutex and it must do this before it does
 * the actual unlink operation, so if we do this check before a concurrent task
 * sets last_unlink_trans it means we've logged a consistent version/state of
 * all the inode items, otherwise we are not sure and must do a transaction
 * commit (the concurrent task might have only updated last_unlink_trans before
 * we logged the inode or it might have also done the unlink).
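 *
 * Rough timeline of the race being guarded against (illustrative only):
 *
 *	fsync task                     unlink task
 *	----------                     -----------
 *	logs inode X
 *	                               mutex_lock(&X->log_mutex)
 *	                               X->last_unlink_trans = trans->transid
 *	                               mutex_unlock(&X->log_mutex)
 *	                               removes one of X's names
 *	mutex_lock(&X->log_mutex)
 *	sees the updated last_unlink_trans,
 *	so it forces a full transaction commit
 */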
static bool btrfs_must_commit_transaction(struct btrfs_trans_handle *trans,
					  struct btrfs_inode *inode)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	bool ret = false;

	mutex_lock(&inode->log_mutex);
	if (inode->last_unlink_trans > fs_info->last_trans_committed) {
		/*
		 * Make sure any commits to the log are forced to be full
		 * commits.
		 */
		btrfs_set_log_full_commit(fs_info, trans);
		ret = true;
	}
	mutex_unlock(&inode->log_mutex);

	return ret;
}
/*
 * follow the dentry parent pointers up the chain and see if any
 * of the directories in it require a full commit before they can
 * be logged. Returns zero if nothing special needs to be done or 1 if
 * a full commit is required.
 */
static noinline int check_parent_dirs_for_sync(struct btrfs_trans_handle *trans,
					       struct btrfs_inode *inode,
					       struct dentry *parent,
					       struct super_block *sb,
					       u64 last_committed)
{
	int ret = 0;
	struct dentry *old_parent = NULL;

	/*
	 * for a regular file, if its inode is already on disk, we don't
	 * have to worry about the parents at all. This is because
	 * we can use the last_unlink_trans field to record renames
	 * and other fun in this file.
	 */
	if (S_ISREG(inode->vfs_inode.i_mode) &&
	    inode->generation <= last_committed &&
	    inode->last_unlink_trans <= last_committed)
		goto out;

	if (!S_ISDIR(inode->vfs_inode.i_mode)) {
		if (!parent || d_really_is_negative(parent) || sb != parent->d_sb)
			goto out;
		inode = BTRFS_I(d_inode(parent));
	}

	while (1) {
		if (btrfs_must_commit_transaction(trans, inode)) {
			ret = 1;
			break;
		}

		if (!parent || d_really_is_negative(parent) || sb != parent->d_sb)
			break;

		if (IS_ROOT(parent)) {
			inode = BTRFS_I(d_inode(parent));
			if (btrfs_must_commit_transaction(trans, inode))
				ret = 1;
			break;
		}

		parent = dget_parent(parent);
		dput(old_parent);
		old_parent = parent;
		inode = BTRFS_I(d_inode(parent));
	}
	dput(old_parent);
out:
	return ret;
}
struct btrfs_dir_list {
	u64 ino;
	struct list_head list;
};
/*
 * Log the inodes of the new dentries of a directory. See log_dir_items() for
 * details about why it is needed.
 * This is a recursive operation - if an existing dentry corresponds to a
 * directory, that directory's new entries are logged too (same behaviour as
 * ext3/4, xfs, f2fs, reiserfs, nilfs2). Note that when logging the inodes
 * the dentries point to we do not lock their i_mutex, otherwise lockdep
 * complains about the following circular lock dependency / possible deadlock:
 *
 *        CPU0                                        CPU1
 *        ----                                        ----
 * lock(&type->i_mutex_dir_key#3/2);
 *                                            lock(sb_internal#2);
 *                                            lock(&type->i_mutex_dir_key#3/2);
 * lock(&sb->s_type->i_mutex_key#14);
 *
 * Where sb_internal is the lock (a counter that works as a lock) acquired by
 * sb_start_intwrite() in btrfs_start_transaction().
 * Not locking i_mutex of the inodes is still safe because:
 *
 * 1) For regular files we log with a mode of LOG_INODE_EXISTS. It's possible
 *    that while logging the inode new references (names) are added or removed
 *    from the inode, leaving the logged inode item with a link count that does
 *    not match the number of logged inode reference items. This is fine because
 *    at log replay time we compute the real number of links and correct the
 *    link count in the inode item (see replay_one_buffer() and
 *    link_to_fixup_dir());
 *
 * 2) For directories we log with a mode of LOG_INODE_ALL. It's possible that
 *    while logging the inode's items new items with keys BTRFS_DIR_ITEM_KEY and
 *    BTRFS_DIR_INDEX_KEY are added to fs/subvol tree and the logged inode item
 *    has a size that doesn't match the sum of the lengths of all the logged
 *    names. This does not result in a problem because if a dir_item key is
 *    logged but its matching dir_index key is not logged, at log replay time we
 *    don't use it to replay the respective name (see replay_one_name()). On the
 *    other hand if only the dir_index key ends up being logged, the respective
 *    name is added to the fs/subvol tree with both the dir_item and dir_index
 *    keys created (see replay_one_name()).
 *    The directory's inode item with a wrong i_size is not a problem as well,
 *    since we don't use it at log replay time to set the i_size in the inode
 *    item of the fs/subvol tree (see overwrite_item()).
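 *
 * Hypothetical example of the recursion (illustrative only):
 *
 *	mkdir /mnt/A
 *	mkdir /mnt/A/B
 *	touch /mnt/A/B/foo
 *	xfs_io -c fsync /mnt/A
 *
 * Logging directory A finds the new dentry for B; B, being a directory, is
 * logged with LOG_INODE_ALL and queued on the list, and processing B in
 * turn finds and logs the new dentry for foo.
 */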
static int log_new_dir_dentries(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_inode *start_inode,
				struct btrfs_log_ctx *ctx)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_root *log = root->log_root;
	struct btrfs_path *path;
	LIST_HEAD(dir_list);
	struct btrfs_dir_list *dir_elem;
	int ret = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	dir_elem = kmalloc(sizeof(*dir_elem), GFP_NOFS);
	if (!dir_elem) {
		btrfs_free_path(path);
		return -ENOMEM;
	}
	dir_elem->ino = btrfs_ino(start_inode);
	list_add_tail(&dir_elem->list, &dir_list);

	while (!list_empty(&dir_list)) {
		struct extent_buffer *leaf;
		struct btrfs_key min_key;
		int nritems;
		int i;

		dir_elem = list_first_entry(&dir_list, struct btrfs_dir_list,
					    list);
		if (ret)
			goto next_dir_inode;

		min_key.objectid = dir_elem->ino;
		min_key.type = BTRFS_DIR_ITEM_KEY;
		min_key.offset = 0;
again:
		btrfs_release_path(path);
		ret = btrfs_search_forward(log, &min_key, path, trans->transid);
		if (ret < 0) {
			goto next_dir_inode;
		} else if (ret > 0) {
			ret = 0;
			goto next_dir_inode;
		}

process_leaf:
		leaf = path->nodes[0];
		nritems = btrfs_header_nritems(leaf);
		for (i = path->slots[0]; i < nritems; i++) {
			struct btrfs_dir_item *di;
			struct btrfs_key di_key;
			struct inode *di_inode;
			struct btrfs_dir_list *new_dir_elem;
			int log_mode = LOG_INODE_EXISTS;
			int type;

			btrfs_item_key_to_cpu(leaf, &min_key, i);
			if (min_key.objectid != dir_elem->ino ||
			    min_key.type != BTRFS_DIR_ITEM_KEY)
				goto next_dir_inode;

			di = btrfs_item_ptr(leaf, i, struct btrfs_dir_item);
			type = btrfs_dir_type(leaf, di);
			if (btrfs_dir_transid(leaf, di) < trans->transid &&
			    type != BTRFS_FT_DIR)
				continue;
			btrfs_dir_item_key_to_cpu(leaf, di, &di_key);
			if (di_key.type == BTRFS_ROOT_ITEM_KEY)
				continue;

			btrfs_release_path(path);
			di_inode = btrfs_iget(fs_info->sb, &di_key, root, NULL);
			if (IS_ERR(di_inode)) {
				ret = PTR_ERR(di_inode);
				goto next_dir_inode;
			}

			if (btrfs_inode_in_log(BTRFS_I(di_inode), trans->transid)) {
				btrfs_add_delayed_iput(di_inode);
				break;
			}

			ctx->log_new_dentries = false;
			if (type == BTRFS_FT_DIR)
				log_mode = LOG_INODE_ALL;
			ret = btrfs_log_inode(trans, root, BTRFS_I(di_inode),
					      log_mode, 0, LLONG_MAX, ctx);
			if (!ret &&
			    btrfs_must_commit_transaction(trans, BTRFS_I(di_inode)))
				ret = 1;
			btrfs_add_delayed_iput(di_inode);
			if (ret)
				goto next_dir_inode;
			if (ctx->log_new_dentries) {
				new_dir_elem = kmalloc(sizeof(*new_dir_elem),
						       GFP_NOFS);
				if (!new_dir_elem) {
					ret = -ENOMEM;
					goto next_dir_inode;
				}
				new_dir_elem->ino = di_key.objectid;
				list_add_tail(&new_dir_elem->list, &dir_list);
			}
			break;
		}
		if (i == nritems) {
			ret = btrfs_next_leaf(log, path);
			if (ret < 0) {
				goto next_dir_inode;
			} else if (ret > 0) {
				ret = 0;
				goto next_dir_inode;
			}
			goto process_leaf;
		}
		if (min_key.offset < (u64)-1) {
			min_key.offset++;
			goto again;
		}
next_dir_inode:
		list_del(&dir_elem->list);
		kfree(dir_elem);
	}

	btrfs_free_path(path);
	return ret;
}
static int btrfs_log_all_parents(struct btrfs_trans_handle *trans,
				 struct btrfs_inode *inode,
				 struct btrfs_log_ctx *ctx)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_root *root = inode->root;
	const u64 ino = btrfs_ino(inode);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->skip_locking = 1;
	path->search_commit_root = 1;

	key.objectid = ino;
	key.type = BTRFS_INODE_REF_KEY;
	key.offset = 0;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	while (true) {
		struct extent_buffer *leaf = path->nodes[0];
		int slot = path->slots[0];
		u32 cur_offset = 0;
		u32 item_size;
		unsigned long ptr;

		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto out;
			else if (ret > 0)
				break;
			continue;
		}

		btrfs_item_key_to_cpu(leaf, &key, slot);
		/* BTRFS_INODE_EXTREF_KEY is BTRFS_INODE_REF_KEY + 1 */
		if (key.objectid != ino || key.type > BTRFS_INODE_EXTREF_KEY)
			break;

		item_size = btrfs_item_size_nr(leaf, slot);
		ptr = btrfs_item_ptr_offset(leaf, slot);
		while (cur_offset < item_size) {
			struct btrfs_key inode_key;
			struct inode *dir_inode;

			inode_key.type = BTRFS_INODE_ITEM_KEY;
			inode_key.offset = 0;

			if (key.type == BTRFS_INODE_EXTREF_KEY) {
				struct btrfs_inode_extref *extref;

				extref = (struct btrfs_inode_extref *)
					(ptr + cur_offset);
				inode_key.objectid = btrfs_inode_extref_parent(
					leaf, extref);
				cur_offset += sizeof(*extref);
				cur_offset += btrfs_inode_extref_name_len(leaf,
					extref);
			} else {
				inode_key.objectid = key.offset;
				cur_offset = item_size;
			}

			dir_inode = btrfs_iget(fs_info->sb, &inode_key,
					       root, NULL);
			/*
			 * If the parent inode was deleted, return an error to
			 * fall back to a transaction commit. This is to prevent
			 * getting an inode that was moved from one parent A to
			 * a parent B, got its former parent A deleted and then
			 * it got fsync'ed, from existing at both parents after
			 * a log replay (and the old parent still existing).
			 * For example:
			 *
			 * mkdir /mnt/A
			 * mkdir /mnt/B
			 * touch /mnt/B/bar
			 * sync
			 * mv /mnt/B/bar /mnt/A/bar
			 * mv -T /mnt/A /mnt/B
			 * fsync /mnt/B/bar
			 * <power fail>
			 *
			 * If we ignore the old parent B which got deleted,
			 * after a log replay we would have file bar linked
			 * at both parents and the old parent B would still
			 * exist.
			 */
			if (IS_ERR(dir_inode)) {
				ret = PTR_ERR(dir_inode);
				goto out;
			}

			if (ctx)
				ctx->log_new_dentries = false;
			ret = btrfs_log_inode(trans, root, BTRFS_I(dir_inode),
					      LOG_INODE_ALL, 0, LLONG_MAX, ctx);
			if (!ret &&
			    btrfs_must_commit_transaction(trans, BTRFS_I(dir_inode)))
				ret = 1;
			if (!ret && ctx && ctx->log_new_dentries)
				ret = log_new_dir_dentries(trans, root,
						   BTRFS_I(dir_inode), ctx);
			btrfs_add_delayed_iput(dir_inode);
			if (ret)
				goto out;
		}
		path->slots[0]++;
	}
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}
/*
 * helper function around btrfs_log_inode to make sure newly created
 * parent directories also end up in the log. Minimal logging (inode
 * item and backrefs only) is done for any parent directory created
 * after the last committed transaction.
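 *
 * e.g. (hypothetical):
 *
 *	mkdir /mnt/A
 *	touch /mnt/A/foo
 *	xfs_io -c fsync /mnt/A/foo
 *
 * Since directory A was created in the current transaction, the fsync of
 * foo must also log enough of A (its inode item and backref) for log
 * replay to be able to recreate the path /mnt/A/foo.
 */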
static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
				  struct btrfs_inode *inode,
				  struct dentry *parent,
				  const loff_t start,
				  const loff_t end,
				  int inode_only,
				  struct btrfs_log_ctx *ctx)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct super_block *sb;
	struct dentry *old_parent = NULL;
	int ret = 0;
	u64 last_committed = fs_info->last_trans_committed;
	bool log_dentries = false;
	struct btrfs_inode *orig_inode = inode;

	sb = inode->vfs_inode.i_sb;

	if (btrfs_test_opt(fs_info, NOTREELOG)) {
		ret = 1;
		goto end_no_trans;
	}

	/*
	 * If the previous transaction commit didn't complete, we have to do a
	 * full commit ourselves.
	 */
	if (fs_info->last_trans_log_full_commit >
	    fs_info->last_trans_committed) {
		ret = 1;
		goto end_no_trans;
	}

	if (btrfs_root_refs(&root->root_item) == 0) {
		ret = 1;
		goto end_no_trans;
	}

	ret = check_parent_dirs_for_sync(trans, inode, parent, sb,
					 last_committed);
	if (ret)
		goto end_no_trans;

	/*
	 * Skip already logged inodes or inodes corresponding to tmpfiles
	 * (since logging them is pointless, a link count of 0 means they
	 * will never be accessible).
	 */
	if (btrfs_inode_in_log(inode, trans->transid) ||
	    inode->vfs_inode.i_nlink == 0) {
		ret = BTRFS_NO_LOG_SYNC;
		goto end_no_trans;
	}

	ret = start_log_trans(trans, root, ctx);
	if (ret)
		goto end_no_trans;

	ret = btrfs_log_inode(trans, root, inode, inode_only, start, end, ctx);
	if (ret)
		goto end_trans;
	/*
	 * for a regular file, if its inode is already on disk, we don't
	 * have to worry about the parents at all. This is because
	 * we can use the last_unlink_trans field to record renames
	 * and other fun in this file.
	 */
	if (S_ISREG(inode->vfs_inode.i_mode) &&
	    inode->generation <= last_committed &&
	    inode->last_unlink_trans <= last_committed) {
		ret = 0;
		goto end_trans;
	}

	if (S_ISDIR(inode->vfs_inode.i_mode) && ctx && ctx->log_new_dentries)
		log_dentries = true;
	/*
	 * On unlink we must make sure all our current and old parent directory
	 * inodes are fully logged. This is to prevent leaving dangling
	 * directory index entries in directories that were our parents but are
	 * not anymore. Not doing this results in old parent directory being
	 * impossible to delete after log replay (rmdir will always fail with
	 * error -ENOTEMPTY).
	 *
	 * Example 1:
	 *
	 * mkdir testdir
	 * touch testdir/foo
	 * ln testdir/foo testdir/bar
	 * sync
	 * unlink testdir/bar
	 * xfs_io -c fsync testdir/foo
	 * <power failure>
	 * mount fs, triggers log replay
	 *
	 * If we don't log the parent directory (testdir), after log replay the
	 * directory still has an entry pointing to the file inode using the bar
	 * name, but a matching BTRFS_INODE_[REF|EXTREF]_KEY does not exist and
	 * the file inode has a link count of 1.
	 *
	 * Example 2:
	 *
	 * mkdir testdir
	 * touch foo
	 * ln foo testdir/foo2
	 * ln foo testdir/foo3
	 * sync
	 * unlink testdir/foo3
	 * xfs_io -c fsync foo
	 * <power failure>
	 * mount fs, triggers log replay
	 *
	 * Similar as the first example, after log replay the parent directory
	 * testdir still has an entry pointing to the inode file with name foo3
	 * but the file inode does not have a matching BTRFS_INODE_REF_KEY item
	 * and has a link count of 2.
	 */
	if (inode->last_unlink_trans > last_committed) {
		ret = btrfs_log_all_parents(trans, orig_inode, ctx);
		if (ret)
			goto end_trans;
	}
	/*
	 * If a new hard link was added to the inode in the current transaction
	 * and its link count is now greater than 1, we need to fall back to a
	 * transaction commit, otherwise we can end up not logging all its new
	 * parents for all the hard links. Here just from the dentry used to
	 * fsync, we cannot visit the ancestor inodes for all the other hard
	 * links to figure out if any is new, so we fall back to a transaction
	 * commit (instead of adding a lot of complexity of scanning a btree,
	 * since this scenario is not a common use case).
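	 *
	 * Illustrative (hypothetical) example:
	 *
	 *	mkdir /mnt/testdir
	 *	touch /mnt/foo
	 *	ln /mnt/foo /mnt/testdir/bar
	 *	xfs_io -c fsync /mnt/foo
	 *
	 * From foo's dentry alone we cannot cheaply discover that testdir
	 * also needs to be logged for the new name bar, so we fall back to
	 * a full transaction commit.
	 */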
	if (inode->vfs_inode.i_nlink > 1 &&
	    inode->last_link_trans > last_committed) {
		ret = -EMLINK;
		goto end_trans;
	}

	while (1) {
		if (!parent || d_really_is_negative(parent) || sb != parent->d_sb)
			break;

		inode = BTRFS_I(d_inode(parent));
		if (root != inode->root)
			break;

		if (inode->generation > last_committed) {
			ret = btrfs_log_inode(trans, root, inode,
					      LOG_INODE_EXISTS, 0, LLONG_MAX, ctx);
			if (ret)
				goto end_trans;
		}
		if (IS_ROOT(parent))
			break;

		parent = dget_parent(parent);
		dput(old_parent);
		old_parent = parent;
	}
	if (log_dentries)
		ret = log_new_dir_dentries(trans, root, orig_inode, ctx);
	else
		ret = 0;
end_trans:
	dput(old_parent);
	if (ret < 0) {
		btrfs_set_log_full_commit(fs_info, trans);
		ret = 1;
	}

	if (ret)
		btrfs_remove_log_ctx(root, ctx);
	btrfs_end_log_trans(root);
end_no_trans:
	return ret;
}
/*
 * it is not safe to log dentry if the chunk root has added new
 * chunks. This returns 0 if the dentry was logged, and 1 otherwise.
 * If this returns 1, you must commit the transaction to safely get your
 * data on disk.
 */
int btrfs_log_dentry_safe(struct btrfs_trans_handle *trans,
			  struct dentry *dentry,
			  const loff_t start,
			  const loff_t end,
			  struct btrfs_log_ctx *ctx)
{
	struct dentry *parent = dget_parent(dentry);
	int ret;

	ret = btrfs_log_inode_parent(trans, BTRFS_I(d_inode(dentry)), parent,
				     start, end, LOG_INODE_ALL, ctx);
	dput(parent);

	return ret;
}
/*
 * should be called during mount to recover and replay any log trees
 * from the FS
 */
int btrfs_recover_log_trees(struct btrfs_root *log_root_tree)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_trans_handle *trans;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_key tmp_key;
	struct btrfs_root *log;
	struct btrfs_fs_info *fs_info = log_root_tree->fs_info;
	struct walk_control wc = {
		.process_func = process_one_buffer,
		.stage = LOG_WALK_PIN_ONLY,
	};

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	set_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags);

	trans = btrfs_start_transaction(fs_info->tree_root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto error;
	}

	wc.trans = trans;
	wc.pin = 1;

	ret = walk_log_tree(trans, log_root_tree, &wc);
	if (ret) {
		btrfs_handle_fs_error(fs_info, ret,
			"Failed to pin buffers while recovering log root tree.");
		goto error;
	}

again:
	key.objectid = BTRFS_TREE_LOG_OBJECTID;
	key.offset = (u64)-1;
	key.type = BTRFS_ROOT_ITEM_KEY;

	while (1) {
		ret = btrfs_search_slot(NULL, log_root_tree, &key, path, 0, 0);

		if (ret < 0) {
			btrfs_handle_fs_error(fs_info, ret,
					"Couldn't find tree log root.");
			goto error;
		}
		if (ret > 0) {
			if (path->slots[0] == 0)
				break;
			path->slots[0]--;
		}
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		btrfs_release_path(path);
		if (found_key.objectid != BTRFS_TREE_LOG_OBJECTID)
			break;
		log = btrfs_read_fs_root(log_root_tree, &found_key);
		if (IS_ERR(log)) {
			ret = PTR_ERR(log);
			btrfs_handle_fs_error(fs_info, ret,
					"Couldn't read tree log root.");
			goto error;
		}

		tmp_key.objectid = found_key.offset;
		tmp_key.type = BTRFS_ROOT_ITEM_KEY;
		tmp_key.offset = (u64)-1;

		wc.replay_dest = btrfs_read_fs_root_no_name(fs_info, &tmp_key);
		if (IS_ERR(wc.replay_dest)) {
			ret = PTR_ERR(wc.replay_dest);

			/*
			 * We didn't find the subvol, likely because it was
			 * deleted. This is ok, simply skip this log and go to
			 * the next one.
			 *
			 * We need to exclude the root because we can't have
			 * other log replays overwriting this log as we'll read
			 * it back in a few more times. This will keep our
			 * block from being modified, and we'll just bail for
			 * each subsequent pass.
			 */
			if (ret == -ENOENT)
				ret = btrfs_pin_extent_for_log_replay(fs_info,
							log->node->start,
							log->node->len);
			free_extent_buffer(log->node);
			free_extent_buffer(log->commit_root);
			kfree(log);

			if (!ret)
				goto next;
			btrfs_handle_fs_error(fs_info, ret,
				"Couldn't read target root for tree log recovery.");
			goto error;
		}

		wc.replay_dest->log_root = log;
		btrfs_record_root_in_trans(trans, wc.replay_dest);
		ret = walk_log_tree(trans, log, &wc);

		if (!ret && wc.stage == LOG_WALK_REPLAY_ALL) {
			ret = fixup_inode_link_counts(trans, wc.replay_dest,
						      path);
		}

		if (!ret && wc.stage == LOG_WALK_REPLAY_ALL) {
			struct btrfs_root *root = wc.replay_dest;

			btrfs_release_path(path);

			/*
			 * We have just replayed everything, and the highest
			 * objectid of fs roots probably has changed in case
			 * some inode_item's got replayed.
			 *
			 * root->objectid_mutex is not acquired as log replay
			 * could only happen during mount.
			 */
			ret = btrfs_find_highest_objectid(root,
						  &root->highest_objectid);
		}

		wc.replay_dest->log_root = NULL;
		free_extent_buffer(log->node);
		free_extent_buffer(log->commit_root);
		kfree(log);

		if (ret)
			goto error;
next:
		if (found_key.offset == 0)
			break;
		key.offset = found_key.offset - 1;
	}
	btrfs_release_path(path);
	/* step one is to pin it all, step two is to replay just inodes */
	if (wc.pin) {
		wc.pin = 0;
		wc.process_func = replay_one_buffer;
		wc.stage = LOG_WALK_REPLAY_INODES;
		goto again;
	}
	/* step three is to replay everything */
	if (wc.stage < LOG_WALK_REPLAY_ALL) {
		wc.stage++;
		goto again;
	}

	btrfs_free_path(path);

	/* step 4: commit the transaction, which also unpins the blocks */
	ret = btrfs_commit_transaction(trans);
	if (ret)
		return ret;

	free_extent_buffer(log_root_tree->node);
	log_root_tree->log_root = NULL;
	clear_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags);
	kfree(log_root_tree);

	return 0;
error:
	if (wc.trans)
		btrfs_end_transaction(wc.trans);
	clear_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags);
	btrfs_free_path(path);
	return ret;
}
/*
 * there are some corner cases where we want to force a full
 * commit instead of allowing a directory to be logged.
 *
 * They revolve around files that were unlinked from the directory, and
 * this function updates the parent directory so that a full commit is
 * properly done if it is fsync'd later after the unlinks are done.
 *
 * Must be called before the unlink operations (updates to the subvolume tree,
 * inodes, etc) are done.
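 *
 * e.g. (hypothetical):
 *
 *	touch /mnt/dir/foo
 *	xfs_io -c fsync /mnt/dir/foo
 *	unlink /mnt/dir/foo
 *	xfs_io -c fsync /mnt/dir
 *
 * The last fsync must notice the recent unlink, recorded below in the
 * directory's last_unlink_trans, and force a full transaction commit
 * instead of only writing the log tree.
 */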
void btrfs_record_unlink_dir(struct btrfs_trans_handle *trans,
			     struct btrfs_inode *dir, struct btrfs_inode *inode,
			     int for_rename)
{
	/*
	 * when we're logging a file, if it hasn't been renamed
	 * or unlinked, and its inode is fully committed on disk,
	 * we don't have to worry about walking up the directory chain
	 * to log its parents.
	 *
	 * So, we use the last_unlink_trans field to put this transid
	 * into the file. When the file is logged we check it and
	 * don't log the parents if the file is fully on disk.
	 */
	mutex_lock(&inode->log_mutex);
	inode->last_unlink_trans = trans->transid;
	mutex_unlock(&inode->log_mutex);

	/*
	 * if this directory was already logged any new
	 * names for this file/dir will get recorded
	 */
	smp_mb();
	if (dir->logged_trans == trans->transid)
		return;

	/*
	 * if the inode we're about to unlink was logged,
	 * the log will be properly updated for any new names
	 */
	if (inode->logged_trans == trans->transid)
		return;

	/*
	 * when renaming files across directories, if the directory
	 * we're unlinking from gets fsync'd later on, there's
	 * no way to find the destination directory later and fsync it
	 * properly. So, we have to be conservative and force commits
	 * so the new name gets discovered.
	 */
	if (for_rename)
		goto record;

	/* we can safely do the unlink without any special recording */
	return;

record:
	mutex_lock(&dir->log_mutex);
	dir->last_unlink_trans = trans->transid;
	mutex_unlock(&dir->log_mutex);
}
/*
 * Make sure that if someone attempts to fsync the parent directory of a deleted
 * snapshot, it ends up triggering a transaction commit. This is to guarantee
 * that after replaying the log tree of the parent directory's root we will not
 * see the snapshot anymore and at log replay time we will not see any log tree
 * corresponding to the deleted snapshot's root, which could lead to replaying
 * it after replaying the log tree of the parent directory (which would replay
 * the snapshot delete operation).
 *
 * Must be called before the actual snapshot destroy operation (updates to the
 * parent root and tree of tree roots trees, etc) are done.
 */
void btrfs_record_snapshot_destroy(struct btrfs_trans_handle *trans,
				   struct btrfs_inode *dir)
{
	mutex_lock(&dir->log_mutex);
	dir->last_unlink_trans = trans->transid;
	mutex_unlock(&dir->log_mutex);
}
/*
 * Call this after adding a new name for a file and it will properly
 * update the log to reflect the new name.
 *
 * @ctx cannot be NULL when @sync_log is false, and should be NULL when it's
 * true (because it's not used).
 *
 * Return value depends on whether @sync_log is true or false.
 * When true:  returns BTRFS_NEED_TRANS_COMMIT if the transaction needs to be
 *             committed by the caller, and BTRFS_DONT_NEED_TRANS_COMMIT
 *             otherwise.
 * When false: returns BTRFS_DONT_NEED_LOG_SYNC if the caller does not need
 *             to sync the log, BTRFS_NEED_LOG_SYNC if it needs to sync the
 *             log, or BTRFS_NEED_TRANS_COMMIT if the transaction needs to
 *             be committed (without attempting to sync the log).
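 *
 * Minimal caller sketch for the @sync_log == true case (hypothetical, only
 * meant to show how the return values are consumed):
 *
 *	ret = btrfs_log_new_name(trans, inode, old_dir, parent, true, NULL);
 *	if (ret == BTRFS_NEED_TRANS_COMMIT)
 *		ret = btrfs_commit_transaction(trans);
 */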
int btrfs_log_new_name(struct btrfs_trans_handle *trans,
		       struct btrfs_inode *inode, struct btrfs_inode *old_dir,
		       struct dentry *parent,
		       bool sync_log, struct btrfs_log_ctx *ctx)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	int ret;

	/*
	 * this will force the logging code to walk the dentry chain
	 * up to the root
	 */
	if (!S_ISDIR(inode->vfs_inode.i_mode))
		inode->last_unlink_trans = trans->transid;

	/*
	 * if this inode hasn't been logged and the directory we're renaming
	 * it from hasn't been logged, we don't need to log it
	 */
	if (inode->logged_trans <= fs_info->last_trans_committed &&
	    (!old_dir || old_dir->logged_trans <= fs_info->last_trans_committed))
		return sync_log ? BTRFS_DONT_NEED_TRANS_COMMIT :
			BTRFS_DONT_NEED_LOG_SYNC;

	if (sync_log) {
		struct btrfs_log_ctx ctx2;

		btrfs_init_log_ctx(&ctx2, &inode->vfs_inode);
		ret = btrfs_log_inode_parent(trans, inode, parent, 0, LLONG_MAX,
					     LOG_INODE_EXISTS, &ctx2);
		if (ret == BTRFS_NO_LOG_SYNC)
			return BTRFS_DONT_NEED_TRANS_COMMIT;
		else if (ret)
			return BTRFS_NEED_TRANS_COMMIT;

		ret = btrfs_sync_log(trans, inode->root, &ctx2);
		if (ret)
			return BTRFS_NEED_TRANS_COMMIT;
		return BTRFS_DONT_NEED_TRANS_COMMIT;
	}

	ASSERT(ctx);
	ret = btrfs_log_inode_parent(trans, inode, parent, 0, LLONG_MAX,
				     LOG_INODE_EXISTS, ctx);
	if (ret == BTRFS_NO_LOG_SYNC)
		return BTRFS_DONT_NEED_LOG_SYNC;
	else if (ret)
		return BTRFS_NEED_TRANS_COMMIT;

	return BTRFS_NEED_LOG_SYNC;
}