// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */
7 #include "inode-item.h"
9 #include "transaction.h"
10 #include "print-tree.h"
12 struct btrfs_inode_ref *btrfs_find_name_in_backref(struct extent_buffer *leaf,
14 const struct fscrypt_str *name)
16 struct btrfs_inode_ref *ref;
18 unsigned long name_ptr;
23 item_size = btrfs_item_size(leaf, slot);
24 ptr = btrfs_item_ptr_offset(leaf, slot);
25 while (cur_offset < item_size) {
26 ref = (struct btrfs_inode_ref *)(ptr + cur_offset);
27 len = btrfs_inode_ref_name_len(leaf, ref);
28 name_ptr = (unsigned long)(ref + 1);
29 cur_offset += len + sizeof(*ref);
32 if (memcmp_extent_buffer(leaf, name->name, name_ptr,
39 struct btrfs_inode_extref *btrfs_find_name_in_ext_backref(
40 struct extent_buffer *leaf, int slot, u64 ref_objectid,
41 const struct fscrypt_str *name)
43 struct btrfs_inode_extref *extref;
45 unsigned long name_ptr;
50 item_size = btrfs_item_size(leaf, slot);
51 ptr = btrfs_item_ptr_offset(leaf, slot);
54 * Search all extended backrefs in this item. We're only
55 * looking through any collisions so most of the time this is
56 * just going to compare against one buffer. If all is well,
57 * we'll return success and the inode ref object.
59 while (cur_offset < item_size) {
60 extref = (struct btrfs_inode_extref *) (ptr + cur_offset);
61 name_ptr = (unsigned long)(&extref->name);
62 ref_name_len = btrfs_inode_extref_name_len(leaf, extref);
64 if (ref_name_len == name->len &&
65 btrfs_inode_extref_parent(leaf, extref) == ref_objectid &&
66 (memcmp_extent_buffer(leaf, name->name, name_ptr,
70 cur_offset += ref_name_len + sizeof(*extref);
75 /* Returns NULL if no extref found */
76 struct btrfs_inode_extref *
77 btrfs_lookup_inode_extref(struct btrfs_trans_handle *trans,
78 struct btrfs_root *root,
79 struct btrfs_path *path,
80 const struct fscrypt_str *name,
81 u64 inode_objectid, u64 ref_objectid, int ins_len,
87 key.objectid = inode_objectid;
88 key.type = BTRFS_INODE_EXTREF_KEY;
89 key.offset = btrfs_extref_hash(ref_objectid, name->name, name->len);
91 ret = btrfs_search_slot(trans, root, &key, path, ins_len, cow);
96 return btrfs_find_name_in_ext_backref(path->nodes[0], path->slots[0],
101 static int btrfs_del_inode_extref(struct btrfs_trans_handle *trans,
102 struct btrfs_root *root,
103 const struct fscrypt_str *name,
104 u64 inode_objectid, u64 ref_objectid,
107 struct btrfs_path *path;
108 struct btrfs_key key;
109 struct btrfs_inode_extref *extref;
110 struct extent_buffer *leaf;
112 int del_len = name->len + sizeof(*extref);
114 unsigned long item_start;
117 key.objectid = inode_objectid;
118 key.type = BTRFS_INODE_EXTREF_KEY;
119 key.offset = btrfs_extref_hash(ref_objectid, name->name, name->len);
121 path = btrfs_alloc_path();
125 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
132 * Sanity check - did we find the right item for this name?
133 * This should always succeed so error here will make the FS
136 extref = btrfs_find_name_in_ext_backref(path->nodes[0], path->slots[0],
139 btrfs_handle_fs_error(root->fs_info, -ENOENT, NULL);
144 leaf = path->nodes[0];
145 item_size = btrfs_item_size(leaf, path->slots[0]);
147 *index = btrfs_inode_extref_index(leaf, extref);
149 if (del_len == item_size) {
151 * Common case only one ref in the item, remove the
154 ret = btrfs_del_item(trans, root, path);
158 ptr = (unsigned long)extref;
159 item_start = btrfs_item_ptr_offset(leaf, path->slots[0]);
161 memmove_extent_buffer(leaf, ptr, ptr + del_len,
162 item_size - (ptr + del_len - item_start));
164 btrfs_truncate_item(path, item_size - del_len, 1);
167 btrfs_free_path(path);
172 int btrfs_del_inode_ref(struct btrfs_trans_handle *trans,
173 struct btrfs_root *root, const struct fscrypt_str *name,
174 u64 inode_objectid, u64 ref_objectid, u64 *index)
176 struct btrfs_path *path;
177 struct btrfs_key key;
178 struct btrfs_inode_ref *ref;
179 struct extent_buffer *leaf;
181 unsigned long item_start;
185 int search_ext_refs = 0;
186 int del_len = name->len + sizeof(*ref);
188 key.objectid = inode_objectid;
189 key.offset = ref_objectid;
190 key.type = BTRFS_INODE_REF_KEY;
192 path = btrfs_alloc_path();
196 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
201 } else if (ret < 0) {
205 ref = btrfs_find_name_in_backref(path->nodes[0], path->slots[0], name);
211 leaf = path->nodes[0];
212 item_size = btrfs_item_size(leaf, path->slots[0]);
215 *index = btrfs_inode_ref_index(leaf, ref);
217 if (del_len == item_size) {
218 ret = btrfs_del_item(trans, root, path);
221 ptr = (unsigned long)ref;
222 sub_item_len = name->len + sizeof(*ref);
223 item_start = btrfs_item_ptr_offset(leaf, path->slots[0]);
224 memmove_extent_buffer(leaf, ptr, ptr + sub_item_len,
225 item_size - (ptr + sub_item_len - item_start));
226 btrfs_truncate_item(path, item_size - sub_item_len, 1);
228 btrfs_free_path(path);
230 if (search_ext_refs) {
232 * No refs were found, or we could not find the
233 * name in our ref array. Find and remove the extended
236 return btrfs_del_inode_extref(trans, root, name,
237 inode_objectid, ref_objectid, index);
244 * btrfs_insert_inode_extref() - Inserts an extended inode ref into a tree.
246 * The caller must have checked against BTRFS_LINK_MAX already.
248 static int btrfs_insert_inode_extref(struct btrfs_trans_handle *trans,
249 struct btrfs_root *root,
250 const struct fscrypt_str *name,
251 u64 inode_objectid, u64 ref_objectid,
254 struct btrfs_inode_extref *extref;
256 int ins_len = name->len + sizeof(*extref);
258 struct btrfs_path *path;
259 struct btrfs_key key;
260 struct extent_buffer *leaf;
262 key.objectid = inode_objectid;
263 key.type = BTRFS_INODE_EXTREF_KEY;
264 key.offset = btrfs_extref_hash(ref_objectid, name->name, name->len);
266 path = btrfs_alloc_path();
270 ret = btrfs_insert_empty_item(trans, root, path, &key,
272 if (ret == -EEXIST) {
273 if (btrfs_find_name_in_ext_backref(path->nodes[0],
279 btrfs_extend_item(path, ins_len);
285 leaf = path->nodes[0];
286 ptr = (unsigned long)btrfs_item_ptr(leaf, path->slots[0], char);
287 ptr += btrfs_item_size(leaf, path->slots[0]) - ins_len;
288 extref = (struct btrfs_inode_extref *)ptr;
290 btrfs_set_inode_extref_name_len(path->nodes[0], extref, name->len);
291 btrfs_set_inode_extref_index(path->nodes[0], extref, index);
292 btrfs_set_inode_extref_parent(path->nodes[0], extref, ref_objectid);
294 ptr = (unsigned long)&extref->name;
295 write_extent_buffer(path->nodes[0], name->name, ptr, name->len);
296 btrfs_mark_buffer_dirty(path->nodes[0]);
299 btrfs_free_path(path);
303 /* Will return 0, -ENOMEM, -EMLINK, or -EEXIST or anything from the CoW path */
304 int btrfs_insert_inode_ref(struct btrfs_trans_handle *trans,
305 struct btrfs_root *root, const struct fscrypt_str *name,
306 u64 inode_objectid, u64 ref_objectid, u64 index)
308 struct btrfs_fs_info *fs_info = root->fs_info;
309 struct btrfs_path *path;
310 struct btrfs_key key;
311 struct btrfs_inode_ref *ref;
314 int ins_len = name->len + sizeof(*ref);
316 key.objectid = inode_objectid;
317 key.offset = ref_objectid;
318 key.type = BTRFS_INODE_REF_KEY;
320 path = btrfs_alloc_path();
324 path->skip_release_on_error = 1;
325 ret = btrfs_insert_empty_item(trans, root, path, &key,
327 if (ret == -EEXIST) {
329 ref = btrfs_find_name_in_backref(path->nodes[0], path->slots[0],
334 old_size = btrfs_item_size(path->nodes[0], path->slots[0]);
335 btrfs_extend_item(path, ins_len);
336 ref = btrfs_item_ptr(path->nodes[0], path->slots[0],
337 struct btrfs_inode_ref);
338 ref = (struct btrfs_inode_ref *)((unsigned long)ref + old_size);
339 btrfs_set_inode_ref_name_len(path->nodes[0], ref, name->len);
340 btrfs_set_inode_ref_index(path->nodes[0], ref, index);
341 ptr = (unsigned long)(ref + 1);
343 } else if (ret < 0) {
344 if (ret == -EOVERFLOW) {
345 if (btrfs_find_name_in_backref(path->nodes[0],
354 ref = btrfs_item_ptr(path->nodes[0], path->slots[0],
355 struct btrfs_inode_ref);
356 btrfs_set_inode_ref_name_len(path->nodes[0], ref, name->len);
357 btrfs_set_inode_ref_index(path->nodes[0], ref, index);
358 ptr = (unsigned long)(ref + 1);
360 write_extent_buffer(path->nodes[0], name->name, ptr, name->len);
361 btrfs_mark_buffer_dirty(path->nodes[0]);
364 btrfs_free_path(path);
366 if (ret == -EMLINK) {
367 struct btrfs_super_block *disk_super = fs_info->super_copy;
368 /* We ran out of space in the ref array. Need to
369 * add an extended ref. */
370 if (btrfs_super_incompat_flags(disk_super)
371 & BTRFS_FEATURE_INCOMPAT_EXTENDED_IREF)
372 ret = btrfs_insert_inode_extref(trans, root, name,
374 ref_objectid, index);
380 int btrfs_insert_empty_inode(struct btrfs_trans_handle *trans,
381 struct btrfs_root *root,
382 struct btrfs_path *path, u64 objectid)
384 struct btrfs_key key;
386 key.objectid = objectid;
387 key.type = BTRFS_INODE_ITEM_KEY;
390 ret = btrfs_insert_empty_item(trans, root, path, &key,
391 sizeof(struct btrfs_inode_item));
395 int btrfs_lookup_inode(struct btrfs_trans_handle *trans, struct btrfs_root
396 *root, struct btrfs_path *path,
397 struct btrfs_key *location, int mod)
399 int ins_len = mod < 0 ? -1 : 0;
403 struct extent_buffer *leaf;
404 struct btrfs_key found_key;
406 ret = btrfs_search_slot(trans, root, location, path, ins_len, cow);
407 if (ret > 0 && location->type == BTRFS_ROOT_ITEM_KEY &&
408 location->offset == (u64)-1 && path->slots[0] != 0) {
409 slot = path->slots[0] - 1;
410 leaf = path->nodes[0];
411 btrfs_item_key_to_cpu(leaf, &found_key, slot);
412 if (found_key.objectid == location->objectid &&
413 found_key.type == location->type) {
421 static inline void btrfs_trace_truncate(struct btrfs_inode *inode,
422 struct extent_buffer *leaf,
423 struct btrfs_file_extent_item *fi,
424 u64 offset, int extent_type, int slot)
428 if (extent_type == BTRFS_FILE_EXTENT_INLINE)
429 trace_btrfs_truncate_show_fi_inline(inode, leaf, fi, slot,
432 trace_btrfs_truncate_show_fi_regular(inode, leaf, fi, offset);
436 * Remove inode items from a given root.
438 * @trans: A transaction handle.
439 * @root: The root from which to remove items.
440 * @inode: The inode whose items we want to remove.
441 * @control: The btrfs_truncate_control to control how and what we
444 * Remove all keys associated with the inode from the given root that have a key
445 * with a type greater than or equals to @min_type. When @min_type has a value of
446 * BTRFS_EXTENT_DATA_KEY, only remove file extent items that have an offset value
447 * greater than or equals to @new_size. If a file extent item that starts before
448 * @new_size and ends after it is found, its length is adjusted.
450 * Returns: 0 on success, < 0 on error and NEED_TRUNCATE_BLOCK when @min_type is
451 * BTRFS_EXTENT_DATA_KEY and the caller must truncate the last block.
453 int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
454 struct btrfs_root *root,
455 struct btrfs_truncate_control *control)
457 struct btrfs_fs_info *fs_info = root->fs_info;
458 struct btrfs_path *path;
459 struct extent_buffer *leaf;
460 struct btrfs_file_extent_item *fi;
461 struct btrfs_key key;
462 struct btrfs_key found_key;
463 u64 new_size = control->new_size;
464 u64 extent_num_bytes = 0;
465 u64 extent_offset = 0;
467 u32 found_type = (u8)-1;
469 int pending_del_nr = 0;
470 int pending_del_slot = 0;
471 int extent_type = -1;
473 u64 bytes_deleted = 0;
474 bool be_nice = false;
476 ASSERT(control->inode || !control->clear_extent_range);
477 ASSERT(new_size == 0 || control->min_type == BTRFS_EXTENT_DATA_KEY);
479 control->last_size = new_size;
480 control->sub_bytes = 0;
483 * For shareable roots we want to back off from time to time, this turns
484 * out to be subvolume roots, reloc roots, and data reloc roots.
486 if (test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
489 path = btrfs_alloc_path();
492 path->reada = READA_BACK;
494 key.objectid = control->ino;
495 key.offset = (u64)-1;
500 * With a 16K leaf size and 128MiB extents, you can actually queue up a
501 * huge file in a single leaf. Most of the time that bytes_deleted is
502 * > 0, it will be huge by the time we get here
504 if (be_nice && bytes_deleted > SZ_32M &&
505 btrfs_should_end_transaction(trans)) {
510 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
516 /* There are no items in the tree for us to truncate, we're done */
517 if (path->slots[0] == 0)
523 u64 clear_start = 0, clear_len = 0, extent_start = 0;
524 bool should_throttle = false;
527 leaf = path->nodes[0];
528 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
529 found_type = found_key.type;
531 if (found_key.objectid != control->ino)
534 if (found_type < control->min_type)
537 item_end = found_key.offset;
538 if (found_type == BTRFS_EXTENT_DATA_KEY) {
539 fi = btrfs_item_ptr(leaf, path->slots[0],
540 struct btrfs_file_extent_item);
541 extent_type = btrfs_file_extent_type(leaf, fi);
542 if (extent_type != BTRFS_FILE_EXTENT_INLINE)
544 btrfs_file_extent_num_bytes(leaf, fi);
545 else if (extent_type == BTRFS_FILE_EXTENT_INLINE)
546 item_end += btrfs_file_extent_ram_bytes(leaf, fi);
548 btrfs_trace_truncate(control->inode, leaf, fi,
549 found_key.offset, extent_type,
553 if (found_type > control->min_type) {
556 if (item_end < new_size)
558 if (found_key.offset >= new_size)
564 /* FIXME, shrink the extent if the ref count is only 1 */
565 if (found_type != BTRFS_EXTENT_DATA_KEY)
568 control->extents_found++;
570 if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
573 clear_start = found_key.offset;
574 extent_start = btrfs_file_extent_disk_bytenr(leaf, fi);
577 btrfs_file_extent_num_bytes(leaf, fi);
578 extent_num_bytes = ALIGN(new_size -
580 fs_info->sectorsize);
581 clear_start = ALIGN(new_size, fs_info->sectorsize);
583 btrfs_set_file_extent_num_bytes(leaf, fi,
585 num_dec = (orig_num_bytes - extent_num_bytes);
586 if (extent_start != 0)
587 control->sub_bytes += num_dec;
588 btrfs_mark_buffer_dirty(leaf);
591 btrfs_file_extent_disk_num_bytes(leaf, fi);
592 extent_offset = found_key.offset -
593 btrfs_file_extent_offset(leaf, fi);
595 /* FIXME blocksize != 4096 */
596 num_dec = btrfs_file_extent_num_bytes(leaf, fi);
597 if (extent_start != 0)
598 control->sub_bytes += num_dec;
601 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
603 * We can't truncate inline items that have had
607 btrfs_file_extent_encryption(leaf, fi) == 0 &&
608 btrfs_file_extent_other_encoding(leaf, fi) == 0 &&
609 btrfs_file_extent_compression(leaf, fi) == 0) {
610 u32 size = (u32)(new_size - found_key.offset);
612 btrfs_set_file_extent_ram_bytes(leaf, fi, size);
613 size = btrfs_file_extent_calc_inline_size(size);
614 btrfs_truncate_item(path, size, 1);
615 } else if (!del_item) {
617 * We have to bail so the last_size is set to
618 * just before this extent.
620 ret = BTRFS_NEED_TRUNCATE_BLOCK;
624 * Inline extents are special, we just treat
625 * them as a full sector worth in the file
626 * extent tree just for simplicity sake.
628 clear_len = fs_info->sectorsize;
631 control->sub_bytes += item_end + 1 - new_size;
635 * We only want to clear the file extent range if we're
636 * modifying the actual inode's mapping, which is just the
637 * normal truncate path.
639 if (control->clear_extent_range) {
640 ret = btrfs_inode_clear_file_extent_range(control->inode,
641 clear_start, clear_len);
643 btrfs_abort_transaction(trans, ret);
649 ASSERT(!pending_del_nr ||
650 ((path->slots[0] + 1) == pending_del_slot));
652 control->last_size = found_key.offset;
653 if (!pending_del_nr) {
654 /* No pending yet, add ourselves */
655 pending_del_slot = path->slots[0];
657 } else if (pending_del_nr &&
658 path->slots[0] + 1 == pending_del_slot) {
659 /* Hop on the pending chunk */
661 pending_del_slot = path->slots[0];
664 control->last_size = new_size;
668 if (del_item && extent_start != 0 && !control->skip_ref_updates) {
669 struct btrfs_ref ref = { 0 };
671 bytes_deleted += extent_num_bytes;
673 btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF,
674 extent_start, extent_num_bytes, 0);
675 btrfs_init_data_ref(&ref, btrfs_header_owner(leaf),
676 control->ino, extent_offset,
677 root->root_key.objectid, false);
678 ret = btrfs_free_extent(trans, &ref);
680 btrfs_abort_transaction(trans, ret);
684 if (btrfs_should_throttle_delayed_refs(trans))
685 should_throttle = true;
689 if (found_type == BTRFS_INODE_ITEM_KEY)
692 if (path->slots[0] == 0 ||
693 path->slots[0] != pending_del_slot ||
695 if (pending_del_nr) {
696 ret = btrfs_del_items(trans, root, path,
700 btrfs_abort_transaction(trans, ret);
705 btrfs_release_path(path);
708 * We can generate a lot of delayed refs, so we need to
709 * throttle every once and a while and make sure we're
710 * adding enough space to keep up with the work we are
711 * generating. Since we hold a transaction here we
712 * can't flush, and we don't want to FLUSH_LIMIT because
713 * we could have generated too many delayed refs to
714 * actually allocate, so just bail if we're short and
715 * let the normal reservation dance happen higher up.
717 if (should_throttle) {
718 ret = btrfs_delayed_refs_rsv_refill(fs_info,
719 BTRFS_RESERVE_NO_FLUSH);
731 if (ret >= 0 && pending_del_nr) {
734 err = btrfs_del_items(trans, root, path, pending_del_slot,
737 btrfs_abort_transaction(trans, err);
742 ASSERT(control->last_size >= new_size);
743 if (!ret && control->last_size > new_size)
744 control->last_size = new_size;
746 btrfs_free_path(path);