// SPDX-License-Identifier: GPL-2.0
/*
 *
 * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
 *
 * TODO: Merge attr_set_size/attr_data_get_block/attr_allocate_frame?
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/kernel.h>

#include "debug.h"
#include "ntfs.h"
#include "ntfs_fs.h"
/*
 * You can set external NTFS_MIN_LOG2_OF_CLUMP/NTFS_MAX_LOG2_OF_CLUMP to manage
 * the preallocation algorithm.
 */
#ifndef NTFS_MIN_LOG2_OF_CLUMP
#define NTFS_MIN_LOG2_OF_CLUMP 16
#endif

#ifndef NTFS_MAX_LOG2_OF_CLUMP
#define NTFS_MAX_LOG2_OF_CLUMP 26
#endif

/* 16M */
#define NTFS_CLUMP_MIN (1 << (NTFS_MIN_LOG2_OF_CLUMP + 8))
/* 16G */
#define NTFS_CLUMP_MAX (1ull << (NTFS_MAX_LOG2_OF_CLUMP + 8))
static inline u64 get_pre_allocated(u64 size)
{
	u32 clump;
	u8 align_shift;
	u64 ret;

	if (size <= NTFS_CLUMP_MIN) {
		clump = 1 << NTFS_MIN_LOG2_OF_CLUMP;
		align_shift = NTFS_MIN_LOG2_OF_CLUMP;
	} else if (size >= NTFS_CLUMP_MAX) {
		clump = 1 << NTFS_MAX_LOG2_OF_CLUMP;
		align_shift = NTFS_MAX_LOG2_OF_CLUMP;
	} else {
		align_shift = NTFS_MIN_LOG2_OF_CLUMP - 1 +
			      __ffs(size >> (8 + NTFS_MIN_LOG2_OF_CLUMP));
		clump = 1u << align_shift;
	}

	/* Round @size up to the next multiple of the clump. */
	ret = ((size + clump - 1) >> align_shift) << align_shift;

	return ret;
}
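/*
 * A worked example of the clump math above (illustrative): for
 * size = 1 GiB + 1 = 0x40000001, NTFS_CLUMP_MIN (16M) < size <
 * NTFS_CLUMP_MAX (16G), so:
 *
 *	size >> (8 + 16) = 0x40, __ffs(0x40) = 6,
 *	align_shift = 16 - 1 + 6 = 21, clump = 2 MiB,
 *
 * and get_pre_allocated() rounds the size up to 0x40200000, i.e. files
 * around 1 GiB grow their preallocation in 2 MiB steps.
 */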
/*
 * attr_must_be_resident
 *
 * Return: True if attribute must be resident.
 */
static inline bool attr_must_be_resident(struct ntfs_sb_info *sbi,
					 enum ATTR_TYPE type)
{
	const struct ATTR_DEF_ENTRY *de;

	de = ntfs_query_def(sbi, type);
	if (de && (de->flags & NTFS_ATTR_MUST_BE_RESIDENT))
		return true;
	return false;
}
/*
 * attr_load_runs - Load all runs stored in @attr.
 */
int attr_load_runs(struct ATTRIB *attr, struct ntfs_inode *ni,
		   struct runs_tree *run, const CLST *vcn)
{
	int err;
	CLST svcn = le64_to_cpu(attr->nres.svcn);
	CLST evcn = le64_to_cpu(attr->nres.evcn);
	u32 asize;
	u16 run_off;

	if (svcn >= evcn + 1 || run_is_mapped_full(run, svcn, evcn))
		return 0;

	if (vcn && (evcn < *vcn || *vcn < svcn))
		return -EINVAL;

	asize = le32_to_cpu(attr->size);
	run_off = le16_to_cpu(attr->nres.run_off);
	err = run_unpack_ex(run, ni->mi.sbi, ni->mi.rno, svcn, evcn,
			    vcn ? *vcn : svcn, Add2Ptr(attr, run_off),
			    asize - run_off);
	if (err < 0)
		return err;

	return 0;
}
/*
 * run_deallocate_ex - Deallocate clusters.
 */
static int run_deallocate_ex(struct ntfs_sb_info *sbi, struct runs_tree *run,
			     CLST vcn, CLST len, CLST *done, bool trim)
{
	int err = 0;
	CLST vcn_next, vcn0 = vcn, lcn, clen, dn = 0;
	size_t idx;

	if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {
		run_truncate(run, vcn0);
		return -EINVAL;
	}

	for (;;) {
		if (clen > len)
			clen = len;

		if (lcn != SPARSE_LCN) {
			/* Return the clusters to the bitmap. */
			mark_as_free_ex(sbi, lcn, clen, trim);
			dn += clen;
		}

		len -= clen;
		if (!len)
			break;

		vcn_next = vcn + clen;
		if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
		    vcn != vcn_next) {
			/* Save memory - don't load the entire run. */
			run_truncate(run, vcn0);
			err = -EINVAL;
			break;
		}
	}

	if (done)
		*done += dn;

	return err;
}
/*
 * attr_allocate_clusters - Find free space, mark it as used and store in @run.
 */
int attr_allocate_clusters(struct ntfs_sb_info *sbi, struct runs_tree *run,
			   CLST vcn, CLST lcn, CLST len, CLST *pre_alloc,
			   enum ALLOCATE_OPT opt, CLST *alen, const size_t fr,
			   CLST *new_lcn)
{
	int err;
	CLST flen, vcn0 = vcn, pre = pre_alloc ? *pre_alloc : 0;
	struct wnd_bitmap *wnd = &sbi->used.bitmap;
	size_t cnt = run->count;

	for (;;) {
		err = ntfs_look_for_free_space(sbi, lcn, len + pre, &lcn, &flen,
					       opt);

		if (err == -ENOSPC && pre) {
			/* Retry once more without preallocation. */
			pre = 0;
			continue;
		}

		if (err)
			goto out;

		if (new_lcn && vcn == vcn0)
			*new_lcn = lcn;

		/* Add new fragment into run storage. */
		if (!run_add_entry(run, vcn, lcn, flen, opt == ALLOCATE_MFT)) {
			/* Undo last 'ntfs_look_for_free_space'. */
			down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
			wnd_set_free(wnd, lcn, flen);
			up_write(&wnd->rw_lock);
			err = -ENOMEM;
			goto out;
		}

		vcn += flen;

		if (flen >= len || opt == ALLOCATE_MFT ||
		    (fr && run->count - cnt >= fr)) {
			*alen = vcn - vcn0;
			return 0;
		}

		len -= flen;
	}

out:
	/* Undo 'ntfs_look_for_free_space'. */
	if (vcn - vcn0) {
		run_deallocate_ex(sbi, run, vcn0, vcn - vcn0, NULL, false);
		run_truncate(run, vcn0);
	}

	return err;
}
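/*
 * Illustrative call (a sketch, not a call site in this file): allocate
 * 16 clusters for vcn 0, letting the allocator choose the location:
 *
 *	CLST alen, pre = 16;
 *
 *	err = attr_allocate_clusters(sbi, run, 0, 0, 16, &pre,
 *				     ALLOCATE_DEF, &alen, 0, NULL);
 *
 * On success @run describes the new fragments; @alen may exceed the
 * requested 16 clusters when the preallocation was satisfied too.
 */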
/*
 * attr_make_nonresident
 *
 * If @page is not NULL, it already contains resident data
 * and is locked (called from ni_write_frame()).
 */
int attr_make_nonresident(struct ntfs_inode *ni, struct ATTRIB *attr,
			  struct ATTR_LIST_ENTRY *le, struct mft_inode *mi,
			  u64 new_size, struct runs_tree *run,
			  struct ATTRIB **ins_attr, struct page *page)
{
	struct ntfs_sb_info *sbi;
	struct ATTRIB *attr_s;
	struct MFT_REC *rec;
	u32 used, asize, rsize, aoff, align;
	bool is_data;
	CLST len, alen;
	char *next;
	int err;

	if (attr->non_res) {
		*ins_attr = attr;
		return 0;
	}

	sbi = mi->sbi;
	rec = mi->mrec;
	attr_s = NULL;
	used = le32_to_cpu(rec->used);
	asize = le32_to_cpu(attr->size);
	next = Add2Ptr(attr, asize);
	aoff = PtrOffset(rec, attr);
	rsize = le32_to_cpu(attr->res.data_size);
	is_data = attr->type == ATTR_DATA && !attr->name_len;

	align = sbi->cluster_size;
	if (is_attr_compressed(attr))
		align <<= COMPRESSION_UNIT;

	len = (rsize + align - 1) >> sbi->cluster_bits;

	run_init(run);

	/* Make a copy of original attribute. */
	attr_s = kmemdup(attr, asize, GFP_NOFS);
	if (!attr_s) {
		err = -ENOMEM;
		goto out;
	}

	if (!len) {
		/* Empty resident -> Empty nonresident. */
		alen = 0;
	} else {
		const char *data = resident_data(attr);

		err = attr_allocate_clusters(sbi, run, 0, 0, len, NULL,
					     ALLOCATE_DEF, &alen, 0, NULL);
		if (err)
			goto out1;

		if (!rsize) {
			/* Empty resident -> Non empty nonresident. */
		} else if (!is_data) {
			err = ntfs_sb_write_run(sbi, run, 0, data, rsize, 0);
			if (err)
				goto out2;
		} else if (!page) {
			char *kaddr;

			page = grab_cache_page(ni->vfs_inode.i_mapping, 0);
			if (!page) {
				err = -ENOMEM;
				goto out2;
			}
			kaddr = kmap_atomic(page);
			memcpy(kaddr, data, rsize);
			memset(kaddr + rsize, 0, PAGE_SIZE - rsize);
			kunmap_atomic(kaddr);
			flush_dcache_page(page);
			SetPageUptodate(page);
			set_page_dirty(page);
			unlock_page(page);
			put_page(page);
		}
	}
	/* Remove original attribute. */
	used -= asize;
	memmove(attr, Add2Ptr(attr, asize), used - aoff);
	rec->used = cpu_to_le32(used);
	mi->dirty = true;
	if (le)
		al_remove_le(ni, le);

	err = ni_insert_nonresident(ni, attr_s->type, attr_name(attr_s),
				    attr_s->name_len, run, 0, alen,
				    attr_s->flags, &attr, NULL);
	if (err)
		goto out3;

	kfree(attr_s);
	attr->nres.data_size = cpu_to_le64(rsize);
	attr->nres.valid_size = attr->nres.data_size;

	*ins_attr = attr;

	if (is_data)
		ni->ni_flags &= ~NI_FLAG_RESIDENT;

	/* Resident attribute becomes non resident. */
	return 0;

out3:
	attr = Add2Ptr(rec, aoff);
	memmove(next, attr, used - aoff);
	memcpy(attr, attr_s, asize);
	rec->used = cpu_to_le32(used + asize);
	mi->dirty = true;
out2:
	/* Undo: do not trim newly allocated clusters. */
	run_deallocate(sbi, run, false);
	run_truncate(run, 0);
out1:
	kfree(attr_s);
out:
	return err;
}
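/*
 * Sketch of the MFT record transformation performed above (illustrative):
 *
 *	before: | rec hdr | ... | resident ATTR + payload | tail |
 *	after:  | rec hdr | ... | nonresident ATTR header | tail |
 *
 * The payload itself moves out of the record into the clusters
 * described by @run; only the (shorter) nonresident header stays in
 * the record, which is why rec->used shrinks.
 */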
/*
 * attr_set_size_res - Helper for attr_set_size().
 */
static int attr_set_size_res(struct ntfs_inode *ni, struct ATTRIB *attr,
			     struct ATTR_LIST_ENTRY *le, struct mft_inode *mi,
			     u64 new_size, struct runs_tree *run,
			     struct ATTRIB **ins_attr)
{
	struct ntfs_sb_info *sbi = mi->sbi;
	struct MFT_REC *rec = mi->mrec;
	u32 used = le32_to_cpu(rec->used);
	u32 asize = le32_to_cpu(attr->size);
	u32 aoff = PtrOffset(rec, attr);
	u32 rsize = le32_to_cpu(attr->res.data_size);
	u32 tail = used - aoff - asize;
	char *next = Add2Ptr(attr, asize);
	s64 dsize = ALIGN(new_size, 8) - ALIGN(rsize, 8);

	if (dsize < 0) {
		memmove(next + dsize, next, tail);
	} else if (dsize > 0) {
		if (used + dsize > sbi->max_bytes_per_attr)
			return attr_make_nonresident(ni, attr, le, mi, new_size,
						     run, ins_attr, NULL);

		memmove(next + dsize, next, tail);
		memset(next, 0, dsize);
	}

	if (new_size > rsize)
		memset(Add2Ptr(resident_data(attr), rsize), 0,
		       new_size - rsize);

	rec->used = cpu_to_le32(used + dsize);
	attr->size = cpu_to_le32(asize + dsize);
	attr->res.data_size = cpu_to_le32(new_size);
	mi->dirty = true;
	*ins_attr = attr;

	return 0;
}
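/*
 * A worked example of the resize math above (illustrative): growing a
 * resident attribute from rsize = 10 to new_size = 25 bytes gives
 * dsize = ALIGN(25, 8) - ALIGN(10, 8) = 32 - 16 = 16. The tail of the
 * record moves 16 bytes up, the inserted bytes are zeroed, and, since
 * new_size > rsize, payload bytes [10, 25) are cleared as well.
 */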
/*
 * attr_set_size - Change the size of attribute.
 *
 * Extend:
 *   - Sparse/compressed: No allocated clusters.
 *   - Normal: Append allocated and preallocated new clusters.
 * Shrink:
 *   - No deallocate if @keep_prealloc is set.
 */
int attr_set_size(struct ntfs_inode *ni, enum ATTR_TYPE type,
		  const __le16 *name, u8 name_len, struct runs_tree *run,
		  u64 new_size, const u64 *new_valid, bool keep_prealloc,
		  struct ATTRIB **ret)
{
	int err = 0;
	struct ntfs_sb_info *sbi = ni->mi.sbi;
	u8 cluster_bits = sbi->cluster_bits;
	bool is_mft =
		ni->mi.rno == MFT_REC_MFT && type == ATTR_DATA && !name_len;
	u64 old_valid, old_size, old_alloc, new_alloc, new_alloc_tmp;
	struct ATTRIB *attr = NULL, *attr_b;
	struct ATTR_LIST_ENTRY *le, *le_b;
	struct mft_inode *mi, *mi_b;
	CLST alen, vcn, lcn, new_alen, old_alen, svcn, evcn;
	CLST next_svcn, pre_alloc = -1, done = 0;
	bool is_ext;
	u32 align;
	struct MFT_REC *rec;
again:
	le_b = NULL;
	attr_b = ni_find_attr(ni, NULL, &le_b, type, name, name_len, NULL,
			      &mi_b);
	if (!attr_b) {
		err = -ENOENT;
		goto out;
	}

	if (!attr_b->non_res) {
		err = attr_set_size_res(ni, attr_b, le_b, mi_b, new_size, run,
					&attr_b);
		if (err || !attr_b->non_res)
			goto out;

		/* Layout of records may be changed, so do a full search. */
		goto again;
	}

	is_ext = is_attr_ext(attr_b);

again_1:
	align = sbi->cluster_size;

	if (is_ext)
		align <<= attr_b->nres.c_unit;

	old_valid = le64_to_cpu(attr_b->nres.valid_size);
	old_size = le64_to_cpu(attr_b->nres.data_size);
	old_alloc = le64_to_cpu(attr_b->nres.alloc_size);
	old_alen = old_alloc >> cluster_bits;

	new_alloc = (new_size + align - 1) & ~(u64)(align - 1);
	new_alen = new_alloc >> cluster_bits;

	if (keep_prealloc && new_size < old_size) {
		attr_b->nres.data_size = cpu_to_le64(new_size);
		mi_b->dirty = true;
		goto ok;
	}

	vcn = old_alen - 1;

	svcn = le64_to_cpu(attr_b->nres.svcn);
	evcn = le64_to_cpu(attr_b->nres.evcn);

	if (svcn <= vcn && vcn <= evcn) {
		attr = attr_b;
		le = le_b;
		mi = mi_b;
	} else if (!le_b) {
		err = -EINVAL;
		goto out;
	} else {
		le = le_b;
		attr = ni_find_attr(ni, attr_b, &le, type, name, name_len, &vcn,
				    &mi);
		if (!attr) {
			err = -EINVAL;
			goto out;
		}

next_le_1:
		svcn = le64_to_cpu(attr->nres.svcn);
		evcn = le64_to_cpu(attr->nres.evcn);
	}

next_le:
	rec = mi->mrec;

	err = attr_load_runs(attr, ni, run, NULL);
	if (err)
		goto out;
	if (new_size > old_size) {
		CLST to_allocate;
		size_t free;

		if (new_alloc <= old_alloc) {
			attr_b->nres.data_size = cpu_to_le64(new_size);
			mi_b->dirty = true;
			goto ok;
		}

		to_allocate = new_alen - old_alen;
add_alloc_in_same_attr_seg:
		lcn = 0;
		if (is_mft) {
			/* MFT allocates clusters from MFT zone. */
			pre_alloc = 0;
		} else if (is_ext) {
			/* No preallocation for sparse/compressed. */
			pre_alloc = 0;
		} else if (pre_alloc == -1) {
			pre_alloc = 0;
			if (type == ATTR_DATA && !name_len &&
			    sbi->options->prealloc) {
				CLST new_alen2 = bytes_to_cluster(
					sbi, get_pre_allocated(new_size));
				pre_alloc = new_alen2 - new_alen;
			}

			/* Get the last LCN to allocate from. */
			if (old_alen &&
			    !run_lookup_entry(run, vcn, &lcn, NULL, NULL)) {
				lcn = SPARSE_LCN;
			}

			if (lcn == SPARSE_LCN)
				lcn = 0;
			else if (lcn)
				lcn += 1;

			free = wnd_zeroes(&sbi->used.bitmap);
			if (to_allocate > free) {
				err = -ENOSPC;
				goto out;
			}

			if (pre_alloc && to_allocate + pre_alloc > free)
				pre_alloc = 0;
		}

		vcn = old_alen;

		if (is_ext) {
			if (!run_add_entry(run, vcn, SPARSE_LCN, to_allocate,
					   false)) {
				err = -ENOMEM;
				goto out;
			}
			alen = to_allocate;
		} else {
			/* ~3 bytes per fragment. */
			err = attr_allocate_clusters(
				sbi, run, vcn, lcn, to_allocate, &pre_alloc,
				is_mft ? ALLOCATE_MFT : 0, &alen,
				is_mft ? 0
				       : (sbi->record_size -
					  le32_to_cpu(rec->used) + 8) /
							 3 +
						 1,
				NULL);
			if (err)
				goto out;
		}

		done += alen;
		vcn += alen;
		if (to_allocate > alen)
			to_allocate -= alen;
		else
			to_allocate = 0;

pack_runs:
		err = mi_pack_runs(mi, attr, run, vcn - svcn);
		if (err)
			goto out;

		next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
		new_alloc_tmp = (u64)next_svcn << cluster_bits;
		attr_b->nres.alloc_size = cpu_to_le64(new_alloc_tmp);
		mi_b->dirty = true;

		if (next_svcn >= vcn && !to_allocate) {
			/* Normal way. Update attribute and exit. */
			attr_b->nres.data_size = cpu_to_le64(new_size);
			goto ok;
		}

		/* At least two MFT to avoid recursive loop. */
		if (is_mft && next_svcn == vcn &&
		    ((u64)done << sbi->cluster_bits) >= 2 * sbi->record_size) {
			new_size = new_alloc_tmp;
			attr_b->nres.data_size = attr_b->nres.alloc_size;
			goto ok;
		}

		if (le32_to_cpu(rec->used) < sbi->record_size) {
			old_alen = next_svcn;
			evcn = old_alen - 1;
			goto add_alloc_in_same_attr_seg;
		}

		attr_b->nres.data_size = attr_b->nres.alloc_size;
		if (new_alloc_tmp < old_valid)
			attr_b->nres.valid_size = attr_b->nres.data_size;
		if (type == ATTR_LIST) {
			err = ni_expand_list(ni);
			if (err)
				goto out;
			if (next_svcn < vcn)
				goto pack_runs;

			/* Layout of records is changed. */
			goto again;
		}

		if (!ni->attr_list.size) {
			err = ni_create_attr_list(ni);
			if (err)
				goto out;
			/* Layout of records is changed. */
		}

		if (next_svcn >= vcn) {
			/* This is MFT data, repeat. */
			goto again;
		}

		/* Insert new attribute segment. */
		err = ni_insert_nonresident(ni, type, name, name_len, run,
					    next_svcn, vcn - next_svcn,
					    attr_b->flags, &attr, &mi);
		if (err)
			goto out;

		if (!is_mft)
			run_truncate_head(run, evcn + 1);

		svcn = le64_to_cpu(attr->nres.svcn);
		evcn = le64_to_cpu(attr->nres.evcn);

		le_b = NULL;
		/*
		 * Layout of records may be changed.
		 * Find base attribute to update.
		 */
		attr_b = ni_find_attr(ni, NULL, &le_b, type, name, name_len,
				      NULL, &mi_b);
		if (!attr_b) {
			err = -ENOENT;
			goto out;
		}

		attr_b->nres.alloc_size = cpu_to_le64((u64)vcn << cluster_bits);
		attr_b->nres.data_size = attr_b->nres.alloc_size;
		attr_b->nres.valid_size = attr_b->nres.alloc_size;
		mi_b->dirty = true;
		goto again_1;
	}
	if (new_size != old_size ||
	    (new_alloc != old_alloc && !keep_prealloc)) {
		/* Shrink cases. */
		vcn = max(svcn, new_alen);
		new_alloc_tmp = (u64)vcn << cluster_bits;

		alen = 0;
		err = run_deallocate_ex(sbi, run, vcn, evcn - vcn + 1, &alen,
					true);
		if (err)
			goto out;

		run_truncate(run, vcn);

		if (vcn > svcn) {
			err = mi_pack_runs(mi, attr, run, vcn - svcn);
			if (err)
				goto out;
		} else if (le && le->vcn) {
			u16 le_sz = le16_to_cpu(le->size);

			/*
			 * NOTE: List entries for one attribute are always
			 * the same size. We deal with the last entry
			 * (vcn == 0) and it is not the first in the entries
			 * array (the list entry for the std attribute is
			 * always first), so it is safe to step back.
			 */
			mi_remove_attr(NULL, mi, attr);

			if (!al_remove_le(ni, le)) {
				err = -EINVAL;
				goto out;
			}

			le = (struct ATTR_LIST_ENTRY *)((u8 *)le - le_sz);
		} else {
			attr->nres.evcn = cpu_to_le64((u64)vcn - 1);
			mi->dirty = true;
		}

		attr_b->nres.alloc_size = cpu_to_le64(new_alloc_tmp);

		if (vcn == new_alen) {
			attr_b->nres.data_size = cpu_to_le64(new_size);
			if (new_size < old_valid)
				attr_b->nres.valid_size =
					attr_b->nres.data_size;
		} else {
			if (new_alloc_tmp <=
			    le64_to_cpu(attr_b->nres.data_size))
				attr_b->nres.data_size =
					attr_b->nres.alloc_size;
			if (new_alloc_tmp <
			    le64_to_cpu(attr_b->nres.valid_size))
				attr_b->nres.valid_size =
					attr_b->nres.alloc_size;
		}

		if (is_ext)
			le64_sub_cpu(&attr_b->nres.total_size,
				     ((u64)alen << cluster_bits));

		mi_b->dirty = true;

		if (new_alloc_tmp <= new_alloc)
			goto ok;

		old_size = new_alloc_tmp;

		if (le == le_b) {
			attr = attr_b;
			mi = mi_b;
			evcn = svcn - 1;
			svcn = 0;
			goto next_le;
		}
		if (le->type != type || le->name_len != name_len ||
		    memcmp(le_name(le), name, name_len * sizeof(short))) {
			err = -EINVAL;
			goto out;
		}

		err = ni_load_mi(ni, le, &mi);
		if (err)
			goto out;

		attr = mi_find_attr(mi, NULL, type, name, name_len, &le->id);
		if (!attr) {
			err = -EINVAL;
			goto out;
		}
		goto next_le_1;
	}
ok:
	if (new_valid) {
		__le64 valid = cpu_to_le64(min(*new_valid, new_size));

		if (attr_b->nres.valid_size != valid) {
			attr_b->nres.valid_size = valid;
			mi_b->dirty = true;
		}
	}
out:
	if (!err && attr_b && ret)
		*ret = attr_b;

	/* Update inode_set_bytes. */
	if (!err && ((type == ATTR_DATA && !name_len) ||
		     (type == ATTR_ALLOC && name == I30_NAME))) {
		bool dirty = false;

		if (ni->vfs_inode.i_size != new_size) {
			ni->vfs_inode.i_size = new_size;
			dirty = true;
		}

		if (attr_b && attr_b->non_res) {
			new_alloc = le64_to_cpu(attr_b->nres.alloc_size);
			if (inode_get_bytes(&ni->vfs_inode) != new_alloc) {
				inode_set_bytes(&ni->vfs_inode, new_alloc);
				dirty = true;
			}
		}

		if (dirty) {
			ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
			mark_inode_dirty(&ni->vfs_inode);
		}
	}

	return err;
}
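/*
 * Illustrative use (a sketch, not a call site in this file): extending
 * the unnamed DATA attribute to 1 MiB while leaving the valid size as-is:
 *
 *	err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run,
 *			    0x100000, NULL, false, NULL);
 *
 * @new_valid == NULL keeps nres.valid_size untouched; @keep_prealloc
 * only matters when shrinking.
 */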
/*
 * attr_data_get_block - Return 'lcn' and 'len' for the given 'vcn'.
 *
 * @new == NULL means just to get the current mapping.
 */
int attr_data_get_block(struct ntfs_inode *ni, CLST vcn, CLST clen, CLST *lcn,
			CLST *len, bool *new)
{
	int err = 0;
	struct runs_tree *run = &ni->file.run;
	struct ntfs_sb_info *sbi;
	u8 cluster_bits;
	struct ATTRIB *attr = NULL, *attr_b;
	struct ATTR_LIST_ENTRY *le, *le_b;
	struct mft_inode *mi, *mi_b;
	CLST hint, svcn, to_alloc, evcn1, next_svcn, asize, end;
	u64 total_size;
	u32 clst_per_frame;
	bool ok;

	if (new)
		*new = false;

	down_read(&ni->file.run_lock);
	ok = run_lookup_entry(run, vcn, lcn, len, NULL);
	up_read(&ni->file.run_lock);

	if (ok && (*lcn != SPARSE_LCN || !new)) {
		/* Normal way. */
		return 0;
	}

	if (!clen)
		clen = 1;

	if (ok && clen > *len)
		clen = *len;

	sbi = ni->mi.sbi;
	cluster_bits = sbi->cluster_bits;
	down_write(&ni->file.run_lock);

	le_b = NULL;
	attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
	if (!attr_b) {
		err = -ENOENT;
		goto out;
	}

	if (!attr_b->non_res) {
		*lcn = RESIDENT_LCN;
		*len = 1;
		goto out;
	}

	asize = le64_to_cpu(attr_b->nres.alloc_size) >> sbi->cluster_bits;
	if (vcn >= asize) {
		err = -EINVAL;
		goto out;
	}

	clst_per_frame = 1u << attr_b->nres.c_unit;
	to_alloc = (clen + clst_per_frame - 1) & ~(clst_per_frame - 1);

	if (vcn + to_alloc > asize)
		to_alloc = asize - vcn;

	svcn = le64_to_cpu(attr_b->nres.svcn);
	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;

	attr = attr_b;
	le = le_b;
	mi = mi_b;

	if (le_b && (vcn < svcn || evcn1 <= vcn)) {
		attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
				    &mi);
		if (!attr) {
			err = -EINVAL;
			goto out;
		}
		svcn = le64_to_cpu(attr->nres.svcn);
		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
	}

	err = attr_load_runs(attr, ni, run, NULL);
	if (err)
		goto out;

	if (!ok) {
		ok = run_lookup_entry(run, vcn, lcn, len, NULL);
		if (ok && (*lcn != SPARSE_LCN || !new)) {
			/* Normal way. */
			err = 0;
			goto ok;
		}

		if (!ok && !new) {
			*len = 0;
			err = 0;
			goto ok;
		}

		if (ok && clen > *len) {
			clen = *len;
			to_alloc = (clen + clst_per_frame - 1) &
				   ~(clst_per_frame - 1);
		}
	}
	if (!is_attr_ext(attr_b)) {
		err = -EINVAL;
		goto out;
	}

	/* Get the last LCN to allocate from. */
	hint = 0;

	if (vcn > evcn1) {
		if (!run_add_entry(run, evcn1, SPARSE_LCN, vcn - evcn1,
				   false)) {
			err = -ENOMEM;
			goto out;
		}
	} else if (vcn && !run_lookup_entry(run, vcn - 1, &hint, NULL, NULL)) {
		hint = -1;
	}

	err = attr_allocate_clusters(
		sbi, run, vcn, hint + 1, to_alloc, NULL, 0, len,
		(sbi->record_size - le32_to_cpu(mi->mrec->used) + 8) / 3 + 1,
		lcn);
	if (err)
		goto out;
	*new = true;

	end = vcn + *len;

	total_size = le64_to_cpu(attr_b->nres.total_size) +
		     ((u64)*len << cluster_bits);

repack:
	err = mi_pack_runs(mi, attr, run, max(end, evcn1) - svcn);
	if (err)
		goto out;

	attr_b->nres.total_size = cpu_to_le64(total_size);
	inode_set_bytes(&ni->vfs_inode, total_size);
	ni->ni_flags |= NI_FLAG_UPDATE_PARENT;

	mi_b->dirty = true;
	mark_inode_dirty(&ni->vfs_inode);
	/* Stored [vcn : next_svcn) from [vcn : end). */
	next_svcn = le64_to_cpu(attr->nres.evcn) + 1;

	if (end <= evcn1) {
		if (next_svcn == evcn1) {
			/* Normal way. Update attribute and exit. */
			goto ok;
		}
		/* Add new segment [next_svcn : evcn1 - next_svcn). */
		if (!ni->attr_list.size) {
			err = ni_create_attr_list(ni);
			if (err)
				goto out;
			/* Layout of records is changed. */
			le_b = NULL;
			attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL,
					      0, NULL, &mi_b);
			if (!attr_b) {
				err = -ENOENT;
				goto out;
			}

			attr = attr_b;
			le = le_b;
			mi = mi_b;
			goto repack;
		}
	}

	svcn = evcn1;
	/* Estimate next attribute. */
	attr = ni_find_attr(ni, attr, &le, ATTR_DATA, NULL, 0, &svcn, &mi);

	if (attr) {
		CLST alloc = bytes_to_cluster(
			sbi, le64_to_cpu(attr_b->nres.alloc_size));
		CLST evcn = le64_to_cpu(attr->nres.evcn);

		if (end < next_svcn)
			end = next_svcn;
		while (end > evcn) {
			/* Remove segment [svcn : evcn). */
			mi_remove_attr(NULL, mi, attr);

			if (!al_remove_le(ni, le)) {
				err = -EINVAL;
				goto out;
			}

			if (evcn + 1 >= alloc) {
				/* Last attribute segment. */
				evcn1 = evcn + 1;
				goto ins_ext;
			}

			if (ni_load_mi(ni, le, &mi)) {
				attr = NULL;
				goto out;
			}

			attr = mi_find_attr(mi, NULL, ATTR_DATA, NULL, 0,
					    &le->id);
			if (!attr) {
				err = -EINVAL;
				goto out;
			}
			svcn = le64_to_cpu(attr->nres.svcn);
			evcn = le64_to_cpu(attr->nres.evcn);
		}

		if (end < svcn)
			end = svcn;

		err = attr_load_runs(attr, ni, run, &end);
		if (err)
			goto out;

		evcn1 = evcn + 1;
		attr->nres.svcn = cpu_to_le64(next_svcn);
		err = mi_pack_runs(mi, attr, run, evcn1 - next_svcn);
		if (err)
			goto out;

		le->vcn = cpu_to_le64(next_svcn);
		ni->attr_list.dirty = true;
		mi->dirty = true;

		next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
	}
ins_ext:
	if (evcn1 > next_svcn) {
		err = ni_insert_nonresident(ni, ATTR_DATA, NULL, 0, run,
					    next_svcn, evcn1 - next_svcn,
					    attr_b->flags, &attr, &mi);
		if (err)
			goto out;
	}
ok:
	run_truncate_around(run, vcn);
out:
	up_write(&ni->file.run_lock);

	return err;
}
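/*
 * Illustrative use (a sketch): map one cluster at @vcn, allocating it
 * if it is still sparse:
 *
 *	CLST lcn, len;
 *	bool new;
 *
 *	err = attr_data_get_block(ni, vcn, 1, &lcn, &len, &new);
 *
 * Passing @new == NULL turns this into a pure query of the current
 * mapping: nothing is ever allocated in that case.
 */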
/*
 * attr_data_read_resident - Copy resident data into @page.
 */
int attr_data_read_resident(struct ntfs_inode *ni, struct page *page)
{
	u64 vbo;
	struct ATTRIB *attr;
	u32 data_size;

	attr = ni_find_attr(ni, NULL, NULL, ATTR_DATA, NULL, 0, NULL, NULL);
	if (!attr)
		return -EINVAL;

	if (attr->non_res)
		return E_NTFS_NONRESIDENT;

	vbo = (u64)page->index << PAGE_SHIFT;
	data_size = le32_to_cpu(attr->res.data_size);
	if (vbo < data_size) {
		const char *data = resident_data(attr);
		char *kaddr = kmap_atomic(page);
		u32 use = data_size - vbo;

		if (use > PAGE_SIZE)
			use = PAGE_SIZE;

		memcpy(kaddr, data + vbo, use);
		memset(kaddr + use, 0, PAGE_SIZE - use);
		kunmap_atomic(kaddr);
		flush_dcache_page(page);
		SetPageUptodate(page);
	} else if (!PageUptodate(page)) {
		zero_user_segment(page, 0, PAGE_SIZE);
		SetPageUptodate(page);
	}

	return 0;
}
/*
 * attr_data_write_resident - Copy data from @page into the resident attribute.
 */
int attr_data_write_resident(struct ntfs_inode *ni, struct page *page)
{
	u64 vbo;
	struct mft_inode *mi;
	struct ATTRIB *attr;
	u32 data_size;

	attr = ni_find_attr(ni, NULL, NULL, ATTR_DATA, NULL, 0, NULL, &mi);
	if (!attr)
		return -EINVAL;

	if (attr->non_res) {
		/* Return special error code to check this case. */
		return E_NTFS_NONRESIDENT;
	}

	vbo = (u64)page->index << PAGE_SHIFT;
	data_size = le32_to_cpu(attr->res.data_size);
	if (vbo < data_size) {
		char *data = resident_data(attr);
		char *kaddr = kmap_atomic(page);
		u32 use = data_size - vbo;

		if (use > PAGE_SIZE)
			use = PAGE_SIZE;
		memcpy(data + vbo, kaddr, use);
		kunmap_atomic(kaddr);
		mi->dirty = true;
	}
	ni->i_valid = data_size;

	return 0;
}
/*
 * attr_load_runs_vcn - Load runs with VCN.
 */
int attr_load_runs_vcn(struct ntfs_inode *ni, enum ATTR_TYPE type,
		       const __le16 *name, u8 name_len, struct runs_tree *run,
		       CLST vcn)
{
	struct ATTRIB *attr;
	int err;
	CLST svcn, evcn;
	u16 ro;

	if (!ni) {
		/* Is record corrupted? */
		return -ENOENT;
	}

	attr = ni_find_attr(ni, NULL, NULL, type, name, name_len, &vcn, NULL);
	if (!attr) {
		/* Is record corrupted? */
		return -ENOENT;
	}

	svcn = le64_to_cpu(attr->nres.svcn);
	evcn = le64_to_cpu(attr->nres.evcn);

	if (evcn < vcn || vcn < svcn) {
		/* Is record corrupted? */
		return -EINVAL;
	}

	ro = le16_to_cpu(attr->nres.run_off);

	if (ro > le32_to_cpu(attr->size))
		return -EINVAL;

	err = run_unpack_ex(run, ni->mi.sbi, ni->mi.rno, svcn, evcn, svcn,
			    Add2Ptr(attr, ro), le32_to_cpu(attr->size) - ro);
	if (err < 0)
		return err;
	return 0;
}
/*
 * attr_load_runs_range - Load runs for the given range [from, to).
 */
int attr_load_runs_range(struct ntfs_inode *ni, enum ATTR_TYPE type,
			 const __le16 *name, u8 name_len, struct runs_tree *run,
			 u64 from, u64 to)
{
	struct ntfs_sb_info *sbi = ni->mi.sbi;
	u8 cluster_bits = sbi->cluster_bits;
	CLST vcn;
	CLST vcn_last = (to - 1) >> cluster_bits;
	CLST lcn, clen;
	int err;

	for (vcn = from >> cluster_bits; vcn <= vcn_last; vcn += clen) {
		if (!run_lookup_entry(run, vcn, &lcn, &clen, NULL)) {
			err = attr_load_runs_vcn(ni, type, name, name_len, run,
						 vcn);
			if (err)
				return err;
			clen = 0; /* Next run_lookup_entry(vcn) must succeed. */
		}
	}

	return 0;
}
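/*
 * Illustrative use (a sketch): make sure the mapping for the byte range
 * [vbo, vbo + len) of the unnamed DATA attribute is present in the run
 * before doing I/O on it:
 *
 *	err = attr_load_runs_range(ni, ATTR_DATA, NULL, 0,
 *				   &ni->file.run, vbo, vbo + len);
 */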
#ifdef CONFIG_NTFS3_LZX_XPRESS
/*
 * attr_wof_frame_info
 *
 * Read the header of an Xpress/LZX file to get info about a frame.
 */
int attr_wof_frame_info(struct ntfs_inode *ni, struct ATTRIB *attr,
			struct runs_tree *run, u64 frame, u64 frames,
			u8 frame_bits, u32 *ondisk_size, u64 *vbo_data)
{
	struct ntfs_sb_info *sbi = ni->mi.sbi;
	u64 vbo[2], off[2], wof_size;
	u32 voff;
	u8 bytes_per_off;
	char *addr;
	struct page *page;
	int i, err;
	__le32 *off32;
	__le64 *off64;

	if (ni->vfs_inode.i_size < 0x100000000ull) {
		/* File starts with array of 32 bit offsets. */
		bytes_per_off = sizeof(__le32);
		vbo[1] = frame << 2;
		*vbo_data = frames << 2;
	} else {
		/* File starts with array of 64 bit offsets. */
		bytes_per_off = sizeof(__le64);
		vbo[1] = frame << 3;
		*vbo_data = frames << 3;
	}
	/*
	 * Read 4/8 bytes at [vbo - 4(8)] == offset where the compressed frame starts.
	 * Read 4/8 bytes at [vbo] == offset where the compressed frame ends.
	 */
	if (!attr->non_res) {
		if (vbo[1] + bytes_per_off > le32_to_cpu(attr->res.data_size)) {
			ntfs_inode_err(&ni->vfs_inode, "is corrupted");
			return -EINVAL;
		}
		addr = resident_data(attr);

		if (bytes_per_off == sizeof(__le32)) {
			off32 = Add2Ptr(addr, vbo[1]);
			off[0] = vbo[1] ? le32_to_cpu(off32[-1]) : 0;
			off[1] = le32_to_cpu(off32[0]);
		} else {
			off64 = Add2Ptr(addr, vbo[1]);
			off[0] = vbo[1] ? le64_to_cpu(off64[-1]) : 0;
			off[1] = le64_to_cpu(off64[0]);
		}

		*vbo_data += off[0];
		*ondisk_size = off[1] - off[0];
		return 0;
	}
	wof_size = le64_to_cpu(attr->nres.data_size);
	down_write(&ni->file.run_lock);
	page = ni->file.offs_page;
	if (!page) {
		page = alloc_page(GFP_KERNEL);
		if (!page) {
			err = -ENOMEM;
			goto out;
		}
		page->index = -1;
		ni->file.offs_page = page;
	}
	lock_page(page);
	addr = page_address(page);

	if (vbo[1]) {
		voff = vbo[1] & (PAGE_SIZE - 1);
		vbo[0] = vbo[1] - bytes_per_off;
		i = 0;
	} else {
		voff = 0;
		vbo[0] = 0;
		off[0] = 0;
		i = 1;
	}
	do {
		pgoff_t index = vbo[i] >> PAGE_SHIFT;

		if (index != page->index) {
			u64 from = vbo[i] & ~(u64)(PAGE_SIZE - 1);
			u64 to = min(from + PAGE_SIZE, wof_size);

			err = attr_load_runs_range(ni, ATTR_DATA, WOF_NAME,
						   ARRAY_SIZE(WOF_NAME), run,
						   from, to);
			if (err)
				goto out1;

			err = ntfs_bio_pages(sbi, run, &page, 1, from,
					     to - from, REQ_OP_READ);
			if (err) {
				page->index = -1;
				goto out1;
			}
			page->index = index;
		}

		if (i) {
			if (bytes_per_off == sizeof(__le32)) {
				off32 = Add2Ptr(addr, voff);
				off[1] = le32_to_cpu(*off32);
			} else {
				off64 = Add2Ptr(addr, voff);
				off[1] = le64_to_cpu(*off64);
			}
		} else if (!voff) {
			if (bytes_per_off == sizeof(__le32)) {
				off32 = Add2Ptr(addr, PAGE_SIZE - sizeof(u32));
				off[0] = le32_to_cpu(*off32);
			} else {
				off64 = Add2Ptr(addr, PAGE_SIZE - sizeof(u64));
				off[0] = le64_to_cpu(*off64);
			}
		} else {
			/* Two values in one page. */
			if (bytes_per_off == sizeof(__le32)) {
				off32 = Add2Ptr(addr, voff);
				off[0] = le32_to_cpu(off32[-1]);
				off[1] = le32_to_cpu(off32[0]);
			} else {
				off64 = Add2Ptr(addr, voff);
				off[0] = le64_to_cpu(off64[-1]);
				off[1] = le64_to_cpu(off64[0]);
			}
		}
	} while (++i < 2);

	*vbo_data += off[0];
	*ondisk_size = off[1] - off[0];
	err = 0;

out1:
	unlock_page(page);
out:
	up_write(&ni->file.run_lock);
	return err;
}
#endif
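/*
 * The WOF stream layout assumed by the code above (illustrative): the
 * stream starts with an array of cumulative frame end offsets (32-bit
 * when the original file is smaller than 4G, 64-bit otherwise):
 *
 *	start = frame ? offsets[frame - 1] : 0;
 *	end   = offsets[frame];
 *
 * so *ondisk_size = end - start, and *vbo_data points past the whole
 * offsets array plus 'start' bytes into the compressed data.
 */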
/*
 * attr_is_frame_compressed - Used to detect compressed frame.
 */
int attr_is_frame_compressed(struct ntfs_inode *ni, struct ATTRIB *attr,
			     CLST frame, CLST *clst_data)
{
	int err;
	u32 clst_frame;
	CLST clen, lcn, vcn, alen, slen, vcn_next;
	size_t idx;
	struct runs_tree *run;

	*clst_data = 0;

	if (!is_attr_compressed(attr))
		return -EINVAL;

	if (!attr->non_res)
		return 0;

	clst_frame = 1u << attr->nres.c_unit;
	vcn = frame * clst_frame;
	run = &ni->file.run;

	if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {
		err = attr_load_runs_vcn(ni, attr->type, attr_name(attr),
					 attr->name_len, run, vcn);
		if (err)
			return err;

		if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx))
			return -EINVAL;
	}

	if (lcn == SPARSE_LCN) {
		/* Sparse frame. */
		return 0;
	}

	if (clen >= clst_frame) {
		/*
		 * The frame is not compressed because
		 * it does not contain any sparse clusters.
		 */
		*clst_data = clst_frame;
		return 0;
	}

	alen = bytes_to_cluster(ni->mi.sbi, le64_to_cpu(attr->nres.alloc_size));
	slen = 0;
	*clst_data = clen;

	/*
	 * The frame is compressed if *clst_data + slen >= clst_frame.
	 * Check the next fragments.
	 */
	while ((vcn += clen) < alen) {
		vcn_next = vcn;

		if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
		    vcn != vcn_next) {
			err = attr_load_runs_vcn(ni, attr->type,
						 attr_name(attr),
						 attr->name_len, run, vcn_next);
			if (err)
				return err;
			vcn = vcn_next;

			if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx))
				return -EINVAL;
		}

		if (lcn == SPARSE_LCN) {
			slen += clen;
		} else {
			if (slen) {
				/*
				 * Data clusters after sparse clusters:
				 * data_clusters + sparse_clusters is
				 * not enough for the frame. Corrupted?
				 */
				return -EINVAL;
			}
			*clst_data += clen;
		}

		if (*clst_data + slen >= clst_frame) {
			if (!slen) {
				/*
				 * There are no sparse clusters in this frame,
				 * so it is not compressed.
				 */
				*clst_data = clst_frame;
			} else {
				/* Frame is compressed. */
			}
			break;
		}
	}

	return 0;
}
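/*
 * A worked example (illustrative): with c_unit = 4 one frame covers 16
 * clusters. If the run shows 10 allocated clusters followed by 6 sparse
 * ones, then *clst_data = 10, slen = 6 and 10 + 6 >= 16: the frame is
 * compressed and occupies 10 clusters on disk. A frame whose first
 * fragment already spans all 16 clusters is stored uncompressed.
 */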
/*
 * attr_allocate_frame - Allocate/free clusters for @frame.
 *
 * Assumed: down_write(&ni->file.run_lock);
 */
int attr_allocate_frame(struct ntfs_inode *ni, CLST frame, size_t compr_size,
			u64 new_valid)
{
	int err = 0;
	struct runs_tree *run = &ni->file.run;
	struct ntfs_sb_info *sbi = ni->mi.sbi;
	struct ATTRIB *attr = NULL, *attr_b;
	struct ATTR_LIST_ENTRY *le, *le_b;
	struct mft_inode *mi, *mi_b;
	CLST svcn, evcn1, next_svcn, lcn, len;
	CLST vcn, end, clst_data;
	u64 total_size, valid_size, data_size;

	le_b = NULL;
	attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
	if (!attr_b)
		return -ENOENT;

	if (!is_attr_ext(attr_b))
		return -EINVAL;

	vcn = frame << NTFS_LZNT_CUNIT;
	total_size = le64_to_cpu(attr_b->nres.total_size);

	svcn = le64_to_cpu(attr_b->nres.svcn);
	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
	data_size = le64_to_cpu(attr_b->nres.data_size);

	if (svcn <= vcn && vcn < evcn1) {
		attr = attr_b;
		le = le_b;
		mi = mi_b;
	} else if (!le_b) {
		err = -EINVAL;
		goto out;
	} else {
		le = le_b;
		attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
				    &mi);
		if (!attr) {
			err = -EINVAL;
			goto out;
		}
		svcn = le64_to_cpu(attr->nres.svcn);
		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
	}

	err = attr_load_runs(attr, ni, run, NULL);
	if (err)
		goto out;
	err = attr_is_frame_compressed(ni, attr_b, frame, &clst_data);
	if (err)
		goto out;

	total_size -= (u64)clst_data << sbi->cluster_bits;

	len = bytes_to_cluster(sbi, compr_size);

	if (len == clst_data)
		goto out;

	if (len < clst_data) {
		err = run_deallocate_ex(sbi, run, vcn + len, clst_data - len,
					NULL, true);
		if (err)
			goto out;

		if (!run_add_entry(run, vcn + len, SPARSE_LCN, clst_data - len,
				   false)) {
			err = -ENOMEM;
			goto out;
		}
		end = vcn + clst_data;
		/* Run contains updated range [vcn + len : end). */
	} else {
		CLST alen, hint = 0;
		/* Get the last LCN to allocate from. */
		if (vcn + clst_data &&
		    !run_lookup_entry(run, vcn + clst_data - 1, &hint, NULL,
				      NULL)) {
			hint = -1;
		}

		err = attr_allocate_clusters(sbi, run, vcn + clst_data,
					     hint + 1, len - clst_data, NULL, 0,
					     &alen, 0, &lcn);
		if (err)
			goto out;

		end = vcn + len;
		/* Run contains updated range [vcn + clst_data : end). */
	}

	total_size += (u64)len << sbi->cluster_bits;

repack:
	err = mi_pack_runs(mi, attr, run, max(end, evcn1) - svcn);
	if (err)
		goto out;

	attr_b->nres.total_size = cpu_to_le64(total_size);
	inode_set_bytes(&ni->vfs_inode, total_size);

	mi_b->dirty = true;
	mark_inode_dirty(&ni->vfs_inode);
	/* Stored [vcn : next_svcn) from [vcn : end). */
	next_svcn = le64_to_cpu(attr->nres.evcn) + 1;

	if (end <= evcn1) {
		if (next_svcn == evcn1) {
			/* Normal way. Update attribute and exit. */
			goto ok;
		}
		/* Add new segment [next_svcn : evcn1 - next_svcn). */
		if (!ni->attr_list.size) {
			err = ni_create_attr_list(ni);
			if (err)
				goto out;
			/* Layout of records is changed. */
			le_b = NULL;
			attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL,
					      0, NULL, &mi_b);
			if (!attr_b) {
				err = -ENOENT;
				goto out;
			}

			attr = attr_b;
			le = le_b;
			mi = mi_b;
			goto repack;
		}
	}

	svcn = evcn1;
	/* Estimate next attribute. */
	attr = ni_find_attr(ni, attr, &le, ATTR_DATA, NULL, 0, &svcn, &mi);

	if (attr) {
		CLST alloc = bytes_to_cluster(
			sbi, le64_to_cpu(attr_b->nres.alloc_size));
		CLST evcn = le64_to_cpu(attr->nres.evcn);

		if (end < next_svcn)
			end = next_svcn;
		while (end > evcn) {
			/* Remove segment [svcn : evcn). */
			mi_remove_attr(NULL, mi, attr);

			if (!al_remove_le(ni, le)) {
				err = -EINVAL;
				goto out;
			}

			if (evcn + 1 >= alloc) {
				/* Last attribute segment. */
				evcn1 = evcn + 1;
				goto ins_ext;
			}

			if (ni_load_mi(ni, le, &mi)) {
				attr = NULL;
				goto out;
			}

			attr = mi_find_attr(mi, NULL, ATTR_DATA, NULL, 0,
					    &le->id);
			if (!attr) {
				err = -EINVAL;
				goto out;
			}
			svcn = le64_to_cpu(attr->nres.svcn);
			evcn = le64_to_cpu(attr->nres.evcn);
		}

		if (end < svcn)
			end = svcn;

		err = attr_load_runs(attr, ni, run, &end);
		if (err)
			goto out;

		evcn1 = evcn + 1;
		attr->nres.svcn = cpu_to_le64(next_svcn);
		err = mi_pack_runs(mi, attr, run, evcn1 - next_svcn);
		if (err)
			goto out;

		le->vcn = cpu_to_le64(next_svcn);
		ni->attr_list.dirty = true;
		mi->dirty = true;

		next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
	}
ins_ext:
	if (evcn1 > next_svcn) {
		err = ni_insert_nonresident(ni, ATTR_DATA, NULL, 0, run,
					    next_svcn, evcn1 - next_svcn,
					    attr_b->flags, &attr, &mi);
		if (err)
			goto out;
	}
ok:
	run_truncate_around(run, vcn);
out:
	if (new_valid > data_size)
		new_valid = data_size;

	valid_size = le64_to_cpu(attr_b->nres.valid_size);
	if (new_valid != valid_size) {
		attr_b->nres.valid_size = cpu_to_le64(new_valid);
		mi_b->dirty = true;
	}

	return err;
}
/*
 * attr_collapse_range - Collapse range in file.
 */
int attr_collapse_range(struct ntfs_inode *ni, u64 vbo, u64 bytes)
{
	int err = 0;
	struct runs_tree *run = &ni->file.run;
	struct ntfs_sb_info *sbi = ni->mi.sbi;
	struct ATTRIB *attr = NULL, *attr_b;
	struct ATTR_LIST_ENTRY *le, *le_b;
	struct mft_inode *mi, *mi_b;
	CLST svcn, evcn1, len, dealloc, alen;
	CLST vcn, end;
	u64 valid_size, data_size, alloc_size, total_size;
	u32 mask;
	__le16 a_flags;

	if (!bytes)
		return 0;

	le_b = NULL;
	attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
	if (!attr_b)
		return -ENOENT;

	if (!attr_b->non_res) {
		/* Attribute is resident. Nothing to do? */
		return 0;
	}

	data_size = le64_to_cpu(attr_b->nres.data_size);
	alloc_size = le64_to_cpu(attr_b->nres.alloc_size);
	a_flags = attr_b->flags;

	if (is_attr_ext(attr_b)) {
		total_size = le64_to_cpu(attr_b->nres.total_size);
		mask = (sbi->cluster_size << attr_b->nres.c_unit) - 1;
	} else {
		total_size = alloc_size;
		mask = sbi->cluster_mask;
	}
	if ((vbo & mask) || (bytes & mask)) {
		/* Allow to collapse only cluster aligned ranges. */
		return -EINVAL;
	}

	if (vbo > data_size)
		return -EINVAL;

	down_write(&ni->file.run_lock);

	if (vbo + bytes >= data_size) {
		u64 new_valid = min(ni->i_valid, vbo);

		/* Simple truncate file at 'vbo'. */
		truncate_setsize(&ni->vfs_inode, vbo);
		err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run, vbo,
				    &new_valid, true, NULL);

		if (!err && new_valid < ni->i_valid)
			ni->i_valid = new_valid;

		goto out;
	}

	/*
	 * Enumerate all attribute segments and collapse.
	 */
	alen = alloc_size >> sbi->cluster_bits;
	vcn = vbo >> sbi->cluster_bits;
	len = bytes >> sbi->cluster_bits;
	end = vcn + len;
	dealloc = 0;

	svcn = le64_to_cpu(attr_b->nres.svcn);
	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;

	if (svcn <= vcn && vcn < evcn1) {
		attr = attr_b;
		le = le_b;
		mi = mi_b;
	} else if (!le_b) {
		err = -EINVAL;
		goto out;
	} else {
		le = le_b;
		attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
				    &mi);
		if (!attr) {
			err = -EINVAL;
			goto out;
		}

		svcn = le64_to_cpu(attr->nres.svcn);
		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
	}
	for (;;) {
		if (svcn >= end) {
			/* Shift VCN down. */
			attr->nres.svcn = cpu_to_le64(svcn - len);
			attr->nres.evcn = cpu_to_le64(evcn1 - 1 - len);
			if (le) {
				le->vcn = attr->nres.svcn;
				ni->attr_list.dirty = true;
			}
			mi->dirty = true;
		} else if (svcn < vcn || end < evcn1) {
			CLST vcn1, eat, next_svcn;

			/* Collapse a part of this attribute segment. */
			err = attr_load_runs(attr, ni, run, &svcn);
			if (err)
				goto out;
			vcn1 = max(vcn, svcn);
			eat = min(end, evcn1) - vcn1;

			err = run_deallocate_ex(sbi, run, vcn1, eat, &dealloc,
						true);
			if (err)
				goto out;

			if (!run_collapse_range(run, vcn1, eat)) {
				err = -ENOMEM;
				goto out;
			}

			if (svcn >= vcn) {
				/* Shift VCN down. */
				attr->nres.svcn = cpu_to_le64(vcn);
				if (le) {
					le->vcn = attr->nres.svcn;
					ni->attr_list.dirty = true;
				}
			}

			err = mi_pack_runs(mi, attr, run, evcn1 - svcn - eat);
			if (err)
				goto out;

			next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
			if (next_svcn + eat < evcn1) {
				err = ni_insert_nonresident(
					ni, ATTR_DATA, NULL, 0, run, next_svcn,
					evcn1 - eat - next_svcn, a_flags, &attr,
					&mi);
				if (err)
					goto out;

				/* Layout of records may be changed. */
				attr_b = NULL;
				le = al_find_ex(ni, NULL, ATTR_DATA, NULL, 0,
						&next_svcn);
				if (!le) {
					err = -EINVAL;
					goto out;
				}
			}

			/* Free all allocated memory. */
			run_truncate(run, 0);
		} else {
			u16 le_sz;
			u16 roff = le16_to_cpu(attr->nres.run_off);

			if (roff > le32_to_cpu(attr->size)) {
				err = -EINVAL;
				goto out;
			}

			run_unpack_ex(RUN_DEALLOCATE, sbi, ni->mi.rno, svcn,
				      evcn1 - 1, svcn, Add2Ptr(attr, roff),
				      le32_to_cpu(attr->size) - roff);

			/* Delete this attribute segment. */
			mi_remove_attr(NULL, mi, attr);
			if (!le)
				break;

			le_sz = le16_to_cpu(le->size);
			if (!al_remove_le(ni, le)) {
				err = -EINVAL;
				goto out;
			}

			if (evcn1 >= alen)
				break;

			if (!svcn) {
				/* Load next record that contains this attribute. */
				if (ni_load_mi(ni, le, &mi)) {
					err = -EINVAL;
					goto out;
				}

				/* Look for the required attribute. */
				attr = mi_find_attr(mi, NULL, ATTR_DATA, NULL,
						    0, &le->id);
				if (!attr) {
					err = -EINVAL;
					goto out;
				}
				goto next_attr;
			}
			le = (struct ATTR_LIST_ENTRY *)((u8 *)le - le_sz);
		}

		if (evcn1 >= alen)
			break;

		attr = ni_enum_attr_ex(ni, attr, &le, &mi);
		if (!attr) {
			err = -EINVAL;
			goto out;
		}

next_attr:
		svcn = le64_to_cpu(attr->nres.svcn);
		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
	}
	if (!attr_b) {
		le_b = NULL;
		attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL,
				      &mi_b);
		if (!attr_b) {
			err = -ENOENT;
			goto out;
		}
	}

	data_size -= bytes;
	valid_size = ni->i_valid;
	if (vbo + bytes <= valid_size)
		valid_size -= bytes;
	else if (vbo < valid_size)
		valid_size = vbo;

	attr_b->nres.alloc_size = cpu_to_le64(alloc_size - bytes);
	attr_b->nres.data_size = cpu_to_le64(data_size);
	attr_b->nres.valid_size = cpu_to_le64(min(valid_size, data_size));
	total_size -= (u64)dealloc << sbi->cluster_bits;
	if (is_attr_ext(attr_b))
		attr_b->nres.total_size = cpu_to_le64(total_size);
	mi_b->dirty = true;

	/* Update inode size. */
	ni->i_valid = valid_size;
	ni->vfs_inode.i_size = data_size;
	inode_set_bytes(&ni->vfs_inode, total_size);
	ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
	mark_inode_dirty(&ni->vfs_inode);
out:
	up_write(&ni->file.run_lock);
	if (err)
		make_bad_inode(&ni->vfs_inode);

	return err;
}
/*
 * attr_punch_hole
 *
 * Not for normal files.
 */
int attr_punch_hole(struct ntfs_inode *ni, u64 vbo, u64 bytes, u32 *frame_size)
{
	int err = 0;
	struct runs_tree *run = &ni->file.run;
	struct ntfs_sb_info *sbi = ni->mi.sbi;
	struct ATTRIB *attr = NULL, *attr_b;
	struct ATTR_LIST_ENTRY *le, *le_b;
	struct mft_inode *mi, *mi_b;
	CLST svcn, evcn1, vcn, len, end, alen, dealloc;
	u64 total_size, alloc_size;
	u32 mask;

	if (!bytes)
		return 0;

	le_b = NULL;
	attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
	if (!attr_b)
		return -ENOENT;

	if (!attr_b->non_res) {
		u32 data_size = le32_to_cpu(attr_b->res.data_size);
		u32 from, to;

		if (vbo > data_size)
			return 0;

		from = vbo;
		to = min_t(u64, vbo + bytes, data_size);
		memset(Add2Ptr(resident_data(attr_b), from), 0, to - from);
		return 0;
	}

	if (!is_attr_ext(attr_b))
		return -EOPNOTSUPP;

	alloc_size = le64_to_cpu(attr_b->nres.alloc_size);
	total_size = le64_to_cpu(attr_b->nres.total_size);

	if (vbo >= alloc_size) {
		/* NOTE: It is allowed. */
		return 0;
	}

	mask = (sbi->cluster_size << attr_b->nres.c_unit) - 1;

	bytes += vbo;
	if (bytes > alloc_size)
		bytes = alloc_size;
	bytes -= vbo;

	if ((vbo & mask) || (bytes & mask)) {
		/* We have to zero a range(s). */
		if (frame_size == NULL) {
			/* Caller insists range is aligned. */
			return -EINVAL;
		}
		*frame_size = mask + 1;
		return E_NTFS_NOTALIGNED;
	}
	down_write(&ni->file.run_lock);
	/*
	 * Enumerate all attribute segments and punch hole where necessary.
	 */
	alen = alloc_size >> sbi->cluster_bits;
	vcn = vbo >> sbi->cluster_bits;
	len = bytes >> sbi->cluster_bits;
	end = vcn + len;
	dealloc = 0;

	svcn = le64_to_cpu(attr_b->nres.svcn);
	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;

	if (svcn <= vcn && vcn < evcn1) {
		attr = attr_b;
		le = le_b;
		mi = mi_b;
	} else if (!le_b) {
		err = -EINVAL;
		goto out;
	} else {
		le = le_b;
		attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
				    &mi);
		if (!attr) {
			err = -EINVAL;
			goto out;
		}

		svcn = le64_to_cpu(attr->nres.svcn);
		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
	}
	while (svcn < end) {
		CLST vcn1, zero, dealloc2;

		err = attr_load_runs(attr, ni, run, &svcn);
		if (err)
			goto out;
		vcn1 = max(vcn, svcn);
		zero = min(end, evcn1) - vcn1;

		dealloc2 = dealloc;
		err = run_deallocate_ex(sbi, run, vcn1, zero, &dealloc, true);
		if (err)
			goto out;

		if (dealloc2 == dealloc) {
			/* Looks like the required range is already sparse. */
		} else {
			if (!run_add_entry(run, vcn1, SPARSE_LCN, zero,
					   false)) {
				err = -ENOMEM;
				goto out;
			}

			err = mi_pack_runs(mi, attr, run, evcn1 - svcn);
			if (err)
				goto out;
		}
		/* Free all allocated memory. */
		run_truncate(run, 0);

		if (evcn1 >= alen)
			break;

		attr = ni_enum_attr_ex(ni, attr, &le, &mi);
		if (!attr) {
			err = -EINVAL;
			goto out;
		}

		svcn = le64_to_cpu(attr->nres.svcn);
		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
	}
	total_size -= (u64)dealloc << sbi->cluster_bits;
	attr_b->nres.total_size = cpu_to_le64(total_size);
	mi_b->dirty = true;

	/* Update inode size. */
	inode_set_bytes(&ni->vfs_inode, total_size);
	ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
	mark_inode_dirty(&ni->vfs_inode);

out:
	up_write(&ni->file.run_lock);
	if (err)
		make_bad_inode(&ni->vfs_inode);

	return err;
}
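/*
 * Illustrative use (a sketch, not a call site in this file): punching a
 * hole in a sparse/compressed file and handling unaligned ranges:
 *
 *	u32 frame_size;
 *	int err = attr_punch_hole(ni, vbo, bytes, &frame_size);
 *
 *	if (err == E_NTFS_NOTALIGNED) {
 *		// The caller must zero the unaligned edges itself
 *		// (frame_size is the required alignment) and retry
 *		// with an aligned range.
 *	}
 */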