// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
 */

#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/kernel.h>

#include "ntfs_fs.h"
const struct cpu_str NAME_MFT = {
	4, 0, { '$', 'M', 'F', 'T' },
};
const struct cpu_str NAME_MIRROR = {
	8, 0, { '$', 'M', 'F', 'T', 'M', 'i', 'r', 'r' },
};
const struct cpu_str NAME_LOGFILE = {
	8, 0, { '$', 'L', 'o', 'g', 'F', 'i', 'l', 'e' },
};
const struct cpu_str NAME_VOLUME = {
	7, 0, { '$', 'V', 'o', 'l', 'u', 'm', 'e' },
};
const struct cpu_str NAME_ATTRDEF = {
	8, 0, { '$', 'A', 't', 't', 'r', 'D', 'e', 'f' },
};
const struct cpu_str NAME_ROOT = {
	1, 0, { '.' },
};
const struct cpu_str NAME_BITMAP = {
	7, 0, { '$', 'B', 'i', 't', 'm', 'a', 'p' },
};
const struct cpu_str NAME_BOOT = {
	5, 0, { '$', 'B', 'o', 'o', 't' },
};
const struct cpu_str NAME_BADCLUS = {
	8, 0, { '$', 'B', 'a', 'd', 'C', 'l', 'u', 's' },
};
const struct cpu_str NAME_QUOTA = {
	6, 0, { '$', 'Q', 'u', 'o', 't', 'a' },
};
const struct cpu_str NAME_SECURE = {
	7, 0, { '$', 'S', 'e', 'c', 'u', 'r', 'e' },
};
const struct cpu_str NAME_UPCASE = {
	7, 0, { '$', 'U', 'p', 'C', 'a', 's', 'e' },
};
const struct cpu_str NAME_EXTEND = {
	7, 0, { '$', 'E', 'x', 't', 'e', 'n', 'd' },
};
const struct cpu_str NAME_OBJID = {
	6, 0, { '$', 'O', 'b', 'j', 'I', 'd' },
};
const struct cpu_str NAME_REPARSE = {
	8, 0, { '$', 'R', 'e', 'p', 'a', 'r', 's', 'e' },
};
const struct cpu_str NAME_USNJRNL = {
	8, 0, { '$', 'U', 's', 'n', 'J', 'r', 'n', 'l' },
};
const __le16 BAD_NAME[4] = {
	cpu_to_le16('$'), cpu_to_le16('B'), cpu_to_le16('a'), cpu_to_le16('d'),
};
const __le16 I30_NAME[4] = {
	cpu_to_le16('$'), cpu_to_le16('I'), cpu_to_le16('3'), cpu_to_le16('0'),
};
const __le16 SII_NAME[4] = {
	cpu_to_le16('$'), cpu_to_le16('S'), cpu_to_le16('I'), cpu_to_le16('I'),
};
const __le16 SDH_NAME[4] = {
	cpu_to_le16('$'), cpu_to_le16('S'), cpu_to_le16('D'), cpu_to_le16('H'),
};
const __le16 SDS_NAME[4] = {
	cpu_to_le16('$'), cpu_to_le16('S'), cpu_to_le16('D'), cpu_to_le16('S'),
};
const __le16 SO_NAME[2] = {
	cpu_to_le16('$'), cpu_to_le16('O'),
};
const __le16 SQ_NAME[2] = {
	cpu_to_le16('$'), cpu_to_le16('Q'),
};
const __le16 SR_NAME[2] = {
	cpu_to_le16('$'), cpu_to_le16('R'),
};
#ifdef CONFIG_NTFS3_LZX_XPRESS
const __le16 WOF_NAME[17] = {
	cpu_to_le16('W'), cpu_to_le16('o'), cpu_to_le16('f'), cpu_to_le16('C'),
	cpu_to_le16('o'), cpu_to_le16('m'), cpu_to_le16('p'), cpu_to_le16('r'),
	cpu_to_le16('e'), cpu_to_le16('s'), cpu_to_le16('s'), cpu_to_le16('e'),
	cpu_to_le16('d'), cpu_to_le16('D'), cpu_to_le16('a'), cpu_to_le16('t'),
	cpu_to_le16('a'),
};
#endif
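
/*
 * Note: struct cpu_str is a length-prefixed string: the first field is the
 * character count, the second is padding, and the characters follow. For
 * example, NAME_MFT above describes the 4-character name "$MFT".
 */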
/*
 * ntfs_fix_pre_write - Insert fixups into @rhdr before writing to disk.
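 *
 * NTFS protects multi-sector records with an update sequence ("fixups"):
 * the last two bytes of every 512-byte sector are overwritten with the
 * current sequence number, and the bytes they displace are saved in the
 * fixup array at @fix_off. A torn write is later detected by a mismatch
 * between the stored sequence number and the sector tails.
 */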
bool ntfs_fix_pre_write(struct NTFS_RECORD_HEADER *rhdr, size_t bytes)
{
	u16 *fixup, *ptr, sample;
	u16 fo = le16_to_cpu(rhdr->fix_off);
	u16 fn = le16_to_cpu(rhdr->fix_num);

	if ((fo & 1) || fo + fn * sizeof(short) > SECTOR_SIZE || !fn-- ||
	    fn * SECTOR_SIZE > bytes) {
		return false;
	}

	/* Get fixup pointer. */
	fixup = Add2Ptr(rhdr, fo);

	if (*fixup >= 0x7FFF)
		*fixup = 1;
	else
		*fixup += 1;

	sample = *fixup;
	ptr = Add2Ptr(rhdr, SECTOR_SIZE - sizeof(short));

	while (fn--) {
		*++fixup = *ptr;
		*ptr = sample;
		ptr += SECTOR_SIZE / sizeof(short);
	}
	return true;
}
/*
 * ntfs_fix_post_read - Remove fixups after reading from disk.
 *
 * Return: < 0 if error, 0 if ok, 1 if need to update fixups.
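 *
 * When @simple is set, the fixup count is derived from @bytes instead of
 * being trusted from the (not yet validated) record header.
 */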
int ntfs_fix_post_read(struct NTFS_RECORD_HEADER *rhdr, size_t bytes,
		       bool simple)
{
	int ret = 0;
	u16 fo, fn;
	u16 *fixup, *ptr, sample;

	fo = le16_to_cpu(rhdr->fix_off);
	fn = simple ? ((bytes >> SECTOR_SHIFT) + 1) :
		      le16_to_cpu(rhdr->fix_num);

	/* Check errors. */
	if ((fo & 1) || fo + fn * sizeof(short) > SECTOR_SIZE || !fn-- ||
	    fn * SECTOR_SIZE > bytes) {
		return -E_NTFS_CORRUPT;
	}

	/* Get fixup pointer. */
	fixup = Add2Ptr(rhdr, fo);
	sample = *fixup;
	ptr = Add2Ptr(rhdr, SECTOR_SIZE - sizeof(short));

	while (fn--) {
		/* Test current word. */
		if (*ptr != sample) {
			/* Fixup does not match! Is it a serious error? */
			ret = -E_NTFS_FIXUP;
		}

		/* Replace fixup. */
		*ptr = *++fixup;
		ptr += SECTOR_SIZE / sizeof(short);
	}

	return ret;
}
/*
 * ntfs_extend_init - Load $Extend file.
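 *
 * $Extend is a directory, introduced in NTFS 3.0, that holds the optional
 * metadata files $ObjId, $Quota, $Reparse and $UsnJrnl.
 */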
int ntfs_extend_init(struct ntfs_sb_info *sbi)
{
	int err;
	struct super_block *sb = sbi->sb;
	struct inode *inode, *inode2;
	struct MFT_REF ref;

	if (sbi->volume.major_ver < 3) {
		ntfs_notice(sb, "Skip $Extend: NTFS version is older than 3.0");
		return 0;
	}

	ref.low = cpu_to_le32(MFT_REC_EXTEND);
	ref.high = 0;
	ref.seq = cpu_to_le16(MFT_REC_EXTEND);
	inode = ntfs_iget5(sb, &ref, &NAME_EXTEND);
	if (IS_ERR(inode)) {
		err = PTR_ERR(inode);
		ntfs_err(sb, "Failed to load $Extend.");
		inode = NULL;
		goto out;
	}

	/* If ntfs_iget5() reads from disk, it never returns a bad inode. */
	if (!S_ISDIR(inode->i_mode)) {
		err = -EINVAL;
		goto out;
	}

	/* Try to find $ObjId */
	inode2 = dir_search_u(inode, &NAME_OBJID, NULL);
	if (inode2 && !IS_ERR(inode2)) {
		if (is_bad_inode(inode2)) {
			iput(inode2);
		} else {
			sbi->objid.ni = ntfs_i(inode2);
			sbi->objid_no = inode2->i_ino;
		}
	}

	/* Try to find $Quota */
	inode2 = dir_search_u(inode, &NAME_QUOTA, NULL);
	if (inode2 && !IS_ERR(inode2)) {
		sbi->quota_no = inode2->i_ino;
		iput(inode2);
	}

	/* Try to find $Reparse */
	inode2 = dir_search_u(inode, &NAME_REPARSE, NULL);
	if (inode2 && !IS_ERR(inode2)) {
		sbi->reparse.ni = ntfs_i(inode2);
		sbi->reparse_no = inode2->i_ino;
	}

	/* Try to find $UsnJrnl */
	inode2 = dir_search_u(inode, &NAME_USNJRNL, NULL);
	if (inode2 && !IS_ERR(inode2)) {
		sbi->usn_jrnl_no = inode2->i_ino;
		iput(inode2);
	}

	err = 0;
out:
	iput(inode);
	return err;
}
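
/*
 * ntfs_loadlog_and_replay - Load $LogFile and replay the journal.
 */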
int ntfs_loadlog_and_replay(struct ntfs_inode *ni, struct ntfs_sb_info *sbi)
{
	int err = 0;
	struct super_block *sb = sbi->sb;
	bool initialized = false;
	struct MFT_REF ref;
	struct inode *inode;

	/* Check for 4GB. */
	if (ni->vfs_inode.i_size >= 0x100000000ull) {
		ntfs_err(sb, "\x24LogFile is too big");
		err = -EINVAL;
		goto out;
	}

	sbi->flags |= NTFS_FLAGS_LOG_REPLAYING;

	ref.low = cpu_to_le32(MFT_REC_MFT);
	ref.high = 0;
	ref.seq = cpu_to_le16(1);

	inode = ntfs_iget5(sb, &ref, NULL);

	if (IS_ERR(inode)) {
		/* Try to use MFT copy. */
		u64 t64 = sbi->mft.lbo;

		sbi->mft.lbo = sbi->mft.lbo2;
		inode = ntfs_iget5(sb, &ref, NULL);
		sbi->mft.lbo = t64;
	}

	if (IS_ERR(inode)) {
		err = PTR_ERR(inode);
		ntfs_err(sb, "Failed to load $MFT.");
		goto out;
	}

	sbi->mft.ni = ntfs_i(inode);

	/* $LogFile should not contain an attribute list. */
	err = ni_load_all_mi(sbi->mft.ni);
	if (!err)
		err = log_replay(ni, &initialized);

	iput(inode);
	sbi->mft.ni = NULL;

	sync_blockdev(sb->s_bdev);
	invalidate_bdev(sb->s_bdev);

	if (sbi->flags & NTFS_FLAGS_NEED_REPLAY) {
		err = 0;
		goto out;
	}

	if (sb_rdonly(sb) || !initialized)
		goto out;

	/* Fill $LogFile with 0xFF ('-1') bytes if it was initialized. */
	err = ntfs_bio_fill_1(sbi, &ni->file.run);

out:
	sbi->flags &= ~NTFS_FLAGS_LOG_REPLAYING;

	return err;
}
/*
 * ntfs_query_def
 *
 * Return: Current ATTR_DEF_ENTRY for given attribute type.
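 *
 * Performs a binary search over sbi->def_table, which is built from
 * $AttrDef at mount time and is sorted by attribute type.
 */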
const struct ATTR_DEF_ENTRY *ntfs_query_def(struct ntfs_sb_info *sbi,
					    enum ATTR_TYPE type)
{
	int type_in = le32_to_cpu(type);
	size_t min_idx = 0;
	size_t max_idx = sbi->def_entries - 1;

	while (min_idx <= max_idx) {
		size_t i = min_idx + ((max_idx - min_idx) >> 1);
		const struct ATTR_DEF_ENTRY *entry = sbi->def_table + i;
		int diff = le32_to_cpu(entry->type) - type_in;

		if (!diff)
			return entry;
		if (diff < 0)
			min_idx = i + 1;
		else if (i)
			max_idx = i - 1;
		else
			return NULL;
	}
	return NULL;
}
/*
 * ntfs_look_for_free_space - Look for free space in the cluster bitmap.
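 *
 * Requests flagged ALLOCATE_MFT are served from the reserved MFT zone;
 * ordinary requests start from the cached 'next_free_lcn' hint and may
 * shrink the MFT zone when the volume runs low on free clusters.
 */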
int ntfs_look_for_free_space(struct ntfs_sb_info *sbi, CLST lcn, CLST len,
			     CLST *new_lcn, CLST *new_len,
			     enum ALLOCATE_OPT opt)
{
	int err;
	CLST alen;
	struct super_block *sb = sbi->sb;
	size_t alcn, zlen, zeroes, zlcn, zlen2, ztrim, new_zlen;
	struct wnd_bitmap *wnd = &sbi->used.bitmap;

	down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
	if (opt & ALLOCATE_MFT) {
		zlen = wnd_zone_len(wnd);
		if (!zlen) {
			err = ntfs_refresh_zone(sbi);
			if (err)
				goto out;
			zlen = wnd_zone_len(wnd);
		}
		if (!zlen) {
			ntfs_err(sbi->sb, "no free space to extend mft");
			err = -ENOSPC;
			goto out;
		}

		lcn = wnd_zone_bit(wnd);
		alen = min_t(CLST, len, zlen);

		/* Carve the allocation off the front of the MFT zone. */
		wnd_zone_set(wnd, lcn + alen, zlen - alen);

		err = wnd_set_used(wnd, lcn, alen);
	}

	/*
	 * Because cluster 0 is always in use, lcn == 0 means that we should
	 * use the cached value of 'next_free_lcn' to improve performance.
	 */
	if (!lcn)
		lcn = sbi->used.next_free_lcn;

	if (lcn >= wnd->nbits)
		lcn = 0;

	alen = wnd_find(wnd, len, lcn, BITMAP_FIND_MARK_AS_USED, &alcn);

	/* Try to use clusters from the MFT zone. */
	zlen = wnd_zone_len(wnd);
	zeroes = wnd_zeroes(wnd);

	/* Reject requests that are too large. */
	if (len > zeroes + zlen || zlen <= NTFS_MIN_MFT_ZONE) {
		err = -ENOSPC;
		goto out;
	}

	/* How many clusters to cut from the zone. */
	zlcn = wnd_zone_bit(wnd);
	zlen2 = zlen >> 1;
	ztrim = clamp_val(len, zlen2, zlen);
	new_zlen = max_t(size_t, zlen - ztrim, NTFS_MIN_MFT_ZONE);

	/* Shrink the zone and retry the search. */
	wnd_zone_set(wnd, zlcn, new_zlen);

	/* Allocate contiguous clusters. */
	alen = wnd_find(wnd, len, 0,
			BITMAP_FIND_MARK_AS_USED | BITMAP_FIND_FULL, &alcn);

	ntfs_unmap_meta(sb, alcn, alen);

	/* Set the hint for subsequent requests. */
	if (!(opt & ALLOCATE_MFT))
		sbi->used.next_free_lcn = alcn + alen;
out:
	up_write(&wnd->rw_lock);
	return err;
}
/*
 * ntfs_extend_mft - Allocate additional MFT records.
 *
 * sbi->mft.bitmap is locked for write.
 *
 * NOTE: recursive calls may occur:
 *	ntfs_look_free_mft ->
 *	ntfs_extend_mft ->
 *	ni_insert_nonresident ->
 *	ntfs_look_free_mft ->
 *	ntfs_extend_mft
 *
 * To avoid recursion, always allocate space for two new MFT records
 * (see attrib.c: "at least two MFT to avoid recursive loop").
 */
static int ntfs_extend_mft(struct ntfs_sb_info *sbi)
{
	int err;
	struct ntfs_inode *ni = sbi->mft.ni;
	size_t new_mft_total;
	u64 new_mft_bytes, new_bitmap_bytes;
	struct ATTRIB *attr;
	struct wnd_bitmap *wnd = &sbi->mft.bitmap;
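
	/* Round the new record total up to a multiple of 128 records. */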
	new_mft_total = (wnd->nbits + MFT_INCREASE_CHUNK + 127) & (CLST)~127;
	new_mft_bytes = (u64)new_mft_total << sbi->record_bits;

	/* Step 1: Resize $MFT::DATA. */
	down_write(&ni->file.run_lock);
	err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run,
			    new_mft_bytes, NULL, false, &attr);
	if (err) {
		up_write(&ni->file.run_lock);
		goto out;
	}

	attr->nres.valid_size = attr->nres.data_size;
	new_mft_total = le64_to_cpu(attr->nres.alloc_size) >> sbi->record_bits;

	/* Step 2: Resize $MFT::BITMAP. */
	new_bitmap_bytes = bitmap_size(new_mft_total);

	err = attr_set_size(ni, ATTR_BITMAP, NULL, 0, &sbi->mft.bitmap.run,
			    new_bitmap_bytes, &new_bitmap_bytes, true, NULL);

	/* Refresh MFT Zone if necessary. */
	down_write_nested(&sbi->used.bitmap.rw_lock, BITMAP_MUTEX_CLUSTERS);

	ntfs_refresh_zone(sbi);

	up_write(&sbi->used.bitmap.rw_lock);
	up_write(&ni->file.run_lock);

	if (err)
		goto out;

	err = wnd_extend(wnd, new_mft_total);
	if (err)
		goto out;

	ntfs_clear_mft_tail(sbi, sbi->mft.used, new_mft_total);

	err = _ni_write_inode(&ni->vfs_inode, 0);
out:
	return err;
}
/*
 * ntfs_look_free_mft - Look for a free MFT record.
 */
int ntfs_look_free_mft(struct ntfs_sb_info *sbi, CLST *rno, bool mft,
		       struct ntfs_inode *ni, struct mft_inode **mi)
{
	int err = 0;
	size_t zbit, zlen, from, to, fr;
	size_t mft_total;
	struct MFT_REF ref;
	struct super_block *sb = sbi->sb;
	struct wnd_bitmap *wnd = &sbi->mft.bitmap;
	u32 ir;

	static_assert(sizeof(sbi->mft.reserved_bitmap) * 8 >=
		      MFT_REC_FREE - MFT_REC_RESERVED);

	down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_MFT);

	zlen = wnd_zone_len(wnd);

	/* Always reserve space for MFT. */
	if (zlen) {
		zbit = wnd_zone_bit(wnd);
		/* ... */
		wnd_zone_set(wnd, zbit + 1, zlen - 1);
		/* ... */
	}

	/* No MFT zone. Find the free record nearest to record 0. */
	if (!wnd_find(wnd, 1, MFT_REC_FREE, 0, &zbit)) {
		/* Resize the MFT. */
		mft_total = wnd->nbits;
		/* ... */
		err = ntfs_extend_mft(sbi);
		/* ... */
	}

	if (!mft || MFT_REC_FREE == sbi->mft.next_reserved) {
		/* ... */
	}

	/*
	 * Look for a free record in the reserved area [11-16) ==
	 * [MFT_REC_RESERVED, MFT_REC_FREE). The MFT bitmap always
	 * marks these records as used.
	 */
	if (!sbi->mft.reserved_bitmap) {
		/* Once per session, build the internal bitmap of the 5 reserved records. */
		sbi->mft.reserved_bitmap = 0xFF;

		for (ir = MFT_REC_RESERVED; ir < MFT_REC_FREE; ir++) {
			struct inode *i;
			struct ntfs_inode *ni;
			struct MFT_REC *mrec;

			ref.low = cpu_to_le32(ir);
			ref.seq = cpu_to_le16(ir);

			i = ntfs_iget5(sb, &ref, NULL);
			if (IS_ERR(i)) {
				ntfs_notice(sb,
					    "Invalid reserved record %x",
					    ref.low);
				continue;
			}
			if (is_bad_inode(i)) {
				iput(i);
				continue;
			}

			ni = ntfs_i(i);
			mrec = ni->mi.mrec;

			if (!is_rec_base(mrec))
				goto next;

			if (mrec->hard_links)
				goto next;

			if (ni_find_attr(ni, NULL, NULL, ATTR_NAME,
					 NULL, 0, NULL, NULL))
				goto next;

			__clear_bit(ir - MFT_REC_RESERVED,
				    &sbi->mft.reserved_bitmap);
next:
			iput(i);
		}
	}

	/* Scan 5 bits for zero. Bit 0 == MFT_REC_RESERVED. */
	zbit = find_next_zero_bit(&sbi->mft.reserved_bitmap,
				  MFT_REC_FREE, MFT_REC_RESERVED);
	if (zbit >= MFT_REC_FREE) {
		sbi->mft.next_reserved = MFT_REC_FREE;
		/* ... */
	}
	/* ... */
	sbi->mft.next_reserved = zbit;
	/* ... */

	/* Reserve a small zone of records for the MFT itself. */
	zlen = zbit == MFT_REC_FREE ? (MFT_REC_USER - MFT_REC_FREE) : 4;
	if (zbit + zlen > wnd->nbits)
		zlen = wnd->nbits - zbit;

	while (zlen > 1 && !wnd_is_free(wnd, zbit, zlen))
		zlen -= 1;

	/* [zbit, zbit + zlen) will be used for the MFT itself. */
	from = sbi->mft.used;
	/* ... */
	ntfs_clear_mft_tail(sbi, from, to);
	/* ... */
	wnd_zone_set(wnd, zbit, zlen);
	/* ... */

	/* The request is for a general-purpose record. */
	if (sbi->mft.next_free < MFT_REC_USER)
		sbi->mft.next_free = MFT_REC_USER;
	/* ... */
	if (sbi->mft.next_free >= sbi->mft.bitmap.nbits) {
		/* ... */
	} else if (!wnd_find(wnd, 1, MFT_REC_USER, 0, &fr)) {
		sbi->mft.next_free = sbi->mft.bitmap.nbits;
	} else {
		*rno = fr;
		sbi->mft.next_free = *rno + 1;
	}
	/* ... */
	err = ntfs_extend_mft(sbi);
	/* ... */

	if (ni && !ni_add_subrecord(ni, *rno, mi)) {
		/* ... */
	}

	/* We have found a record that is not reserved for the next MFT. */
	if (*rno >= MFT_REC_FREE)
		wnd_set_used(wnd, *rno, 1);
	else if (*rno >= MFT_REC_RESERVED && sbi->mft.reserved_bitmap_inited)
		__set_bit(*rno - MFT_REC_RESERVED, &sbi->mft.reserved_bitmap);
	/* ... */

	up_write(&wnd->rw_lock);

	return err;
}
/*
 * ntfs_mark_rec_free - Mark record as free.
 */
void ntfs_mark_rec_free(struct ntfs_sb_info *sbi, CLST rno)
{
	struct wnd_bitmap *wnd = &sbi->mft.bitmap;

	down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_MFT);
	if (rno >= wnd->nbits)
		goto out;

	if (rno >= MFT_REC_FREE) {
		if (!wnd_is_used(wnd, rno, 1))
			ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
		else
			wnd_set_free(wnd, rno, 1);
	} else if (rno >= MFT_REC_RESERVED && sbi->mft.reserved_bitmap_inited) {
		__clear_bit(rno - MFT_REC_RESERVED, &sbi->mft.reserved_bitmap);
	}

	if (rno < wnd_zone_bit(wnd))
		wnd_zone_set(wnd, rno, 1);
	else if (rno < sbi->mft.next_free && rno >= MFT_REC_USER)
		sbi->mft.next_free = rno;

out:
	up_write(&wnd->rw_lock);
}
/*
 * ntfs_clear_mft_tail - Format empty records [from, to).
 *
 * sbi->mft.bitmap is locked for write.
 */
int ntfs_clear_mft_tail(struct ntfs_sb_info *sbi, size_t from, size_t to)
{
	int err;
	u32 rs;
	u64 vbo;
	struct runs_tree *run;
	struct ntfs_inode *ni;

	if (from >= to)
		return 0;

	rs = sbi->record_size;
	ni = sbi->mft.ni;
	run = &ni->file.run;

	down_read(&ni->file.run_lock);
	vbo = (u64)from * rs;
	for (; from < to; from++, vbo += rs) {
		struct ntfs_buffers nb;

		err = ntfs_get_bh(sbi, run, vbo, rs, &nb);
		if (err)
			goto out;

		/* Stamp each record with the empty-record template. */
		err = ntfs_write_bh(sbi, &sbi->new_rec->rhdr, &nb, 0);
		nb_put(&nb);
		if (err)
			goto out;
	}

out:
	sbi->mft.used = from;
	up_read(&ni->file.run_lock);
	return err;
}
/*
 * ntfs_refresh_zone - Refresh the MFT zone.
 *
 * sbi->used.bitmap is locked for rw.
 * sbi->mft.bitmap is locked for write.
 * sbi->mft.ni->file.run_lock is locked for write.
 */
int ntfs_refresh_zone(struct ntfs_sb_info *sbi)
{
	CLST zone_limit, zone_max, lcn, vcn, len;
	size_t lcn_s, zlen;
	struct wnd_bitmap *wnd = &sbi->used.bitmap;
	struct ntfs_inode *ni = sbi->mft.ni;

	/* Do not change anything if the MFT zone is already non-empty. */
	if (wnd_zone_len(wnd))
		return 0;
	/*
	 * Compute the MFT zone in two steps.
	 * Ideally we allocate 1/8 of the total clusters for the MFT,
	 * but no more than 512 MB.
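	 *
	 * Example (illustrative): with 4K clusters the cap is
	 * 512M >> 12 = 131072 clusters, so a volume of 2^28 clusters gets
	 * zone_max = min(2^28 >> 3, 131072) = 131072 clusters.
	 */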
	zone_limit = (512 * 1024 * 1024) >> sbi->cluster_bits;
	zone_max = wnd->nbits >> 3;
	if (zone_max > zone_limit)
		zone_max = zone_limit;

	vcn = bytes_to_cluster(sbi,
			       (u64)sbi->mft.bitmap.nbits << sbi->record_bits);

	if (!run_lookup_entry(&ni->file.run, vcn - 1, &lcn, &len, NULL))
		lcn = SPARSE_LCN;

	/* We should always find the last LCN of the MFT. */
	if (lcn == SPARSE_LCN)
		return -EINVAL;

	lcn_s = lcn + 1;

	/* Try to allocate clusters after the last MFT run. */
	zlen = wnd_find(wnd, zone_max, lcn_s, 0, &lcn_s);
	if (!zlen) {
		ntfs_notice(sbi->sb, "MftZone: unavailable");
		return 0;
	}

	/* Truncate too large zone. */
	wnd_zone_set(wnd, lcn_s, zlen);

	return 0;
}
/*
 * ntfs_update_mftmirr - Update $MFTMirr data.
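 *
 * Copies the first 'recs_mirr' MFT records to the $MFTMirr location,
 * block by block, through the buffer cache.
 */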
int ntfs_update_mftmirr(struct ntfs_sb_info *sbi, int wait)
{
	int err = 0;
	struct super_block *sb = sbi->sb;
	u32 blocksize;
	sector_t block1, block2;
	u32 bytes;

	blocksize = sb->s_blocksize;

	if (!(sbi->flags & NTFS_FLAGS_MFTMIRR))
		return 0;

	bytes = sbi->mft.recs_mirr << sbi->record_bits;
	block1 = sbi->mft.lbo >> sb->s_blocksize_bits;
	block2 = sbi->mft.lbo2 >> sb->s_blocksize_bits;

	for (; bytes >= blocksize; bytes -= blocksize) {
		struct buffer_head *bh1, *bh2;

		bh1 = sb_bread(sb, block1++);
		if (!bh1) {
			err = -EIO;
			goto out;
		}

		bh2 = sb_getblk(sb, block2++);
		if (!bh2) {
			put_bh(bh1);
			err = -EIO;
			goto out;
		}

		if (buffer_locked(bh2))
			__wait_on_buffer(bh2);

		lock_buffer(bh2);
		memcpy(bh2->b_data, bh1->b_data, blocksize);
		set_buffer_uptodate(bh2);
		mark_buffer_dirty(bh2);
		unlock_buffer(bh2);

		put_bh(bh1);

		if (wait) {
			err = sync_dirty_buffer(bh2);
			if (err) {
				put_bh(bh2);
				goto out;
			}
		}

		put_bh(bh2);
	}

	sbi->flags &= ~NTFS_FLAGS_MFTMIRR;
out:
	return err;
}
/*
 * ntfs_set_state
 *
 * Mount: ntfs_set_state(NTFS_DIRTY_DIRTY)
 * Umount: ntfs_set_state(NTFS_DIRTY_CLEAR)
 * NTFS error: ntfs_set_state(NTFS_DIRTY_ERROR)
 */
int ntfs_set_state(struct ntfs_sb_info *sbi, enum NTFS_DIRTY_FLAGS dirty)
{
	int err;
	struct ATTRIB *attr;
	struct VOLUME_INFO *info;
	struct mft_inode *mi;
	struct ntfs_inode *ni;

	/*
	 * Do not change state if the fs was marked really dirty.
	 * Do not change state if the fs is already dirty (or clear).
	 * Do not change anything if mounted read-only.
	 */
	if (sbi->volume.real_dirty || sb_rdonly(sbi->sb))
		return 0;

	/* Check cached value. */
	if ((dirty == NTFS_DIRTY_CLEAR ? 0 : VOLUME_FLAG_DIRTY) ==
	    (sbi->volume.flags & VOLUME_FLAG_DIRTY))
		return 0;

	ni = sbi->volume.ni;
	if (!ni)
		return -EINVAL;

	mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_DIRTY);

	attr = ni_find_attr(ni, NULL, NULL, ATTR_VOL_INFO, NULL, 0, NULL, &mi);
	if (!attr) {
		err = -EINVAL;
		goto out;
	}

	info = resident_data_ex(attr, SIZEOF_ATTRIBUTE_VOLUME_INFO);
	if (!info) {
		err = -EINVAL;
		goto out;
	}

	switch (dirty) {
	case NTFS_DIRTY_ERROR:
		ntfs_notice(sbi->sb, "Mark volume as dirty due to NTFS errors");
		sbi->volume.real_dirty = true;
		fallthrough;
	case NTFS_DIRTY_DIRTY:
		info->flags |= VOLUME_FLAG_DIRTY;
		break;
	case NTFS_DIRTY_CLEAR:
		info->flags &= ~VOLUME_FLAG_DIRTY;
		break;
	}
	/* Cache current volume flags. */
	sbi->volume.flags = info->flags;
	err = 0;

out:
	ni_unlock(ni);
	if (err)
		return err;

	mark_inode_dirty(&ni->vfs_inode);
	/* verify(!ntfs_update_mftmirr()); */

	/*
	 * With wait = 1, sync_inode_metadata() waits for the inode's I/O to
	 * finish, which can hang when the media has been removed. So wait = 0
	 * is passed down to sync_inode_metadata(), and filemap_fdatawrite()
	 * is used for the data blocks.
	 */
	err = sync_inode_metadata(&ni->vfs_inode, 0);
	if (!err)
		err = filemap_fdatawrite(ni->vfs_inode.i_mapping);

	return err;
}
/*
 * security_hash - Calculate a hash of the security descriptor.
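 *
 * This matches the hash NTFS stores in the $SDH index: for each 32-bit
 * word of the descriptor, hash = rol32(hash, 3) + word.
 */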
static inline __le32 security_hash(const void *sd, size_t bytes)
{
	u32 hash = 0;
	const __le32 *ptr = sd;

	bytes >>= 2;
	while (bytes--)
		hash = ((hash >> 0x1D) | (hash << 3)) + le32_to_cpu(*ptr++);
	return cpu_to_le32(hash);
}
int ntfs_sb_read(struct super_block *sb, u64 lbo, size_t bytes, void *buffer)
{
	struct block_device *bdev = sb->s_bdev;
	u32 blocksize = sb->s_blocksize;
	u64 block = lbo >> sb->s_blocksize_bits;
	u32 off = lbo & (blocksize - 1);
	u32 op = blocksize - off;
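
	/* 'op' is the number of bytes that fit in the current block. */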
	for (; bytes; block += 1, off = 0, op = blocksize) {
		struct buffer_head *bh = __bread(bdev, block, blocksize);

		if (!bh)
			return -EIO;

		if (op > bytes)
			op = bytes;

		memcpy(buffer, bh->b_data + off, op);

		put_bh(bh);

		bytes -= op;
		buffer = Add2Ptr(buffer, op);
	}

	return 0;
}
int ntfs_sb_write(struct super_block *sb, u64 lbo, size_t bytes,
		  const void *buf, int wait)
{
	u32 blocksize = sb->s_blocksize;
	struct block_device *bdev = sb->s_bdev;
	sector_t block = lbo >> sb->s_blocksize_bits;
	u32 off = lbo & (blocksize - 1);
	u32 op = blocksize - off;
	struct buffer_head *bh;

	if (!wait && (sb->s_flags & SB_SYNCHRONOUS))
		wait = 1;

	for (; bytes; block += 1, off = 0, op = blocksize) {
		if (op > bytes)
			op = bytes;

		if (op < blocksize) {
			/* Read-modify-write a partial block. */
			bh = __bread(bdev, block, blocksize);
			if (!bh) {
				ntfs_err(sb, "failed to read block %llx",
					 (u64)block);
				return -EIO;
			}
		} else {
			bh = __getblk(bdev, block, blocksize);
			if (!bh)
				return -ENOMEM;
		}

		if (buffer_locked(bh))
			__wait_on_buffer(bh);

		lock_buffer(bh);
		if (buf) {
			memcpy(bh->b_data + off, buf, op);
			buf = Add2Ptr(buf, op);
		} else {
			memset(bh->b_data + off, -1, op);
		}

		set_buffer_uptodate(bh);
		mark_buffer_dirty(bh);
		unlock_buffer(bh);

		if (wait) {
			int err = sync_dirty_buffer(bh);

			if (err) {
				ntfs_err(sb,
					 "failed to sync buffer at block %llx, error %d",
					 (u64)block, err);
				put_bh(bh);
				return err;
			}
		}

		put_bh(bh);

		bytes -= op;
	}
	return 0;
}
int ntfs_sb_write_run(struct ntfs_sb_info *sbi, const struct runs_tree *run,
		      u64 vbo, const void *buf, size_t bytes, int sync)
{
	struct super_block *sb = sbi->sb;
	u8 cluster_bits = sbi->cluster_bits;
	u32 off = vbo & sbi->cluster_mask;
	CLST lcn, clen, vcn = vbo >> cluster_bits, vcn_next;
	u64 lbo, len;
	size_t idx;

	if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx))
		return -ENOENT;

	if (lcn == SPARSE_LCN)
		return -EINVAL;

	lbo = ((u64)lcn << cluster_bits) + off;
	len = ((u64)clen << cluster_bits) - off;

	for (;;) {
		u32 op = min_t(u64, len, bytes);
		int err = ntfs_sb_write(sb, lbo, op, buf, sync);

		if (err)
			return err;

		bytes -= op;
		if (!bytes)
			break;

		vcn_next = vcn + clen;
		if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
		    vcn != vcn_next)
			return -ENOENT;

		if (lcn == SPARSE_LCN)
			return -EINVAL;

		if (buf)
			buf = Add2Ptr(buf, op);

		lbo = ((u64)lcn << cluster_bits);
		len = ((u64)clen << cluster_bits);
	}

	return 0;
}
struct buffer_head *ntfs_bread_run(struct ntfs_sb_info *sbi,
				   const struct runs_tree *run, u64 vbo)
{
	struct super_block *sb = sbi->sb;
	u8 cluster_bits = sbi->cluster_bits;
	CLST lcn;
	u64 lbo;

	if (!run_lookup_entry(run, vbo >> cluster_bits, &lcn, NULL, NULL))
		return ERR_PTR(-ENOENT);

	lbo = ((u64)lcn << cluster_bits) + (vbo & sbi->cluster_mask);

	return ntfs_bread(sb, lbo >> sb->s_blocksize_bits);
}
int ntfs_read_run_nb(struct ntfs_sb_info *sbi, const struct runs_tree *run,
		     u64 vbo, void *buf, u32 bytes, struct ntfs_buffers *nb)
{
	int err;
	struct super_block *sb = sbi->sb;
	u32 blocksize = sb->s_blocksize;
	u8 cluster_bits = sbi->cluster_bits;
	u32 off = vbo & sbi->cluster_mask;
	u32 nbh = 0;
	CLST vcn_next, vcn = vbo >> cluster_bits;
	CLST lcn, clen;
	u64 lbo, len;
	size_t idx;
	struct buffer_head *bh;

	if (!run) {
		/* First reading of $Volume + $MFTMirr + $LogFile goes here. */
		if (vbo > MFT_REC_VOL * sbi->record_size) {
			err = -ENOENT;
			goto out;
		}

		/* Use the boot sector's absolute 'MFTCluster' to read the record. */
		lbo = vbo + sbi->mft.lbo;
		len = sbi->record_size;
	} else if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {
		err = -ENOENT;
		goto out;
	} else {
		if (lcn == SPARSE_LCN) {
			err = -EINVAL;
			goto out;
		}

		lbo = ((u64)lcn << cluster_bits) + off;
		len = ((u64)clen << cluster_bits) - off;
	}

	off = lbo & (blocksize - 1);

	for (;;) {
		u32 len32 = len >= bytes ? bytes : len;
		sector_t block = lbo >> sb->s_blocksize_bits;

		do {
			u32 op = blocksize - off;

			if (op > len32)
				op = len32;

			bh = ntfs_bread(sb, block);
			if (!bh) {
				err = -EIO;
				goto out;
			}

			if (buf) {
				memcpy(buf, bh->b_data + off, op);
				buf = Add2Ptr(buf, op);
			}

			if (!nb) {
				put_bh(bh);
			} else if (nbh >= ARRAY_SIZE(nb->bh)) {
				err = -EINVAL;
				goto out;
			} else {
				nb->bh[nbh++] = bh;
				nb->nbufs = nbh;
			}

			bytes -= op;
			if (!bytes)
				return 0;

			len32 -= op;
			block += 1;
			off = 0;
		} while (len32);

		vcn_next = vcn + clen;
		if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
		    vcn != vcn_next) {
			err = -ENOENT;
			goto out;
		}

		if (lcn == SPARSE_LCN) {
			err = -EINVAL;
			goto out;
		}

		lbo = ((u64)lcn << cluster_bits);
		len = ((u64)clen << cluster_bits);
	}

out:
	if (!nb)
		return err;

	while (nbh) {
		put_bh(nb->bh[--nbh]);
		nb->bh[nbh] = NULL;
	}

	nb->nbufs = 0;
	return err;
}
/*
 * ntfs_read_bh
 *
 * Return: < 0 if error, 0 if ok, -E_NTFS_FIXUP if need to update fixups.
 */
int ntfs_read_bh(struct ntfs_sb_info *sbi, const struct runs_tree *run, u64 vbo,
		 struct NTFS_RECORD_HEADER *rhdr, u32 bytes,
		 struct ntfs_buffers *nb)
{
	int err = ntfs_read_run_nb(sbi, run, vbo, rhdr, bytes, nb);

	if (err)
		return err;
	return ntfs_fix_post_read(rhdr, nb->bytes, true);
}
int ntfs_get_bh(struct ntfs_sb_info *sbi, const struct runs_tree *run, u64 vbo,
		u32 bytes, struct ntfs_buffers *nb)
{
	int err = 0;
	struct super_block *sb = sbi->sb;
	u32 blocksize = sb->s_blocksize;
	u8 cluster_bits = sbi->cluster_bits;
	CLST vcn_next, vcn = vbo >> cluster_bits;
	CLST lcn, clen;
	u64 lbo, len;
	size_t idx;
	u32 off;
	u32 nbh = 0;

	nb->bytes = bytes;

	if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {
		err = -ENOENT;
		goto out;
	}

	off = vbo & sbi->cluster_mask;
	lbo = ((u64)lcn << cluster_bits) + off;
	len = ((u64)clen << cluster_bits) - off;

	nb->off = off = lbo & (blocksize - 1);

	for (;;) {
		u32 len32 = min_t(u64, len, bytes);
		sector_t block = lbo >> sb->s_blocksize_bits;

		do {
			u32 op;
			struct buffer_head *bh;

			if (nbh >= ARRAY_SIZE(nb->bh)) {
				err = -EINVAL;
				goto out;
			}

			op = blocksize - off;
			if (op > len32)
				op = len32;

			if (op == blocksize) {
				/* A full block will be overwritten: get it without reading. */
				bh = sb_getblk(sb, block);
				if (!bh) {
					err = -ENOMEM;
					goto out;
				}
				if (buffer_locked(bh))
					__wait_on_buffer(bh);
				set_buffer_uptodate(bh);
			} else {
				bh = ntfs_bread(sb, block);
				if (!bh) {
					err = -EIO;
					goto out;
				}
			}

			nb->bh[nbh++] = bh;
			bytes -= op;
			if (!bytes) {
				nb->nbufs = nbh;
				return 0;
			}

			block += 1;
			len32 -= op;
			off = 0;
		} while (len32);

		vcn_next = vcn + clen;
		if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
		    vcn != vcn_next) {
			err = -ENOENT;
			goto out;
		}

		lbo = ((u64)lcn << cluster_bits);
		len = ((u64)clen << cluster_bits);
	}

out:
	while (nbh) {
		put_bh(nb->bh[--nbh]);
		nb->bh[nbh] = NULL;
	}

	nb->nbufs = 0;
	return err;
}
int ntfs_write_bh(struct ntfs_sb_info *sbi, struct NTFS_RECORD_HEADER *rhdr,
		  struct ntfs_buffers *nb, int sync)
{
	int err = 0;
	struct super_block *sb = sbi->sb;
	u32 block_size = sb->s_blocksize;
	u32 bytes = nb->bytes;
	u32 off = nb->off;
	u16 fo = le16_to_cpu(rhdr->fix_off);
	u16 fn = le16_to_cpu(rhdr->fix_num);
	u32 idx;
	__le16 *fixup;
	__le16 sample;

	/* Check errors. */
	if ((fo & 1) || fo + fn * sizeof(short) > SECTOR_SIZE || !fn-- ||
	    fn * SECTOR_SIZE > bytes) {
		return -EINVAL;
	}

	for (idx = 0; bytes && idx < nb->nbufs; idx += 1, off = 0) {
		u32 op = block_size - off;
		char *bh_data;
		struct buffer_head *bh = nb->bh[idx];
		__le16 *ptr, *end_data;

		if (op > bytes)
			op = bytes;

		if (buffer_locked(bh))
			__wait_on_buffer(bh);

		lock_buffer(nb->bh[idx]);

		bh_data = bh->b_data + off;
		end_data = Add2Ptr(bh_data, op);
		memcpy(bh_data, rhdr, op);

		/* Advance the update sequence number on the first buffer. */
		if (!idx) {
			u16 t16;

			fixup = Add2Ptr(bh_data, fo);
			sample = *fixup;
			t16 = le16_to_cpu(sample);
			if (t16 >= 0x7FFF) {
				sample = *fixup = cpu_to_le16(1);
			} else {
				sample = cpu_to_le16(t16 + 1);
				*fixup = sample;
			}

			*(__le16 *)Add2Ptr(rhdr, fo) = sample;
		}

		ptr = Add2Ptr(bh_data, SECTOR_SIZE - sizeof(short));

		do {
			*++fixup = *ptr;
			*ptr = sample;
			ptr += SECTOR_SIZE / sizeof(short);
		} while (ptr < end_data);

		set_buffer_uptodate(bh);
		mark_buffer_dirty(bh);
		unlock_buffer(bh);

		if (sync) {
			int err2 = sync_dirty_buffer(bh);

			if (!err && err2)
				err = err2;
		}

		bytes -= op;
		rhdr = Add2Ptr(rhdr, op);
	}

	return err;
}
static inline struct bio *ntfs_alloc_bio(u32 nr_vecs)
{
	struct bio *bio = bio_alloc(GFP_NOFS | __GFP_HIGH, nr_vecs);

	if (!bio && (current->flags & PF_MEMALLOC)) {
		while (!bio && (nr_vecs /= 2))
			bio = bio_alloc(GFP_NOFS | __GFP_HIGH, nr_vecs);
	}
	return bio;
}
/*
 * ntfs_bio_pages - Read/write pages from/to disk.
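 *
 * Walks the run list, building and chaining bios under a blk plug, then
 * waits for the whole chain to complete before returning.
 */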
int ntfs_bio_pages(struct ntfs_sb_info *sbi, const struct runs_tree *run,
		   struct page **pages, u32 nr_pages, u64 vbo, u32 bytes,
		   u32 op)
{
	int err = 0;
	struct bio *new, *bio = NULL;
	struct super_block *sb = sbi->sb;
	struct block_device *bdev = sb->s_bdev;
	struct page *page;
	u8 cluster_bits = sbi->cluster_bits;
	CLST lcn, clen, vcn, vcn_next;
	u32 add, off, page_idx;
	u64 lbo, len;
	size_t run_idx;
	struct blk_plug plug;

	if (!bytes)
		return 0;

	blk_start_plug(&plug);

	/* Align vbo and bytes to a 512-byte boundary. */
	lbo = (vbo + bytes + 511) & ~511ull;
	vbo = vbo & ~511ull;
	bytes = lbo - vbo;

	vcn = vbo >> cluster_bits;
	if (!run_lookup_entry(run, vcn, &lcn, &clen, &run_idx)) {
		err = -ENOENT;
		goto out;
	}
	off = vbo & sbi->cluster_mask;
	page_idx = 0;
	page = pages[0];

	for (;;) {
		lbo = ((u64)lcn << cluster_bits) + off;
		len = ((u64)clen << cluster_bits) - off;
new_bio:
		new = ntfs_alloc_bio(nr_pages - page_idx);
		if (!new) {
			err = -ENOMEM;
			goto out;
		}
		if (bio) {
			bio_chain(bio, new);
			submit_bio(bio);
		}
		bio = new;
		bio_set_dev(bio, bdev);
		bio->bi_iter.bi_sector = lbo >> 9;
		bio->bi_opf = op;

		while (len) {
			off = vbo & (PAGE_SIZE - 1);
			add = off + len > PAGE_SIZE ? (PAGE_SIZE - off) : len;

			if (bio_add_page(bio, page, add, off) < add)
				goto new_bio;

			if (bytes <= add)
				goto out;
			bytes -= add;
			vbo += add;

			if (add + off == PAGE_SIZE) {
				page_idx += 1;
				if (WARN_ON(page_idx >= nr_pages)) {
					err = -EINVAL;
					goto out;
				}
				page = pages[page_idx];
			}

			if (len <= add)
				break;
			len -= add;
			lbo += add;
		}

		vcn_next = vcn + clen;
		if (!run_get_entry(run, ++run_idx, &vcn, &lcn, &clen) ||
		    vcn != vcn_next) {
			err = -ENOENT;
			goto out;
		}
		off = 0;
	}
out:
	if (bio) {
		if (!err)
			err = submit_bio_wait(bio);
		bio_put(bio);
	}
	blk_finish_plug(&plug);

	return err;
}
/*
 * ntfs_bio_fill_1 - Helper for ntfs_loadlog_and_replay().
 *
 * Fills the on-disk $LogFile range with 0xFF ('-1') bytes, which marks
 * the log file as empty.
 */
int ntfs_bio_fill_1(struct ntfs_sb_info *sbi, const struct runs_tree *run)
{
	int err = 0;
	struct super_block *sb = sbi->sb;
	struct block_device *bdev = sb->s_bdev;
	u8 cluster_bits = sbi->cluster_bits;
	struct bio *new, *bio = NULL;
	CLST lcn, clen;
	u64 lbo, len;
	size_t run_idx;
	struct page *fill;
	void *kaddr;
	struct blk_plug plug;

	fill = alloc_page(GFP_KERNEL);
	if (!fill)
		return -ENOMEM;

	kaddr = kmap_atomic(fill);
	memset(kaddr, -1, PAGE_SIZE);
	kunmap_atomic(kaddr);
	flush_dcache_page(fill);

	if (!run_lookup_entry(run, 0, &lcn, &clen, &run_idx)) {
		err = -ENOENT;
		goto out;
	}

	/*
	 * TODO: Try blkdev_issue_write_same.
	 */
	blk_start_plug(&plug);
	do {
		lbo = (u64)lcn << cluster_bits;
		len = (u64)clen << cluster_bits;
new_bio:
		new = ntfs_alloc_bio(BIO_MAX_VECS);
		if (!new) {
			err = -ENOMEM;
			break;
		}
		if (bio) {
			bio_chain(bio, new);
			submit_bio(bio);
		}
		bio = new;
		bio_set_dev(bio, bdev);
		bio->bi_opf = REQ_OP_WRITE;
		bio->bi_iter.bi_sector = lbo >> 9;

		for (;;) {
			u32 add = len > PAGE_SIZE ? PAGE_SIZE : len;

			if (bio_add_page(bio, fill, add, 0) < add)
				goto new_bio;

			lbo += add;
			if (len <= add)
				break;
			len -= add;
		}
	} while (run_get_entry(run, ++run_idx, NULL, &lcn, &clen));

	if (bio) {
		if (!err)
			err = submit_bio_wait(bio);
		bio_put(bio);
	}
	blk_finish_plug(&plug);
out:
	put_page(fill);

	return err;
}
int ntfs_vbo_to_lbo(struct ntfs_sb_info *sbi, const struct runs_tree *run,
		    u64 vbo, u64 *lbo, u64 *bytes)
{
	u32 off;
	CLST lcn, len;
	u8 cluster_bits = sbi->cluster_bits;

	if (!run_lookup_entry(run, vbo >> cluster_bits, &lcn, &len, NULL))
		return -ENOENT;

	off = vbo & sbi->cluster_mask;
	*lbo = lcn == SPARSE_LCN ? -1 : (((u64)lcn << cluster_bits) + off);
	*bytes = ((u64)len << cluster_bits) - off;

	return 0;
}
struct ntfs_inode *ntfs_new_inode(struct ntfs_sb_info *sbi, CLST rno, bool dir)
{
	int err = 0;
	struct super_block *sb = sbi->sb;
	struct inode *inode = new_inode(sb);
	struct ntfs_inode *ni;

	if (!inode)
		return ERR_PTR(-ENOMEM);

	ni = ntfs_i(inode);

	err = mi_format_new(&ni->mi, sbi, rno, dir ? RECORD_FLAG_DIR : 0,
			    false);
	if (err)
		goto out;

	inode->i_ino = rno;
	if (insert_inode_locked(inode) < 0) {
		err = -EIO;
		goto out;
	}

out:
	if (err) {
		make_bad_inode(inode);
		iput(inode);
		ni = ERR_PTR(err);
	}
	return ni;
}
/*
 * O:BAG:BAD:(A;OICI;FA;;;WD)
 * Owner S-1-5-32-544 (Administrators)
 * Group S-1-5-32-544 (Administrators)
 * ACE: allow S-1-1-0 (Everyone) with FILE_ALL_ACCESS
 */
const u8 s_default_security[] __aligned(8) = {
	0x01, 0x00, 0x04, 0x80, 0x30, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x02, 0x00, 0x1C, 0x00,
	0x01, 0x00, 0x00, 0x00, 0x00, 0x03, 0x14, 0x00, 0xFF, 0x01, 0x1F, 0x00,
	0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
	0x01, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x20, 0x00, 0x00, 0x00,
	0x20, 0x02, 0x00, 0x00, 0x01, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05,
	0x20, 0x00, 0x00, 0x00, 0x20, 0x02, 0x00, 0x00,
};

static_assert(sizeof(s_default_security) == 0x50);
static inline u32 sid_length(const struct SID *sid)
{
	return struct_size(sid, SubAuthority, sid->SubAuthorityCount);
}
/*
 * is_acl_valid
 *
 * Thanks to Mark Harmstone for the idea.
 */
static bool is_acl_valid(const struct ACL *acl, u32 len)
{
	const struct ACE_HEADER *ace;
	u32 i;
	u16 ace_count, ace_size;

	if (acl->AclRevision != ACL_REVISION &&
	    acl->AclRevision != ACL_REVISION_DS) {
		/*
		 * This value should be ACL_REVISION, unless the ACL contains
		 * an object-specific ACE, in which case this value must be
		 * ACL_REVISION_DS. All ACEs in an ACL must be at the same
		 * revision level.
		 */
		return false;
	}
	/* ... */
	if (le16_to_cpu(acl->AclSize) > len)
		return false;
	/* ... */
	len -= sizeof(struct ACL);
	ace = (struct ACE_HEADER *)&acl[1];
	ace_count = le16_to_cpu(acl->AceCount);

	for (i = 0; i < ace_count; i++) {
		if (len < sizeof(struct ACE_HEADER))
			return false;

		ace_size = le16_to_cpu(ace->AceSize);
		if (len < ace_size)
			return false;

		len -= ace_size;
		ace = Add2Ptr(ace, ace_size);
	}

	return true;
}
bool is_sd_valid(const struct SECURITY_DESCRIPTOR_RELATIVE *sd, u32 len)
{
	u32 sd_owner, sd_group, sd_sacl, sd_dacl;

	if (len < sizeof(struct SECURITY_DESCRIPTOR_RELATIVE))
		return false;

	if (sd->Revision != 1)
		return false;

	if (!(sd->Control & SE_SELF_RELATIVE))
		return false;

	sd_owner = le32_to_cpu(sd->Owner);
	if (sd_owner) {
		const struct SID *owner = Add2Ptr(sd, sd_owner);

		if (sd_owner + offsetof(struct SID, SubAuthority) > len)
			return false;

		if (owner->Revision != 1)
			return false;

		if (sd_owner + sid_length(owner) > len)
			return false;
	}

	sd_group = le32_to_cpu(sd->Group);
	if (sd_group) {
		const struct SID *group = Add2Ptr(sd, sd_group);

		if (sd_group + offsetof(struct SID, SubAuthority) > len)
			return false;

		if (group->Revision != 1)
			return false;

		if (sd_group + sid_length(group) > len)
			return false;
	}

	sd_sacl = le32_to_cpu(sd->Sacl);
	if (sd_sacl) {
		const struct ACL *sacl = Add2Ptr(sd, sd_sacl);

		if (sd_sacl + sizeof(struct ACL) > len)
			return false;

		if (!is_acl_valid(sacl, len - sd_sacl))
			return false;
	}

	sd_dacl = le32_to_cpu(sd->Dacl);
	if (sd_dacl) {
		const struct ACL *dacl = Add2Ptr(sd, sd_dacl);

		if (sd_dacl + sizeof(struct ACL) > len)
			return false;

		if (!is_acl_valid(dacl, len - sd_dacl))
			return false;
	}

	return true;
}
/*
 * ntfs_security_init - Load and parse $Secure.
 */
int ntfs_security_init(struct ntfs_sb_info *sbi)
{
	int err;
	struct super_block *sb = sbi->sb;
	struct inode *inode;
	struct ntfs_inode *ni;
	struct MFT_REF ref;
	struct ATTRIB *attr;
	struct ATTR_LIST_ENTRY *le;
	u64 sds_size;
	size_t off;
	struct NTFS_DE *ne;
	struct NTFS_DE_SII *sii_e;
	struct ntfs_fnd *fnd_sii = NULL;
	const struct INDEX_ROOT *root_sii;
	const struct INDEX_ROOT *root_sdh;
	struct ntfs_index *indx_sdh = &sbi->security.index_sdh;
	struct ntfs_index *indx_sii = &sbi->security.index_sii;

	ref.low = cpu_to_le32(MFT_REC_SECURE);
	ref.high = 0;
	ref.seq = cpu_to_le16(MFT_REC_SECURE);

	inode = ntfs_iget5(sb, &ref, &NAME_SECURE);
	if (IS_ERR(inode)) {
		err = PTR_ERR(inode);
		ntfs_err(sb, "Failed to load $Secure.");
		inode = NULL;
		goto out;
	}

	ni = ntfs_i(inode);

	le = NULL;

	attr = ni_find_attr(ni, NULL, &le, ATTR_ROOT, SDH_NAME,
			    ARRAY_SIZE(SDH_NAME), NULL, NULL);
	if (!attr) {
		err = -EINVAL;
		goto out;
	}

	root_sdh = resident_data_ex(attr, sizeof(struct INDEX_ROOT));
	if (root_sdh->type != ATTR_ZERO ||
	    root_sdh->rule != NTFS_COLLATION_TYPE_SECURITY_HASH ||
	    offsetof(struct INDEX_ROOT, ihdr) + root_sdh->ihdr.used >
		    attr->res.data_size) {
		err = -EINVAL;
		goto out;
	}

	err = indx_init(indx_sdh, sbi, attr, INDEX_MUTEX_SDH);
	if (err)
		goto out;

	attr = ni_find_attr(ni, attr, &le, ATTR_ROOT, SII_NAME,
			    ARRAY_SIZE(SII_NAME), NULL, NULL);
	if (!attr) {
		err = -EINVAL;
		goto out;
	}

	root_sii = resident_data_ex(attr, sizeof(struct INDEX_ROOT));
	if (root_sii->type != ATTR_ZERO ||
	    root_sii->rule != NTFS_COLLATION_TYPE_UINT ||
	    offsetof(struct INDEX_ROOT, ihdr) + root_sii->ihdr.used >
		    attr->res.data_size) {
		err = -EINVAL;
		goto out;
	}

	err = indx_init(indx_sii, sbi, attr, INDEX_MUTEX_SII);
	if (err)
		goto out;

	fnd_sii = fnd_get();
	if (!fnd_sii) {
		err = -ENOMEM;
		goto out;
	}

	sds_size = inode->i_size;

	/* Find the last valid Id. */
	sbi->security.next_id = SECURITY_ID_FIRST;
	/* Always write new security entries at the end of the SDS bucket. */
	sbi->security.next_off =
		ALIGN(sds_size - SecurityDescriptorsBlockSize, 16);

	off = 0;
	ne = NULL;

	for (;;) {
		u32 next_id;

		err = indx_find_raw(indx_sii, ni, root_sii, &ne, &off, fnd_sii);
		if (err || !ne)
			break;

		sii_e = (struct NTFS_DE_SII *)ne;
		if (le16_to_cpu(ne->view.data_size) < SIZEOF_SECURITY_HDR)
			continue;

		next_id = le32_to_cpu(sii_e->sec_id) + 1;
		if (next_id >= sbi->security.next_id)
			sbi->security.next_id = next_id;
	}

	sbi->security.ni = ni;
	inode = NULL;
out:
	iput(inode);
	fnd_put(fnd_sii);

	return err;
}
/*
 * ntfs_get_security_by_id - Read security descriptor by id.
 */
int ntfs_get_security_by_id(struct ntfs_sb_info *sbi, __le32 security_id,
			    struct SECURITY_DESCRIPTOR_RELATIVE **sd,
			    size_t *size)
{
	int err;
	int diff;
	struct ntfs_inode *ni = sbi->security.ni;
	struct ntfs_index *indx = &sbi->security.index_sii;
	void *p = NULL;
	struct NTFS_DE_SII *sii_e;
	struct ntfs_fnd *fnd_sii;
	struct SECURITY_HDR d_security;
	const struct INDEX_ROOT *root_sii;
	u32 t32;

	*sd = NULL;

	mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_SECURITY);

	fnd_sii = fnd_get();
	if (!fnd_sii) {
		err = -ENOMEM;
		goto out;
	}

	root_sii = indx_get_root(indx, ni, NULL, NULL);
	if (!root_sii) {
		err = -EINVAL;
		goto out;
	}

	/* Try to find this SECURITY descriptor in the SII indexes. */
	err = indx_find(indx, ni, root_sii, &security_id, sizeof(security_id),
			NULL, &diff, (struct NTFS_DE **)&sii_e, fnd_sii);
	if (err)
		goto out;

	if (diff)
		goto out;

	t32 = le32_to_cpu(sii_e->sec_hdr.size);
	if (t32 < SIZEOF_SECURITY_HDR) {
		err = -EINVAL;
		goto out;
	}

	if (t32 > SIZEOF_SECURITY_HDR + 0x10000) {
		/* Security descriptor is too large; 0x10000 is an arbitrary upper bound. */
		err = -EFBIG;
		goto out;
	}

	*size = t32 - SIZEOF_SECURITY_HDR;

	p = kmalloc(*size, GFP_NOFS);
	if (!p) {
		err = -ENOMEM;
		goto out;
	}

	err = ntfs_read_run_nb(sbi, &ni->file.run,
			       le64_to_cpu(sii_e->sec_hdr.off), &d_security,
			       sizeof(d_security), NULL);
	if (err)
		goto out;

	if (memcmp(&d_security, &sii_e->sec_hdr, SIZEOF_SECURITY_HDR)) {
		err = -EINVAL;
		goto out;
	}

	err = ntfs_read_run_nb(sbi, &ni->file.run,
			       le64_to_cpu(sii_e->sec_hdr.off) +
				       SIZEOF_SECURITY_HDR,
			       p, *size, NULL);
	if (err)
		goto out;

	*sd = p;
	p = NULL;

out:
	kfree(p);
	fnd_put(fnd_sii);
	ni_unlock(ni);

	return err;
}
/*
 * ntfs_insert_security - Insert security descriptor into $Secure::SDS.
 *
 * SECURITY Descriptor Stream data is organized into chunks of 256K bytes
 * and it contains a mirror copy of each security descriptor. When writing
 * to a security descriptor at location X, another copy will be written at
 * location (X+256K).
 * When a security descriptor would cross a 256K boundary, the write
 * pointer is advanced by 256K to skip over the mirror portion.
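 *
 * Layout of the SDS stream (illustrative):
 *	[0x00000 .. 0x3FFFF]	first block of descriptors
 *	[0x40000 .. 0x7FFFF]	mirror of the first block
 *	[0x80000 .. 0xBFFFF]	second block of descriptors
 *	[0xC0000 .. 0xFFFFF]	mirror of the second block, and so on.
 */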
int ntfs_insert_security(struct ntfs_sb_info *sbi,
			 const struct SECURITY_DESCRIPTOR_RELATIVE *sd,
			 u32 size_sd, __le32 *security_id, bool *inserted)
{
	int err, diff;
	struct ntfs_inode *ni = sbi->security.ni;
	struct ntfs_index *indx_sdh = &sbi->security.index_sdh;
	struct ntfs_index *indx_sii = &sbi->security.index_sii;
	struct NTFS_DE_SDH *e;
	struct NTFS_DE_SDH sdh_e;
	struct NTFS_DE_SII sii_e;
	struct SECURITY_HDR *d_security;
	u32 new_sec_size = size_sd + SIZEOF_SECURITY_HDR;
	u32 aligned_sec_size = ALIGN(new_sec_size, 16);
	struct SECURITY_KEY hash_key;
	struct ntfs_fnd *fnd_sdh = NULL;
	const struct INDEX_ROOT *root_sdh;
	const struct INDEX_ROOT *root_sii;
	u64 mirr_off, new_sds_size;
	u32 next, left;

	static_assert((1 << Log2OfSecurityDescriptorsBlockSize) ==
		      SecurityDescriptorsBlockSize);

	hash_key.hash = security_hash(sd, size_sd);
	hash_key.sec_id = SECURITY_ID_INVALID;

	if (inserted)
		*inserted = false;
	*security_id = SECURITY_ID_INVALID;

	/* Allocate a temporary buffer. */
	d_security = kzalloc(aligned_sec_size, GFP_NOFS);
	if (!d_security)
		return -ENOMEM;

	mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_SECURITY);

	fnd_sdh = fnd_get();
	if (!fnd_sdh) {
		err = -ENOMEM;
		goto out;
	}

	root_sdh = indx_get_root(indx_sdh, ni, NULL, NULL);
	if (!root_sdh) {
		err = -EINVAL;
		goto out;
	}

	root_sii = indx_get_root(indx_sii, ni, NULL, NULL);
	if (!root_sii) {
		err = -EINVAL;
		goto out;
	}

	/*
	 * Check if such a security descriptor already exists.
	 * Use the "SDH" index with the hash to get the offset in "SDS".
	 */
	err = indx_find(indx_sdh, ni, root_sdh, &hash_key, sizeof(hash_key),
			&d_security->key.sec_id, &diff, (struct NTFS_DE **)&e,
			fnd_sdh);
	if (err)
		goto out;

	while (e) {
		if (le32_to_cpu(e->sec_hdr.size) == new_sec_size) {
			err = ntfs_read_run_nb(sbi, &ni->file.run,
					       le64_to_cpu(e->sec_hdr.off),
					       d_security, new_sec_size, NULL);
			if (err)
				goto out;

			if (le32_to_cpu(d_security->size) == new_sec_size &&
			    d_security->key.hash == hash_key.hash &&
			    !memcmp(d_security + 1, sd, size_sd)) {
				*security_id = d_security->key.sec_id;
				/* This security descriptor already exists. */
				goto out;
			}
		}

		/* Keep scanning entries with the same hash. */
		err = indx_find_sort(indx_sdh, ni, root_sdh,
				     (struct NTFS_DE **)&e, fnd_sdh);
		if (err)
			goto out;

		if (!e || e->key.hash != hash_key.hash)
			break;
	}

	/* Zero unused space. */
	next = sbi->security.next_off & (SecurityDescriptorsBlockSize - 1);
	left = SecurityDescriptorsBlockSize - next;

	/* Zero the gap until SecurityDescriptorsBlockSize. */
	if (left < new_sec_size) {
		/* Zero "left" bytes from sbi->security.next_off. */
		sbi->security.next_off += SecurityDescriptorsBlockSize + left;
	}

	/* Zero the tail of the previous security descriptor. */
	//used = ni->vfs_inode.i_size & (SecurityDescriptorsBlockSize - 1);

	/*
	 * Example:
	 * 0x40438 == ni->vfs_inode.i_size
	 * 0x00440 == sbi->security.next_off
	 * need to zero [0x438-0x440)
	 * if (next > used) {
	 *	u32 tozero = next - used;
	 *	zero "tozero" bytes from sbi->security.next_off - tozero
	 * }
	 */

	/* Format the new security descriptor. */
	d_security->key.hash = hash_key.hash;
	d_security->key.sec_id = cpu_to_le32(sbi->security.next_id);
	d_security->off = cpu_to_le64(sbi->security.next_off);
	d_security->size = cpu_to_le32(new_sec_size);
	memcpy(d_security + 1, sd, size_sd);

	/* Write the main SDS bucket. */
	err = ntfs_sb_write_run(sbi, &ni->file.run, sbi->security.next_off,
				d_security, aligned_sec_size, 0);
	if (err)
		goto out;

	mirr_off = sbi->security.next_off + SecurityDescriptorsBlockSize;
	new_sds_size = mirr_off + aligned_sec_size;

	if (new_sds_size > ni->vfs_inode.i_size) {
		err = attr_set_size(ni, ATTR_DATA, SDS_NAME,
				    ARRAY_SIZE(SDS_NAME), &ni->file.run,
				    new_sds_size, &new_sds_size, false, NULL);
		if (err)
			goto out;
	}

	/* Write the mirror SDS bucket. */
	err = ntfs_sb_write_run(sbi, &ni->file.run, mirr_off, d_security,
				aligned_sec_size, 0);
	if (err)
		goto out;

	/* Fill the SII entry. */
	sii_e.de.view.data_off =
		cpu_to_le16(offsetof(struct NTFS_DE_SII, sec_hdr));
	sii_e.de.view.data_size = cpu_to_le16(SIZEOF_SECURITY_HDR);
	sii_e.de.view.res = 0;
	sii_e.de.size = cpu_to_le16(SIZEOF_SII_DIRENTRY);
	sii_e.de.key_size = cpu_to_le16(sizeof(d_security->key.sec_id));
	sii_e.sec_id = d_security->key.sec_id;
	memcpy(&sii_e.sec_hdr, d_security, SIZEOF_SECURITY_HDR);

	err = indx_insert_entry(indx_sii, ni, &sii_e.de, NULL, NULL, 0);
	if (err)
		goto out;

	/* Fill the SDH entry. */
	sdh_e.de.view.data_off =
		cpu_to_le16(offsetof(struct NTFS_DE_SDH, sec_hdr));
	sdh_e.de.view.data_size = cpu_to_le16(SIZEOF_SECURITY_HDR);
	sdh_e.de.view.res = 0;
	sdh_e.de.size = cpu_to_le16(SIZEOF_SDH_DIRENTRY);
	sdh_e.de.key_size = cpu_to_le16(sizeof(sdh_e.key));
	sdh_e.key.hash = d_security->key.hash;
	sdh_e.key.sec_id = d_security->key.sec_id;
	memcpy(&sdh_e.sec_hdr, d_security, SIZEOF_SECURITY_HDR);
	sdh_e.magic[0] = cpu_to_le16('I');
	sdh_e.magic[1] = cpu_to_le16('I');

	err = indx_insert_entry(indx_sdh, ni, &sdh_e.de, (void *)(size_t)1,
				fnd_sdh, 0);
	if (err)
		goto out;

	*security_id = d_security->key.sec_id;
	if (inserted)
		*inserted = true;

	/* Update Id and offset for the next descriptor. */
	sbi->security.next_id += 1;
	sbi->security.next_off += aligned_sec_size;

out:
	fnd_put(fnd_sdh);
	mark_inode_dirty(&ni->vfs_inode);
	ni_unlock(ni);
	kfree(d_security);

	return err;
}
/*
 * ntfs_reparse_init - Load and parse $Extend/$Reparse.
 */
int ntfs_reparse_init(struct ntfs_sb_info *sbi)
{
	int err;
	struct ntfs_inode *ni = sbi->reparse.ni;
	struct ntfs_index *indx = &sbi->reparse.index_r;
	struct ATTRIB *attr;
	struct ATTR_LIST_ENTRY *le;
	const struct INDEX_ROOT *root_r;

	if (!ni)
		return 0;

	le = NULL;
	attr = ni_find_attr(ni, NULL, &le, ATTR_ROOT, SR_NAME,
			    ARRAY_SIZE(SR_NAME), NULL, NULL);
	if (!attr) {
		err = -EINVAL;
		goto out;
	}

	root_r = resident_data(attr);
	if (root_r->type != ATTR_ZERO ||
	    root_r->rule != NTFS_COLLATION_TYPE_UINTS) {
		err = -EINVAL;
		goto out;
	}

	err = indx_init(indx, sbi, attr, INDEX_MUTEX_SR);
out:
	return err;
}
/*
 * ntfs_objid_init - Load and parse $Extend/$ObjId.
 */
int ntfs_objid_init(struct ntfs_sb_info *sbi)
{
	int err;
	struct ntfs_inode *ni = sbi->objid.ni;
	struct ntfs_index *indx = &sbi->objid.index_o;
	struct ATTRIB *attr;
	struct ATTR_LIST_ENTRY *le;
	const struct INDEX_ROOT *root;

	if (!ni)
		return 0;

	le = NULL;
	attr = ni_find_attr(ni, NULL, &le, ATTR_ROOT, SO_NAME,
			    ARRAY_SIZE(SO_NAME), NULL, NULL);
	if (!attr) {
		err = -EINVAL;
		goto out;
	}

	root = resident_data(attr);
	if (root->type != ATTR_ZERO ||
	    root->rule != NTFS_COLLATION_TYPE_UINTS) {
		err = -EINVAL;
		goto out;
	}

	err = indx_init(indx, sbi, attr, INDEX_MUTEX_SO);
out:
	return err;
}
int ntfs_objid_remove(struct ntfs_sb_info *sbi, struct GUID *guid)
{
	int err;
	struct ntfs_inode *ni = sbi->objid.ni;
	struct ntfs_index *indx = &sbi->objid.index_o;

	if (!ni)
		return -EINVAL;

	mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_OBJID);

	err = indx_delete_entry(indx, ni, guid, sizeof(*guid), NULL);

	mark_inode_dirty(&ni->vfs_inode);
	ni_unlock(ni);

	return err;
}
int ntfs_insert_reparse(struct ntfs_sb_info *sbi, __le32 rtag,
			const struct MFT_REF *ref)
{
	int err;
	struct ntfs_inode *ni = sbi->reparse.ni;
	struct ntfs_index *indx = &sbi->reparse.index_r;
	struct NTFS_DE_R re;

	if (!ni)
		return -EINVAL;

	memset(&re, 0, sizeof(re));

	re.de.view.data_off = cpu_to_le16(offsetof(struct NTFS_DE_R, zero));
	re.de.size = cpu_to_le16(sizeof(struct NTFS_DE_R));
	re.de.key_size = cpu_to_le16(sizeof(re.key));

	re.key.ReparseTag = rtag;
	memcpy(&re.key.ref, ref, sizeof(*ref));

	mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_REPARSE);

	err = indx_insert_entry(indx, ni, &re.de, NULL, NULL, 0);

	mark_inode_dirty(&ni->vfs_inode);
	ni_unlock(ni);

	return err;
}
int ntfs_remove_reparse(struct ntfs_sb_info *sbi, __le32 rtag,
			const struct MFT_REF *ref)
{
	int err, diff;
	struct ntfs_inode *ni = sbi->reparse.ni;
	struct ntfs_index *indx = &sbi->reparse.index_r;
	struct ntfs_fnd *fnd = NULL;
	struct REPARSE_KEY rkey;
	struct NTFS_DE_R *re;
	struct INDEX_ROOT *root_r;

	if (!ni)
		return -EINVAL;

	rkey.ReparseTag = rtag;
	rkey.ref = *ref;

	mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_REPARSE);

	if (rtag) {
		err = indx_delete_entry(indx, ni, &rkey, sizeof(rkey), NULL);
		goto out1;
	}

	fnd = fnd_get();
	if (!fnd) {
		err = -ENOMEM;
		goto out1;
	}

	root_r = indx_get_root(indx, ni, NULL, NULL);
	if (!root_r) {
		err = -EINVAL;
		goto out;
	}

	/* 1 - forces to ignore rkey.ReparseTag when comparing keys. */
	err = indx_find(indx, ni, root_r, &rkey, sizeof(rkey), (void *)1,
			&diff, (struct NTFS_DE **)&re, fnd);
	if (err)
		goto out;

	if (memcmp(&re->key.ref, ref, sizeof(*ref))) {
		/* Should be impossible; the volume may be corrupt. */
		goto out;
	}

	memcpy(&rkey, &re->key, sizeof(rkey));

	fnd_put(fnd);
	fnd = NULL;

	err = indx_delete_entry(indx, ni, &rkey, sizeof(rkey), NULL);
	if (err)
		goto out;

out:
	fnd_put(fnd);

out1:
	mark_inode_dirty(&ni->vfs_inode);
	ni_unlock(ni);

	return err;
}
static inline void ntfs_unmap_and_discard(struct ntfs_sb_info *sbi, CLST lcn,
					  CLST len)
{
	ntfs_unmap_meta(sbi->sb, lcn, len);
	ntfs_discard(sbi, lcn, len);
}
void mark_as_free_ex(struct ntfs_sb_info *sbi, CLST lcn, CLST len, bool trim)
{
	bool dirty = false;
	CLST end, i;
	struct wnd_bitmap *wnd = &sbi->used.bitmap;

	down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
	if (!wnd_is_used(wnd, lcn, len)) {
		/* Mark the volume as dirty outside of wnd->rw_lock. */
		dirty = true;
		/* ... */
		for (i = lcn; i < end; i++) {
			if (wnd_is_used(wnd, i, 1)) {
				/* ... */
			}
			/* ... */
		}
		/* ... */
		if (trim)
			ntfs_unmap_and_discard(sbi, lcn, len);

		wnd_set_free(wnd, lcn, len);
		goto out;
	}

	if (trim)
		ntfs_unmap_and_discard(sbi, lcn, len);
	wnd_set_free(wnd, lcn, len);

out:
	up_write(&wnd->rw_lock);
	if (dirty)
		ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
}
/*
 * run_deallocate - Deallocate clusters.
 */
int run_deallocate(struct ntfs_sb_info *sbi, struct runs_tree *run, bool trim)
{
	CLST lcn, len;
	size_t idx = 0;

	while (run_get_entry(run, idx++, NULL, &lcn, &len)) {
		if (lcn == SPARSE_LCN)
			continue;

		mark_as_free_ex(sbi, lcn, len, trim);
	}

	return 0;
}