1 // SPDX-License-Identifier: GPL-2.0
4 * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
8 #include <linux/blkdev.h>
9 #include <linux/buffer_head.h>
11 #include <linux/kernel.h>
18 const struct cpu_str NAME_MFT = {
19 4, 0, { '$', 'M', 'F', 'T' },
21 const struct cpu_str NAME_MIRROR = {
22 8, 0, { '$', 'M', 'F', 'T', 'M', 'i', 'r', 'r' },
24 const struct cpu_str NAME_LOGFILE = {
25 8, 0, { '$', 'L', 'o', 'g', 'F', 'i', 'l', 'e' },
27 const struct cpu_str NAME_VOLUME = {
28 7, 0, { '$', 'V', 'o', 'l', 'u', 'm', 'e' },
30 const struct cpu_str NAME_ATTRDEF = {
31 8, 0, { '$', 'A', 't', 't', 'r', 'D', 'e', 'f' },
33 const struct cpu_str NAME_ROOT = {
36 const struct cpu_str NAME_BITMAP = {
37 7, 0, { '$', 'B', 'i', 't', 'm', 'a', 'p' },
39 const struct cpu_str NAME_BOOT = {
40 5, 0, { '$', 'B', 'o', 'o', 't' },
42 const struct cpu_str NAME_BADCLUS = {
43 8, 0, { '$', 'B', 'a', 'd', 'C', 'l', 'u', 's' },
45 const struct cpu_str NAME_QUOTA = {
46 6, 0, { '$', 'Q', 'u', 'o', 't', 'a' },
48 const struct cpu_str NAME_SECURE = {
49 7, 0, { '$', 'S', 'e', 'c', 'u', 'r', 'e' },
51 const struct cpu_str NAME_UPCASE = {
52 7, 0, { '$', 'U', 'p', 'C', 'a', 's', 'e' },
54 const struct cpu_str NAME_EXTEND = {
55 7, 0, { '$', 'E', 'x', 't', 'e', 'n', 'd' },
57 const struct cpu_str NAME_OBJID = {
58 6, 0, { '$', 'O', 'b', 'j', 'I', 'd' },
60 const struct cpu_str NAME_REPARSE = {
61 8, 0, { '$', 'R', 'e', 'p', 'a', 'r', 's', 'e' },
63 const struct cpu_str NAME_USNJRNL = {
64 8, 0, { '$', 'U', 's', 'n', 'J', 'r', 'n', 'l' },
66 const __le16 BAD_NAME[4] = {
67 cpu_to_le16('$'), cpu_to_le16('B'), cpu_to_le16('a'), cpu_to_le16('d'),
69 const __le16 I30_NAME[4] = {
70 cpu_to_le16('$'), cpu_to_le16('I'), cpu_to_le16('3'), cpu_to_le16('0'),
72 const __le16 SII_NAME[4] = {
73 cpu_to_le16('$'), cpu_to_le16('S'), cpu_to_le16('I'), cpu_to_le16('I'),
75 const __le16 SDH_NAME[4] = {
76 cpu_to_le16('$'), cpu_to_le16('S'), cpu_to_le16('D'), cpu_to_le16('H'),
78 const __le16 SDS_NAME[4] = {
79 cpu_to_le16('$'), cpu_to_le16('S'), cpu_to_le16('D'), cpu_to_le16('S'),
81 const __le16 SO_NAME[2] = {
82 cpu_to_le16('$'), cpu_to_le16('O'),
84 const __le16 SQ_NAME[2] = {
85 cpu_to_le16('$'), cpu_to_le16('Q'),
87 const __le16 SR_NAME[2] = {
88 cpu_to_le16('$'), cpu_to_le16('R'),
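/*
 * Editor's note (not driver code): each cpu_str above is laid out as
 * { length, padding, characters[] }, so NAME_MFT describes the
 * 4-character name "$MFT". The raw __le16 arrays that follow (BAD_NAME,
 * I30_NAME, ...) carry no length prefix; callers pass ARRAY_SIZE()
 * instead. Field names here are assumptions, not quotes from the headers.
 */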
91 #ifdef CONFIG_NTFS3_LZX_XPRESS
92 const __le16 WOF_NAME[17] = {
93 cpu_to_le16('W'), cpu_to_le16('o'), cpu_to_le16('f'), cpu_to_le16('C'),
94 cpu_to_le16('o'), cpu_to_le16('m'), cpu_to_le16('p'), cpu_to_le16('r'),
95 cpu_to_le16('e'), cpu_to_le16('s'), cpu_to_le16('s'), cpu_to_le16('e'),
96 cpu_to_le16('d'), cpu_to_le16('D'), cpu_to_le16('a'), cpu_to_le16('t'),
104 * ntfs_fix_pre_write - Insert fixups into @rhdr before writing to disk.
106 bool ntfs_fix_pre_write(struct NTFS_RECORD_HEADER *rhdr, size_t bytes)
110 u16 fo = le16_to_cpu(rhdr->fix_off);
111 u16 fn = le16_to_cpu(rhdr->fix_num);
113 if ((fo & 1) || fo + fn * sizeof(short) > SECTOR_SIZE || !fn-- ||
114 fn * SECTOR_SIZE > bytes) {
118 /* Get fixup pointer. */
119 fixup = Add2Ptr(rhdr, fo);
121 if (*fixup >= 0x7FFF)
128 ptr = Add2Ptr(rhdr, SECTOR_SIZE - sizeof(short));
133 ptr += SECTOR_SIZE / sizeof(short);
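/*
 * Editor's sketch (hypothetical names, not driver code) of the
 * update-sequence scheme implemented above, assuming 512-byte sectors:
 * usa[0] holds the sequence number (wrapping within 1..0x7FFF, as the
 * check above suggests) and usa[1..cnt-1] save the last two bytes of
 * each sector before they are overwritten with the sequence number.
 */
static void usa_protect_sketch(u8 *rec, u16 usa_off, u16 usa_cnt, u16 seq)
{
	u16 *usa = (u16 *)(rec + usa_off);
	u16 i;

	usa[0] = seq;
	for (i = 1; i < usa_cnt; i++) {
		u16 *tail = (u16 *)(rec + i * 512 - sizeof(u16));

		usa[i] = *tail;	/* Save the original sector tail. */
		*tail = seq;	/* Stamp the sequence number. */
	}
}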
139 * ntfs_fix_post_read - Remove fixups after reading from disk.
141 * Return: < 0 if error, 0 if ok, 1 if fixups need to be updated.
143 int ntfs_fix_post_read(struct NTFS_RECORD_HEADER *rhdr, size_t bytes,
150 fo = le16_to_cpu(rhdr->fix_off);
151 fn = simple ? ((bytes >> SECTOR_SHIFT) + 1)
152 : le16_to_cpu(rhdr->fix_num);
155 if ((fo & 1) || fo + fn * sizeof(short) > SECTOR_SIZE || !fn-- ||
156 fn * SECTOR_SIZE > bytes) {
157 return -E_NTFS_CORRUPT;
160 /* Get fixup pointer. */
161 fixup = Add2Ptr(rhdr, fo);
163 ptr = Add2Ptr(rhdr, SECTOR_SIZE - sizeof(short));
167 /* Test current word. */
168 if (*ptr != sample) {
169 /* Fixup mismatch! Is this a serious error? */
175 ptr += SECTOR_SIZE / sizeof(short);
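/*
 * Editor's sketch of the matching read-side check (same hypothetical
 * names as the write-side sketch above): every sector tail must equal
 * usa[0]; if so, the saved words are restored, otherwise the record is
 * torn and must be rejected.
 */
static int usa_verify_sketch(u8 *rec, u16 usa_off, u16 usa_cnt)
{
	u16 *usa = (u16 *)(rec + usa_off);
	u16 i;

	for (i = 1; i < usa_cnt; i++) {
		u16 *tail = (u16 *)(rec + i * 512 - sizeof(u16));

		if (*tail != usa[0])
			return -1;	/* Torn multi-sector write. */
		*tail = usa[i];		/* Restore the original bytes. */
	}
	return 0;
}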
182 * ntfs_extend_init - Load $Extend file.
184 int ntfs_extend_init(struct ntfs_sb_info *sbi)
187 struct super_block *sb = sbi->sb;
188 struct inode *inode, *inode2;
191 if (sbi->volume.major_ver < 3) {
192 ntfs_notice(sb, "Skip $Extend: NTFS version is older than 3.0");
196 ref.low = cpu_to_le32(MFT_REC_EXTEND);
198 ref.seq = cpu_to_le16(MFT_REC_EXTEND);
199 inode = ntfs_iget5(sb, &ref, &NAME_EXTEND);
201 err = PTR_ERR(inode);
202 ntfs_err(sb, "Failed to load $Extend.");
207 /* If ntfs_iget5() reads from disk, it never returns a bad inode. */
208 if (!S_ISDIR(inode->i_mode)) {
213 /* Try to find $ObjId */
214 inode2 = dir_search_u(inode, &NAME_OBJID, NULL);
215 if (inode2 && !IS_ERR(inode2)) {
216 if (is_bad_inode(inode2)) {
219 sbi->objid.ni = ntfs_i(inode2);
220 sbi->objid_no = inode2->i_ino;
224 /* Try to find $Quota */
225 inode2 = dir_search_u(inode, &NAME_QUOTA, NULL);
226 if (inode2 && !IS_ERR(inode2)) {
227 sbi->quota_no = inode2->i_ino;
231 /* Try to find $Reparse */
232 inode2 = dir_search_u(inode, &NAME_REPARSE, NULL);
233 if (inode2 && !IS_ERR(inode2)) {
234 sbi->reparse.ni = ntfs_i(inode2);
235 sbi->reparse_no = inode2->i_ino;
238 /* Try to find $UsnJrnl */
239 inode2 = dir_search_u(inode, &NAME_USNJRNL, NULL);
240 if (inode2 && !IS_ERR(inode2)) {
241 sbi->usn_jrnl_no = inode2->i_ino;
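/*
 * Editor's note: the ref.low/ref.seq assignments above build an MFT_REF,
 * NTFS's 48-bit record number plus a 16-bit sequence counter. A hedged
 * sketch of the equivalent packing into one u64 (layout assumed from the
 * on-disk format, not quoted from the driver headers):
 */
static inline u64 mft_ref_pack_sketch(u64 rno, u16 seq)
{
	/* Low 48 bits: record number; high 16 bits: sequence number. */
	return (rno & 0xFFFFFFFFFFFFull) | ((u64)seq << 48);
}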
251 int ntfs_loadlog_and_replay(struct ntfs_inode *ni, struct ntfs_sb_info *sbi)
254 struct super_block *sb = sbi->sb;
255 bool initialized = false;
260 if (ni->vfs_inode.i_size >= 0x100000000ull) {
261 ntfs_err(sb, "\x24LogFile is too big");
266 sbi->flags |= NTFS_FLAGS_LOG_REPLAYING;
268 ref.low = cpu_to_le32(MFT_REC_MFT);
270 ref.seq = cpu_to_le16(1);
272 inode = ntfs_iget5(sb, &ref, NULL);
278 /* Try to use MFT copy. */
279 u64 t64 = sbi->mft.lbo;
281 sbi->mft.lbo = sbi->mft.lbo2;
282 inode = ntfs_iget5(sb, &ref, NULL);
290 ntfs_err(sb, "Failed to load $MFT.");
294 sbi->mft.ni = ntfs_i(inode);
296 /* The LogFile should not contain an attribute list. */
297 err = ni_load_all_mi(sbi->mft.ni);
299 err = log_replay(ni, &initialized);
304 sync_blockdev(sb->s_bdev);
305 invalidate_bdev(sb->s_bdev);
307 if (sbi->flags & NTFS_FLAGS_NEED_REPLAY) {
312 if (sb_rdonly(sb) || !initialized)
315 /* Fill the LogFile with -1 if it is initialized. */
316 err = ntfs_bio_fill_1(sbi, &ni->file.run);
319 sbi->flags &= ~NTFS_FLAGS_LOG_REPLAYING;
327 * Return: Current ATTR_DEF_ENTRY for given attribute type.
329 const struct ATTR_DEF_ENTRY *ntfs_query_def(struct ntfs_sb_info *sbi,
332 int type_in = le32_to_cpu(type);
334 size_t max_idx = sbi->def_entries - 1;
336 while (min_idx <= max_idx) {
337 size_t i = min_idx + ((max_idx - min_idx) >> 1);
338 const struct ATTR_DEF_ENTRY *entry = sbi->def_table + i;
339 int diff = le32_to_cpu(entry->type) - type_in;
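/*
 * Editor's sketch of how the elided remainder of this binary search
 * plausibly resolves (illustrative, not quoted from the driver):
 *
 *	if (!diff)
 *		return entry;
 *	if (diff < 0)
 *		min_idx = i + 1;
 *	else if (!i)
 *		return NULL;
 *	else
 *		max_idx = i - 1;
 *
 * i.e. the usual narrowing of [min_idx, max_idx], with the !i guard
 * keeping the unsigned max_idx from wrapping below zero.
 */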
354 * ntfs_look_for_free_space - Look for free space in the bitmap.
356 int ntfs_look_for_free_space(struct ntfs_sb_info *sbi, CLST lcn, CLST len,
357 CLST *new_lcn, CLST *new_len,
358 enum ALLOCATE_OPT opt)
362 struct super_block *sb = sbi->sb;
363 size_t alcn, zlen, zeroes, zlcn, zlen2, ztrim, new_zlen;
364 struct wnd_bitmap *wnd = &sbi->used.bitmap;
366 down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
367 if (opt & ALLOCATE_MFT) {
368 zlen = wnd_zone_len(wnd);
371 err = ntfs_refresh_zone(sbi);
375 zlen = wnd_zone_len(wnd);
379 ntfs_err(sbi->sb, "no free space to extend mft");
384 lcn = wnd_zone_bit(wnd);
385 alen = min_t(CLST, len, zlen);
387 wnd_zone_set(wnd, lcn + alen, zlen - alen);
389 err = wnd_set_used(wnd, lcn, alen);
397 * Because cluster 0 is always in use, this value means that we should
398 * use the cached value of 'next_free_lcn' to improve performance.
401 lcn = sbi->used.next_free_lcn;
403 if (lcn >= wnd->nbits)
406 alen = wnd_find(wnd, len, lcn, BITMAP_FIND_MARK_AS_USED, &alcn);
410 /* Try to use clusters from MftZone. */
411 zlen = wnd_zone_len(wnd);
412 zeroes = wnd_zeroes(wnd);
414 /* Check for a too-big request. */
415 if (len > zeroes + zlen || zlen <= NTFS_MIN_MFT_ZONE) {
420 /* How many clusters to cut from the zone. */
421 zlcn = wnd_zone_bit(wnd);
423 ztrim = clamp_val(len, zlen2, zlen);
424 new_zlen = max_t(size_t, zlen - ztrim, NTFS_MIN_MFT_ZONE);
426 wnd_zone_set(wnd, zlcn, new_zlen);
428 /* Allocate contiguous clusters. */
429 alen = wnd_find(wnd, len, 0,
430 BITMAP_FIND_MARK_AS_USED | BITMAP_FIND_FULL, &alcn);
441 ntfs_unmap_meta(sb, alcn, alen);
443 /* Set hint for next requests. */
444 if (!(opt & ALLOCATE_MFT))
445 sbi->used.next_free_lcn = alcn + alen;
447 up_write(&wnd->rw_lock);
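/*
 * Editor's worked example for the MFT-zone shrink above (hedged: the
 * elided zlen2 is assumed to be zlen / 2): with zlen = 1000 clusters and
 * a request of len = 300, ztrim = clamp(300, 500, 1000) = 500, so the
 * zone gives up at least half of itself, and new_zlen never drops below
 * NTFS_MIN_MFT_ZONE.
 */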
452 * ntfs_extend_mft - Allocate additional MFT records.
454 * sbi->mft.bitmap is locked for write.
457 * ntfs_look_free_mft ->
460 * ni_insert_nonresident ->
463 * ntfs_look_free_mft ->
466 * To avoid recursion, always allocate space for two new MFT records;
467 * see attrib.c: "at least two MFT to avoid recursive loop".
469 static int ntfs_extend_mft(struct ntfs_sb_info *sbi)
472 struct ntfs_inode *ni = sbi->mft.ni;
473 size_t new_mft_total;
474 u64 new_mft_bytes, new_bitmap_bytes;
476 struct wnd_bitmap *wnd = &sbi->mft.bitmap;
478 new_mft_total = (wnd->nbits + MFT_INCREASE_CHUNK + 127) & (CLST)~127;
479 new_mft_bytes = (u64)new_mft_total << sbi->record_bits;
481 /* Step 1: Resize $MFT::DATA. */
482 down_write(&ni->file.run_lock);
483 err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run,
484 new_mft_bytes, NULL, false, &attr);
487 up_write(&ni->file.run_lock);
491 attr->nres.valid_size = attr->nres.data_size;
492 new_mft_total = le64_to_cpu(attr->nres.alloc_size) >> sbi->record_bits;
495 /* Step 2: Resize $MFT::BITMAP. */
496 new_bitmap_bytes = bitmap_size(new_mft_total);
498 err = attr_set_size(ni, ATTR_BITMAP, NULL, 0, &sbi->mft.bitmap.run,
499 new_bitmap_bytes, &new_bitmap_bytes, true, NULL);
501 /* Refresh MFT Zone if necessary. */
502 down_write_nested(&sbi->used.bitmap.rw_lock, BITMAP_MUTEX_CLUSTERS);
504 ntfs_refresh_zone(sbi);
506 up_write(&sbi->used.bitmap.rw_lock);
507 up_write(&ni->file.run_lock);
512 err = wnd_extend(wnd, new_mft_total);
517 ntfs_clear_mft_tail(sbi, sbi->mft.used, new_mft_total);
519 err = _ni_write_inode(&ni->vfs_inode, 0);
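/*
 * Editor's note on the sizing math above: rounding with "+ 127, & ~127"
 * keeps the record count a multiple of 128 so the $MFT::BITMAP size stays
 * qword-aligned. E.g. (hedged, chunk value illustrative): nbits = 1000 and
 * MFT_INCREASE_CHUNK = 1024 give (1000 + 1024 + 127) & ~127 = 2048 records,
 * i.e. 2048 << record_bits bytes of $MFT::DATA.
 */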
525 * ntfs_look_free_mft - Look for a free MFT record.
527 int ntfs_look_free_mft(struct ntfs_sb_info *sbi, CLST *rno, bool mft,
528 struct ntfs_inode *ni, struct mft_inode **mi)
531 size_t zbit, zlen, from, to, fr;
534 struct super_block *sb = sbi->sb;
535 struct wnd_bitmap *wnd = &sbi->mft.bitmap;
538 static_assert(sizeof(sbi->mft.reserved_bitmap) * 8 >=
539 MFT_REC_FREE - MFT_REC_RESERVED);
542 down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_MFT);
544 zlen = wnd_zone_len(wnd);
546 /* Always reserve space for MFT. */
549 zbit = wnd_zone_bit(wnd);
551 wnd_zone_set(wnd, zbit + 1, zlen - 1);
556 /* No MFT zone. Find the free MFT record nearest to 0. */
557 if (!wnd_find(wnd, 1, MFT_REC_FREE, 0, &zbit)) {
559 mft_total = wnd->nbits;
561 err = ntfs_extend_mft(sbi);
567 if (!mft || MFT_REC_FREE == sbi->mft.next_reserved)
573 * Look for a free record in the reserved area [11-16) ==
574 * [MFT_REC_RESERVED, MFT_REC_FREE). The MFT bitmap always
577 if (!sbi->mft.reserved_bitmap) {
578 /* Once per session, create an internal bitmap for these 5 bits. */
579 sbi->mft.reserved_bitmap = 0xFF;
582 for (ir = MFT_REC_RESERVED; ir < MFT_REC_FREE; ir++) {
584 struct ntfs_inode *ni;
585 struct MFT_REC *mrec;
587 ref.low = cpu_to_le32(ir);
588 ref.seq = cpu_to_le16(ir);
590 i = ntfs_iget5(sb, &ref, NULL);
595 "Invalid reserved record %x",
599 if (is_bad_inode(i)) {
608 if (!is_rec_base(mrec))
611 if (mrec->hard_links)
617 if (ni_find_attr(ni, NULL, NULL, ATTR_NAME,
618 NULL, 0, NULL, NULL))
621 __clear_bit(ir - MFT_REC_RESERVED,
622 &sbi->mft.reserved_bitmap);
626 /* Scan 5 bits for zero. Bit 0 == MFT_REC_RESERVED */
627 zbit = find_next_zero_bit(&sbi->mft.reserved_bitmap,
628 MFT_REC_FREE, MFT_REC_RESERVED);
629 if (zbit >= MFT_REC_FREE) {
630 sbi->mft.next_reserved = MFT_REC_FREE;
635 sbi->mft.next_reserved = zbit;
638 zlen = zbit == MFT_REC_FREE ? (MFT_REC_USER - MFT_REC_FREE) : 4;
639 if (zbit + zlen > wnd->nbits)
640 zlen = wnd->nbits - zbit;
642 while (zlen > 1 && !wnd_is_free(wnd, zbit, zlen))
645 /* [zbit, zbit + zlen) will be used for MFT itself. */
646 from = sbi->mft.used;
651 ntfs_clear_mft_tail(sbi, from, to);
662 wnd_zone_set(wnd, zbit, zlen);
666 /* The request is for a general-purpose record. */
667 if (sbi->mft.next_free < MFT_REC_USER)
668 sbi->mft.next_free = MFT_REC_USER;
671 if (sbi->mft.next_free >= sbi->mft.bitmap.nbits) {
672 } else if (!wnd_find(wnd, 1, MFT_REC_USER, 0, &fr)) {
673 sbi->mft.next_free = sbi->mft.bitmap.nbits;
676 sbi->mft.next_free = *rno + 1;
680 err = ntfs_extend_mft(sbi);
686 if (ni && !ni_add_subrecord(ni, *rno, mi)) {
691 /* We have found a record that is not reserved for the next MFT. */
692 if (*rno >= MFT_REC_FREE)
693 wnd_set_used(wnd, *rno, 1);
694 else if (*rno >= MFT_REC_RESERVED && sbi->mft.reserved_bitmap_inited)
695 __set_bit(*rno - MFT_REC_RESERVED, &sbi->mft.reserved_bitmap);
699 up_write(&wnd->rw_lock);
705 * ntfs_mark_rec_free - Mark record as free.
706 * is_mft - true if we are changing the MFT itself.
708 void ntfs_mark_rec_free(struct ntfs_sb_info *sbi, CLST rno, bool is_mft)
710 struct wnd_bitmap *wnd = &sbi->mft.bitmap;
713 down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_MFT);
714 if (rno >= wnd->nbits)
717 if (rno >= MFT_REC_FREE) {
718 if (!wnd_is_used(wnd, rno, 1))
719 ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
721 wnd_set_free(wnd, rno, 1);
722 } else if (rno >= MFT_REC_RESERVED && sbi->mft.reserved_bitmap_inited) {
723 __clear_bit(rno - MFT_REC_RESERVED, &sbi->mft.reserved_bitmap);
726 if (rno < wnd_zone_bit(wnd))
727 wnd_zone_set(wnd, rno, 1);
728 else if (rno < sbi->mft.next_free && rno >= MFT_REC_USER)
729 sbi->mft.next_free = rno;
733 up_write(&wnd->rw_lock);
737 * ntfs_clear_mft_tail - Format empty records [from, to).
739 * sbi->mft.bitmap is locked for write.
741 int ntfs_clear_mft_tail(struct ntfs_sb_info *sbi, size_t from, size_t to)
746 struct runs_tree *run;
747 struct ntfs_inode *ni;
752 rs = sbi->record_size;
756 down_read(&ni->file.run_lock);
757 vbo = (u64)from * rs;
758 for (; from < to; from++, vbo += rs) {
759 struct ntfs_buffers nb;
761 err = ntfs_get_bh(sbi, run, vbo, rs, &nb);
765 err = ntfs_write_bh(sbi, &sbi->new_rec->rhdr, &nb, 0);
772 sbi->mft.used = from;
773 up_read(&ni->file.run_lock);
778 * ntfs_refresh_zone - Refresh MFT zone.
780 * sbi->used.bitmap is locked for rw.
781 * sbi->mft.bitmap is locked for write.
782 * sbi->mft.ni->file.run_lock for write.
784 int ntfs_refresh_zone(struct ntfs_sb_info *sbi)
788 struct wnd_bitmap *wnd = &sbi->used.bitmap;
789 struct ntfs_inode *ni = sbi->mft.ni;
791 /* Nothing to do while the MFT zone is still non-empty. */
792 if (wnd_zone_len(wnd))
795 vcn = bytes_to_cluster(sbi,
796 (u64)sbi->mft.bitmap.nbits << sbi->record_bits);
798 if (!run_lookup_entry(&ni->file.run, vcn - 1, &lcn, &len, NULL))
801 /* We should always find the last LCN of the MFT. */
802 if (lcn == SPARSE_LCN)
807 /* Try to allocate clusters after last MFT run. */
808 zlen = wnd_find(wnd, sbi->zone_max, lcn_s, 0, &lcn_s);
809 wnd_zone_set(wnd, lcn_s, zlen);
815 * ntfs_update_mftmirr - Update $MFTMirr data.
817 void ntfs_update_mftmirr(struct ntfs_sb_info *sbi, int wait)
820 struct super_block *sb = sbi->sb;
822 sector_t block1, block2;
828 blocksize = sb->s_blocksize;
830 if (!(sbi->flags & NTFS_FLAGS_MFTMIRR))
834 bytes = sbi->mft.recs_mirr << sbi->record_bits;
835 block1 = sbi->mft.lbo >> sb->s_blocksize_bits;
836 block2 = sbi->mft.lbo2 >> sb->s_blocksize_bits;
838 for (; bytes >= blocksize; bytes -= blocksize) {
839 struct buffer_head *bh1, *bh2;
841 bh1 = sb_bread(sb, block1++);
845 bh2 = sb_getblk(sb, block2++);
851 if (buffer_locked(bh2))
852 __wait_on_buffer(bh2);
855 memcpy(bh2->b_data, bh1->b_data, blocksize);
856 set_buffer_uptodate(bh2);
857 mark_buffer_dirty(bh2);
864 err = sync_dirty_buffer(bh2);
871 sbi->flags &= ~NTFS_FLAGS_MFTMIRR;
877 * Mark the inode as bad and mark the fs as 'dirty'.
879 void ntfs_bad_inode(struct inode *inode, const char *hint)
881 struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info;
883 ntfs_inode_err(inode, "%s", hint);
884 make_bad_inode(inode);
885 ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
891 * Mount: ntfs_set_state(NTFS_DIRTY_DIRTY)
892 * Umount: ntfs_set_state(NTFS_DIRTY_CLEAR)
893 * NTFS error: ntfs_set_state(NTFS_DIRTY_ERROR)
895 int ntfs_set_state(struct ntfs_sb_info *sbi, enum NTFS_DIRTY_FLAGS dirty)
899 struct VOLUME_INFO *info;
900 struct mft_inode *mi;
901 struct ntfs_inode *ni;
904 * Do not change state if fs was real_dirty.
905 * Do not change state if fs is already dirty (clear).
906 * Do not change anything if mounted read-only.
908 if (sbi->volume.real_dirty || sb_rdonly(sbi->sb))
911 /* Check cached value. */
912 if ((dirty == NTFS_DIRTY_CLEAR ? 0 : VOLUME_FLAG_DIRTY) ==
913 (sbi->volume.flags & VOLUME_FLAG_DIRTY))
920 mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_DIRTY);
922 attr = ni_find_attr(ni, NULL, NULL, ATTR_VOL_INFO, NULL, 0, NULL, &mi);
928 info = resident_data_ex(attr, SIZEOF_ATTRIBUTE_VOLUME_INFO);
935 case NTFS_DIRTY_ERROR:
936 ntfs_notice(sbi->sb, "Mark volume as dirty due to NTFS errors");
937 sbi->volume.real_dirty = true;
939 case NTFS_DIRTY_DIRTY:
940 info->flags |= VOLUME_FLAG_DIRTY;
942 case NTFS_DIRTY_CLEAR:
943 info->flags &= ~VOLUME_FLAG_DIRTY;
946 /* Cache current volume flags. */
947 sbi->volume.flags = info->flags;
956 mark_inode_dirty_sync(&ni->vfs_inode);
957 /* verify(!ntfs_update_mftmirr()); */
959 /* Write the MFT record to disk. */
960 err = _ni_write_inode(&ni->vfs_inode, 1);
966 * security_hash - Calculate a hash of the security descriptor.
968 static inline __le32 security_hash(const void *sd, size_t bytes)
971 const __le32 *ptr = sd;
975 hash = ((hash >> 0x1D) | (hash << 3)) + le32_to_cpu(*ptr++);
976 return cpu_to_le32(hash);
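/*
 * Editor's sketch of the hash above as a standalone routine: a
 * rotate-left-by-3 accumulator over the descriptor's little-endian
 * 32-bit words (0x1D == 29, and (x >> 29) | (x << 3) is rotl(x, 3)).
 * Illustrative only; the driver operates on __le32 in place.
 */
static u32 sd_hash_sketch(const u8 *sd, size_t bytes)
{
	u32 hash = 0;
	size_t i;

	for (i = 0; i + 4 <= bytes; i += 4) {
		u32 w = sd[i] | ((u32)sd[i + 1] << 8) |
			((u32)sd[i + 2] << 16) | ((u32)sd[i + 3] << 24);

		hash = ((hash >> 29) | (hash << 3)) + w;
	}
	return hash;
}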
980 * Simple wrapper for sb_bread_unmovable().
982 struct buffer_head *ntfs_bread(struct super_block *sb, sector_t block)
984 struct ntfs_sb_info *sbi = sb->s_fs_info;
985 struct buffer_head *bh;
987 if (unlikely(block >= sbi->volume.blocks)) {
988 /* Prevent the generic message "attempt to access beyond end of device". */
989 ntfs_err(sb, "attempt to read outside of volume at offset 0x%llx",
990 (u64)block << sb->s_blocksize_bits);
994 bh = sb_bread_unmovable(sb, block);
998 ntfs_err(sb, "failed to read volume at offset 0x%llx",
999 (u64)block << sb->s_blocksize_bits);
1003 int ntfs_sb_read(struct super_block *sb, u64 lbo, size_t bytes, void *buffer)
1005 struct block_device *bdev = sb->s_bdev;
1006 u32 blocksize = sb->s_blocksize;
1007 u64 block = lbo >> sb->s_blocksize_bits;
1008 u32 off = lbo & (blocksize - 1);
1009 u32 op = blocksize - off;
1011 for (; bytes; block += 1, off = 0, op = blocksize) {
1012 struct buffer_head *bh = __bread(bdev, block, blocksize);
1020 memcpy(buffer, bh->b_data + off, op);
1025 buffer = Add2Ptr(buffer, op);
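/*
 * Editor's note on the chunking pattern above (shared with ntfs_sb_write
 * below): only the first block may start mid-block, so off is the byte
 * offset within that block and op = blocksize - off is capped by the
 * remaining byte count; every later iteration resets off = 0 and
 * op = blocksize. E.g. blocksize = 4096, lbo = 4100, bytes = 8000 copies
 * 4092 bytes from the first block and 3908 from the second.
 */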
1031 int ntfs_sb_write(struct super_block *sb, u64 lbo, size_t bytes,
1032 const void *buf, int wait)
1034 u32 blocksize = sb->s_blocksize;
1035 struct block_device *bdev = sb->s_bdev;
1036 sector_t block = lbo >> sb->s_blocksize_bits;
1037 u32 off = lbo & (blocksize - 1);
1038 u32 op = blocksize - off;
1039 struct buffer_head *bh;
1041 if (!wait && (sb->s_flags & SB_SYNCHRONOUS))
1044 for (; bytes; block += 1, off = 0, op = blocksize) {
1048 if (op < blocksize) {
1049 bh = __bread(bdev, block, blocksize);
1051 ntfs_err(sb, "failed to read block %llx",
1056 bh = __getblk(bdev, block, blocksize);
1061 if (buffer_locked(bh))
1062 __wait_on_buffer(bh);
1066 memcpy(bh->b_data + off, buf, op);
1067 buf = Add2Ptr(buf, op);
1069 memset(bh->b_data + off, -1, op);
1072 set_buffer_uptodate(bh);
1073 mark_buffer_dirty(bh);
1077 int err = sync_dirty_buffer(bh);
1082 "failed to sync buffer at block %llx, error %d",
1096 int ntfs_sb_write_run(struct ntfs_sb_info *sbi, const struct runs_tree *run,
1097 u64 vbo, const void *buf, size_t bytes, int sync)
1099 struct super_block *sb = sbi->sb;
1100 u8 cluster_bits = sbi->cluster_bits;
1101 u32 off = vbo & sbi->cluster_mask;
1102 CLST lcn, clen, vcn = vbo >> cluster_bits, vcn_next;
1106 if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx))
1109 if (lcn == SPARSE_LCN)
1112 lbo = ((u64)lcn << cluster_bits) + off;
1113 len = ((u64)clen << cluster_bits) - off;
1116 u32 op = min_t(u64, len, bytes);
1117 int err = ntfs_sb_write(sb, lbo, op, buf, sync);
1126 vcn_next = vcn + clen;
1127 if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
1131 if (lcn == SPARSE_LCN)
1135 buf = Add2Ptr(buf, op);
1137 lbo = ((u64)lcn << cluster_bits);
1138 len = ((u64)clen << cluster_bits);
1144 struct buffer_head *ntfs_bread_run(struct ntfs_sb_info *sbi,
1145 const struct runs_tree *run, u64 vbo)
1147 struct super_block *sb = sbi->sb;
1148 u8 cluster_bits = sbi->cluster_bits;
1152 if (!run_lookup_entry(run, vbo >> cluster_bits, &lcn, NULL, NULL))
1153 return ERR_PTR(-ENOENT);
1155 lbo = ((u64)lcn << cluster_bits) + (vbo & sbi->cluster_mask);
1157 return ntfs_bread(sb, lbo >> sb->s_blocksize_bits);
1160 int ntfs_read_run_nb(struct ntfs_sb_info *sbi, const struct runs_tree *run,
1161 u64 vbo, void *buf, u32 bytes, struct ntfs_buffers *nb)
1164 struct super_block *sb = sbi->sb;
1165 u32 blocksize = sb->s_blocksize;
1166 u8 cluster_bits = sbi->cluster_bits;
1167 u32 off = vbo & sbi->cluster_mask;
1169 CLST vcn_next, vcn = vbo >> cluster_bits;
1173 struct buffer_head *bh;
1176 /* First reading of $Volume + $MFTMirr + $LogFile goes here. */
1177 if (vbo > MFT_REC_VOL * sbi->record_size) {
1182 /* Use the boot sector's absolute 'MFTCluster' to read the record. */
1183 lbo = vbo + sbi->mft.lbo;
1184 len = sbi->record_size;
1185 } else if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {
1189 if (lcn == SPARSE_LCN) {
1194 lbo = ((u64)lcn << cluster_bits) + off;
1195 len = ((u64)clen << cluster_bits) - off;
1198 off = lbo & (blocksize - 1);
1205 u32 len32 = len >= bytes ? bytes : len;
1206 sector_t block = lbo >> sb->s_blocksize_bits;
1209 u32 op = blocksize - off;
1214 bh = ntfs_bread(sb, block);
1221 memcpy(buf, bh->b_data + off, op);
1222 buf = Add2Ptr(buf, op);
1227 } else if (nbh >= ARRAY_SIZE(nb->bh)) {
1244 vcn_next = vcn + clen;
1245 if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
1251 if (lcn == SPARSE_LCN) {
1256 lbo = ((u64)lcn << cluster_bits);
1257 len = ((u64)clen << cluster_bits);
1265 put_bh(nb->bh[--nbh]);
1276 * Return: < 0 if error, 0 if ok, -E_NTFS_FIXUP if fixups need to be updated.
1278 int ntfs_read_bh(struct ntfs_sb_info *sbi, const struct runs_tree *run, u64 vbo,
1279 struct NTFS_RECORD_HEADER *rhdr, u32 bytes,
1280 struct ntfs_buffers *nb)
1282 int err = ntfs_read_run_nb(sbi, run, vbo, rhdr, bytes, nb);
1286 return ntfs_fix_post_read(rhdr, nb->bytes, true);
1289 int ntfs_get_bh(struct ntfs_sb_info *sbi, const struct runs_tree *run, u64 vbo,
1290 u32 bytes, struct ntfs_buffers *nb)
1293 struct super_block *sb = sbi->sb;
1294 u32 blocksize = sb->s_blocksize;
1295 u8 cluster_bits = sbi->cluster_bits;
1296 CLST vcn_next, vcn = vbo >> cluster_bits;
1305 if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {
1310 off = vbo & sbi->cluster_mask;
1311 lbo = ((u64)lcn << cluster_bits) + off;
1312 len = ((u64)clen << cluster_bits) - off;
1314 nb->off = off = lbo & (blocksize - 1);
1317 u32 len32 = min_t(u64, len, bytes);
1318 sector_t block = lbo >> sb->s_blocksize_bits;
1322 struct buffer_head *bh;
1324 if (nbh >= ARRAY_SIZE(nb->bh)) {
1329 op = blocksize - off;
1333 if (op == blocksize) {
1334 bh = sb_getblk(sb, block);
1339 if (buffer_locked(bh))
1340 __wait_on_buffer(bh);
1341 set_buffer_uptodate(bh);
1343 bh = ntfs_bread(sb, block);
1362 vcn_next = vcn + clen;
1363 if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
1369 lbo = ((u64)lcn << cluster_bits);
1370 len = ((u64)clen << cluster_bits);
1375 put_bh(nb->bh[--nbh]);
1384 int ntfs_write_bh(struct ntfs_sb_info *sbi, struct NTFS_RECORD_HEADER *rhdr,
1385 struct ntfs_buffers *nb, int sync)
1388 struct super_block *sb = sbi->sb;
1389 u32 block_size = sb->s_blocksize;
1390 u32 bytes = nb->bytes;
1392 u16 fo = le16_to_cpu(rhdr->fix_off);
1393 u16 fn = le16_to_cpu(rhdr->fix_num);
1398 if ((fo & 1) || fo + fn * sizeof(short) > SECTOR_SIZE || !fn-- ||
1399 fn * SECTOR_SIZE > bytes) {
1403 for (idx = 0; bytes && idx < nb->nbufs; idx += 1, off = 0) {
1404 u32 op = block_size - off;
1406 struct buffer_head *bh = nb->bh[idx];
1407 __le16 *ptr, *end_data;
1412 if (buffer_locked(bh))
1413 __wait_on_buffer(bh);
1417 bh_data = bh->b_data + off;
1418 end_data = Add2Ptr(bh_data, op);
1419 memcpy(bh_data, rhdr, op);
1424 fixup = Add2Ptr(bh_data, fo);
1426 t16 = le16_to_cpu(sample);
1427 if (t16 >= 0x7FFF) {
1428 sample = *fixup = cpu_to_le16(1);
1430 sample = cpu_to_le16(t16 + 1);
1434 *(__le16 *)Add2Ptr(rhdr, fo) = sample;
1437 ptr = Add2Ptr(bh_data, SECTOR_SIZE - sizeof(short));
1442 ptr += SECTOR_SIZE / sizeof(short);
1443 } while (ptr < end_data);
1445 set_buffer_uptodate(bh);
1446 mark_buffer_dirty(bh);
1450 int err2 = sync_dirty_buffer(bh);
1457 rhdr = Add2Ptr(rhdr, op);
1464 * ntfs_bio_pages - Read/write pages from/to disk.
1466 int ntfs_bio_pages(struct ntfs_sb_info *sbi, const struct runs_tree *run,
1467 struct page **pages, u32 nr_pages, u64 vbo, u32 bytes,
1471 struct bio *new, *bio = NULL;
1472 struct super_block *sb = sbi->sb;
1473 struct block_device *bdev = sb->s_bdev;
1475 u8 cluster_bits = sbi->cluster_bits;
1476 CLST lcn, clen, vcn, vcn_next;
1477 u32 add, off, page_idx;
1480 struct blk_plug plug;
1485 blk_start_plug(&plug);
1487 /* Align vbo and bytes to 512-byte boundaries. */
1488 lbo = (vbo + bytes + 511) & ~511ull;
1489 vbo = vbo & ~511ull;
1492 vcn = vbo >> cluster_bits;
1493 if (!run_lookup_entry(run, vcn, &lcn, &clen, &run_idx)) {
1497 off = vbo & sbi->cluster_mask;
1502 lbo = ((u64)lcn << cluster_bits) + off;
1503 len = ((u64)clen << cluster_bits) - off;
1505 new = bio_alloc(bdev, nr_pages - page_idx, op, GFP_NOFS);
1507 bio_chain(bio, new);
1511 bio->bi_iter.bi_sector = lbo >> 9;
1514 off = vbo & (PAGE_SIZE - 1);
1515 add = off + len > PAGE_SIZE ? (PAGE_SIZE - off) : len;
1517 if (bio_add_page(bio, page, add, off) < add)
1525 if (add + off == PAGE_SIZE) {
1527 if (WARN_ON(page_idx >= nr_pages)) {
1531 page = pages[page_idx];
1540 vcn_next = vcn + clen;
1541 if (!run_get_entry(run, ++run_idx, &vcn, &lcn, &clen) ||
1551 err = submit_bio_wait(bio);
1554 blk_finish_plug(&plug);
1560 * ntfs_bio_fill_1 - Helper for ntfs_loadlog_and_replay().
1562 * Fill the on-disk logfile range with -1;
1563 * this marks the logfile as empty.
1565 int ntfs_bio_fill_1(struct ntfs_sb_info *sbi, const struct runs_tree *run)
1568 struct super_block *sb = sbi->sb;
1569 struct block_device *bdev = sb->s_bdev;
1570 u8 cluster_bits = sbi->cluster_bits;
1571 struct bio *new, *bio = NULL;
1577 struct blk_plug plug;
1579 fill = alloc_page(GFP_KERNEL);
1583 kaddr = kmap_atomic(fill);
1584 memset(kaddr, -1, PAGE_SIZE);
1585 kunmap_atomic(kaddr);
1586 flush_dcache_page(fill);
1589 if (!run_lookup_entry(run, 0, &lcn, &clen, &run_idx)) {
1595 * TODO: Try blkdev_issue_write_same.
1597 blk_start_plug(&plug);
1599 lbo = (u64)lcn << cluster_bits;
1600 len = (u64)clen << cluster_bits;
1602 new = bio_alloc(bdev, BIO_MAX_VECS, REQ_OP_WRITE, GFP_NOFS);
1604 bio_chain(bio, new);
1608 bio->bi_iter.bi_sector = lbo >> 9;
1611 u32 add = len > PAGE_SIZE ? PAGE_SIZE : len;
1613 if (bio_add_page(bio, fill, add, 0) < add)
1621 } while (run_get_entry(run, ++run_idx, NULL, &lcn, &clen));
1624 err = submit_bio_wait(bio);
1627 blk_finish_plug(&plug);
1635 int ntfs_vbo_to_lbo(struct ntfs_sb_info *sbi, const struct runs_tree *run,
1636 u64 vbo, u64 *lbo, u64 *bytes)
1640 u8 cluster_bits = sbi->cluster_bits;
1642 if (!run_lookup_entry(run, vbo >> cluster_bits, &lcn, &len, NULL))
1645 off = vbo & sbi->cluster_mask;
1646 *lbo = lcn == SPARSE_LCN ? -1 : (((u64)lcn << cluster_bits) + off);
1647 *bytes = ((u64)len << cluster_bits) - off;
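/*
 * Editor's worked example for the mapping above: with cluster_bits = 12
 * (4K clusters), vbo = 0x5123 lies in VCN 5 at in-cluster offset 0x123;
 * if the run maps VCN 5 -> LCN 0x200, then lbo = (0x200 << 12) + 0x123 =
 * 0x200123. A SPARSE_LCN run is a hole with no disk address, hence the
 * -1 returned for *lbo in that case.
 */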
1652 struct ntfs_inode *ntfs_new_inode(struct ntfs_sb_info *sbi, CLST rno, bool dir)
1655 struct super_block *sb = sbi->sb;
1656 struct inode *inode = new_inode(sb);
1657 struct ntfs_inode *ni;
1660 return ERR_PTR(-ENOMEM);
1664 err = mi_format_new(&ni->mi, sbi, rno, dir ? RECORD_FLAG_DIR : 0,
1670 if (insert_inode_locked(inode) < 0) {
1677 make_bad_inode(inode);
1685 * O:BAG:BAD:(A;OICI;FA;;;WD)
1686 * Owner S-1-5-32-544 (Administrators)
1687 * Group S-1-5-32-544 (Administrators)
1688 * ACE: allow S-1-1-0 (Everyone) with FILE_ALL_ACCESS
1690 const u8 s_default_security[] __aligned(8) = {
1691 0x01, 0x00, 0x04, 0x80, 0x30, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00,
1692 0x00, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x02, 0x00, 0x1C, 0x00,
1693 0x01, 0x00, 0x00, 0x00, 0x00, 0x03, 0x14, 0x00, 0xFF, 0x01, 0x1F, 0x00,
1694 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
1695 0x01, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x20, 0x00, 0x00, 0x00,
1696 0x20, 0x02, 0x00, 0x00, 0x01, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05,
1697 0x20, 0x00, 0x00, 0x00, 0x20, 0x02, 0x00, 0x00,
1700 static_assert(sizeof(s_default_security) == 0x50);
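/*
 * Editor's decode of the blob above (offsets worked out by hand; hedged):
 *	+0x00 Revision = 1, Control = 0x8004 (SE_SELF_RELATIVE | SE_DACL_PRESENT)
 *	+0x04 Owner = 0x30, +0x08 Group = 0x40, +0x0C Sacl = 0 (none),
 *	+0x10 Dacl = 0x14
 *	+0x14 ACL: revision 2, size 0x1C, one ACE
 *	+0x1C ACE: ACCESS_ALLOWED, flags OI|CI (0x03), mask 0x001F01FF
 *	      (FILE_ALL_ACCESS), SID S-1-1-0 (Everyone)
 *	+0x30 owner SID S-1-5-32-544, +0x40 group SID S-1-5-32-544
 */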
1702 static inline u32 sid_length(const struct SID *sid)
1704 return struct_size(sid, SubAuthority, sid->SubAuthorityCount);
1710 * Thanks to Mark Harmstone for the idea.
1712 static bool is_acl_valid(const struct ACL *acl, u32 len)
1714 const struct ACE_HEADER *ace;
1716 u16 ace_count, ace_size;
1718 if (acl->AclRevision != ACL_REVISION &&
1719 acl->AclRevision != ACL_REVISION_DS) {
1721 * This value should be ACL_REVISION, unless the ACL contains an
1722 * object-specific ACE, in which case this value must be ACL_REVISION_DS.
1723 * All ACEs in an ACL must be at the same revision level.
1731 if (le16_to_cpu(acl->AclSize) > len)
1737 len -= sizeof(struct ACL);
1738 ace = (struct ACE_HEADER *)&acl[1];
1739 ace_count = le16_to_cpu(acl->AceCount);
1741 for (i = 0; i < ace_count; i++) {
1742 if (len < sizeof(struct ACE_HEADER))
1745 ace_size = le16_to_cpu(ace->AceSize);
1750 ace = Add2Ptr(ace, ace_size);
1756 bool is_sd_valid(const struct SECURITY_DESCRIPTOR_RELATIVE *sd, u32 len)
1758 u32 sd_owner, sd_group, sd_sacl, sd_dacl;
1760 if (len < sizeof(struct SECURITY_DESCRIPTOR_RELATIVE))
1763 if (sd->Revision != 1)
1769 if (!(sd->Control & SE_SELF_RELATIVE))
1772 sd_owner = le32_to_cpu(sd->Owner);
1774 const struct SID *owner = Add2Ptr(sd, sd_owner);
1776 if (sd_owner + offsetof(struct SID, SubAuthority) > len)
1779 if (owner->Revision != 1)
1782 if (sd_owner + sid_length(owner) > len)
1786 sd_group = le32_to_cpu(sd->Group);
1788 const struct SID *group = Add2Ptr(sd, sd_group);
1790 if (sd_group + offsetof(struct SID, SubAuthority) > len)
1793 if (group->Revision != 1)
1796 if (sd_group + sid_length(group) > len)
1800 sd_sacl = le32_to_cpu(sd->Sacl);
1802 const struct ACL *sacl = Add2Ptr(sd, sd_sacl);
1804 if (sd_sacl + sizeof(struct ACL) > len)
1807 if (!is_acl_valid(sacl, len - sd_sacl))
1811 sd_dacl = le32_to_cpu(sd->Dacl);
1813 const struct ACL *dacl = Add2Ptr(sd, sd_dacl);
1815 if (sd_dacl + sizeof(struct ACL) > len)
1818 if (!is_acl_valid(dacl, len - sd_dacl))
1826 * ntfs_security_init - Load and parse $Secure.
1828 int ntfs_security_init(struct ntfs_sb_info *sbi)
1831 struct super_block *sb = sbi->sb;
1832 struct inode *inode;
1833 struct ntfs_inode *ni;
1835 struct ATTRIB *attr;
1836 struct ATTR_LIST_ENTRY *le;
1840 struct NTFS_DE_SII *sii_e;
1841 struct ntfs_fnd *fnd_sii = NULL;
1842 const struct INDEX_ROOT *root_sii;
1843 const struct INDEX_ROOT *root_sdh;
1844 struct ntfs_index *indx_sdh = &sbi->security.index_sdh;
1845 struct ntfs_index *indx_sii = &sbi->security.index_sii;
1847 ref.low = cpu_to_le32(MFT_REC_SECURE);
1849 ref.seq = cpu_to_le16(MFT_REC_SECURE);
1851 inode = ntfs_iget5(sb, &ref, &NAME_SECURE);
1852 if (IS_ERR(inode)) {
1853 err = PTR_ERR(inode);
1854 ntfs_err(sb, "Failed to load $Secure.");
1863 attr = ni_find_attr(ni, NULL, &le, ATTR_ROOT, SDH_NAME,
1864 ARRAY_SIZE(SDH_NAME), NULL, NULL);
1870 if (!(root_sdh = resident_data_ex(attr, sizeof(struct INDEX_ROOT))) ||
1871 root_sdh->type != ATTR_ZERO ||
1872 root_sdh->rule != NTFS_COLLATION_TYPE_SECURITY_HASH ||
1873 offsetof(struct INDEX_ROOT, ihdr) +
1874 le32_to_cpu(root_sdh->ihdr.used) >
1875 le32_to_cpu(attr->res.data_size)) {
1880 err = indx_init(indx_sdh, sbi, attr, INDEX_MUTEX_SDH);
1884 attr = ni_find_attr(ni, attr, &le, ATTR_ROOT, SII_NAME,
1885 ARRAY_SIZE(SII_NAME), NULL, NULL);
1891 if (!(root_sii = resident_data_ex(attr, sizeof(struct INDEX_ROOT))) ||
1892 root_sii->type != ATTR_ZERO ||
1893 root_sii->rule != NTFS_COLLATION_TYPE_UINT ||
1894 offsetof(struct INDEX_ROOT, ihdr) +
1895 le32_to_cpu(root_sii->ihdr.used) >
1896 le32_to_cpu(attr->res.data_size)) {
1901 err = indx_init(indx_sii, sbi, attr, INDEX_MUTEX_SII);
1905 fnd_sii = fnd_get();
1911 sds_size = inode->i_size;
1913 /* Find the last valid Id. */
1914 sbi->security.next_id = SECURITY_ID_FIRST;
1915 /* Always write new security descriptors at the end of the bucket. */
1916 sbi->security.next_off =
1917 ALIGN(sds_size - SecurityDescriptorsBlockSize, 16);
1925 err = indx_find_raw(indx_sii, ni, root_sii, &ne, &off, fnd_sii);
1929 sii_e = (struct NTFS_DE_SII *)ne;
1930 if (le16_to_cpu(ne->view.data_size) < SIZEOF_SECURITY_HDR)
1933 next_id = le32_to_cpu(sii_e->sec_id) + 1;
1934 if (next_id >= sbi->security.next_id)
1935 sbi->security.next_id = next_id;
1938 sbi->security.ni = ni;
1948 * ntfs_get_security_by_id - Read security descriptor by id.
1950 int ntfs_get_security_by_id(struct ntfs_sb_info *sbi, __le32 security_id,
1951 struct SECURITY_DESCRIPTOR_RELATIVE **sd,
1956 struct ntfs_inode *ni = sbi->security.ni;
1957 struct ntfs_index *indx = &sbi->security.index_sii;
1959 struct NTFS_DE_SII *sii_e;
1960 struct ntfs_fnd *fnd_sii;
1961 struct SECURITY_HDR d_security;
1962 const struct INDEX_ROOT *root_sii;
1967 mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_SECURITY);
1969 fnd_sii = fnd_get();
1975 root_sii = indx_get_root(indx, ni, NULL, NULL);
1981 /* Try to find this security descriptor in the SII index. */
1982 err = indx_find(indx, ni, root_sii, &security_id, sizeof(security_id),
1983 NULL, &diff, (struct NTFS_DE **)&sii_e, fnd_sii);
1990 t32 = le32_to_cpu(sii_e->sec_hdr.size);
1991 if (t32 < SIZEOF_SECURITY_HDR) {
1996 if (t32 > SIZEOF_SECURITY_HDR + 0x10000) {
1997 /* Security descriptor looks too big; 0x10000 is an arbitrary upper bound. */
2002 *size = t32 - SIZEOF_SECURITY_HDR;
2004 p = kmalloc(*size, GFP_NOFS);
2010 err = ntfs_read_run_nb(sbi, &ni->file.run,
2011 le64_to_cpu(sii_e->sec_hdr.off), &d_security,
2012 sizeof(d_security), NULL);
2016 if (memcmp(&d_security, &sii_e->sec_hdr, SIZEOF_SECURITY_HDR)) {
2021 err = ntfs_read_run_nb(sbi, &ni->file.run,
2022 le64_to_cpu(sii_e->sec_hdr.off) +
2023 SIZEOF_SECURITY_HDR,
2040 * ntfs_insert_security - Insert security descriptor into $Secure::SDS.
2042 * SECURITY Descriptor Stream data is organized into chunks of 256K bytes
2043 * and it contains a mirror copy of each security descriptor. When writing
2044 * to a security descriptor at location X, another copy will be written at
2045 * location (X+256K).
2046 * When writing a security descriptor that will cross the 256K boundary,
2047 * the pointer will be advanced by 256K to skip
2048 * over the mirror portion.
2050 int ntfs_insert_security(struct ntfs_sb_info *sbi,
2051 const struct SECURITY_DESCRIPTOR_RELATIVE *sd,
2052 u32 size_sd, __le32 *security_id, bool *inserted)
2055 struct ntfs_inode *ni = sbi->security.ni;
2056 struct ntfs_index *indx_sdh = &sbi->security.index_sdh;
2057 struct ntfs_index *indx_sii = &sbi->security.index_sii;
2058 struct NTFS_DE_SDH *e;
2059 struct NTFS_DE_SDH sdh_e;
2060 struct NTFS_DE_SII sii_e;
2061 struct SECURITY_HDR *d_security;
2062 u32 new_sec_size = size_sd + SIZEOF_SECURITY_HDR;
2063 u32 aligned_sec_size = ALIGN(new_sec_size, 16);
2064 struct SECURITY_KEY hash_key;
2065 struct ntfs_fnd *fnd_sdh = NULL;
2066 const struct INDEX_ROOT *root_sdh;
2067 const struct INDEX_ROOT *root_sii;
2068 u64 mirr_off, new_sds_size;
2071 static_assert((1 << Log2OfSecurityDescriptorsBlockSize) ==
2072 SecurityDescriptorsBlockSize);
2074 hash_key.hash = security_hash(sd, size_sd);
2075 hash_key.sec_id = SECURITY_ID_INVALID;
2079 *security_id = SECURITY_ID_INVALID;
2081 /* Allocate a temporary buffer. */
2082 d_security = kzalloc(aligned_sec_size, GFP_NOFS);
2086 mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_SECURITY);
2088 fnd_sdh = fnd_get();
2094 root_sdh = indx_get_root(indx_sdh, ni, NULL, NULL);
2100 root_sii = indx_get_root(indx_sii, ni, NULL, NULL);
2107 * Check if such a security descriptor already exists.
2108 * Use the "SDH" index and the hash to get the offset in "SDS".
2110 err = indx_find(indx_sdh, ni, root_sdh, &hash_key, sizeof(hash_key),
2111 &d_security->key.sec_id, &diff, (struct NTFS_DE **)&e,
2117 if (le32_to_cpu(e->sec_hdr.size) == new_sec_size) {
2118 err = ntfs_read_run_nb(sbi, &ni->file.run,
2119 le64_to_cpu(e->sec_hdr.off),
2120 d_security, new_sec_size, NULL);
2124 if (le32_to_cpu(d_security->size) == new_sec_size &&
2125 d_security->key.hash == hash_key.hash &&
2126 !memcmp(d_security + 1, sd, size_sd)) {
2127 *security_id = d_security->key.sec_id;
2128 /* Such a security descriptor already exists. */
2134 err = indx_find_sort(indx_sdh, ni, root_sdh,
2135 (struct NTFS_DE **)&e, fnd_sdh);
2139 if (!e || e->key.hash != hash_key.hash)
2143 /* Zero unused space. */
2144 next = sbi->security.next_off & (SecurityDescriptorsBlockSize - 1);
2145 left = SecurityDescriptorsBlockSize - next;
2147 /* Zero gap until SecurityDescriptorsBlockSize. */
2148 if (left < new_sec_size) {
2149 /* Zero "left" bytes from sbi->security.next_off. */
2150 sbi->security.next_off += SecurityDescriptorsBlockSize + left;
2153 /* Zero tail of previous security. */
2154 //used = ni->vfs_inode.i_size & (SecurityDescriptorsBlockSize - 1);
2158 * 0x40438 == ni->vfs_inode.i_size
2159 * 0x00440 == sbi->security.next_off
2160 * need to zero [0x438-0x440)
2161 * if (next > used) {
2162 * u32 tozero = next - used;
2163 * zero "tozero" bytes from sbi->security.next_off - tozero
2166 /* Format new security descriptor. */
2167 d_security->key.hash = hash_key.hash;
2168 d_security->key.sec_id = cpu_to_le32(sbi->security.next_id);
2169 d_security->off = cpu_to_le64(sbi->security.next_off);
2170 d_security->size = cpu_to_le32(new_sec_size);
2171 memcpy(d_security + 1, sd, size_sd);
2173 /* Write main SDS bucket. */
2174 err = ntfs_sb_write_run(sbi, &ni->file.run, sbi->security.next_off,
2175 d_security, aligned_sec_size, 0);
2180 mirr_off = sbi->security.next_off + SecurityDescriptorsBlockSize;
2181 new_sds_size = mirr_off + aligned_sec_size;
2183 if (new_sds_size > ni->vfs_inode.i_size) {
2184 err = attr_set_size(ni, ATTR_DATA, SDS_NAME,
2185 ARRAY_SIZE(SDS_NAME), &ni->file.run,
2186 new_sds_size, &new_sds_size, false, NULL);
2191 /* Write copy SDS bucket. */
2192 err = ntfs_sb_write_run(sbi, &ni->file.run, mirr_off, d_security,
2193 aligned_sec_size, 0);
2197 /* Fill SII entry. */
2198 sii_e.de.view.data_off =
2199 cpu_to_le16(offsetof(struct NTFS_DE_SII, sec_hdr));
2200 sii_e.de.view.data_size = cpu_to_le16(SIZEOF_SECURITY_HDR);
2201 sii_e.de.view.res = 0;
2202 sii_e.de.size = cpu_to_le16(SIZEOF_SII_DIRENTRY);
2203 sii_e.de.key_size = cpu_to_le16(sizeof(d_security->key.sec_id));
2206 sii_e.sec_id = d_security->key.sec_id;
2207 memcpy(&sii_e.sec_hdr, d_security, SIZEOF_SECURITY_HDR);
2209 err = indx_insert_entry(indx_sii, ni, &sii_e.de, NULL, NULL, 0);
2213 /* Fill SDH entry. */
2214 sdh_e.de.view.data_off =
2215 cpu_to_le16(offsetof(struct NTFS_DE_SDH, sec_hdr));
2216 sdh_e.de.view.data_size = cpu_to_le16(SIZEOF_SECURITY_HDR);
2217 sdh_e.de.view.res = 0;
2218 sdh_e.de.size = cpu_to_le16(SIZEOF_SDH_DIRENTRY);
2219 sdh_e.de.key_size = cpu_to_le16(sizeof(sdh_e.key));
2222 sdh_e.key.hash = d_security->key.hash;
2223 sdh_e.key.sec_id = d_security->key.sec_id;
2224 memcpy(&sdh_e.sec_hdr, d_security, SIZEOF_SECURITY_HDR);
2225 sdh_e.magic[0] = cpu_to_le16('I');
2226 sdh_e.magic[1] = cpu_to_le16('I');
2229 err = indx_insert_entry(indx_sdh, ni, &sdh_e.de, (void *)(size_t)1,
2234 *security_id = d_security->key.sec_id;
2238 /* Update Id and offset for next descriptor. */
2239 sbi->security.next_id += 1;
2240 sbi->security.next_off += aligned_sec_size;
2244 mark_inode_dirty(&ni->vfs_inode);
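/*
 * Editor's sketch of the SDS placement rule described above (hedged;
 * assumes SecurityDescriptorsBlockSize == 0x40000): a descriptor written
 * at offset X is mirrored at X + 256K, and one that would cross a 256K
 * boundary is pushed past the mirror region, exactly as the next_off
 * adjustment earlier in this function does.
 */
static u64 sds_next_off_sketch(u64 next_off, u32 sec_size)
{
	const u64 blk = 0x40000;	/* 256K chunk size (assumed). */
	u64 left = blk - (next_off & (blk - 1));

	/* Not enough room before the boundary: skip over the mirror. */
	if (left < sec_size)
		next_off += blk + left;
	return next_off;
}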
2252 * ntfs_reparse_init - Load and parse $Extend/$Reparse.
2254 int ntfs_reparse_init(struct ntfs_sb_info *sbi)
2257 struct ntfs_inode *ni = sbi->reparse.ni;
2258 struct ntfs_index *indx = &sbi->reparse.index_r;
2259 struct ATTRIB *attr;
2260 struct ATTR_LIST_ENTRY *le;
2261 const struct INDEX_ROOT *root_r;
2267 attr = ni_find_attr(ni, NULL, &le, ATTR_ROOT, SR_NAME,
2268 ARRAY_SIZE(SR_NAME), NULL, NULL);
2274 root_r = resident_data(attr);
2275 if (root_r->type != ATTR_ZERO ||
2276 root_r->rule != NTFS_COLLATION_TYPE_UINTS) {
2281 err = indx_init(indx, sbi, attr, INDEX_MUTEX_SR);
2290 * ntfs_objid_init - Load and parse $Extend/$ObjId.
2292 int ntfs_objid_init(struct ntfs_sb_info *sbi)
2295 struct ntfs_inode *ni = sbi->objid.ni;
2296 struct ntfs_index *indx = &sbi->objid.index_o;
2297 struct ATTRIB *attr;
2298 struct ATTR_LIST_ENTRY *le;
2299 const struct INDEX_ROOT *root;
2305 attr = ni_find_attr(ni, NULL, &le, ATTR_ROOT, SO_NAME,
2306 ARRAY_SIZE(SO_NAME), NULL, NULL);
2312 root = resident_data(attr);
2313 if (root->type != ATTR_ZERO ||
2314 root->rule != NTFS_COLLATION_TYPE_UINTS) {
2319 err = indx_init(indx, sbi, attr, INDEX_MUTEX_SO);
2327 int ntfs_objid_remove(struct ntfs_sb_info *sbi, struct GUID *guid)
2330 struct ntfs_inode *ni = sbi->objid.ni;
2331 struct ntfs_index *indx = &sbi->objid.index_o;
2336 mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_OBJID);
2338 err = indx_delete_entry(indx, ni, guid, sizeof(*guid), NULL);
2340 mark_inode_dirty(&ni->vfs_inode);
2346 int ntfs_insert_reparse(struct ntfs_sb_info *sbi, __le32 rtag,
2347 const struct MFT_REF *ref)
2350 struct ntfs_inode *ni = sbi->reparse.ni;
2351 struct ntfs_index *indx = &sbi->reparse.index_r;
2352 struct NTFS_DE_R re;
2357 memset(&re, 0, sizeof(re));
2359 re.de.view.data_off = cpu_to_le16(offsetof(struct NTFS_DE_R, zero));
2360 re.de.size = cpu_to_le16(sizeof(struct NTFS_DE_R));
2361 re.de.key_size = cpu_to_le16(sizeof(re.key));
2363 re.key.ReparseTag = rtag;
2364 memcpy(&re.key.ref, ref, sizeof(*ref));
2366 mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_REPARSE);
2368 err = indx_insert_entry(indx, ni, &re.de, NULL, NULL, 0);
2370 mark_inode_dirty(&ni->vfs_inode);
2376 int ntfs_remove_reparse(struct ntfs_sb_info *sbi, __le32 rtag,
2377 const struct MFT_REF *ref)
2380 struct ntfs_inode *ni = sbi->reparse.ni;
2381 struct ntfs_index *indx = &sbi->reparse.index_r;
2382 struct ntfs_fnd *fnd = NULL;
2383 struct REPARSE_KEY rkey;
2384 struct NTFS_DE_R *re;
2385 struct INDEX_ROOT *root_r;
2390 rkey.ReparseTag = rtag;
2393 mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_REPARSE);
2396 err = indx_delete_entry(indx, ni, &rkey, sizeof(rkey), NULL);
2406 root_r = indx_get_root(indx, ni, NULL, NULL);
2412 /* 1 forces rkey.ReparseTag to be ignored when comparing keys. */
2413 err = indx_find(indx, ni, root_r, &rkey, sizeof(rkey), (void *)1, &diff,
2414 (struct NTFS_DE **)&re, fnd);
2418 if (memcmp(&re->key.ref, ref, sizeof(*ref))) {
2419 /* Should be impossible; the volume looks corrupt. */
2423 memcpy(&rkey, &re->key, sizeof(rkey));
2428 err = indx_delete_entry(indx, ni, &rkey, sizeof(rkey), NULL);
2436 mark_inode_dirty(&ni->vfs_inode);
2442 static inline void ntfs_unmap_and_discard(struct ntfs_sb_info *sbi, CLST lcn,
2445 ntfs_unmap_meta(sbi->sb, lcn, len);
2446 ntfs_discard(sbi, lcn, len);
2449 void mark_as_free_ex(struct ntfs_sb_info *sbi, CLST lcn, CLST len, bool trim)
2451 CLST end, i, zone_len, zlen;
2452 struct wnd_bitmap *wnd = &sbi->used.bitmap;
2455 down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
2456 if (!wnd_is_used(wnd, lcn, len)) {
2457 /* Mark the volume as dirty outside of wnd->rw_lock. */
2462 for (i = lcn; i < end; i++) {
2463 if (wnd_is_used(wnd, i, 1)) {
2474 ntfs_unmap_and_discard(sbi, lcn, len);
2476 wnd_set_free(wnd, lcn, len);
2485 ntfs_unmap_and_discard(sbi, lcn, len);
2486 wnd_set_free(wnd, lcn, len);
2488 /* Append to the MFT zone, if possible. */
2489 zone_len = wnd_zone_len(wnd);
2490 zlen = min(zone_len + len, sbi->zone_max);
2492 if (zlen == zone_len) {
2493 /* MFT zone already has maximum size. */
2494 } else if (!zone_len) {
2495 /* Create MFT zone only if 'zlen' is large enough. */
2496 if (zlen == sbi->zone_max)
2497 wnd_zone_set(wnd, lcn, zlen);
2499 CLST zone_lcn = wnd_zone_bit(wnd);
2501 if (lcn + len == zone_lcn) {
2502 /* Extend the MFT zone at its head. */
2503 wnd_zone_set(wnd, lcn, zlen);
2504 } else if (zone_lcn + zone_len == lcn) {
2505 /* Extend the MFT zone at its tail. */
2506 wnd_zone_set(wnd, zone_lcn, zlen);
2511 up_write(&wnd->rw_lock);
2513 ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
2517 * run_deallocate - Deallocate clusters.
2519 int run_deallocate(struct ntfs_sb_info *sbi, struct runs_tree *run, bool trim)
2524 while (run_get_entry(run, idx++, NULL, &lcn, &len)) {
2525 if (lcn == SPARSE_LCN)
2528 mark_as_free_ex(sbi, lcn, len, trim);