5 * Block allocation handling routines for the OSTA-UDF(tm) filesystem.
8 * This file is distributed under the terms of the GNU General Public
9 * License (GPL). Copies of the GPL can be obtained from:
10 * ftp://prep.ai.mit.edu/pub/gnu/GPL
11 * Each contributing author retains all rights to their own work.
13 * (C) 1999-2001 Ben Fennema
14 * (C) 1999 Stelias Computing Inc
18 * 02/24/99 blf Created.
24 #include <linux/bitops.h>
/*
 * Little-endian bit-operation wrappers: the on-disk UDF Space Bitmap is
 * stored little-endian regardless of host byte order, so all accesses go
 * through the *_le bitops.  clear/set variants are test-and-modify, i.e.
 * they return the previous bit value.
 */
29 #define udf_clear_bit __test_and_clear_bit_le
30 #define udf_set_bit __test_and_set_bit_le
31 #define udf_test_bit test_bit_le
32 #define udf_find_next_one_bit find_next_bit_le
34 static int read_block_bitmap(struct super_block *sb,
35 struct udf_bitmap *bitmap, unsigned int block,
36 unsigned long bitmap_nr)
/*
 * Read block @block of the on-disk Space Bitmap into cache slot
 * bitmap->s_block_bitmap[@bitmap_nr], then sanity-check the buffer.
 *
 * NOTE(review): several interior lines (declarations, error paths and the
 * return statements) are elided in this view; comments below describe only
 * the visible code.
 */
38 struct buffer_head *bh = NULL;
40 int max_bits, off, count;
41 struct kernel_lb_addr loc;
/* The bitmap extent starts at s_extPosition within the current partition. */
43 loc.logicalBlockNum = bitmap->s_extPosition;
44 loc.partitionReferenceNum = UDF_SB(sb)->s_partition;
/* Translate the partition-relative address to an absolute block and read. */
46 bh = udf_tread(sb, udf_get_lb_pblock(sb, &loc, block));
47 bitmap->s_block_bitmap[bitmap_nr] = bh;
51 /* Check consistency of Space Bitmap buffer. */
52 max_bits = sb->s_blocksize * 8;
/* Skip the spaceBitmapDesc header that precedes the bitmap proper
 * (sizeof() bytes converted to a bit offset). */
54 off = sizeof(struct spaceBitmapDesc) << 3;
55 count = min(max_bits - off, bitmap->s_nr_groups);
58 * Rough check if bitmap number is too big to have any bitmap
62 (bitmap->s_nr_groups >> (sb->s_blocksize_bits + 3)) + 2)
/* Last bitmap block: clamp the number of bits that are actually valid. */
65 count = bitmap->s_nr_groups - bitmap_nr * max_bits +
66 (sizeof(struct spaceBitmapDesc) << 3);
67 count = min(count, max_bits);
/* Scan the checked range for set bits; presumably a set bit here marks
 * the bitmap as corrupted — the handling branch is elided from this view. */
70 for (i = 0; i < count; i++)
71 if (udf_test_bit(i + off, bh->b_data))
76 static int __load_block_bitmap(struct super_block *sb,
77 struct udf_bitmap *bitmap,
78 unsigned int block_group)
/*
 * Make sure the bitmap block for @block_group is cached, reading it from
 * disk on first use.  The exact return statements are elided in this view;
 * the visible slot index passed to read_block_bitmap() equals the group
 * number (one cached buffer_head per group).
 */
81 int nr_groups = bitmap->s_nr_groups;
/* Reject group numbers beyond the end of the bitmap. */
83 if (block_group >= nr_groups) {
84 udf_debug("block_group (%u) > nr_groups (%d)\n",
85 block_group, nr_groups);
/* Fast path: already cached, nothing to read. */
88 if (bitmap->s_block_bitmap[block_group])
91 retval = read_block_bitmap(sb, bitmap, block_group, block_group);
98 static inline int load_block_bitmap(struct super_block *sb,
99 struct udf_bitmap *bitmap,
100 unsigned int block_group)
/*
 * Thin wrapper around __load_block_bitmap() that additionally verifies the
 * cached buffer_head for the returned slot is non-NULL before callers
 * dereference it.  Error-return lines are elided in this view.
 */
104 slot = __load_block_bitmap(sb, bitmap, block_group);
109 if (!bitmap->s_block_bitmap[slot])
115 static void udf_add_free_space(struct super_block *sb, u16 partition, u32 cnt)
/*
 * Adjust the free-space counter for @partition in the Logical Volume
 * Integrity Descriptor by @cnt and mark the LVID updated.  Callers pass a
 * negated count (implicitly converted to u32) to subtract — see the
 * udf_add_free_space(sb, partition, -alloc_count) call sites below.
 * NOTE(review): a guard for a missing s_lvid_bh is presumably elided here.
 */
117 struct udf_sb_info *sbi = UDF_SB(sb);
118 struct logicalVolIntegrityDesc *lvid;
123 lvid = (struct logicalVolIntegrityDesc *)sbi->s_lvid_bh->b_data;
/* freeSpaceTable entries are little-endian on disk. */
124 le32_add_cpu(&lvid->freeSpaceTable[partition], cnt);
125 udf_updated_lvid(sb);
128 static void udf_bitmap_free_blocks(struct super_block *sb,
129 struct udf_bitmap *bitmap,
130 struct kernel_lb_addr *bloc,
/*
 * Free @count blocks starting at @bloc (+offset, per the elided parameter
 * list) by setting the corresponding bits in the space bitmap.
 *
 * NOTE(review): interior lines are elided (remaining parameters, goto
 * labels, the loop that handles group-boundary overflow); comments cover
 * visible code only.
 */
134 struct udf_sb_info *sbi = UDF_SB(sb);
135 struct buffer_head *bh = NULL;
136 struct udf_part_map *partmap;
138 unsigned long block_group;
142 unsigned long overflow;
144 mutex_lock(&sbi->s_alloc_mutex);
145 partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
/* Reject ranges that wrap around (first test) or run past the end of the
 * partition (second test). */
146 if (bloc->logicalBlockNum + count < count ||
147 (bloc->logicalBlockNum + count) > partmap->s_partition_len) {
148 udf_debug("%u < %d || %u + %u > %u\n",
149 bloc->logicalBlockNum, 0,
150 bloc->logicalBlockNum, count,
151 partmap->s_partition_len);
/* Convert to an absolute bit number; the spaceBitmapDesc header occupies
 * the first bits of the first bitmap block. */
155 block = bloc->logicalBlockNum + offset +
156 (sizeof(struct spaceBitmapDesc) << 3);
/* Split the bit number into (group, bit-within-group). */
160 block_group = block >> (sb->s_blocksize_bits + 3);
161 bit = block % (sb->s_blocksize << 3);
164 * Check to see if we are freeing blocks across a group boundary.
/* If so, only free up to the group end now; the overflow remainder is
 * presumably handled by an elided outer loop iteration. */
166 if (bit + count > (sb->s_blocksize << 3)) {
167 overflow = bit + count - (sb->s_blocksize << 3);
170 bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
174 bh = bitmap->s_block_bitmap[bitmap_nr];
/* Setting a bit marks the block free; udf_set_bit returns the old value,
 * so a nonzero result means the block was already free (double free). */
175 for (i = 0; i < count; i++) {
176 if (udf_set_bit(bit + i, bh->b_data)) {
177 udf_debug("bit %lu already set\n", bit + i);
178 udf_debug("byte=%2x\n",
179 ((__u8 *)bh->b_data)[(bit + i) >> 3]);
/* Account the freed blocks in the LVID and persist the bitmap block. */
182 udf_add_free_space(sb, sbi->s_partition, count);
183 mark_buffer_dirty(bh);
191 mutex_unlock(&sbi->s_alloc_mutex);
194 static int udf_bitmap_prealloc_blocks(struct super_block *sb,
195 struct udf_bitmap *bitmap,
196 uint16_t partition, uint32_t first_block,
197 uint32_t block_count)
/*
 * Try to allocate up to @block_count contiguous blocks starting exactly at
 * @first_block by clearing free bits in the space bitmap.  Stops at the
 * first already-allocated block.  Returns the number of blocks obtained
 * (return statements are elided in this view).
 */
199 struct udf_sb_info *sbi = UDF_SB(sb);
201 int bit, block, block_group;
203 struct buffer_head *bh;
206 mutex_lock(&sbi->s_alloc_mutex);
207 part_len = sbi->s_partmaps[partition].s_partition_len;
/* Nothing to do if the request starts beyond the partition. */
208 if (first_block >= part_len)
/* Clamp the request to the end of the partition. */
211 if (first_block + block_count > part_len)
212 block_count = part_len - first_block;
/* Convert to an absolute bit number, accounting for the spaceBitmapDesc
 * header, and locate the containing bitmap group. */
215 block = first_block + (sizeof(struct spaceBitmapDesc) << 3);
216 block_group = block >> (sb->s_blocksize_bits + 3);
218 bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
221 bh = bitmap->s_block_bitmap[bitmap_nr];
223 bit = block % (sb->s_blocksize << 3);
/* Clear consecutive free bits within this group; udf_clear_bit returns
 * the previous value, so 0 means the block was already in use — stop. */
225 while (bit < (sb->s_blocksize << 3) && block_count > 0) {
226 if (!udf_clear_bit(bit, bh->b_data))
233 mark_buffer_dirty(bh);
/* Elided do/while head: continues into the next bitmap group while blocks
 * remain to be preallocated. */
234 } while (block_count > 0);
/* Negative cnt (as u32) subtracts the allocated blocks from free space. */
237 udf_add_free_space(sb, partition, -alloc_count);
238 mutex_unlock(&sbi->s_alloc_mutex);
242 static udf_pblk_t udf_bitmap_new_block(struct super_block *sb,
243 struct udf_bitmap *bitmap, uint16_t partition,
244 uint32_t goal, int *err)
/*
 * Allocate one block from the space bitmap, preferring @goal: try the goal
 * bit itself, then its 64-bit-aligned neighborhood, then scan the goal's
 * group, then every other group.  On success returns the new block number
 * and decrements the LVID free-space count; *err reporting and several
 * goto labels are elided in this view.
 */
246 struct udf_sb_info *sbi = UDF_SB(sb);
249 int block_group, group_start;
250 int end_goal, nr_groups, bitmap_nr, i;
251 struct buffer_head *bh = NULL;
253 udf_pblk_t newblock = 0;
256 mutex_lock(&sbi->s_alloc_mutex);
/* An out-of-range goal is presumably reset/ignored by an elided branch. */
259 if (goal >= sbi->s_partmaps[partition].s_partition_len)
262 nr_groups = bitmap->s_nr_groups;
/* Bit number of the goal, past the spaceBitmapDesc header. */
263 block = goal + (sizeof(struct spaceBitmapDesc) << 3);
264 block_group = block >> (sb->s_blocksize_bits + 3);
/* Only group 0 contains the descriptor header; skip it when scanning. */
265 group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc);
267 bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
270 bh = bitmap->s_block_bitmap[bitmap_nr];
/* memscan for a byte != 0xFF finds the first byte with a free bit
 * (set bit == free block in the UDF space bitmap). */
271 ptr = memscan((char *)bh->b_data + group_start, 0xFF,
272 sb->s_blocksize - group_start);
274 if ((ptr - ((char *)bh->b_data)) < sb->s_blocksize) {
275 bit = block % (sb->s_blocksize << 3);
/* Exact goal hit: the goal block itself is free. */
276 if (udf_test_bit(bit, bh->b_data))
/* Otherwise look for a free bit up to the next 64-bit boundary. */
279 end_goal = (bit + 63) & ~63;
280 bit = udf_find_next_one_bit(bh->b_data, end_goal, bit);
/* Then byte-scan the rest of the group from the goal onward. */
284 ptr = memscan((char *)bh->b_data + (bit >> 3), 0xFF,
285 sb->s_blocksize - ((bit + 7) >> 3));
286 newbit = (ptr - ((char *)bh->b_data)) << 3;
287 if (newbit < sb->s_blocksize << 3) {
/* Fall back to a bitwise scan of the whole group. */
292 newbit = udf_find_next_one_bit(bh->b_data,
293 sb->s_blocksize << 3, bit);
294 if (newbit < sb->s_blocksize << 3) {
/* No luck near the goal: walk the remaining groups (twice around,
 * matching the i >= nr_groups * 2 exhaustion test below). */
300 for (i = 0; i < (nr_groups * 2); i++) {
302 if (block_group >= nr_groups)
304 group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc);
306 bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
309 bh = bitmap->s_block_bitmap[bitmap_nr];
311 ptr = memscan((char *)bh->b_data + group_start, 0xFF,
312 sb->s_blocksize - group_start);
313 if ((ptr - ((char *)bh->b_data)) < sb->s_blocksize) {
314 bit = (ptr - ((char *)bh->b_data)) << 3;
318 bit = udf_find_next_one_bit(bh->b_data,
319 sb->s_blocksize << 3,
321 if (bit < sb->s_blocksize << 3)
/* All groups scanned twice and still nothing: filesystem is full. */
325 if (i >= (nr_groups * 2)) {
326 mutex_unlock(&sbi->s_alloc_mutex);
329 if (bit < sb->s_blocksize << 3)
332 bit = udf_find_next_one_bit(bh->b_data, sb->s_blocksize << 3,
334 if (bit >= sb->s_blocksize << 3) {
335 mutex_unlock(&sbi->s_alloc_mutex);
/* Slide back over up to 7 preceding free bits so the allocation starts
 * at (or near) a byte boundary within the free run. */
341 while (i < 7 && bit > (group_start << 3) &&
342 udf_test_bit(bit - 1, bh->b_data)) {
/* Convert (group, bit) back to a partition-relative block number. */
348 newblock = bit + (block_group << (sb->s_blocksize_bits + 3)) -
349 (sizeof(struct spaceBitmapDesc) << 3);
/* Free bits past the partition end violate the spec — treat as corruption. */
351 if (newblock >= sbi->s_partmaps[partition].s_partition_len) {
353 * Ran off the end of the bitmap, and bits following are
354 * non-compliant (not all zero)
356 udf_err(sb, "bitmap for partition %d corrupted (block %u marked"
357 " as free, partition length is %u)\n", partition,
358 newblock, sbi->s_partmaps[partition].s_partition_len);
/* Claim the block; a zero return means it was already allocated. */
362 if (!udf_clear_bit(bit, bh->b_data)) {
363 udf_debug("bit already cleared for block %d\n", bit);
367 mark_buffer_dirty(bh);
/* One block consumed: -1 as u32 subtracts from the free-space count. */
369 udf_add_free_space(sb, partition, -1);
370 mutex_unlock(&sbi->s_alloc_mutex);
376 mutex_unlock(&sbi->s_alloc_mutex);
380 static void udf_table_free_blocks(struct super_block *sb,
/* NOTE(review): the @table inode parameter line is elided in this view. */
382 struct kernel_lb_addr *bloc,
/*
 * Free @count blocks for a partition whose free space is tracked by an
 * Unallocated Space Table (an inode holding extents of free blocks) rather
 * than a bitmap.  Try to merge the freed range into an adjacent existing
 * extent; otherwise append a new extent, stealing a block from the freed
 * range itself for indirection when the current extent block is full.
 *
 * NOTE(review): many interior lines (parameters, goto labels, loop close)
 * are elided; comments cover only visible code.
 */
386 struct udf_sb_info *sbi = UDF_SB(sb);
387 struct udf_part_map *partmap;
390 struct kernel_lb_addr eloc;
391 struct extent_position oepos, epos;
393 struct udf_inode_info *iinfo;
395 mutex_lock(&sbi->s_alloc_mutex);
396 partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
/* Reject wrap-around or past-the-end ranges, as in the bitmap variant. */
397 if (bloc->logicalBlockNum + count < count ||
398 (bloc->logicalBlockNum + count) > partmap->s_partition_len) {
399 udf_debug("%u < %d || %u + %u > %u\n",
400 bloc->logicalBlockNum, 0,
401 bloc->logicalBlockNum, count,
402 partmap->s_partition_len);
406 iinfo = UDF_I(table);
407 udf_add_free_space(sb, sbi->s_partition, count);
/* Inclusive block range being freed. */
409 start = bloc->logicalBlockNum + offset;
410 end = bloc->logicalBlockNum + offset + count - 1;
/* Walk the table from the start; oepos trails epos by one extent so we
 * can rewrite the extent we just examined. */
412 epos.offset = oepos.offset = sizeof(struct unallocSpaceEntry);
414 epos.block = oepos.block = iinfo->i_location;
415 epos.bh = oepos.bh = NULL;
418 (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
/* Case 1: freed range immediately follows this extent — extend it. */
419 if (((eloc.logicalBlockNum +
420 (elen >> sb->s_blocksize_bits)) == start)) {
/* Extent length field is 30 bits (max 0x3FFFFFFF bytes); split the
 * merge if it would overflow. */
421 if ((0x3FFFFFFF - elen) <
422 (count << sb->s_blocksize_bits)) {
423 uint32_t tmp = ((0x3FFFFFFF - elen) >>
424 sb->s_blocksize_bits);
/* Grow to the maximum block-aligned length... */
427 elen = (etype << 30) |
428 (0x40000000 - sb->s_blocksize);
/* ...or absorb the whole freed range when it fits. */
430 elen = (etype << 30) |
432 (count << sb->s_blocksize_bits));
436 udf_write_aext(table, &oepos, &eloc, elen, 1);
/* Case 2: freed range immediately precedes this extent — extend it
 * backwards by moving its start to @start. */
437 } else if (eloc.logicalBlockNum == (end + 1)) {
438 if ((0x3FFFFFFF - elen) <
439 (count << sb->s_blocksize_bits)) {
440 uint32_t tmp = ((0x3FFFFFFF - elen) >>
441 sb->s_blocksize_bits);
444 eloc.logicalBlockNum -= tmp;
445 elen = (etype << 30) |
446 (0x40000000 - sb->s_blocksize);
448 eloc.logicalBlockNum = start;
449 elen = (etype << 30) |
451 (count << sb->s_blocksize_bits));
455 udf_write_aext(table, &oepos, &eloc, elen, 1);
/* Advance the trailing position to where epos just was. */
458 if (epos.bh != oepos.bh) {
459 oepos.block = epos.block;
465 oepos.offset = epos.offset;
471 * NOTE: we CANNOT use udf_add_aext here, as it can try to
472 * allocate a new block, and since we hold the super block
473 * lock already very bad things would happen :)
475 * We copy the behavior of udf_add_aext, but instead of
476 * trying to allocate a new block close to the existing one,
477 * we just steal a block from the extent we are trying to add.
479 * It would be nice if the blocks were close together, but it
/* No adjacent extent found: build a fresh extent for the freed range. */
485 eloc.logicalBlockNum = start;
486 elen = EXT_RECORDED_ALLOCATED |
487 (count << sb->s_blocksize_bits);
489 if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
490 adsize = sizeof(struct short_ad);
491 else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
492 adsize = sizeof(struct long_ad);
/* Current extent block can't hold one more descriptor plus a terminator:
 * steal the first freed block to hold an indirect extent block. */
499 if (epos.offset + (2 * adsize) > sb->s_blocksize) {
500 /* Steal a block from the extent being free'd */
501 udf_setup_indirect_aext(table, eloc.logicalBlockNum,
/* Shrink the freed extent by the stolen block. */
504 eloc.logicalBlockNum++;
505 elen -= sb->s_blocksize;
508 /* It's possible that stealing the block emptied the extent */
510 __udf_add_aext(table, &epos, &eloc, elen, 1);
517 mutex_unlock(&sbi->s_alloc_mutex);
521 static int udf_table_prealloc_blocks(struct super_block *sb,
522 struct inode *table, uint16_t partition,
523 uint32_t first_block, uint32_t block_count)
/*
 * Preallocate up to @block_count blocks starting exactly at @first_block
 * from an Unallocated Space Table: find the free extent that begins at
 * @first_block and carve the requested blocks off its front.  Returns the
 * number of blocks obtained (return lines are elided in this view).
 */
525 struct udf_sb_info *sbi = UDF_SB(sb);
527 uint32_t elen, adsize;
528 struct kernel_lb_addr eloc;
529 struct extent_position epos;
531 struct udf_inode_info *iinfo;
533 if (first_block >= sbi->s_partmaps[partition].s_partition_len)
536 iinfo = UDF_I(table);
/* Descriptor size depends on the inode's allocation-descriptor type. */
537 if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
538 adsize = sizeof(struct short_ad);
539 else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
540 adsize = sizeof(struct long_ad);
544 mutex_lock(&sbi->s_alloc_mutex);
545 epos.offset = sizeof(struct unallocSpaceEntry);
546 epos.block = iinfo->i_location;
/* Sentinel so the loop condition is false only on an exact match. */
548 eloc.logicalBlockNum = 0xFFFFFFFF;
/* Scan extents until one starts at @first_block or the table ends. */
550 while (first_block != eloc.logicalBlockNum &&
551 (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
552 udf_debug("eloc=%u, elen=%u, first_block=%u\n",
553 eloc.logicalBlockNum, elen, first_block);
554 ; /* empty loop body */
557 if (first_block == eloc.logicalBlockNum) {
/* Step back so the rewrite below targets the extent just read. */
558 epos.offset -= adsize;
560 alloc_count = (elen >> sb->s_blocksize_bits);
/* Extent bigger than the request: shrink it from the front... */
561 if (alloc_count > block_count) {
562 alloc_count = block_count;
563 eloc.logicalBlockNum += alloc_count;
564 elen -= (alloc_count << sb->s_blocksize_bits);
565 udf_write_aext(table, &epos, &eloc,
566 (etype << 30) | elen, 1);
/* ...otherwise consume the whole extent and delete it. */
568 udf_delete_aext(table, epos);
/* Negative cnt (as u32) subtracts the allocation from free space. */
576 udf_add_free_space(sb, partition, -alloc_count);
577 mutex_unlock(&sbi->s_alloc_mutex);
581 static udf_pblk_t udf_table_new_block(struct super_block *sb,
582 struct inode *table, uint16_t partition,
583 uint32_t goal, int *err)
/*
 * Allocate one block from an Unallocated Space Table, choosing the free
 * extent closest to @goal.  The winning extent's first block is taken and
 * the extent is shrunk (or deleted when emptied).  *err reporting and a
 * few interior lines are elided in this view.
 */
585 struct udf_sb_info *sbi = UDF_SB(sb);
/* spread tracks the best (smallest) distance found so far; 0xFFFFFFFF
 * means "no candidate yet". */
586 uint32_t spread = 0xFFFFFFFF, nspread = 0xFFFFFFFF;
587 udf_pblk_t newblock = 0;
589 uint32_t elen, goal_elen = 0;
590 struct kernel_lb_addr eloc, goal_eloc;
591 struct extent_position epos, goal_epos;
593 struct udf_inode_info *iinfo = UDF_I(table);
597 if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
598 adsize = sizeof(struct short_ad);
599 else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
600 adsize = sizeof(struct long_ad);
604 mutex_lock(&sbi->s_alloc_mutex);
/* An out-of-range goal is presumably reset by an elided branch. */
605 if (goal >= sbi->s_partmaps[partition].s_partition_len)
608 /* We search for the closest matching block to goal. If we find
609 a exact hit, we stop. Otherwise we keep going till we run out
610 of extents. We store the buffer_head, bloc, and extoffset
611 of the current closest match and use that when we are done.
613 epos.offset = sizeof(struct unallocSpaceEntry);
614 epos.block = iinfo->i_location;
615 epos.bh = goal_epos.bh = NULL;
618 (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
619 if (goal >= eloc.logicalBlockNum) {
/* Goal falls inside this extent: exact hit (handled by elided code). */
620 if (goal < eloc.logicalBlockNum +
621 (elen >> sb->s_blocksize_bits))
/* Goal is past the extent: distance from the extent's end. */
624 nspread = goal - eloc.logicalBlockNum -
625 (elen >> sb->s_blocksize_bits);
/* Goal is before the extent: distance to the extent's start. */
627 nspread = eloc.logicalBlockNum - goal;
/* New best candidate: remember its position, pinning the bh. */
630 if (nspread < spread) {
632 if (goal_epos.bh != epos.bh) {
633 brelse(goal_epos.bh);
634 goal_epos.bh = epos.bh;
635 get_bh(goal_epos.bh);
637 goal_epos.block = epos.block;
/* udf_next_aext advanced past the descriptor; step back to it. */
638 goal_epos.offset = epos.offset - adsize;
640 goal_elen = (etype << 30) | elen;
/* spread never updated => the table held no usable extent at all. */
646 if (spread == 0xFFFFFFFF) {
647 brelse(goal_epos.bh);
648 mutex_unlock(&sbi->s_alloc_mutex);
652 /* Only allocate blocks from the beginning of the extent.
653 That way, we only delete (empty) extents, never have to insert an
654 extent because of splitting */
655 /* This works, but very poorly.... */
657 newblock = goal_eloc.logicalBlockNum;
/* Carve one block off the front of the chosen extent. */
658 goal_eloc.logicalBlockNum++;
659 goal_elen -= sb->s_blocksize;
/* Rewrite the shrunk extent, or delete it if it became empty
 * (the branch selecting between these is elided). */
662 udf_write_aext(table, &goal_epos, &goal_eloc, goal_elen, 1);
664 udf_delete_aext(table, goal_epos);
665 brelse(goal_epos.bh);
/* One block consumed: -1 as u32 subtracts from the free-space count. */
667 udf_add_free_space(sb, partition, -1);
669 mutex_unlock(&sbi->s_alloc_mutex);
674 void udf_free_blocks(struct super_block *sb, struct inode *inode,
675 struct kernel_lb_addr *bloc, uint32_t offset,
/*
 * Public entry point for freeing blocks: dispatch to the bitmap- or
 * table-based implementation depending on how the partition tracks free
 * space, then (when @inode is non-NULL, per the elided guard) subtract the
 * freed bytes from the inode's block accounting.
 * NOTE(review): the trailing @count parameter line is elided in this view.
 */
678 uint16_t partition = bloc->partitionReferenceNum;
679 struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];
681 if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP) {
682 udf_bitmap_free_blocks(sb, map->s_uspace.s_bitmap,
683 bloc, offset, count);
684 } else if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE) {
685 udf_table_free_blocks(sb, map->s_uspace.s_table,
686 bloc, offset, count);
/* sector_t cast avoids 32-bit overflow when converting blocks to bytes. */
690 inode_sub_bytes(inode,
691 ((sector_t)count) << sb->s_blocksize_bits);
695 inline int udf_prealloc_blocks(struct super_block *sb,
/* NOTE(review): the @inode parameter line is elided in this view. */
697 uint16_t partition, uint32_t first_block,
698 uint32_t block_count)
/*
 * Public entry point for preallocating a contiguous run of blocks:
 * dispatch to the bitmap- or table-based implementation, then charge the
 * allocated bytes to @inode.  Returns the number of blocks allocated.
 */
700 struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];
703 if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP)
704 allocated = udf_bitmap_prealloc_blocks(sb,
705 map->s_uspace.s_bitmap,
706 partition, first_block,
708 else if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE)
709 allocated = udf_table_prealloc_blocks(sb,
710 map->s_uspace.s_table,
711 partition, first_block,
/* Only charge the inode when something was actually allocated. */
716 if (inode && allocated > 0)
717 inode_add_bytes(inode, allocated << sb->s_blocksize_bits);
721 inline udf_pblk_t udf_new_block(struct super_block *sb,
723 uint16_t partition, uint32_t goal, int *err)
725 struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];
728 if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP)
729 block = udf_bitmap_new_block(sb,
730 map->s_uspace.s_bitmap,
731 partition, goal, err);
732 else if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE)
733 block = udf_table_new_block(sb,
734 map->s_uspace.s_table,
735 partition, goal, err);
741 inode_add_bytes(inode, sb->s_blocksize);