5 * Block allocation handling routines for the OSTA-UDF(tm) filesystem.
8 * This file is distributed under the terms of the GNU General Public
9 * License (GPL). Copies of the GPL can be obtained from:
10 * ftp://prep.ai.mit.edu/pub/gnu/GPL
11 * Each contributing author retains all rights to their own work.
13 * (C) 1999-2001 Ben Fennema
14 * (C) 1999 Stelias Computing Inc
18 * 02/24/99 blf Created.
24 #include <linux/bitops.h>
29 #define udf_clear_bit __test_and_clear_bit_le
30 #define udf_set_bit __test_and_set_bit_le
31 #define udf_test_bit test_bit_le
32 #define udf_find_next_one_bit find_next_bit_le
/*
 * read_block_bitmap - read one block of the on-disk Space Bitmap into the
 * cache slot 'bitmap_nr' and sanity-check the loaded bits.
 *
 * NOTE(review): this excerpt is missing several original lines (opening
 * brace, error paths, loop body, return); comments describe only the
 * visible code.
 */
34 static int read_block_bitmap(struct super_block *sb,
35 struct udf_bitmap *bitmap, unsigned int block,
36 unsigned long bitmap_nr)
38 struct buffer_head *bh = NULL;
40 int max_bits, off, count;
41 struct kernel_lb_addr loc;
/* The Space Bitmap extent starts at s_extPosition in the current partition. */
43 loc.logicalBlockNum = bitmap->s_extPosition;
44 loc.partitionReferenceNum = UDF_SB(sb)->s_partition;
/* Read block number 'block' of the bitmap extent and cache its buffer. */
46 bh = udf_tread(sb, udf_get_lb_pblock(sb, &loc, block));
47 bitmap->s_block_bitmap[bitmap_nr] = bh;
51 /* Check consistency of Space Bitmap buffer. */
/* Bits that fit in one filesystem block. */
52 max_bits = sb->s_blocksize * 8;
/* Bit offset of the first payload bit: the spaceBitmapDesc header, in bits. */
54 off = sizeof(struct spaceBitmapDesc) << 3;
55 count = min(max_bits - off, bitmap->s_nr_groups);
58 * Rough check if bitmap number is too big to have any bitmap
62 (bitmap->s_nr_groups >> (sb->s_blocksize_bits + 3)) + 2)
/* Recompute the number of valid bits for a non-first bitmap block. */
65 count = bitmap->s_nr_groups - bitmap_nr * max_bits +
66 (sizeof(struct spaceBitmapDesc) << 3);
67 count = min(count, max_bits);
/*
 * Scan each valid bit past the descriptor header; the action taken on a
 * set bit is in a missing line — presumably a consistency check/error —
 * TODO confirm against the full source.
 */
70 for (i = 0; i < count; i++)
71 if (udf_test_bit(i + off, bh->b_data))
/*
 * __load_block_bitmap - make sure the bitmap block covering 'block_group'
 * is present in the in-memory cache, reading it from disk on a miss.
 *
 * NOTE(review): return statements are in lines missing from this excerpt;
 * visible code suggests the cache slot index (== block_group) is returned
 * on success — TODO confirm.
 */
76 static int __load_block_bitmap(struct super_block *sb,
77 struct udf_bitmap *bitmap,
78 unsigned int block_group)
81 int nr_groups = bitmap->s_nr_groups;
/* Reject group numbers beyond the bitmap. */
83 if (block_group >= nr_groups) {
84 udf_debug("block_group (%u) > nr_groups (%d)\n",
85 block_group, nr_groups);
/* Fast path: buffer already cached, no disk read needed. */
88 if (bitmap->s_block_bitmap[block_group])
/* Miss: read the block; cache slot index equals the group number here. */
91 retval = read_block_bitmap(sb, bitmap, block_group, block_group);
/*
 * load_block_bitmap - thin wrapper over __load_block_bitmap that also
 * verifies the cached buffer head really is present before callers use it.
 * NOTE(review): the error-return lines are missing from this excerpt.
 */
98 static inline int load_block_bitmap(struct super_block *sb,
99 struct udf_bitmap *bitmap,
100 unsigned int block_group)
104 slot = __load_block_bitmap(sb, bitmap, block_group);
/* Defensive: the load must have left a valid buffer in the slot. */
109 if (!bitmap->s_block_bitmap[slot])
/*
 * udf_add_free_space - adjust the free-space counter for 'partition' in the
 * Logical Volume Integrity Descriptor by 'cnt' (callers pass a negative
 * value to account for allocations) and mark the LVID updated.
 * NOTE(review): the guard for a missing s_lvid_bh is in lines absent from
 * this excerpt — confirm before relying on the dereference below.
 */
115 static void udf_add_free_space(struct super_block *sb, u16 partition, u32 cnt)
117 struct udf_sb_info *sbi = UDF_SB(sb);
118 struct logicalVolIntegrityDesc *lvid;
123 lvid = (struct logicalVolIntegrityDesc *)sbi->s_lvid_bh->b_data;
/* freeSpaceTable is little-endian on disk; update it in place. */
124 le32_add_cpu(&lvid->freeSpaceTable[partition], cnt);
125 udf_updated_lvid(sb);
/*
 * udf_bitmap_free_blocks - return a run of 'count' blocks starting at
 * 'bloc' (+offset) to the free-space bitmap: a set bit means "free".
 *
 * NOTE(review): this excerpt is missing lines (the goto labels for the
 * range-check failure, the overflow re-loop, error handling for
 * load_block_bitmap); comments cover only what is visible.
 */
128 static void udf_bitmap_free_blocks(struct super_block *sb,
129 struct udf_bitmap *bitmap,
130 struct kernel_lb_addr *bloc,
134 struct udf_sb_info *sbi = UDF_SB(sb);
135 struct buffer_head *bh = NULL;
136 struct udf_part_map *partmap;
138 unsigned long block_group;
142 unsigned long overflow;
144 mutex_lock(&sbi->s_alloc_mutex);
145 partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
/* Guard against wrap-around and against freeing past the partition end. */
146 if (bloc->logicalBlockNum + count < count ||
147 (bloc->logicalBlockNum + count) > partmap->s_partition_len) {
148 udf_debug("%u < %d || %u + %u > %u\n",
149 bloc->logicalBlockNum, 0,
150 bloc->logicalBlockNum, count,
151 partmap->s_partition_len);
/* Translate to an absolute bit index: skip the spaceBitmapDesc header. */
155 block = bloc->logicalBlockNum + offset +
156 (sizeof(struct spaceBitmapDesc) << 3);
160 block_group = block >> (sb->s_blocksize_bits + 3);
161 bit = block % (sb->s_blocksize << 3);
164 * Check to see if we are freeing blocks across a group boundary.
/* Only free up to the end of this bitmap block now; 'overflow' is the rest. */
166 if (bit + count > (sb->s_blocksize << 3)) {
167 overflow = bit + count - (sb->s_blocksize << 3);
170 bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
174 bh = bitmap->s_block_bitmap[bitmap_nr];
/* Setting a bit frees the block; an already-set bit means a double free. */
175 for (i = 0; i < count; i++) {
176 if (udf_set_bit(bit + i, bh->b_data)) {
177 udf_debug("bit %lu already set\n", bit + i);
178 udf_debug("byte=%2x\n",
179 ((__u8 *)bh->b_data)[(bit + i) >> 3]);
/*
 * NOTE(review): free space is credited to sbi->s_partition, while the
 * range check above used bloc->partitionReferenceNum — verify these are
 * guaranteed equal for all callers.
 */
182 udf_add_free_space(sb, sbi->s_partition, count);
183 mark_buffer_dirty(bh);
191 mutex_unlock(&sbi->s_alloc_mutex);
/*
 * udf_bitmap_prealloc_blocks - try to allocate up to 'block_count'
 * contiguous blocks starting exactly at 'first_block' by clearing bits in
 * the space bitmap (a cleared bit means "in use"). Stops at the first bit
 * that is already clear. Presumably returns the number of blocks actually
 * grabbed (alloc_count) — the return lines are missing from this excerpt.
 */
194 static int udf_bitmap_prealloc_blocks(struct super_block *sb,
195 struct udf_bitmap *bitmap,
196 uint16_t partition, uint32_t first_block,
197 uint32_t block_count)
199 struct udf_sb_info *sbi = UDF_SB(sb);
201 int bit, block, block_group, group_start;
202 int nr_groups, bitmap_nr;
203 struct buffer_head *bh;
206 mutex_lock(&sbi->s_alloc_mutex);
207 part_len = sbi->s_partmaps[partition].s_partition_len;
/* Nothing to do if the requested start lies outside the partition. */
208 if (first_block >= part_len)
/* Clamp the request to the partition end. */
211 if (first_block + block_count > part_len)
212 block_count = part_len - first_block;
215 nr_groups = udf_compute_nr_groups(sb, partition);
/* Bit index of first_block, past the spaceBitmapDesc header. */
216 block = first_block + (sizeof(struct spaceBitmapDesc) << 3);
217 block_group = block >> (sb->s_blocksize_bits + 3);
218 group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc);
220 bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
223 bh = bitmap->s_block_bitmap[bitmap_nr];
225 bit = block % (sb->s_blocksize << 3);
/* Clear consecutive free bits until one is already taken or we have all. */
227 while (bit < (sb->s_blocksize << 3) && block_count > 0) {
228 if (!udf_clear_bit(bit, bh->b_data))
235 mark_buffer_dirty(bh);
/* Outer loop continues into the next bitmap block (lines missing here). */
236 } while (block_count > 0);
/* Negative cnt: allocation reduces the LVID free-space counter. */
239 udf_add_free_space(sb, partition, -alloc_count);
240 mutex_unlock(&sbi->s_alloc_mutex);
/*
 * udf_bitmap_new_block - allocate one block, preferring 'goal' or a free
 * bit near it; falls back to scanning all bitmap groups (twice) for any
 * free bit. Clearing the chosen bit marks the block allocated.
 *
 * NOTE(review): many lines (labels such as got_block/search_back, error
 * paths, returns) are missing from this excerpt; comments describe only
 * the visible scanning logic.
 */
244 static udf_pblk_t udf_bitmap_new_block(struct super_block *sb,
245 struct udf_bitmap *bitmap, uint16_t partition,
246 uint32_t goal, int *err)
248 struct udf_sb_info *sbi = UDF_SB(sb);
251 int block_group, group_start;
252 int end_goal, nr_groups, bitmap_nr, i;
253 struct buffer_head *bh = NULL;
255 udf_pblk_t newblock = 0;
258 mutex_lock(&sbi->s_alloc_mutex);
/* An out-of-range goal is simply ignored (missing line resets it). */
261 if (goal >= sbi->s_partmaps[partition].s_partition_len)
264 nr_groups = bitmap->s_nr_groups;
/* Bit index of the goal block, past the spaceBitmapDesc header. */
265 block = goal + (sizeof(struct spaceBitmapDesc) << 3);
266 block_group = block >> (sb->s_blocksize_bits + 3);
267 group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc);
269 bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
272 bh = bitmap->s_block_bitmap[bitmap_nr];
/* memscan for a byte != 0xFF finds the first byte with any free bit. */
273 ptr = memscan((char *)bh->b_data + group_start, 0xFF,
274 sb->s_blocksize - group_start);
276 if ((ptr - ((char *)bh->b_data)) < sb->s_blocksize) {
277 bit = block % (sb->s_blocksize << 3);
/* Exact hit: the goal block itself is free. */
278 if (udf_test_bit(bit, bh->b_data))
/* Otherwise look for a free bit between goal and the next 64-bit boundary. */
281 end_goal = (bit + 63) & ~63;
282 bit = udf_find_next_one_bit(bh->b_data, end_goal, bit);
/* Then byte-scan from the goal's byte to the end of this bitmap block. */
286 ptr = memscan((char *)bh->b_data + (bit >> 3), 0xFF,
287 sb->s_blocksize - ((bit + 7) >> 3));
288 newbit = (ptr - ((char *)bh->b_data)) << 3;
289 if (newbit < sb->s_blocksize << 3) {
/* Last resort within this block: bit-scan the whole remainder. */
294 newbit = udf_find_next_one_bit(bh->b_data,
295 sb->s_blocksize << 3, bit);
296 if (newbit < sb->s_blocksize << 3) {
/* No luck near goal: walk every group, up to two passes. */
302 for (i = 0; i < (nr_groups * 2); i++) {
304 if (block_group >= nr_groups)
306 group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc);
308 bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
311 bh = bitmap->s_block_bitmap[bitmap_nr];
/* First pass: cheap byte scan for any byte containing a free bit. */
313 ptr = memscan((char *)bh->b_data + group_start, 0xFF,
314 sb->s_blocksize - group_start);
315 if ((ptr - ((char *)bh->b_data)) < sb->s_blocksize) {
316 bit = (ptr - ((char *)bh->b_data)) << 3;
/* Second pass (missing lines): full bit scan of the group. */
320 bit = udf_find_next_one_bit(bh->b_data,
321 sb->s_blocksize << 3,
323 if (bit < sb->s_blocksize << 3)
/* All groups exhausted: the filesystem is full. */
327 if (i >= (nr_groups * 2)) {
328 mutex_unlock(&sbi->s_alloc_mutex);
331 if (bit < sb->s_blocksize << 3)
334 bit = udf_find_next_one_bit(bh->b_data, sb->s_blocksize << 3,
336 if (bit >= sb->s_blocksize << 3) {
337 mutex_unlock(&sbi->s_alloc_mutex);
/*
 * Walk backwards over up to 7 preceding free bits so allocations tend to
 * start on a byte boundary (keeps the bitmap byte-scannable).
 */
343 while (i < 7 && bit > (group_start << 3) &&
344 udf_test_bit(bit - 1, bh->b_data)) {
/* Convert the bit position back to a partition-relative block number. */
350 newblock = bit + (block_group << (sb->s_blocksize_bits + 3)) -
351 (sizeof(struct spaceBitmapDesc) << 3);
/* Claim the block; failure here means a racing/corrupt bitmap. */
353 if (!udf_clear_bit(bit, bh->b_data)) {
354 udf_debug("bit already cleared for block %d\n", bit);
358 mark_buffer_dirty(bh);
360 udf_add_free_space(sb, partition, -1);
361 mutex_unlock(&sbi->s_alloc_mutex);
367 mutex_unlock(&sbi->s_alloc_mutex);
/*
 * udf_table_free_blocks - return 'count' blocks at 'bloc'(+offset) to the
 * unallocated-space table (an inode holding a list of free extents).
 * The freed range is merged into an adjacent extent when one touches it;
 * otherwise a new extent is appended, stealing a block from the freed
 * range itself if the table needs a new allocation-extent block.
 *
 * NOTE(review): this excerpt is missing lines (the table parameter in the
 * signature, loop braces, brelse calls, error paths); comments describe
 * only the visible code.
 */
371 static void udf_table_free_blocks(struct super_block *sb,
373 struct kernel_lb_addr *bloc,
377 struct udf_sb_info *sbi = UDF_SB(sb);
378 struct udf_part_map *partmap;
381 struct kernel_lb_addr eloc;
382 struct extent_position oepos, epos;
384 struct udf_inode_info *iinfo;
386 mutex_lock(&sbi->s_alloc_mutex);
387 partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
/* Guard against wrap-around and freeing past the partition end. */
388 if (bloc->logicalBlockNum + count < count ||
389 (bloc->logicalBlockNum + count) > partmap->s_partition_len) {
390 udf_debug("%u < %d || %u + %u > %u\n",
391 bloc->logicalBlockNum, 0,
392 bloc->logicalBlockNum, count,
393 partmap->s_partition_len);
397 iinfo = UDF_I(table)
398 udf_add_free_space(sb, sbi->s_partition, count);
/* [start, end] is the partition-relative block range being freed. */
400 start = bloc->logicalBlockNum + offset;
401 end = bloc->logicalBlockNum + offset + count - 1;
/* Begin scanning extents right after the unallocated-space entry header. */
403 epos.offset = oepos.offset = sizeof(struct unallocSpaceEntry);
405 epos.block = oepos.block = iinfo->i_location;
406 epos.bh = oepos.bh = NULL;
/* Walk the extent list looking for an extent adjacent to the freed range. */
409 (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
/* Case 1: existing extent ends exactly where the freed range starts. */
410 if (((eloc.logicalBlockNum +
411 (elen >> sb->s_blocksize_bits)) == start)) {
/* Extent length field is 30 bits (max 0x3FFFFFFF bytes): grow it only as
 * far as it can hold, carrying the remainder forward (missing lines). */
412 if ((0x3FFFFFFF - elen) <
413 (count << sb->s_blocksize_bits)) {
414 uint32_t tmp = ((0x3FFFFFFF - elen) >>
415 sb->s_blocksize_bits);
418 elen = (etype << 30) |
419 (0x40000000 - sb->s_blocksize);
421 elen = (etype << 30) |
423 (count << sb->s_blocksize_bits));
427 udf_write_aext(table, &oepos, &eloc, elen, 1);
/* Case 2: existing extent starts right after the freed range ends. */
428 } else if (eloc.logicalBlockNum == (end + 1)) {
429 if ((0x3FFFFFFF - elen) <
430 (count << sb->s_blocksize_bits)) {
431 uint32_t tmp = ((0x3FFFFFFF - elen) >>
432 sb->s_blocksize_bits);
/* Extend the extent backwards to absorb (part of) the freed range. */
435 eloc.logicalBlockNum -= tmp;
436 elen = (etype << 30) |
437 (0x40000000 - sb->s_blocksize);
439 eloc.logicalBlockNum = start;
440 elen = (etype << 30) |
442 (count << sb->s_blocksize_bits));
446 udf_write_aext(table, &oepos, &eloc, elen, 1);
/* Keep oepos trailing one extent behind epos for the write-back above. */
449 if (epos.bh != oepos.bh) {
450 oepos.block = epos.block;
456 oepos.offset = epos.offset;
462 * NOTE: we CANNOT use udf_add_aext here, as it can try to
463 * allocate a new block, and since we hold the super block
464 * lock already very bad things would happen :)
466 * We copy the behavior of udf_add_aext, but instead of
467 * trying to allocate a new block close to the existing one,
468 * we just steal a block from the extent we are trying to add.
470 * It would be nice if the blocks were close together, but it
/* No adjacent extent found: append the freed range as a new extent. */
476 eloc.logicalBlockNum = start;
477 elen = EXT_RECORDED_ALLOCATED |
478 (count << sb->s_blocksize_bits);
480 if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
481 adsize = sizeof(struct short_ad);
482 else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
483 adsize = sizeof(struct long_ad);
/* No room for another descriptor: steal the first freed block to hold a
 * new indirect allocation-extent block. */
490 if (epos.offset + (2 * adsize) > sb->s_blocksize) {
491 /* Steal a block from the extent being free'd */
492 udf_setup_indirect_aext(table, eloc.logicalBlockNum,
495 eloc.logicalBlockNum++;
496 elen -= sb->s_blocksize;
499 /* It's possible that stealing the block emptied the extent */
501 __udf_add_aext(table, &epos, &eloc, elen, 1);
508 mutex_unlock(&sbi->s_alloc_mutex);
/*
 * udf_table_prealloc_blocks - allocate up to 'block_count' blocks starting
 * exactly at 'first_block' from the unallocated-space table: find the free
 * extent beginning at first_block and shrink it from the front (deleting
 * it entirely if fully consumed). Presumably returns the number of blocks
 * allocated (alloc_count) — return lines are missing from this excerpt.
 */
512 static int udf_table_prealloc_blocks(struct super_block *sb,
513 struct inode *table, uint16_t partition,
514 uint32_t first_block, uint32_t block_count)
516 struct udf_sb_info *sbi = UDF_SB(sb);
518 uint32_t elen, adsize;
519 struct kernel_lb_addr eloc;
520 struct extent_position epos;
522 struct udf_inode_info *iinfo;
/* Requested start outside the partition: nothing to allocate. */
524 if (first_block >= sbi->s_partmaps[partition].s_partition_len)
527 iinfo = UDF_I(table);
528 if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
529 adsize = sizeof(struct short_ad);
530 else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
531 adsize = sizeof(struct long_ad);
535 mutex_lock(&sbi->s_alloc_mutex);
536 epos.offset = sizeof(struct unallocSpaceEntry);
537 epos.block = iinfo->i_location;
/* Sentinel so the loop condition fails only on a genuine match. */
539 eloc.logicalBlockNum = 0xFFFFFFFF;
/* Scan the extent list for an extent starting exactly at first_block. */
541 while (first_block != eloc.logicalBlockNum &&
542 (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
543 udf_debug("eloc=%u, elen=%u, first_block=%u\n",
544 eloc.logicalBlockNum, elen, first_block);
545 ; /* empty loop body */
548 if (first_block == eloc.logicalBlockNum) {
/* Step back to the matching descriptor so we can rewrite it in place. */
549 epos.offset -= adsize;
551 alloc_count = (elen >> sb->s_blocksize_bits);
/* Partial take: shrink the extent from the front and write it back. */
552 if (alloc_count > block_count) {
553 alloc_count = block_count;
554 eloc.logicalBlockNum += alloc_count;
555 elen -= (alloc_count << sb->s_blocksize_bits);
556 udf_write_aext(table, &epos, &eloc,
557 (etype << 30) | elen, 1);
/* Full take: the extent is consumed, remove its descriptor. */
559 udf_delete_aext(table, epos);
567 udf_add_free_space(sb, partition, -alloc_count);
568 mutex_unlock(&sbi->s_alloc_mutex);
/*
 * udf_table_new_block - allocate one block from the unallocated-space
 * table, choosing the free extent closest to 'goal'. Takes the first block
 * of the chosen extent so extents only ever shrink or disappear — never
 * split. NOTE(review): some lines (loop braces, *err assignments, return)
 * are missing from this excerpt.
 */
572 static udf_pblk_t udf_table_new_block(struct super_block *sb,
573 struct inode *table, uint16_t partition,
574 uint32_t goal, int *err)
576 struct udf_sb_info *sbi = UDF_SB(sb);
/* spread tracks the best (smallest) distance from goal found so far. */
577 uint32_t spread = 0xFFFFFFFF, nspread = 0xFFFFFFFF;
578 udf_pblk_t newblock = 0;
580 uint32_t elen, goal_elen = 0;
581 struct kernel_lb_addr eloc, goal_eloc;
582 struct extent_position epos, goal_epos;
584 struct udf_inode_info *iinfo = UDF_I(table);
588 if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
589 adsize = sizeof(struct short_ad);
590 else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
591 adsize = sizeof(struct long_ad);
595 mutex_lock(&sbi->s_alloc_mutex);
/* An out-of-range goal is ignored (missing line presumably resets it). */
596 if (goal >= sbi->s_partmaps[partition].s_partition_len)
599 /* We search for the closest matching block to goal. If we find
600 a exact hit, we stop. Otherwise we keep going till we run out
601 of extents. We store the buffer_head, bloc, and extoffset
602 of the current closest match and use that when we are done.
604 epos.offset = sizeof(struct unallocSpaceEntry);
605 epos.block = iinfo->i_location;
606 epos.bh = goal_epos.bh = NULL;
/* Walk every free extent, measuring its distance from goal. */
609 (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
610 if (goal >= eloc.logicalBlockNum) {
/* Goal falls inside this extent: exact hit (distance handling in
 * missing lines). */
611 if (goal < eloc.logicalBlockNum +
612 (elen >> sb->s_blocksize_bits))
615 nspread = goal - eloc.logicalBlockNum -
616 (elen >> sb->s_blocksize_bits);
618 nspread = eloc.logicalBlockNum - goal;
/* New best candidate: remember its position, pinning the buffer. */
621 if (nspread < spread) {
623 if (goal_epos.bh != epos.bh) {
624 brelse(goal_epos.bh);
625 goal_epos.bh = epos.bh;
626 get_bh(goal_epos.bh);
628 goal_epos.block = epos.block;
/* Point at the candidate's own descriptor, not the next one. */
629 goal_epos.offset = epos.offset - adsize;
631 goal_elen = (etype << 30) | elen;
/* spread never improved: the table holds no free extents at all. */
637 if (spread == 0xFFFFFFFF) {
638 brelse(goal_epos.bh);
639 mutex_unlock(&sbi->s_alloc_mutex);
643 /* Only allocate blocks from the beginning of the extent.
644 That way, we only delete (empty) extents, never have to insert an
645 extent because of splitting */
646 /* This works, but very poorly.... */
648 newblock = goal_eloc.logicalBlockNum;
/* Shrink the chosen extent by one block from the front. */
649 goal_eloc.logicalBlockNum++;
650 goal_elen -= sb->s_blocksize;
/* Non-empty remainder is written back; an emptied extent is deleted. */
653 udf_write_aext(table, &goal_epos, &goal_eloc, goal_elen, 1);
655 udf_delete_aext(table, goal_epos);
656 brelse(goal_epos.bh);
658 udf_add_free_space(sb, partition, -1);
660 mutex_unlock(&sbi->s_alloc_mutex);
/*
 * udf_free_blocks - public entry point for freeing blocks: dispatch to the
 * bitmap- or table-based implementation depending on which free-space
 * structure the partition uses (unallocated vs. freed, bitmap vs. table),
 * then decrement the inode's byte accounting.
 * NOTE(review): the signature's count parameter and the closing lines are
 * missing from this excerpt.
 */
665 void udf_free_blocks(struct super_block *sb, struct inode *inode,
666 struct kernel_lb_addr *bloc, uint32_t offset,
669 uint16_t partition = bloc->partitionReferenceNum;
670 struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];
672 if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP) {
673 udf_bitmap_free_blocks(sb, map->s_uspace.s_bitmap,
674 bloc, offset, count);
675 } else if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE) {
676 udf_table_free_blocks(sb, map->s_uspace.s_table,
677 bloc, offset, count);
678 } else if (map->s_partition_flags & UDF_PART_FLAG_FREED_BITMAP) {
679 udf_bitmap_free_blocks(sb, map->s_fspace.s_bitmap,
680 bloc, offset, count);
681 } else if (map->s_partition_flags & UDF_PART_FLAG_FREED_TABLE) {
682 udf_table_free_blocks(sb, map->s_fspace.s_table,
683 bloc, offset, count);
/* Keep i_blocks/bytes accounting in sync with the freed space. */
687 inode_sub_bytes(inode,
688 ((sector_t)count) << sb->s_blocksize_bits);
/*
 * udf_prealloc_blocks - public entry point for contiguous preallocation:
 * dispatch to the bitmap- or table-based implementation per the
 * partition's free-space structure, credit the allocated bytes to the
 * inode, and presumably return the count allocated (return line missing).
 * NOTE(review): the inode parameter line is missing from the signature in
 * this excerpt.
 */
692 inline int udf_prealloc_blocks(struct super_block *sb,
694 uint16_t partition, uint32_t first_block,
695 uint32_t block_count)
697 struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];
700 if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP)
701 allocated = udf_bitmap_prealloc_blocks(sb,
702 map->s_uspace.s_bitmap,
703 partition, first_block,
705 else if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE)
706 allocated = udf_table_prealloc_blocks(sb,
707 map->s_uspace.s_table,
708 partition, first_block,
710 else if (map->s_partition_flags & UDF_PART_FLAG_FREED_BITMAP)
711 allocated = udf_bitmap_prealloc_blocks(sb,
712 map->s_fspace.s_bitmap,
713 partition, first_block,
715 else if (map->s_partition_flags & UDF_PART_FLAG_FREED_TABLE)
716 allocated = udf_table_prealloc_blocks(sb,
717 map->s_fspace.s_table,
718 partition, first_block,
/* Account the new space against the inode only on success. */
723 if (inode && allocated > 0)
724 inode_add_bytes(inode, allocated << sb->s_blocksize_bits);
728 inline udf_pblk_t udf_new_block(struct super_block *sb,
730 uint16_t partition, uint32_t goal, int *err)
732 struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];
735 if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP)
736 block = udf_bitmap_new_block(sb,
737 map->s_uspace.s_bitmap,
738 partition, goal, err);
739 else if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE)
740 block = udf_table_new_block(sb,
741 map->s_uspace.s_table,
742 partition, goal, err);
743 else if (map->s_partition_flags & UDF_PART_FLAG_FREED_BITMAP)
744 block = udf_bitmap_new_block(sb,
745 map->s_fspace.s_bitmap,
746 partition, goal, err);
747 else if (map->s_partition_flags & UDF_PART_FLAG_FREED_TABLE)
748 block = udf_table_new_block(sb,
749 map->s_fspace.s_table,
750 partition, goal, err);
756 inode_add_bytes(inode, sb->s_blocksize);