/*
 * balloc.c
 *
 * PURPOSE
 *      Block allocation handling routines for the OSTA-UDF(tm) filesystem.
 *
 * COPYRIGHT
 *      This file is distributed under the terms of the GNU General Public
 *      License (GPL). Copies of the GPL can be obtained from:
 *              ftp://prep.ai.mit.edu/pub/gnu/GPL
 *      Each contributing author retains all rights to their own work.
 *
 *  (C) 1999-2001 Ben Fennema
 *  (C) 1999 Stelias Computing Inc
 *
 * HISTORY
 *
 *  02/24/99 blf  Created.
 *
 */

#include "udfdecl.h"

#include <linux/bitops.h>

#include "udf_i.h"
#include "udf_sb.h"

#define udf_clear_bit   __test_and_clear_bit_le
#define udf_set_bit     __test_and_set_bit_le
#define udf_test_bit    test_bit_le
#define udf_find_next_one_bit   find_next_bit_le

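/*
 * Read one block of the partition's Space Bitmap into the cache slot
 * bitmap->s_block_bitmap[bitmap_nr].  The bits covering the first
 * s_nr_groups blocks of the partition are expected to be clear (allocated);
 * a set bit there means the on-disk bitmap is corrupted and the function
 * returns -EFSCORRUPTED.
 */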
static int read_block_bitmap(struct super_block *sb,
                             struct udf_bitmap *bitmap, unsigned int block,
                             unsigned long bitmap_nr)
{
        struct buffer_head *bh = NULL;
        int i;
        int max_bits, off, count;
        struct kernel_lb_addr loc;

        loc.logicalBlockNum = bitmap->s_extPosition;
        loc.partitionReferenceNum = UDF_SB(sb)->s_partition;

        bh = udf_tread(sb, udf_get_lb_pblock(sb, &loc, block));
        bitmap->s_block_bitmap[bitmap_nr] = bh;
        if (!bh)
                return -EIO;

        /* Check consistency of Space Bitmap buffer. */
        max_bits = sb->s_blocksize * 8;
        if (!bitmap_nr) {
                off = sizeof(struct spaceBitmapDesc) << 3;
                count = min(max_bits - off, bitmap->s_nr_groups);
        } else {
                /*
                 * Rough check if bitmap number is too big to have any bitmap
                 * blocks reserved.
                 */
                if (bitmap_nr >
                    (bitmap->s_nr_groups >> (sb->s_blocksize_bits + 3)) + 2)
                        return 0;
                off = 0;
                count = bitmap->s_nr_groups - bitmap_nr * max_bits +
                                (sizeof(struct spaceBitmapDesc) << 3);
                count = min(count, max_bits);
        }

        for (i = 0; i < count; i++)
                if (udf_test_bit(i + off, bh->b_data))
                        return -EFSCORRUPTED;
        return 0;
}

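/*
 * Make sure the bitmap block for block_group is loaded, reading it from disk
 * if necessary.  Returns the cache slot (equal to the block group number) or
 * a negative error code.
 */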
static int __load_block_bitmap(struct super_block *sb,
                               struct udf_bitmap *bitmap,
                               unsigned int block_group)
{
        int retval = 0;
        int nr_groups = bitmap->s_nr_groups;

        if (block_group >= nr_groups) {
                udf_debug("block_group (%u) >= nr_groups (%d)\n",
                          block_group, nr_groups);
        }

        if (bitmap->s_block_bitmap[block_group])
                return block_group;

        retval = read_block_bitmap(sb, bitmap, block_group, block_group);
        if (retval < 0)
                return retval;

        return block_group;
}

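/*
 * Wrapper around __load_block_bitmap() that additionally fails with -EIO when
 * the buffer for the requested group is still missing after the load attempt.
 */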
static inline int load_block_bitmap(struct super_block *sb,
                                    struct udf_bitmap *bitmap,
                                    unsigned int block_group)
{
        int slot;

        slot = __load_block_bitmap(sb, bitmap, block_group);

        if (slot < 0)
                return slot;

        if (!bitmap->s_block_bitmap[slot])
                return -EIO;

        return slot;
}

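/*
 * Adjust the free space counter of the given partition in the Logical Volume
 * Integrity Descriptor by cnt blocks (cnt may be negative) and record that
 * the LVID has been updated.  Nothing is done if no LVID buffer is cached.
 */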
static void udf_add_free_space(struct super_block *sb, u16 partition, u32 cnt)
{
        struct udf_sb_info *sbi = UDF_SB(sb);
        struct logicalVolIntegrityDesc *lvid;

        if (!sbi->s_lvid_bh)
                return;

        lvid = (struct logicalVolIntegrityDesc *)sbi->s_lvid_bh->b_data;
        le32_add_cpu(&lvid->freeSpaceTable[partition], cnt);
        udf_updated_lvid(sb);
}

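/*
 * Free 'count' blocks starting at bloc + offset by setting the corresponding
 * bits in the partition's Space Bitmap.  Frees that cross a bitmap-block
 * (group) boundary are split and handled one group at a time.
 */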
static void udf_bitmap_free_blocks(struct super_block *sb,
                                   struct udf_bitmap *bitmap,
                                   struct kernel_lb_addr *bloc,
                                   uint32_t offset,
                                   uint32_t count)
{
        struct udf_sb_info *sbi = UDF_SB(sb);
        struct buffer_head *bh = NULL;
        struct udf_part_map *partmap;
        unsigned long block;
        unsigned long block_group;
        unsigned long bit;
        unsigned long i;
        int bitmap_nr;
        unsigned long overflow;

        mutex_lock(&sbi->s_alloc_mutex);
        partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
        if (bloc->logicalBlockNum + count < count ||
            (bloc->logicalBlockNum + count) > partmap->s_partition_len) {
                udf_debug("%u < %d || %u + %u > %u\n",
                          bloc->logicalBlockNum, 0,
                          bloc->logicalBlockNum, count,
                          partmap->s_partition_len);
                goto error_return;
        }

        block = bloc->logicalBlockNum + offset +
                (sizeof(struct spaceBitmapDesc) << 3);

        do {
                overflow = 0;
                block_group = block >> (sb->s_blocksize_bits + 3);
                bit = block % (sb->s_blocksize << 3);

                /*
                 * Check to see if we are freeing blocks across a group boundary.
                 */
                if (bit + count > (sb->s_blocksize << 3)) {
                        overflow = bit + count - (sb->s_blocksize << 3);
                        count -= overflow;
                }
                bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
                if (bitmap_nr < 0)
                        goto error_return;

                bh = bitmap->s_block_bitmap[bitmap_nr];
                for (i = 0; i < count; i++) {
                        if (udf_set_bit(bit + i, bh->b_data)) {
                                udf_debug("bit %lu already set\n", bit + i);
                                udf_debug("byte=%2x\n",
                                          ((__u8 *)bh->b_data)[(bit + i) >> 3]);
                        }
                }
                udf_add_free_space(sb, sbi->s_partition, count);
                mark_buffer_dirty(bh);
                if (overflow) {
                        block += count;
                        count = overflow;
                }
        } while (overflow);

error_return:
        mutex_unlock(&sbi->s_alloc_mutex);
}

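/*
 * Try to preallocate up to block_count blocks starting at first_block by
 * clearing consecutive free bits in the Space Bitmap.  The scan stops at the
 * first block that is already in use; the number of blocks actually
 * allocated is returned.
 */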
static int udf_bitmap_prealloc_blocks(struct super_block *sb,
                                      struct udf_bitmap *bitmap,
                                      uint16_t partition, uint32_t first_block,
                                      uint32_t block_count)
{
        struct udf_sb_info *sbi = UDF_SB(sb);
        int alloc_count = 0;
        int bit, block, block_group;
        int bitmap_nr;
        struct buffer_head *bh;
        __u32 part_len;

        mutex_lock(&sbi->s_alloc_mutex);
        part_len = sbi->s_partmaps[partition].s_partition_len;
        if (first_block >= part_len)
                goto out;

        if (first_block + block_count > part_len)
                block_count = part_len - first_block;

        do {
                block = first_block + (sizeof(struct spaceBitmapDesc) << 3);
                block_group = block >> (sb->s_blocksize_bits + 3);

                bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
                if (bitmap_nr < 0)
                        goto out;
                bh = bitmap->s_block_bitmap[bitmap_nr];

                bit = block % (sb->s_blocksize << 3);

                while (bit < (sb->s_blocksize << 3) && block_count > 0) {
                        if (!udf_clear_bit(bit, bh->b_data))
                                goto out;
                        block_count--;
                        alloc_count++;
                        bit++;
                        block++;
                }
                mark_buffer_dirty(bh);
        } while (block_count > 0);

out:
        udf_add_free_space(sb, partition, -alloc_count);
        mutex_unlock(&sbi->s_alloc_mutex);
        return alloc_count;
}

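/*
 * Allocate a single block from the bitmap, trying the goal block first, then
 * the rest of the goal's group, then the remaining groups.  Once a free bit
 * is found, the search walks back over up to seven preceding free bits so
 * that allocations tend to start near the beginning of a free run.  Returns
 * the new block number within the partition, or 0 with *err set on failure.
 */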
static udf_pblk_t udf_bitmap_new_block(struct super_block *sb,
                                struct udf_bitmap *bitmap, uint16_t partition,
                                uint32_t goal, int *err)
{
        struct udf_sb_info *sbi = UDF_SB(sb);
        int newbit, bit = 0;
        udf_pblk_t block;
        int block_group, group_start;
        int end_goal, nr_groups, bitmap_nr, i;
        struct buffer_head *bh = NULL;
        char *ptr;
        udf_pblk_t newblock = 0;

        *err = -ENOSPC;
        mutex_lock(&sbi->s_alloc_mutex);

repeat:
        if (goal >= sbi->s_partmaps[partition].s_partition_len)
                goal = 0;

        nr_groups = bitmap->s_nr_groups;
        block = goal + (sizeof(struct spaceBitmapDesc) << 3);
        block_group = block >> (sb->s_blocksize_bits + 3);
        group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc);

        bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
        if (bitmap_nr < 0)
                goto error_return;
        bh = bitmap->s_block_bitmap[bitmap_nr];
        ptr = memscan((char *)bh->b_data + group_start, 0xFF,
                      sb->s_blocksize - group_start);

        if ((ptr - ((char *)bh->b_data)) < sb->s_blocksize) {
                bit = block % (sb->s_blocksize << 3);
                if (udf_test_bit(bit, bh->b_data))
                        goto got_block;

                end_goal = (bit + 63) & ~63;
                bit = udf_find_next_one_bit(bh->b_data, end_goal, bit);
                if (bit < end_goal)
                        goto got_block;

                ptr = memscan((char *)bh->b_data + (bit >> 3), 0xFF,
                              sb->s_blocksize - ((bit + 7) >> 3));
                newbit = (ptr - ((char *)bh->b_data)) << 3;
                if (newbit < sb->s_blocksize << 3) {
                        bit = newbit;
                        goto search_back;
                }

                newbit = udf_find_next_one_bit(bh->b_data,
                                               sb->s_blocksize << 3, bit);
                if (newbit < sb->s_blocksize << 3) {
                        bit = newbit;
                        goto got_block;
                }
        }

        for (i = 0; i < (nr_groups * 2); i++) {
                block_group++;
                if (block_group >= nr_groups)
                        block_group = 0;
                group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc);

                bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
                if (bitmap_nr < 0)
                        goto error_return;
                bh = bitmap->s_block_bitmap[bitmap_nr];
                if (i < nr_groups) {
                        ptr = memscan((char *)bh->b_data + group_start, 0xFF,
                                      sb->s_blocksize - group_start);
                        if ((ptr - ((char *)bh->b_data)) < sb->s_blocksize) {
                                bit = (ptr - ((char *)bh->b_data)) << 3;
                                break;
                        }
                } else {
                        bit = udf_find_next_one_bit(bh->b_data,
                                                    sb->s_blocksize << 3,
                                                    group_start << 3);
                        if (bit < sb->s_blocksize << 3)
                                break;
                }
        }
        if (i >= (nr_groups * 2)) {
                mutex_unlock(&sbi->s_alloc_mutex);
                return newblock;
        }
        if (bit < sb->s_blocksize << 3)
                goto search_back;
        else
                bit = udf_find_next_one_bit(bh->b_data, sb->s_blocksize << 3,
                                            group_start << 3);
        if (bit >= sb->s_blocksize << 3) {
                mutex_unlock(&sbi->s_alloc_mutex);
                return 0;
        }

search_back:
        i = 0;
        while (i < 7 && bit > (group_start << 3) &&
               udf_test_bit(bit - 1, bh->b_data)) {
                ++i;
                --bit;
        }

got_block:
        newblock = bit + (block_group << (sb->s_blocksize_bits + 3)) -
                (sizeof(struct spaceBitmapDesc) << 3);

        if (newblock >= sbi->s_partmaps[partition].s_partition_len) {
                /*
                 * Ran off the end of the bitmap, and bits following are
                 * non-compliant (not all zero)
                 */
                udf_err(sb, "bitmap for partition %d corrupted (block %u marked"
                        " as free, partition length is %u)\n", partition,
                        newblock, sbi->s_partmaps[partition].s_partition_len);
                goto error_return;
        }

        if (!udf_clear_bit(bit, bh->b_data)) {
                udf_debug("bit already cleared for block %d\n", bit);
                goto repeat;
        }

        mark_buffer_dirty(bh);

        udf_add_free_space(sb, partition, -1);
        mutex_unlock(&sbi->s_alloc_mutex);
        *err = 0;
        return newblock;

error_return:
        *err = -EIO;
        mutex_unlock(&sbi->s_alloc_mutex);
        return 0;
}

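/*
 * Free 'count' blocks by recording them in the partition's Unallocated Space
 * Table (an inode whose extents describe free space).  The freed run is
 * merged with an existing extent whenever it immediately precedes or follows
 * one; anything left over is appended as a new extent, stealing one of the
 * freed blocks for an indirect extent block if the current one is full.
 */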
static void udf_table_free_blocks(struct super_block *sb,
                                  struct inode *table,
                                  struct kernel_lb_addr *bloc,
                                  uint32_t offset,
                                  uint32_t count)
{
        struct udf_sb_info *sbi = UDF_SB(sb);
        struct udf_part_map *partmap;
        uint32_t start, end;
        uint32_t elen;
        struct kernel_lb_addr eloc;
        struct extent_position oepos, epos;
        int8_t etype;
        struct udf_inode_info *iinfo;

        mutex_lock(&sbi->s_alloc_mutex);
        partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
        if (bloc->logicalBlockNum + count < count ||
            (bloc->logicalBlockNum + count) > partmap->s_partition_len) {
                udf_debug("%u < %d || %u + %u > %u\n",
                          bloc->logicalBlockNum, 0,
                          bloc->logicalBlockNum, count,
                          partmap->s_partition_len);
                goto error_return;
        }

        iinfo = UDF_I(table);
        udf_add_free_space(sb, sbi->s_partition, count);

        start = bloc->logicalBlockNum + offset;
        end = bloc->logicalBlockNum + offset + count - 1;

        epos.offset = oepos.offset = sizeof(struct unallocSpaceEntry);
        elen = 0;
        epos.block = oepos.block = iinfo->i_location;
        epos.bh = oepos.bh = NULL;

        while (count &&
               (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
                if (((eloc.logicalBlockNum +
                        (elen >> sb->s_blocksize_bits)) == start)) {
                        if ((0x3FFFFFFF - elen) <
                                        (count << sb->s_blocksize_bits)) {
                                uint32_t tmp = ((0x3FFFFFFF - elen) >>
                                                        sb->s_blocksize_bits);
                                count -= tmp;
                                start += tmp;
                                elen = (etype << 30) |
                                        (0x40000000 - sb->s_blocksize);
                        } else {
                                elen = (etype << 30) |
                                        (elen +
                                        (count << sb->s_blocksize_bits));
                                start += count;
                                count = 0;
                        }
                        udf_write_aext(table, &oepos, &eloc, elen, 1);
                } else if (eloc.logicalBlockNum == (end + 1)) {
                        if ((0x3FFFFFFF - elen) <
                                        (count << sb->s_blocksize_bits)) {
                                uint32_t tmp = ((0x3FFFFFFF - elen) >>
                                                sb->s_blocksize_bits);
                                count -= tmp;
                                end -= tmp;
                                eloc.logicalBlockNum -= tmp;
                                elen = (etype << 30) |
                                        (0x40000000 - sb->s_blocksize);
                        } else {
                                eloc.logicalBlockNum = start;
                                elen = (etype << 30) |
                                        (elen +
                                        (count << sb->s_blocksize_bits));
                                end -= count;
                                count = 0;
                        }
                        udf_write_aext(table, &oepos, &eloc, elen, 1);
                }

                if (epos.bh != oepos.bh) {
                        oepos.block = epos.block;
                        brelse(oepos.bh);
                        get_bh(epos.bh);
                        oepos.bh = epos.bh;
                        oepos.offset = 0;
                } else {
                        oepos.offset = epos.offset;
                }
        }

        if (count) {
                /*
                 * NOTE: we CANNOT use udf_add_aext here, as it can try to
                 * allocate a new block, and since we hold the super block
                 * lock already very bad things would happen :)
                 *
                 * We copy the behavior of udf_add_aext, but instead of
                 * trying to allocate a new block close to the existing one,
                 * we just steal a block from the extent we are trying to add.
                 *
                 * It would be nice if the blocks were close together, but it
                 * isn't required.
                 */

                int adsize;

                eloc.logicalBlockNum = start;
                elen = EXT_RECORDED_ALLOCATED |
                        (count << sb->s_blocksize_bits);

                if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
                        adsize = sizeof(struct short_ad);
                else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
                        adsize = sizeof(struct long_ad);
                else {
                        brelse(oepos.bh);
                        brelse(epos.bh);
                        goto error_return;
                }

                if (epos.offset + (2 * adsize) > sb->s_blocksize) {
                        /* Steal a block from the extent being freed */
                        udf_setup_indirect_aext(table, eloc.logicalBlockNum,
                                                &epos);

                        eloc.logicalBlockNum++;
                        elen -= sb->s_blocksize;
                }

                /* It's possible that stealing the block emptied the extent */
                if (elen)
                        __udf_add_aext(table, &epos, &eloc, elen, 1);
        }

        brelse(epos.bh);
        brelse(oepos.bh);

error_return:
        mutex_unlock(&sbi->s_alloc_mutex);
        return;
}

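/*
 * Preallocate up to block_count blocks from the Unallocated Space Table.
 * Only an extent that starts exactly at first_block is used: the allocated
 * blocks are carved off its front and the extent is shrunk, or deleted when
 * it is consumed entirely.  Returns the number of blocks allocated.
 */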
static int udf_table_prealloc_blocks(struct super_block *sb,
                                     struct inode *table, uint16_t partition,
                                     uint32_t first_block, uint32_t block_count)
{
        struct udf_sb_info *sbi = UDF_SB(sb);
        int alloc_count = 0;
        uint32_t elen, adsize;
        struct kernel_lb_addr eloc;
        struct extent_position epos;
        int8_t etype = -1;
        struct udf_inode_info *iinfo;

        if (first_block >= sbi->s_partmaps[partition].s_partition_len)
                return 0;

        iinfo = UDF_I(table);
        if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
                adsize = sizeof(struct short_ad);
        else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
                adsize = sizeof(struct long_ad);
        else
                return 0;

        mutex_lock(&sbi->s_alloc_mutex);
        epos.offset = sizeof(struct unallocSpaceEntry);
        epos.block = iinfo->i_location;
        epos.bh = NULL;
        eloc.logicalBlockNum = 0xFFFFFFFF;

        while (first_block != eloc.logicalBlockNum &&
               (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
                udf_debug("eloc=%u, elen=%u, first_block=%u\n",
                          eloc.logicalBlockNum, elen, first_block);
                ; /* empty loop body */
        }

        if (first_block == eloc.logicalBlockNum) {
                epos.offset -= adsize;

                alloc_count = (elen >> sb->s_blocksize_bits);
                if (alloc_count > block_count) {
                        alloc_count = block_count;
                        eloc.logicalBlockNum += alloc_count;
                        elen -= (alloc_count << sb->s_blocksize_bits);
                        udf_write_aext(table, &epos, &eloc,
                                        (etype << 30) | elen, 1);
                } else
                        udf_delete_aext(table, epos);
        } else {
                alloc_count = 0;
        }

        brelse(epos.bh);

        if (alloc_count)
                udf_add_free_space(sb, partition, -alloc_count);
        mutex_unlock(&sbi->s_alloc_mutex);
        return alloc_count;
}

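/*
 * Allocate one block from the Unallocated Space Table.  All extents are
 * scanned for the free extent closest to the goal block; the first block of
 * that extent is handed out and the extent is shrunk by one block, or
 * deleted when that empties it.
 */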
static udf_pblk_t udf_table_new_block(struct super_block *sb,
                               struct inode *table, uint16_t partition,
                               uint32_t goal, int *err)
{
        struct udf_sb_info *sbi = UDF_SB(sb);
        uint32_t spread = 0xFFFFFFFF, nspread = 0xFFFFFFFF;
        udf_pblk_t newblock = 0;
        uint32_t adsize;
        uint32_t elen, goal_elen = 0;
        struct kernel_lb_addr eloc, goal_eloc;
        struct extent_position epos, goal_epos;
        int8_t etype;
        struct udf_inode_info *iinfo = UDF_I(table);

        *err = -ENOSPC;

        if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
                adsize = sizeof(struct short_ad);
        else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
                adsize = sizeof(struct long_ad);
        else
                return newblock;

        mutex_lock(&sbi->s_alloc_mutex);
        if (goal >= sbi->s_partmaps[partition].s_partition_len)
                goal = 0;

        /* We search for the closest matching block to goal. If we find
           an exact hit, we stop. Otherwise we keep going till we run out
           of extents. We store the buffer_head, bloc, and extoffset
           of the current closest match and use that when we are done.
         */
        epos.offset = sizeof(struct unallocSpaceEntry);
        epos.block = iinfo->i_location;
        epos.bh = goal_epos.bh = NULL;

        while (spread &&
               (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
                if (goal >= eloc.logicalBlockNum) {
                        if (goal < eloc.logicalBlockNum +
                                        (elen >> sb->s_blocksize_bits))
                                nspread = 0;
                        else
                                nspread = goal - eloc.logicalBlockNum -
                                        (elen >> sb->s_blocksize_bits);
                } else {
                        nspread = eloc.logicalBlockNum - goal;
                }

                if (nspread < spread) {
                        spread = nspread;
                        if (goal_epos.bh != epos.bh) {
                                brelse(goal_epos.bh);
                                goal_epos.bh = epos.bh;
                                get_bh(goal_epos.bh);
                        }
                        goal_epos.block = epos.block;
                        goal_epos.offset = epos.offset - adsize;
                        goal_eloc = eloc;
                        goal_elen = (etype << 30) | elen;
                }
        }

        brelse(epos.bh);

        if (spread == 0xFFFFFFFF) {
                brelse(goal_epos.bh);
                mutex_unlock(&sbi->s_alloc_mutex);
                return 0;
        }

        /* Only allocate blocks from the beginning of the extent.
           That way, we only delete (empty) extents, never have to insert an
           extent because of splitting */
        /* This works, but very poorly.... */

        newblock = goal_eloc.logicalBlockNum;
        goal_eloc.logicalBlockNum++;
        goal_elen -= sb->s_blocksize;

        if (goal_elen)
                udf_write_aext(table, &goal_epos, &goal_eloc, goal_elen, 1);
        else
                udf_delete_aext(table, goal_epos);
        brelse(goal_epos.bh);

        udf_add_free_space(sb, partition, -1);

        mutex_unlock(&sbi->s_alloc_mutex);
        *err = 0;
        return newblock;
}

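/*
 * Free a run of blocks, dispatching to the bitmap or table implementation
 * depending on how unallocated space is tracked for the partition, and
 * update the owning inode's byte count when an inode is given.
 */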
void udf_free_blocks(struct super_block *sb, struct inode *inode,
                     struct kernel_lb_addr *bloc, uint32_t offset,
                     uint32_t count)
{
        uint16_t partition = bloc->partitionReferenceNum;
        struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];

        if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP) {
                udf_bitmap_free_blocks(sb, map->s_uspace.s_bitmap,
                                       bloc, offset, count);
        } else if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE) {
                udf_table_free_blocks(sb, map->s_uspace.s_table,
                                      bloc, offset, count);
        }

        if (inode) {
                inode_sub_bytes(inode,
                                ((sector_t)count) << sb->s_blocksize_bits);
        }
}

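/*
 * Preallocate blocks for an inode, again dispatching on the partition's
 * unallocated-space tracking method.  Returns the number of blocks obtained,
 * which may be fewer than requested, or 0.
 */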
inline int udf_prealloc_blocks(struct super_block *sb,
                               struct inode *inode,
                               uint16_t partition, uint32_t first_block,
                               uint32_t block_count)
{
        struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];
        int allocated;

        if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP)
                allocated = udf_bitmap_prealloc_blocks(sb,
                                                       map->s_uspace.s_bitmap,
                                                       partition, first_block,
                                                       block_count);
        else if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE)
                allocated = udf_table_prealloc_blocks(sb,
                                                      map->s_uspace.s_table,
                                                      partition, first_block,
                                                      block_count);
        else
                return 0;

        if (inode && allocated > 0)
                inode_add_bytes(inode, allocated << sb->s_blocksize_bits);
        return allocated;
}

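/*
 * Allocate a single block near the goal block for an inode, using the bitmap
 * or table allocator as appropriate.  Returns the new block number or 0,
 * with the result code stored in *err.
 */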
inline udf_pblk_t udf_new_block(struct super_block *sb,
                         struct inode *inode,
                         uint16_t partition, uint32_t goal, int *err)
{
        struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];
        udf_pblk_t block;

        if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP)
                block = udf_bitmap_new_block(sb,
                                             map->s_uspace.s_bitmap,
                                             partition, goal, err);
        else if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE)
                block = udf_table_new_block(sb,
                                            map->s_uspace.s_table,
                                            partition, goal, err);
        else {
                *err = -EIO;
                return 0;
        }
        if (inode && block)
                inode_add_bytes(inode, sb->s_blocksize);
        return block;
}