fs/udf/balloc.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * balloc.c
 *
 * PURPOSE
 *      Block allocation handling routines for the OSTA-UDF(tm) filesystem.
 *
 * COPYRIGHT
 *  (C) 1999-2001 Ben Fennema
 *  (C) 1999 Stelias Computing Inc
 *
 * HISTORY
 *
 *  02/24/99 blf  Created.
 *
 */

#include "udfdecl.h"

#include <linux/bitops.h>

#include "udf_i.h"
#include "udf_sb.h"

#define udf_clear_bit   __test_and_clear_bit_le
#define udf_set_bit     __test_and_set_bit_le
#define udf_test_bit    test_bit_le
#define udf_find_next_one_bit   find_next_bit_le

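/*
 * Read one block of the partition's Space Bitmap into cache slot @bitmap_nr
 * of @bitmap and run a basic consistency check on its contents.  Returns 0
 * on success, -EIO if the block cannot be read, or -EFSCORRUPTED if the
 * check fails.
 */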
static int read_block_bitmap(struct super_block *sb,
                             struct udf_bitmap *bitmap, unsigned int block,
                             unsigned long bitmap_nr)
{
        struct buffer_head *bh = NULL;
        int i;
        int max_bits, off, count;
        struct kernel_lb_addr loc;

        loc.logicalBlockNum = bitmap->s_extPosition;
        loc.partitionReferenceNum = UDF_SB(sb)->s_partition;

        bh = sb_bread(sb, udf_get_lb_pblock(sb, &loc, block));
        bitmap->s_block_bitmap[bitmap_nr] = bh;
        if (!bh)
                return -EIO;

        /* Check consistency of Space Bitmap buffer. */
        max_bits = sb->s_blocksize * 8;
        if (!bitmap_nr) {
                off = sizeof(struct spaceBitmapDesc) << 3;
                count = min(max_bits - off, bitmap->s_nr_groups);
        } else {
                /*
                 * Rough check if bitmap number is too big to have any bitmap
                 * blocks reserved.
                 */
                if (bitmap_nr >
                    (bitmap->s_nr_groups >> (sb->s_blocksize_bits + 3)) + 2)
                        return 0;
                off = 0;
                count = bitmap->s_nr_groups - bitmap_nr * max_bits +
                                (sizeof(struct spaceBitmapDesc) << 3);
                count = min(count, max_bits);
        }

        for (i = 0; i < count; i++)
                if (udf_test_bit(i + off, bh->b_data))
                        return -EFSCORRUPTED;
        return 0;
}

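/*
 * Make sure the bitmap block for @block_group is cached in
 * bitmap->s_block_bitmap[], reading it from disk if necessary.
 * Returns the slot number (== @block_group) or a negative error code.
 */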
static int __load_block_bitmap(struct super_block *sb,
                               struct udf_bitmap *bitmap,
                               unsigned int block_group)
{
        int retval = 0;
        int nr_groups = bitmap->s_nr_groups;

        if (block_group >= nr_groups) {
                udf_debug("block_group (%u) >= nr_groups (%d)\n",
                          block_group, nr_groups);
        }

        if (bitmap->s_block_bitmap[block_group])
                return block_group;

        retval = read_block_bitmap(sb, bitmap, block_group, block_group);
        if (retval < 0)
                return retval;

        return block_group;
}

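/*
 * Wrapper around __load_block_bitmap() that additionally verifies that the
 * cached buffer really exists; returns the slot number or a negative error.
 */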
static inline int load_block_bitmap(struct super_block *sb,
                                    struct udf_bitmap *bitmap,
                                    unsigned int block_group)
{
        int slot;

        slot = __load_block_bitmap(sb, bitmap, block_group);

        if (slot < 0)
                return slot;

        if (!bitmap->s_block_bitmap[slot])
                return -EIO;

        return slot;
}

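/*
 * Adjust the free space counter of @partition in the Logical Volume
 * Integrity Descriptor by @cnt blocks (callers pass a negative count to
 * subtract) and mark the LVID as updated.
 */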
static void udf_add_free_space(struct super_block *sb, u16 partition, u32 cnt)
{
        struct udf_sb_info *sbi = UDF_SB(sb);
        struct logicalVolIntegrityDesc *lvid;

        if (!sbi->s_lvid_bh)
                return;

        lvid = (struct logicalVolIntegrityDesc *)sbi->s_lvid_bh->b_data;
        le32_add_cpu(&lvid->freeSpaceTable[partition], cnt);
        udf_updated_lvid(sb);
}

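/*
 * Free @count blocks starting at @bloc + @offset in a partition managed by
 * a Space Bitmap: the corresponding bits are set again, handling runs that
 * cross bitmap block boundaries, and the LVID free space count is updated.
 */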
static void udf_bitmap_free_blocks(struct super_block *sb,
                                   struct udf_bitmap *bitmap,
                                   struct kernel_lb_addr *bloc,
                                   uint32_t offset,
                                   uint32_t count)
{
        struct udf_sb_info *sbi = UDF_SB(sb);
        struct buffer_head *bh = NULL;
        struct udf_part_map *partmap;
        unsigned long block;
        unsigned long block_group;
        unsigned long bit;
        unsigned long i;
        int bitmap_nr;
        unsigned long overflow;

        mutex_lock(&sbi->s_alloc_mutex);
        partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
        if (bloc->logicalBlockNum + count < count ||
            (bloc->logicalBlockNum + count) > partmap->s_partition_len) {
                udf_debug("%u < %d || %u + %u > %u\n",
                          bloc->logicalBlockNum, 0,
                          bloc->logicalBlockNum, count,
                          partmap->s_partition_len);
                goto error_return;
        }

        block = bloc->logicalBlockNum + offset +
                (sizeof(struct spaceBitmapDesc) << 3);

        do {
                overflow = 0;
                block_group = block >> (sb->s_blocksize_bits + 3);
                bit = block % (sb->s_blocksize << 3);

                /*
                 * Check to see if we are freeing blocks across a group boundary.
                 */
                if (bit + count > (sb->s_blocksize << 3)) {
                        overflow = bit + count - (sb->s_blocksize << 3);
                        count -= overflow;
                }
                bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
                if (bitmap_nr < 0)
                        goto error_return;

                bh = bitmap->s_block_bitmap[bitmap_nr];
                for (i = 0; i < count; i++) {
                        if (udf_set_bit(bit + i, bh->b_data)) {
                                udf_debug("bit %lu already set\n", bit + i);
                                udf_debug("byte=%2x\n",
                                          ((__u8 *)bh->b_data)[(bit + i) >> 3]);
                        }
                }
                udf_add_free_space(sb, sbi->s_partition, count);
                mark_buffer_dirty(bh);
                if (overflow) {
                        block += count;
                        count = overflow;
                }
        } while (overflow);

error_return:
        mutex_unlock(&sbi->s_alloc_mutex);
}

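/*
 * Try to allocate up to @block_count blocks starting exactly at @first_block
 * by clearing consecutive bits in the Space Bitmap.  Allocation stops at the
 * first block that is already in use; the number of blocks actually
 * allocated is returned.
 */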
static int udf_bitmap_prealloc_blocks(struct super_block *sb,
                                      struct udf_bitmap *bitmap,
                                      uint16_t partition, uint32_t first_block,
                                      uint32_t block_count)
{
        struct udf_sb_info *sbi = UDF_SB(sb);
        int alloc_count = 0;
        int bit, block, block_group;
        int bitmap_nr;
        struct buffer_head *bh;
        __u32 part_len;

        mutex_lock(&sbi->s_alloc_mutex);
        part_len = sbi->s_partmaps[partition].s_partition_len;
        if (first_block >= part_len)
                goto out;

        if (first_block + block_count > part_len)
                block_count = part_len - first_block;

        do {
                block = first_block + (sizeof(struct spaceBitmapDesc) << 3);
                block_group = block >> (sb->s_blocksize_bits + 3);

                bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
                if (bitmap_nr < 0)
                        goto out;
                bh = bitmap->s_block_bitmap[bitmap_nr];

                bit = block % (sb->s_blocksize << 3);

                while (bit < (sb->s_blocksize << 3) && block_count > 0) {
                        if (!udf_clear_bit(bit, bh->b_data))
                                goto out;
                        block_count--;
                        alloc_count++;
                        bit++;
                        block++;
                }
                mark_buffer_dirty(bh);
        } while (block_count > 0);

out:
        udf_add_free_space(sb, partition, -alloc_count);
        mutex_unlock(&sbi->s_alloc_mutex);
        return alloc_count;
}

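/*
 * Allocate a single block from a bitmap-managed partition, preferably close
 * to @goal.  The goal's bitmap block is searched first (the goal bit itself,
 * then the rest of that block); after that the remaining groups are scanned.
 * On success the block number is returned and *err is set to 0; on failure
 * 0 is returned and *err holds the error.
 */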
static udf_pblk_t udf_bitmap_new_block(struct super_block *sb,
                                struct udf_bitmap *bitmap, uint16_t partition,
                                uint32_t goal, int *err)
{
        struct udf_sb_info *sbi = UDF_SB(sb);
        int newbit, bit = 0;
        udf_pblk_t block;
        int block_group, group_start;
        int end_goal, nr_groups, bitmap_nr, i;
        struct buffer_head *bh = NULL;
        char *ptr;
        udf_pblk_t newblock = 0;

        *err = -ENOSPC;
        mutex_lock(&sbi->s_alloc_mutex);

repeat:
        if (goal >= sbi->s_partmaps[partition].s_partition_len)
                goal = 0;

        nr_groups = bitmap->s_nr_groups;
        block = goal + (sizeof(struct spaceBitmapDesc) << 3);
        block_group = block >> (sb->s_blocksize_bits + 3);
        group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc);

        bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
        if (bitmap_nr < 0)
                goto error_return;
        bh = bitmap->s_block_bitmap[bitmap_nr];
        ptr = memscan((char *)bh->b_data + group_start, 0xFF,
                      sb->s_blocksize - group_start);

        if ((ptr - ((char *)bh->b_data)) < sb->s_blocksize) {
                bit = block % (sb->s_blocksize << 3);
                if (udf_test_bit(bit, bh->b_data))
                        goto got_block;

                end_goal = (bit + 63) & ~63;
                bit = udf_find_next_one_bit(bh->b_data, end_goal, bit);
                if (bit < end_goal)
                        goto got_block;

                ptr = memscan((char *)bh->b_data + (bit >> 3), 0xFF,
                              sb->s_blocksize - ((bit + 7) >> 3));
                newbit = (ptr - ((char *)bh->b_data)) << 3;
                if (newbit < sb->s_blocksize << 3) {
                        bit = newbit;
                        goto search_back;
                }

                newbit = udf_find_next_one_bit(bh->b_data,
                                               sb->s_blocksize << 3, bit);
                if (newbit < sb->s_blocksize << 3) {
                        bit = newbit;
                        goto got_block;
                }
        }

        for (i = 0; i < (nr_groups * 2); i++) {
                block_group++;
                if (block_group >= nr_groups)
                        block_group = 0;
                group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc);

                bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
                if (bitmap_nr < 0)
                        goto error_return;
                bh = bitmap->s_block_bitmap[bitmap_nr];
                if (i < nr_groups) {
                        ptr = memscan((char *)bh->b_data + group_start, 0xFF,
                                      sb->s_blocksize - group_start);
                        if ((ptr - ((char *)bh->b_data)) < sb->s_blocksize) {
                                bit = (ptr - ((char *)bh->b_data)) << 3;
                                break;
                        }
                } else {
                        bit = udf_find_next_one_bit(bh->b_data,
                                                    sb->s_blocksize << 3,
                                                    group_start << 3);
                        if (bit < sb->s_blocksize << 3)
                                break;
                }
        }
        if (i >= (nr_groups * 2)) {
                mutex_unlock(&sbi->s_alloc_mutex);
                return newblock;
        }
        if (bit < sb->s_blocksize << 3)
                goto search_back;
        else
                bit = udf_find_next_one_bit(bh->b_data, sb->s_blocksize << 3,
                                            group_start << 3);
        if (bit >= sb->s_blocksize << 3) {
                mutex_unlock(&sbi->s_alloc_mutex);
                return 0;
        }

search_back:
        i = 0;
        while (i < 7 && bit > (group_start << 3) &&
               udf_test_bit(bit - 1, bh->b_data)) {
                ++i;
                --bit;
        }

got_block:
        newblock = bit + (block_group << (sb->s_blocksize_bits + 3)) -
                (sizeof(struct spaceBitmapDesc) << 3);

        if (newblock >= sbi->s_partmaps[partition].s_partition_len) {
                /*
                 * Ran off the end of the bitmap, and bits following are
                 * non-compliant (not all zero)
                 */
                udf_err(sb, "bitmap for partition %d corrupted (block %u marked"
                        " as free, partition length is %u)\n", partition,
                        newblock, sbi->s_partmaps[partition].s_partition_len);
                goto error_return;
        }

        if (!udf_clear_bit(bit, bh->b_data)) {
                udf_debug("bit already cleared for block %d\n", bit);
                goto repeat;
        }

        mark_buffer_dirty(bh);

        udf_add_free_space(sb, partition, -1);
        mutex_unlock(&sbi->s_alloc_mutex);
        *err = 0;
        return newblock;

error_return:
        *err = -EIO;
        mutex_unlock(&sbi->s_alloc_mutex);
        return 0;
}

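/*
 * Free @count blocks starting at @bloc + @offset in a partition managed by
 * an Unallocated Space Table (an inode holding extents of free blocks).  The
 * freed range is merged into an adjacent free extent when possible;
 * otherwise a new extent is appended, stealing one block from the freed
 * range if a new extent block has to be set up.
 */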
static void udf_table_free_blocks(struct super_block *sb,
                                  struct inode *table,
                                  struct kernel_lb_addr *bloc,
                                  uint32_t offset,
                                  uint32_t count)
{
        struct udf_sb_info *sbi = UDF_SB(sb);
        struct udf_part_map *partmap;
        uint32_t start, end;
        uint32_t elen;
        struct kernel_lb_addr eloc;
        struct extent_position oepos, epos;
        int8_t etype;
        struct udf_inode_info *iinfo;

        mutex_lock(&sbi->s_alloc_mutex);
        partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
        if (bloc->logicalBlockNum + count < count ||
            (bloc->logicalBlockNum + count) > partmap->s_partition_len) {
                udf_debug("%u < %d || %u + %u > %u\n",
                          bloc->logicalBlockNum, 0,
                          bloc->logicalBlockNum, count,
                          partmap->s_partition_len);
                goto error_return;
        }

        iinfo = UDF_I(table);
        udf_add_free_space(sb, sbi->s_partition, count);

        start = bloc->logicalBlockNum + offset;
        end = bloc->logicalBlockNum + offset + count - 1;

        epos.offset = oepos.offset = sizeof(struct unallocSpaceEntry);
        elen = 0;
        epos.block = oepos.block = iinfo->i_location;
        epos.bh = oepos.bh = NULL;

        while (count &&
               (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
                if (((eloc.logicalBlockNum +
                        (elen >> sb->s_blocksize_bits)) == start)) {
                        if ((0x3FFFFFFF - elen) <
                                        (count << sb->s_blocksize_bits)) {
                                uint32_t tmp = ((0x3FFFFFFF - elen) >>
                                                        sb->s_blocksize_bits);
                                count -= tmp;
                                start += tmp;
                                elen = (etype << 30) |
                                        (0x40000000 - sb->s_blocksize);
                        } else {
                                elen = (etype << 30) |
                                        (elen +
                                        (count << sb->s_blocksize_bits));
                                start += count;
                                count = 0;
                        }
                        udf_write_aext(table, &oepos, &eloc, elen, 1);
                } else if (eloc.logicalBlockNum == (end + 1)) {
                        if ((0x3FFFFFFF - elen) <
                                        (count << sb->s_blocksize_bits)) {
                                uint32_t tmp = ((0x3FFFFFFF - elen) >>
                                                sb->s_blocksize_bits);
                                count -= tmp;
                                end -= tmp;
                                eloc.logicalBlockNum -= tmp;
                                elen = (etype << 30) |
                                        (0x40000000 - sb->s_blocksize);
                        } else {
                                eloc.logicalBlockNum = start;
                                elen = (etype << 30) |
                                        (elen +
                                        (count << sb->s_blocksize_bits));
                                end -= count;
                                count = 0;
                        }
                        udf_write_aext(table, &oepos, &eloc, elen, 1);
                }

                if (epos.bh != oepos.bh) {
                        oepos.block = epos.block;
                        brelse(oepos.bh);
                        get_bh(epos.bh);
                        oepos.bh = epos.bh;
                        oepos.offset = 0;
                } else {
                        oepos.offset = epos.offset;
                }
        }

        if (count) {
                /*
                 * NOTE: we CANNOT use udf_add_aext here, as it can try to
                 * allocate a new block, and since we hold the super block
                 * lock already very bad things would happen :)
                 *
                 * We copy the behavior of udf_add_aext, but instead of
                 * trying to allocate a new block close to the existing one,
                 * we just steal a block from the extent we are trying to add.
                 *
                 * It would be nice if the blocks were close together, but it
                 * isn't required.
                 */

                int adsize;

                eloc.logicalBlockNum = start;
                elen = EXT_RECORDED_ALLOCATED |
                        (count << sb->s_blocksize_bits);

                if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
                        adsize = sizeof(struct short_ad);
                else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
                        adsize = sizeof(struct long_ad);
                else {
                        brelse(oepos.bh);
                        brelse(epos.bh);
                        goto error_return;
                }

                if (epos.offset + (2 * adsize) > sb->s_blocksize) {
                        /* Steal a block from the extent being free'd */
                        udf_setup_indirect_aext(table, eloc.logicalBlockNum,
                                                &epos);

                        eloc.logicalBlockNum++;
                        elen -= sb->s_blocksize;
                }

                /* It's possible that stealing the block emptied the extent */
                if (elen)
                        __udf_add_aext(table, &epos, &eloc, elen, 1);
        }

        brelse(epos.bh);
        brelse(oepos.bh);

error_return:
        mutex_unlock(&sbi->s_alloc_mutex);
        return;
}

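/*
 * Preallocate up to @block_count blocks from the free space table: walk the
 * extents looking for one that starts exactly at @first_block and carve the
 * requested blocks off its beginning.  Returns the number of blocks
 * allocated (0 if no matching extent was found).
 */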
static int udf_table_prealloc_blocks(struct super_block *sb,
                                     struct inode *table, uint16_t partition,
                                     uint32_t first_block, uint32_t block_count)
{
        struct udf_sb_info *sbi = UDF_SB(sb);
        int alloc_count = 0;
        uint32_t elen, adsize;
        struct kernel_lb_addr eloc;
        struct extent_position epos;
        int8_t etype = -1;
        struct udf_inode_info *iinfo;

        if (first_block >= sbi->s_partmaps[partition].s_partition_len)
                return 0;

        iinfo = UDF_I(table);
        if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
                adsize = sizeof(struct short_ad);
        else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
                adsize = sizeof(struct long_ad);
        else
                return 0;

        mutex_lock(&sbi->s_alloc_mutex);
        epos.offset = sizeof(struct unallocSpaceEntry);
        epos.block = iinfo->i_location;
        epos.bh = NULL;
        eloc.logicalBlockNum = 0xFFFFFFFF;

        while (first_block != eloc.logicalBlockNum &&
               (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
                udf_debug("eloc=%u, elen=%u, first_block=%u\n",
                          eloc.logicalBlockNum, elen, first_block);
                ; /* empty loop body */
        }

        if (first_block == eloc.logicalBlockNum) {
                epos.offset -= adsize;

                alloc_count = (elen >> sb->s_blocksize_bits);
                if (alloc_count > block_count) {
                        alloc_count = block_count;
                        eloc.logicalBlockNum += alloc_count;
                        elen -= (alloc_count << sb->s_blocksize_bits);
                        udf_write_aext(table, &epos, &eloc,
                                        (etype << 30) | elen, 1);
                } else
                        udf_delete_aext(table, epos);
        } else {
                alloc_count = 0;
        }

        brelse(epos.bh);

        if (alloc_count)
                udf_add_free_space(sb, partition, -alloc_count);
        mutex_unlock(&sbi->s_alloc_mutex);
        return alloc_count;
}

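/*
 * Allocate a single block from a table-managed partition.  The free extent
 * whose start is closest to @goal is located and its first block is handed
 * out, shrinking (or deleting) that extent.
 */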
static udf_pblk_t udf_table_new_block(struct super_block *sb,
                               struct inode *table, uint16_t partition,
                               uint32_t goal, int *err)
{
        struct udf_sb_info *sbi = UDF_SB(sb);
        uint32_t spread = 0xFFFFFFFF, nspread = 0xFFFFFFFF;
        udf_pblk_t newblock = 0;
        uint32_t adsize;
        uint32_t elen, goal_elen = 0;
        struct kernel_lb_addr eloc, goal_eloc;
        struct extent_position epos, goal_epos;
        int8_t etype;
        struct udf_inode_info *iinfo = UDF_I(table);

        *err = -ENOSPC;

        if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
                adsize = sizeof(struct short_ad);
        else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
                adsize = sizeof(struct long_ad);
        else
                return newblock;

        mutex_lock(&sbi->s_alloc_mutex);
        if (goal >= sbi->s_partmaps[partition].s_partition_len)
                goal = 0;

        /* We search for the closest matching block to goal. If we find
           an exact hit, we stop. Otherwise we keep going till we run out
           of extents. We store the buffer_head, bloc, and extoffset
           of the current closest match and use that when we are done.
         */
        epos.offset = sizeof(struct unallocSpaceEntry);
        epos.block = iinfo->i_location;
        epos.bh = goal_epos.bh = NULL;

        while (spread &&
               (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
                if (goal >= eloc.logicalBlockNum) {
                        if (goal < eloc.logicalBlockNum +
                                        (elen >> sb->s_blocksize_bits))
                                nspread = 0;
                        else
                                nspread = goal - eloc.logicalBlockNum -
                                        (elen >> sb->s_blocksize_bits);
                } else {
                        nspread = eloc.logicalBlockNum - goal;
                }

                if (nspread < spread) {
                        spread = nspread;
                        if (goal_epos.bh != epos.bh) {
                                brelse(goal_epos.bh);
                                goal_epos.bh = epos.bh;
                                get_bh(goal_epos.bh);
                        }
                        goal_epos.block = epos.block;
                        goal_epos.offset = epos.offset - adsize;
                        goal_eloc = eloc;
                        goal_elen = (etype << 30) | elen;
                }
        }

        brelse(epos.bh);

        if (spread == 0xFFFFFFFF) {
                brelse(goal_epos.bh);
                mutex_unlock(&sbi->s_alloc_mutex);
                return 0;
        }

        /* Only allocate blocks from the beginning of the extent.
           That way, we only delete (empty) extents, never have to insert an
           extent because of splitting */
        /* This works, but very poorly.... */

        newblock = goal_eloc.logicalBlockNum;
        goal_eloc.logicalBlockNum++;
        goal_elen -= sb->s_blocksize;

        if (goal_elen)
                udf_write_aext(table, &goal_epos, &goal_eloc, goal_elen, 1);
        else
                udf_delete_aext(table, goal_epos);
        brelse(goal_epos.bh);

        udf_add_free_space(sb, partition, -1);

        mutex_unlock(&sbi->s_alloc_mutex);
        *err = 0;
        return newblock;
}

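/*
 * Free blocks in the given partition, dispatching to the bitmap or table
 * implementation according to the partition flags, and reduce @inode's
 * block accounting accordingly.
 */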
void udf_free_blocks(struct super_block *sb, struct inode *inode,
                     struct kernel_lb_addr *bloc, uint32_t offset,
                     uint32_t count)
{
        uint16_t partition = bloc->partitionReferenceNum;
        struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];

        if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP) {
                udf_bitmap_free_blocks(sb, map->s_uspace.s_bitmap,
                                       bloc, offset, count);
        } else if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE) {
                udf_table_free_blocks(sb, map->s_uspace.s_table,
                                      bloc, offset, count);
        }

        if (inode) {
                inode_sub_bytes(inode,
                                ((sector_t)count) << sb->s_blocksize_bits);
        }
}

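/*
 * Preallocate blocks for @inode in @partition, using either the bitmap or
 * the table allocator, and account the allocated bytes to the inode.
 * Returns the number of blocks allocated.
 */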
inline int udf_prealloc_blocks(struct super_block *sb,
                               struct inode *inode,
                               uint16_t partition, uint32_t first_block,
                               uint32_t block_count)
{
        struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];
        int allocated;

        if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP)
                allocated = udf_bitmap_prealloc_blocks(sb,
                                                       map->s_uspace.s_bitmap,
                                                       partition, first_block,
                                                       block_count);
        else if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE)
                allocated = udf_table_prealloc_blocks(sb,
                                                      map->s_uspace.s_table,
                                                      partition, first_block,
                                                      block_count);
        else
                return 0;

        if (inode && allocated > 0)
                inode_add_bytes(inode, allocated << sb->s_blocksize_bits);
        return allocated;
}

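/*
 * Allocate one block near @goal in @partition with the appropriate
 * allocator and account it to @inode.  Returns the new block number, or 0
 * with *err set on failure.
 */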
inline udf_pblk_t udf_new_block(struct super_block *sb,
                         struct inode *inode,
                         uint16_t partition, uint32_t goal, int *err)
{
        struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];
        udf_pblk_t block;

        if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP)
                block = udf_bitmap_new_block(sb,
                                             map->s_uspace.s_bitmap,
                                             partition, goal, err);
        else if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE)
                block = udf_table_new_block(sb,
                                            map->s_uspace.s_table,
                                            partition, goal, err);
        else {
                *err = -EIO;
                return 0;
        }
        if (inode && block)
                inode_add_bytes(inode, sb->s_blocksize);
        return block;
}