/*
 * linux/fs/hfsplus/bitmap.c
 *
 * Brad Boyer (flar@allandria.com)
 * (C) 2003 Ardis Technologies <roman@ardistech.com>
 *
 * Handling of allocation file
 */
#include <linux/pagemap.h>

#include "hfsplus_fs.h"
#include "hfsplus_raw.h"

#define PAGE_CACHE_BITS	(PAGE_SIZE * 8)
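/*
 * The allocation file is a plain bitmap with one bit per allocation block.
 * Words are stored big-endian and bit 31 of each 32-bit word describes the
 * lowest-numbered block covered by that word, so a set bit means "in use".
 * PAGE_CACHE_BITS is the number of bitmap bits held by one page of the
 * allocation file's page cache.
 *
 * hfsplus_block_allocate() searches the bitmap for a run of free blocks,
 * starting at block 'offset' and never looking past block 'size'.  Up to
 * *max blocks are marked as used; on return *max holds the number of blocks
 * actually allocated and the return value is the first block of the run
 * ('size' is returned when nothing could be allocated).
 */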
int hfsplus_block_allocate(struct super_block *sb, u32 size,
		u32 offset, u32 *max)
{
	struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
	struct page *page;
	struct address_space *mapping;
	__be32 *pptr, *curr, *end;
	u32 mask, start, len, n;
	int i;

	len = *max;
	if (!len)
		return size;
	hfs_dbg(BITMAP, "block_allocate: %u,%u,%u\n", size, offset, len);
	mutex_lock(&sbi->alloc_mutex);
	mapping = sbi->alloc_file->i_mapping;
	page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS, NULL);
	if (IS_ERR(page)) {
		start = size;
		goto out;
	}
	pptr = kmap(page);
	curr = pptr + (offset & (PAGE_CACHE_BITS - 1)) / 32;
	i = offset % 32;
	offset &= ~(PAGE_CACHE_BITS - 1);
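	/*
	 * curr points at the 32-bit bitmap word containing bit 'offset',
	 * i is the bit position inside that word, and offset is rounded
	 * down to the first bit covered by this page.
	 */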
	if ((size ^ offset) / PAGE_CACHE_BITS)
		end = pptr + PAGE_CACHE_BITS / 32;
	else
		end = pptr + ((size + 31) & (PAGE_CACHE_BITS - 1)) / 32;
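	/*
	 * (size ^ offset) / PAGE_CACHE_BITS is non-zero when the end of the
	 * bitmap lies beyond this page, so the whole page may be scanned;
	 * otherwise 'end' stops at the word that holds bit size - 1.
	 */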
	/* scan the first partial u32 for zero bits */
	n = be32_to_cpu(*curr);
	mask = (1U << 31) >> i;
	for (; i < 32; mask >>= 1, i++) {
		if (!(n & mask))
			goto found;
	}
	curr++;
	/* scan complete u32s for the first zero bit */
	while (1) {
		while (curr < end) {
			n = be32_to_cpu(*curr);
			mask = 1U << 31;
			for (i = 0; i < 32; mask >>= 1, i++) {
				if (!(n & mask))
					goto found;
			}
			curr++;
		}
		kunmap(page);
		offset += PAGE_CACHE_BITS;
		if (offset >= size)
			break;
		page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS,
					 NULL);
		if (IS_ERR(page)) {
			start = size;
			goto out;
		}
		curr = pptr = kmap(page);
		if ((size ^ offset) / PAGE_CACHE_BITS)
			end = pptr + PAGE_CACHE_BITS / 32;
		else
			end = pptr + ((size + 31) & (PAGE_CACHE_BITS - 1)) / 32;
	}
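	/* falling out of the loop means no free bit was found before 'size' */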
	hfs_dbg(BITMAP, "bitmap full\n");
	start = size;
	goto out;
found:
	start = offset + (curr - pptr) * 32 + i;
	if (start >= size) {
		hfs_dbg(BITMAP, "bitmap full\n");
		goto out;
	}
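	/*
	 * 'start' is the absolute block number of the first free bit found;
	 * from here on the code only marks bits, it no longer searches.
	 */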
	/* do any partial u32 at the start */
	len = min(size - start, len);
	while (1) {
		n |= mask;
		if (++i >= 32)
			break;
		mask >>= 1;
		if (!--len || n & mask)
			goto done;
	}
	if (!--len)
		goto done;
	*curr++ = cpu_to_be32(n);
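	/*
	 * n accumulates the newly set bits of the first word; 'done' is
	 * reached as soon as the request is satisfied or an in-use bit is
	 * hit, otherwise the completed word is written back and the scan
	 * continues with whole words.
	 */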
	/* do full u32s */
	while (1) {
		while (curr < end) {
			n = be32_to_cpu(*curr);
			if (len < 32)
				goto last;
			if (n) {
				len = 32;
				goto last;
			}
			*curr++ = cpu_to_be32(0xffffffff);
			len -= 32;
		}
		set_page_dirty(page);
		kunmap(page);
		offset += PAGE_CACHE_BITS;
		page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS,
					 NULL);
		if (IS_ERR(page)) {
			start = size;
			goto out;
		}
		pptr = kmap(page);
		curr = pptr;
		end = pptr + PAGE_CACHE_BITS / 32;
	}
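	/*
	 * Whole free words are claimed 32 blocks at a time; the loop drops
	 * out to 'last' as soon as fewer than 32 blocks remain or the next
	 * word already contains used bits, and moves on to the next bitmap
	 * page when this one is exhausted.
	 */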
last:
	/* do any partial u32 at end */
	mask = 1U << 31;
	for (i = 0; i < len; i++) {
		if (n & mask)
			break;
		n |= mask;
		mask >>= 1;
	}
done:
	*curr = cpu_to_be32(n);
	set_page_dirty(page);
	kunmap(page);
	*max = offset + (curr - pptr) * 32 + i - start;
	sbi->free_blocks -= *max;
	hfsplus_mark_mdb_dirty(sb);
	hfs_dbg(BITMAP, "-> %u,%u\n", start, *max);
out:
	mutex_unlock(&sbi->alloc_mutex);
	return start;
}
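/*
 * hfsplus_block_free() clears 'count' bits starting at block 'offset',
 * returning the blocks to the free pool.  It returns 0 on success,
 * -ENOENT if the range lies outside the volume and -EIO if a bitmap
 * page cannot be read.
 *
 * A rough sketch of how the pair is used by a caller (illustrative only,
 * the variable names are made up; the real callers live in extents.c):
 *
 *	u32 len = want;
 *	u32 start = hfsplus_block_allocate(sb, sbi->total_blocks, goal, &len);
 *	if (start >= sbi->total_blocks)
 *		return -ENOSPC;			// bitmap full
 *	...
 *	hfsplus_block_free(sb, start, len);	// give the run back
 */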
int hfsplus_block_free(struct super_block *sb, u32 offset, u32 count)
{
	struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
	struct page *page;
	struct address_space *mapping;
	__be32 *pptr, *curr, *end;
	u32 mask, len, pnr;
	int i;
	/* is there any actual work to be done? */
	if (!count)
		return 0;

	hfs_dbg(BITMAP, "block_free: %u,%u\n", offset, count);
	/* are all of the bits in range? */
	if ((offset + count) > sbi->total_blocks)
		return -ENOENT;
	mutex_lock(&sbi->alloc_mutex);
	mapping = sbi->alloc_file->i_mapping;
	pnr = offset / PAGE_CACHE_BITS;
	page = read_mapping_page(mapping, pnr, NULL);
	if (IS_ERR(page))
		goto kaboom;
	pptr = kmap(page);
	curr = pptr + (offset & (PAGE_CACHE_BITS - 1)) / 32;
	end = pptr + PAGE_CACHE_BITS / 32;
	len = count;
	/* do any partial u32 at the start */
	i = offset % 32;
	if (i) {
		int j = 32 - i;
		mask = 0xffffffffU << j;
		if (j > count) {
			mask |= 0xffffffffU >> (i + count);
			*curr++ &= cpu_to_be32(mask);
			goto out;
		}
		*curr++ &= cpu_to_be32(mask);
		count -= j;
	}
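	/*
	 * mask keeps the bits before 'offset' (and, when the run ends inside
	 * this same word, the bits after offset + count) set, so only the
	 * freed blocks are cleared by the AND.
	 */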
	/* do full u32s */
	while (1) {
		while (curr < end) {
			if (count < 32)
				goto done;
			*curr++ = 0;
			count -= 32;
		}
		if (!count)
			break;
		set_page_dirty(page);
		kunmap(page);
		page = read_mapping_page(mapping, ++pnr, NULL);
		if (IS_ERR(page))
			goto kaboom;
		pptr = kmap(page);
		curr = pptr;
		end = pptr + PAGE_CACHE_BITS / 32;
	}
done:
	/* do any partial u32 at end */
	if (count) {
		mask = 0xffffffffU >> count;
		*curr &= cpu_to_be32(mask);
	}
out:
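	/* common exit: dirty the last page touched and update the counters */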
	set_page_dirty(page);
	kunmap(page);
	sbi->free_blocks += len;
	hfsplus_mark_mdb_dirty(sb);
	mutex_unlock(&sbi->alloc_mutex);

	return 0;
kaboom:
	pr_crit("unable to mark blocks free: error %ld\n", PTR_ERR(page));
	mutex_unlock(&sbi->alloc_mutex);

	return -EIO;
}