// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/ufs/balloc.c
 *
 * Copyright (C) 1998
 * Daniel Pirkl <daniel.pirkl@email.cz>
 * Charles University, Faculty of Mathematics and Physics
 *
 * UFS2 write support Evgeniy Dushistov <dushistov@mail.ru>, 2007
 */
#include <linux/fs.h>
#include <linux/stat.h>
#include <linux/time.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/capability.h>
#include <linux/bitops.h>
#include <linux/bio.h>
#include <asm/byteorder.h>

#include "ufs_fs.h"
#include "ufs.h"
#include "swab.h"
#include "util.h"

#define INVBLOCK ((u64)-1L)
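/*
 * Block and fragment allocation for UFS.  All bitmap and summary
 * updates are serialized by UFS_SB(sb)->s_lock; free-space counters
 * are kept in three places (the cylinder group itself, the per-cg
 * summary array fs_cs, and uspi->cs_total) and must stay in sync.
 */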
static u64 ufs_add_fragments(struct inode *, u64, unsigned, unsigned);
static u64 ufs_alloc_fragments(struct inode *, unsigned, u64, unsigned, int *);
static u64 ufs_alloccg_block(struct inode *, struct ufs_cg_private_info *, u64, int *);
static u64 ufs_bitmap_search (struct super_block *, struct ufs_cg_private_info *, u64, unsigned);
static unsigned char ufs_fragtable_8fpb[], ufs_fragtable_other[];
static void ufs_clusteracct(struct super_block *, struct ufs_cg_private_info *, unsigned, int);
/*
 * Free 'count' fragments from fragment number 'fragment'
 */
void ufs_free_fragments(struct inode *inode, u64 fragment, unsigned count)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned cgno, bit, end_bit, bbase, blkmap, i;
	u64 blkno;

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;

	UFSD("ENTER, fragment %llu, count %u\n",
	     (unsigned long long)fragment, count);

	if (ufs_fragnum(fragment) + count > uspi->s_fpg)
		ufs_error (sb, "ufs_free_fragments", "internal error");

	mutex_lock(&UFS_SB(sb)->s_lock);

	cgno = ufs_dtog(uspi, fragment);
	bit = ufs_dtogd(uspi, fragment);
	if (cgno >= uspi->s_ncg) {
		ufs_panic (sb, "ufs_free_fragments", "freeing blocks are outside device");
		goto failed;
	}

	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		goto failed;
	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg)) {
		ufs_panic (sb, "ufs_free_fragments", "internal error, bad magic number on cg %u", cgno);
		goto failed;
	}

	end_bit = bit + count;
	bbase = ufs_blknum (bit);
	blkmap = ubh_blkmap (UCPI_UBH(ucpi), ucpi->c_freeoff, bbase);
	ufs_fragacct (sb, blkmap, ucg->cg_frsum, -1);
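	/*
	 * cg_frsum is bracketed by ufs_fragacct() calls: -1 above removes
	 * the old free-run sizes of this block, +1 below re-adds them once
	 * the freed bits have been set.
	 */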
	for (i = bit; i < end_bit; i++) {
		if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, i))
			ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, i);
		else
			ufs_error (sb, "ufs_free_fragments",
				   "bit already cleared for fragment %u", i);
	}

	inode_sub_bytes(inode, count << uspi->s_fshift);
	fs32_add(sb, &ucg->cg_cs.cs_nffree, count);
	uspi->cs_total.cs_nffree += count;
	fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
	blkmap = ubh_blkmap (UCPI_UBH(ucpi), ucpi->c_freeoff, bbase);
	ufs_fragacct(sb, blkmap, ucg->cg_frsum, 1);

	/*
	 * Trying to reassemble free fragments into block
	 */
	blkno = ufs_fragstoblks (bbase);
	if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno)) {
		fs32_sub(sb, &ucg->cg_cs.cs_nffree, uspi->s_fpb);
		uspi->cs_total.cs_nffree -= uspi->s_fpb;
		fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, uspi->s_fpb);
		if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
			ufs_clusteracct (sb, ucpi, blkno, 1);
		fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
		uspi->cs_total.cs_nbfree++;
		fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);
		if (uspi->fs_magic != UFS2_MAGIC) {
			unsigned cylno = ufs_cbtocylno (bbase);

			fs16_add(sb, &ubh_cg_blks(ucpi, cylno,
						  ufs_cbtorpos(bbase)), 1);
			fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
		}
	}

	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & SB_SYNCHRONOUS)
		ubh_sync_block(UCPI_UBH(ucpi));
	ufs_mark_sb_dirty(sb);

	mutex_unlock(&UFS_SB(sb)->s_lock);
	UFSD("EXIT\n");
	return;

failed:
	mutex_unlock(&UFS_SB(sb)->s_lock);
	UFSD("EXIT (FAILED)\n");
	return;
}
/*
 * Free 'count' fragments from fragment number 'fragment' (free whole blocks)
 */
void ufs_free_blocks(struct inode *inode, u64 fragment, unsigned count)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned overflow, cgno, bit, end_bit, i;
	u64 blkno;

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;

	UFSD("ENTER, fragment %llu, count %u\n",
	     (unsigned long long)fragment, count);

	if ((fragment & uspi->s_fpbmask) || (count & uspi->s_fpbmask)) {
		ufs_error (sb, "ufs_free_blocks", "internal error, "
			   "fragment %llu, count %u\n",
			   (unsigned long long)fragment, count);
		goto failed;
	}

	mutex_lock(&UFS_SB(sb)->s_lock);
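	/*
	 * A range may cross a cylinder group boundary: free what fits in
	 * the current group, then loop back via do_more with the
	 * remainder ("overflow") landing in the next group.
	 */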
do_more:
	overflow = 0;
	cgno = ufs_dtog(uspi, fragment);
	bit = ufs_dtogd(uspi, fragment);
	if (cgno >= uspi->s_ncg) {
		ufs_panic (sb, "ufs_free_blocks", "freeing blocks are outside device");
		goto failed_unlock;
	}
	end_bit = bit + count;
	if (end_bit > uspi->s_fpg) {
		overflow = bit + count - uspi->s_fpg;
		count -= overflow;
		end_bit -= overflow;
	}

	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		goto failed_unlock;
	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg)) {
		ufs_panic (sb, "ufs_free_blocks", "internal error, bad magic number on cg %u", cgno);
		goto failed_unlock;
	}

	for (i = bit; i < end_bit; i += uspi->s_fpb) {
		blkno = ufs_fragstoblks(i);
		if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno)) {
			ufs_error(sb, "ufs_free_blocks", "freeing free fragment");
		}
		ubh_setblock(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
		inode_sub_bytes(inode, uspi->s_fpb << uspi->s_fshift);
		if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
			ufs_clusteracct (sb, ucpi, blkno, 1);

		fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
		uspi->cs_total.cs_nbfree++;
		fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);

		if (uspi->fs_magic != UFS2_MAGIC) {
			unsigned cylno = ufs_cbtocylno(i);

			fs16_add(sb, &ubh_cg_blks(ucpi, cylno,
						  ufs_cbtorpos(i)), 1);
			fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
		}
	}

	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & SB_SYNCHRONOUS)
		ubh_sync_block(UCPI_UBH(ucpi));

	if (overflow) {
		fragment += count;
		count = overflow;
		goto do_more;
	}

	ufs_mark_sb_dirty(sb);
	mutex_unlock(&UFS_SB(sb)->s_lock);
	UFSD("EXIT\n");
	return;

failed_unlock:
	mutex_unlock(&UFS_SB(sb)->s_lock);
failed:
	UFSD("EXIT (FAILED)\n");
	return;
}
/*
 * Modify the inode page cache in the following way:
 * have - blocks with b_blocknr equal to oldb...oldb+count-1
 * get - blocks with b_blocknr equal to newb...newb+count-1
 * We also assume that the blocks oldb...oldb+count-1 are
 * situated at the end of the file.
 *
 * We can come here from ufs_writepage or ufs_prepare_write;
 * locked_page is an argument of these functions, so it is already locked.
 */
static void ufs_change_blocknr(struct inode *inode, sector_t beg,
			       unsigned int count, sector_t oldb,
			       sector_t newb, struct page *locked_page)
{
	const unsigned blks_per_page =
		1 << (PAGE_SHIFT - inode->i_blkbits);
	const unsigned mask = blks_per_page - 1;
	struct address_space * const mapping = inode->i_mapping;
	pgoff_t index, cur_index, last_index;
	unsigned pos, j, lblock;
	sector_t end, i;
	struct page *page;
	struct buffer_head *head, *bh;

	UFSD("ENTER, ino %lu, count %u, oldb %llu, newb %llu\n",
	     inode->i_ino, count,
	     (unsigned long long)oldb, (unsigned long long)newb);

	BUG_ON(!locked_page);
	BUG_ON(!PageLocked(locked_page));

	cur_index = locked_page->index;
	end = count + beg;
	last_index = end >> (PAGE_SHIFT - inode->i_blkbits);
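	/* "i = (i | mask) + 1" advances i to the first block of the next page. */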
	for (i = beg; i < end; i = (i | mask) + 1) {
		index = i >> (PAGE_SHIFT - inode->i_blkbits);

		if (likely(cur_index != index)) {
			page = ufs_get_locked_page(mapping, index);
			if (!page)/* it was truncated */
				continue;
			if (IS_ERR(page)) {/* or EIO */
				ufs_error(inode->i_sb, __func__,
					  "read of page %llu failed\n",
					  (unsigned long long)index);
				continue;
			}
		} else
			page = locked_page;

		head = page_buffers(page);
		bh = head;
		pos = i & mask;
		for (j = 0; j < pos; ++j)
			bh = bh->b_this_page;

		if (unlikely(index == last_index))
			lblock = end & mask;
		else
			lblock = blks_per_page;

		do {
			if (j >= lblock)
				break;
			pos = (i - beg) + j;

			if (!buffer_mapped(bh))
				map_bh(bh, inode->i_sb, oldb + pos);
			if (bh_read(bh, 0) < 0) {
				ufs_error(inode->i_sb, __func__,
					  "read of block failed\n");
				break;
			}

			UFSD(" change from %llu to %llu, pos %u\n",
			     (unsigned long long)(pos + oldb),
			     (unsigned long long)(pos + newb), pos);

			bh->b_blocknr = newb + pos;
			clean_bdev_bh_alias(bh);
			mark_buffer_dirty(bh);
			++j;
			bh = bh->b_this_page;
		} while (bh != head);

		if (likely(cur_index != index))
			ufs_put_locked_page(page);
	}
	UFSD("EXIT\n");
}
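/*
 * Zero the page-cache buffers backing 'n' freshly allocated fragments
 * starting at 'beg', so stale on-disk data never becomes visible;
 * write them out immediately for sync inodes or when 'sync' is set.
 */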
static void ufs_clear_frags(struct inode *inode, sector_t beg, unsigned int n,
			    int sync)
{
	struct buffer_head *bh;
	sector_t end = beg + n;

	for (; beg < end; ++beg) {
		bh = sb_getblk(inode->i_sb, beg);
		lock_buffer(bh);
		memset(bh->b_data, 0, inode->i_sb->s_blocksize);
		set_buffer_uptodate(bh);
		mark_buffer_dirty(bh);
		unlock_buffer(bh);
		if (IS_SYNC(inode) || sync)
			sync_dirty_buffer(bh);
		brelse(bh);
	}
}
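/*
 * Allocate 'count' fragments for the chunk whose data pointer is *p.
 * Three cases below: a brand new allocation (oldcount == 0), in-place
 * extension of the last partial block via ufs_add_fragments(), or
 * relocation of the old fragments into a larger run followed by
 * ufs_change_blocknr().
 */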
u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
		      u64 goal, unsigned count, int *err,
		      struct page *locked_page)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	unsigned cgno, oldcount, newcount;
	u64 tmp, request, result;

	UFSD("ENTER, ino %lu, fragment %llu, goal %llu, count %u\n",
	     inode->i_ino, (unsigned long long)fragment,
	     (unsigned long long)goal, count);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	usb1 = ubh_get_usb_first(uspi);
	*err = -ENOSPC;

	mutex_lock(&UFS_SB(sb)->s_lock);
	tmp = ufs_data_ptr_to_cpu(sb, p);

	if (count + ufs_fragnum(fragment) > uspi->s_fpb) {
		ufs_warning(sb, "ufs_new_fragments", "internal warning"
			    " fragment %llu, count %u",
			    (unsigned long long)fragment, count);
		count = uspi->s_fpb - ufs_fragnum(fragment);
	}
	oldcount = ufs_fragnum (fragment);
	newcount = oldcount + count;

	/*
	 * Somebody else has just allocated our fragments
	 */
	if (oldcount) {
		if (!tmp) {
			ufs_error(sb, "ufs_new_fragments", "internal error, "
				  "fragment %llu, tmp %llu\n",
				  (unsigned long long)fragment,
				  (unsigned long long)tmp);
			mutex_unlock(&UFS_SB(sb)->s_lock);
			return INVBLOCK;
		}
		if (fragment < UFS_I(inode)->i_lastfrag) {
			UFSD("EXIT (ALREADY ALLOCATED)\n");
			mutex_unlock(&UFS_SB(sb)->s_lock);
			return 0;
		}
	} else {
		if (tmp) {
			UFSD("EXIT (ALREADY ALLOCATED)\n");
			mutex_unlock(&UFS_SB(sb)->s_lock);
			return 0;
		}
	}

	/*
	 * There is not enough space for user on the device
	 */
	if (unlikely(ufs_freefrags(uspi) <= uspi->s_root_blocks)) {
		if (!capable(CAP_SYS_RESOURCE)) {
			mutex_unlock(&UFS_SB(sb)->s_lock);
			UFSD("EXIT (FAILED)\n");
			return 0;
		}
	}

	if (goal >= uspi->s_size)
		goal = 0;
	if (goal == 0)
		cgno = ufs_inotocg (inode->i_ino);
	else
		cgno = ufs_dtog(uspi, goal);

	/*
	 * allocate new fragment
	 */
	if (oldcount == 0) {
		result = ufs_alloc_fragments (inode, cgno, goal, count, err);
		if (result) {
			ufs_clear_frags(inode, result + oldcount,
					newcount - oldcount, locked_page != NULL);
			*err = 0;
			write_seqlock(&UFS_I(inode)->meta_lock);
			ufs_cpu_to_data_ptr(sb, p, result);
			UFS_I(inode)->i_lastfrag =
				max(UFS_I(inode)->i_lastfrag, fragment + count);
			write_sequnlock(&UFS_I(inode)->meta_lock);
		}
		mutex_unlock(&UFS_SB(sb)->s_lock);
		UFSD("EXIT, result %llu\n", (unsigned long long)result);
		return result;
	}

	/*
	 * resize block
	 */
	result = ufs_add_fragments(inode, tmp, oldcount, newcount);
	if (result) {
		*err = 0;
		read_seqlock_excl(&UFS_I(inode)->meta_lock);
		UFS_I(inode)->i_lastfrag = max(UFS_I(inode)->i_lastfrag,
						fragment + count);
		read_sequnlock_excl(&UFS_I(inode)->meta_lock);
		ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
				locked_page != NULL);
		mutex_unlock(&UFS_SB(sb)->s_lock);
		UFSD("EXIT, result %llu\n", (unsigned long long)result);
		return result;
	}
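	/*
	 * fs_optim biases the allocator: UFS_OPTSPACE requests exactly
	 * newcount fragments to keep the fs tightly packed, UFS_OPTTIME
	 * requests a full block to avoid repeated relocation; the policy
	 * flips around the s_space_to_time/s_time_to_space thresholds.
	 */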
	/*
	 * allocate new block and move data
	 */
	if (fs32_to_cpu(sb, usb1->fs_optim) == UFS_OPTSPACE) {
		request = newcount;
		if (uspi->cs_total.cs_nffree < uspi->s_space_to_time)
			usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
	} else {
		request = uspi->s_fpb;
		if (uspi->cs_total.cs_nffree > uspi->s_time_to_space)
			usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTSPACE);
	}
	result = ufs_alloc_fragments (inode, cgno, goal, request, err);
	if (result) {
		ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
				locked_page != NULL);
		mutex_unlock(&UFS_SB(sb)->s_lock);
		ufs_change_blocknr(inode, fragment - oldcount, oldcount,
				   uspi->s_sbbase + tmp,
				   uspi->s_sbbase + result, locked_page);
		*err = 0;
		write_seqlock(&UFS_I(inode)->meta_lock);
		ufs_cpu_to_data_ptr(sb, p, result);
		UFS_I(inode)->i_lastfrag = max(UFS_I(inode)->i_lastfrag,
						fragment + count);
		write_sequnlock(&UFS_I(inode)->meta_lock);
		if (newcount < request)
			ufs_free_fragments (inode, result + newcount, request - newcount);
		ufs_free_fragments (inode, tmp, oldcount);
		UFSD("EXIT, result %llu\n", (unsigned long long)result);
		return result;
	}

	mutex_unlock(&UFS_SB(sb)->s_lock);
	UFSD("EXIT (FAILED)\n");
	return 0;
}
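/*
 * Charge 'frags' fragments to the inode's byte/block counters, backing
 * the change out if i_blocks would no longer fit in 32 bits (the
 * on-disk UFS block count is 32-bit).
 */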
static bool try_add_frags(struct inode *inode, unsigned frags)
{
	unsigned size = frags * i_blocksize(inode);
	spin_lock(&inode->i_lock);
	__inode_add_bytes(inode, size);
	if (unlikely((u32)inode->i_blocks != inode->i_blocks)) {
		__inode_sub_bytes(inode, size);
		spin_unlock(&inode->i_lock);
		return false;
	}
	spin_unlock(&inode->i_lock);
	return true;
}
static u64 ufs_add_fragments(struct inode *inode, u64 fragment,
			     unsigned oldcount, unsigned newcount)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned cgno, fragno, fragoff, count, fragsize, i;

	UFSD("ENTER, fragment %llu, oldcount %u, newcount %u\n",
	     (unsigned long long)fragment, oldcount, newcount);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	count = newcount - oldcount;

	cgno = ufs_dtog(uspi, fragment);
	if (fs32_to_cpu(sb, UFS_SB(sb)->fs_cs(cgno).cs_nffree) < count)
		return 0;
	if ((ufs_fragnum (fragment) + newcount) > uspi->s_fpb)
		return 0;
	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		return 0;
	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg)) {
		ufs_panic (sb, "ufs_add_fragments",
			   "internal error, bad magic number on cg %u", cgno);
		return 0;
	}

	fragno = ufs_dtogd(uspi, fragment);
	fragoff = ufs_fragnum (fragno);
	for (i = oldcount; i < newcount; i++)
		if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i))
			return 0;

	if (!try_add_frags(inode, count))
		return 0;
	/*
	 * Block can be extended
	 */
	ucg->cg_time = ufs_get_seconds(sb);
	for (i = newcount; i < (uspi->s_fpb - fragoff); i++)
		if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i))
			break;
	fragsize = i - oldcount;
	if (!fs32_to_cpu(sb, ucg->cg_frsum[fragsize]))
		ufs_panic (sb, "ufs_add_fragments",
			   "internal error or corrupted bitmap on cg %u", cgno);
	fs32_sub(sb, &ucg->cg_frsum[fragsize], 1);
	if (fragsize != count)
		fs32_add(sb, &ucg->cg_frsum[fragsize - count], 1);
	for (i = oldcount; i < newcount; i++)
		ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i);

	fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
	fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
	uspi->cs_total.cs_nffree -= count;

	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & SB_SYNCHRONOUS)
		ubh_sync_block(UCPI_UBH(ucpi));
	ufs_mark_sb_dirty(sb);

	UFSD("EXIT, fragment %llu\n", (unsigned long long)fragment);

	return fragment;
}
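/*
 * Does cylinder group 'cgno' have a free block, or a free run of at
 * least 'count' fragments?  Jumps to cg_found on success; relies on
 * sb, uspi, cgno, count, k and ucg from the enclosing function.
 */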
#define UFS_TEST_FREE_SPACE_CG \
	ucg = (struct ufs_cylinder_group *) UFS_SB(sb)->s_ucg[cgno]->b_data; \
	if (fs32_to_cpu(sb, ucg->cg_cs.cs_nbfree)) \
		goto cg_found; \
	for (k = count; k < uspi->s_fpb; k++) \
		if (fs32_to_cpu(sb, ucg->cg_frsum[k])) \
			goto cg_found;
static u64 ufs_alloc_fragments(struct inode *inode, unsigned cgno,
			       u64 goal, unsigned count, int *err)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned oldcg, i, j, k, allocsize;
	u64 result;

	UFSD("ENTER, ino %lu, cgno %u, goal %llu, count %u\n",
	     inode->i_ino, cgno, (unsigned long long)goal, count);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	oldcg = cgno;

	/*
	 * 1. searching on preferred cylinder group
	 */
	UFS_TEST_FREE_SPACE_CG

	/*
	 * 2. quadratic rehash
	 */
	for (j = 1; j < uspi->s_ncg; j *= 2) {
		cgno += j;
		if (cgno >= uspi->s_ncg)
			cgno -= uspi->s_ncg;
		UFS_TEST_FREE_SPACE_CG
	}

	/*
	 * 3. brute force search
	 * We start at i = 2 (cg 0 is checked in step 1, cg 1 in step 2)
	 */
	cgno = (oldcg + 1) % uspi->s_ncg;
	for (j = 2; j < uspi->s_ncg; j++) {
		cgno++;
		if (cgno >= uspi->s_ncg)
			cgno = 0;
		UFS_TEST_FREE_SPACE_CG
	}

	UFSD("EXIT (FAILED)\n");
	return 0;

cg_found:
	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		return 0;
	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg))
		ufs_panic (sb, "ufs_alloc_fragments",
			   "internal error, bad magic number on cg %u", cgno);
	ucg->cg_time = ufs_get_seconds(sb);

	if (count == uspi->s_fpb) {
		result = ufs_alloccg_block (inode, ucpi, goal, err);
		if (result == INVBLOCK)
			return 0;
		goto succed;
	}
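	/*
	 * Use the cg_frsum histogram to find the smallest free run that
	 * still fits 'count' fragments; if none exists short of a full
	 * block, split a fresh block and return the tail to the free pool.
	 */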
	for (allocsize = count; allocsize < uspi->s_fpb; allocsize++)
		if (fs32_to_cpu(sb, ucg->cg_frsum[allocsize]) != 0)
			break;

	if (allocsize == uspi->s_fpb) {
		result = ufs_alloccg_block (inode, ucpi, goal, err);
		if (result == INVBLOCK)
			return 0;
		goal = ufs_dtogd(uspi, result);
		for (i = count; i < uspi->s_fpb; i++)
			ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, goal + i);
		i = uspi->s_fpb - count;

		inode_sub_bytes(inode, i << uspi->s_fshift);
		fs32_add(sb, &ucg->cg_cs.cs_nffree, i);
		uspi->cs_total.cs_nffree += i;
		fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, i);
		fs32_add(sb, &ucg->cg_frsum[i], 1);
		goto succed;
	}

	result = ufs_bitmap_search (sb, ucpi, goal, allocsize);
	if (result == INVBLOCK)
		return 0;
	if (!try_add_frags(inode, count))
		return 0;
	for (i = 0; i < count; i++)
		ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, result + i);

	fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
	uspi->cs_total.cs_nffree -= count;
	fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
	fs32_sub(sb, &ucg->cg_frsum[allocsize], 1);

	if (count != allocsize)
		fs32_add(sb, &ucg->cg_frsum[allocsize - count], 1);

succed:
	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & SB_SYNCHRONOUS)
		ubh_sync_block(UCPI_UBH(ucpi));
	ufs_mark_sb_dirty(sb);

	result += cgno * uspi->s_fpg;
	UFSD("EXIT3, result %llu\n", (unsigned long long)result);
	return result;
}
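/*
 * Allocate one whole block in cylinder group 'ucpi', preferring 'goal';
 * falls back to a rotor-guided bitmap search when the goal block is
 * taken (or no goal was given).
 */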
static u64 ufs_alloccg_block(struct inode *inode,
			     struct ufs_cg_private_info *ucpi,
			     u64 goal, int *err)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_cylinder_group * ucg;
	u64 result, blkno;

	UFSD("ENTER, goal %llu\n", (unsigned long long)goal);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	ucg = ubh_get_ucg(UCPI_UBH(ucpi));

	if (goal == 0) {
		goal = ucpi->c_rotor;
		goto norot;
	}
	goal = ufs_blknum (goal);
	goal = ufs_dtogd(uspi, goal);

	/*
	 * If the requested block is available, use it.
	 */
	if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, ufs_fragstoblks(goal))) {
		result = goal;
		goto gotit;
	}

norot:
	result = ufs_bitmap_search (sb, ucpi, goal, uspi->s_fpb);
	if (result == INVBLOCK)
		return INVBLOCK;
	ucpi->c_rotor = result;
gotit:
	if (!try_add_frags(inode, uspi->s_fpb))
		return 0;
	blkno = ufs_fragstoblks(result);
	ubh_clrblock (UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
	if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
		ufs_clusteracct (sb, ucpi, blkno, -1);

	fs32_sub(sb, &ucg->cg_cs.cs_nbfree, 1);
	uspi->cs_total.cs_nbfree--;
	fs32_sub(sb, &UFS_SB(sb)->fs_cs(ucpi->c_cgx).cs_nbfree, 1);

	if (uspi->fs_magic != UFS2_MAGIC) {
		unsigned cylno = ufs_cbtocylno((unsigned)result);

		fs16_sub(sb, &ubh_cg_blks(ucpi, cylno,
					  ufs_cbtorpos((unsigned)result)), 1);
		fs32_sub(sb, &ubh_cg_blktot(ucpi, cylno), 1);
	}

	UFSD("EXIT, result %llu\n", (unsigned long long)result);

	return result;
}
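/*
 * Scan 'size' bytes of the cylinder group bitmap starting at 'begin'
 * for a byte whose 'table' entry matches 'mask', crossing fragment
 * buffers as needed.  Returns the number of bytes still unscanned at
 * the match (counting the matching byte); 0 means no match.
 */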
static unsigned ubh_scanc(struct ufs_sb_private_info *uspi,
			  struct ufs_buffer_head *ubh,
			  unsigned begin, unsigned size,
			  unsigned char *table, unsigned char mask)
{
	unsigned rest, offset;
	unsigned char *cp;

	offset = begin & ~uspi->s_fmask;
	begin >>= uspi->s_fshift;
	for (;;) {
		if ((offset + size) < uspi->s_fsize)
			rest = size;
		else
			rest = uspi->s_fsize - offset;
		size -= rest;
		cp = ubh->bh[begin]->b_data + offset;
		while ((table[*cp++] & mask) == 0 && --rest)
			;
		if (rest || !size)
			break;
		begin++;
		offset = 0;
	}
	return (size + rest);
}
/*
 * Find a block of the specified size in the specified cylinder group.
 * @sb: pointer to super block
 * @ucpi: pointer to cylinder group info
 * @goal: near which block we want to find a new one
 * @count: specified size
 */
static u64 ufs_bitmap_search(struct super_block *sb,
			     struct ufs_cg_private_info *ucpi,
			     u64 goal, unsigned count)
{
	/*
	 * Bit patterns for identifying fragments in the block map
	 * used as ((map & mask_arr) == want_arr)
	 */
	static const int mask_arr[9] = {
		0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f, 0xff, 0x1ff, 0x3ff
	};
	static const int want_arr[9] = {
		0x0, 0x2, 0x6, 0xe, 0x1e, 0x3e, 0x7e, 0xfe, 0x1fe
	};
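	/*
	 * Example, count == 2: mask 0x7, want 0x6.  After "blockmap <<= 1"
	 * below, (map & 0x7) == 0x6 at some shift means two free fragments
	 * preceded by a non-free bit (or the block start), i.e. the start
	 * of a usable run.
	 */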
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	unsigned start, length, loc;
	unsigned pos, want, blockmap, mask, end;
	u64 result;

	UFSD("ENTER, cg %u, goal %llu, count %u\n", ucpi->c_cgx,
	     (unsigned long long)goal, count);

	if (goal)
		start = ufs_dtogd(uspi, goal) >> 3;
	else
		start = ucpi->c_frotor >> 3;

	length = ((uspi->s_fpg + 7) >> 3) - start;
	loc = ubh_scanc(uspi, UCPI_UBH(ucpi), ucpi->c_freeoff + start, length,
		(uspi->s_fpb == 8) ? ufs_fragtable_8fpb : ufs_fragtable_other,
		1 << (count - 1 + (uspi->s_fpb & 7)));
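	/*
	 * Nothing between the starting point and the end of the group:
	 * wrap around and rescan from the beginning of the bitmap.
	 */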
	if (loc == 0) {
		length = start + 1;
		loc = ubh_scanc(uspi, UCPI_UBH(ucpi), ucpi->c_freeoff, length,
				(uspi->s_fpb == 8) ? ufs_fragtable_8fpb :
				ufs_fragtable_other,
				1 << (count - 1 + (uspi->s_fpb & 7)));
		if (loc == 0) {
			ufs_error(sb, "ufs_bitmap_search",
				  "bitmap corrupted on cg %u, start %u,"
				  " length %u, count %u, freeoff %u\n",
				  ucpi->c_cgx, start, length, count,
				  ucpi->c_freeoff);
			return INVBLOCK;
		}
		start = 0;
	}
	result = (start + length - loc) << 3;
	ucpi->c_frotor = result;

	/*
	 * found the byte in the map
	 */

	for (end = result + 8; result < end; result += uspi->s_fpb) {
		blockmap = ubh_blkmap(UCPI_UBH(ucpi), ucpi->c_freeoff, result);
		blockmap <<= 1;
		mask = mask_arr[count];
		want = want_arr[count];
		for (pos = 0; pos <= uspi->s_fpb - count; pos++) {
			if ((blockmap & mask) == want) {
				UFSD("EXIT, result %llu\n",
				     (unsigned long long)result);
				return result + pos;
			}
			mask <<= 1;
			want <<= 1;
		}
	}

	ufs_error(sb, "ufs_bitmap_search", "block not in map on cg %u\n",
		  ucpi->c_cgx);
	UFSD("EXIT (FAILED)\n");
	return INVBLOCK;
}
static void ufs_clusteracct(struct super_block * sb,
	struct ufs_cg_private_info * ucpi, unsigned blkno, int cnt)
{
	struct ufs_sb_private_info * uspi;
	int i, start, end, forw, back;

	uspi = UFS_SB(sb)->s_uspi;
	if (uspi->s_contigsumsize <= 0)
		return;

	if (cnt > 0)
		ubh_setbit(UCPI_UBH(ucpi), ucpi->c_clusteroff, blkno);
	else
		ubh_clrbit(UCPI_UBH(ucpi), ucpi->c_clusteroff, blkno);

	/*
	 * Find the size of the cluster going forward.
	 */
	start = blkno + 1;
	end = start + uspi->s_contigsumsize;
	if (end >= ucpi->c_nclusterblks)
		end = ucpi->c_nclusterblks;
	i = ubh_find_next_zero_bit (UCPI_UBH(ucpi), ucpi->c_clusteroff, end, start);
	if (i > end)
		i = end;
	forw = i - start;

	/*
	 * Find the size of the cluster going backward.
	 */
	start = blkno - 1;
	end = start - uspi->s_contigsumsize;
	if (end < 0)
		end = -1;
	i = ubh_find_last_zero_bit (UCPI_UBH(ucpi), ucpi->c_clusteroff, start, end);
	if (i < end)
		i = end;
	back = start - i;

	/*
	 * Account for old cluster and the possibly new forward and
	 * back clusters.
	 */
	i = back + forw + 1;
	if (i > uspi->s_contigsumsize)
		i = uspi->s_contigsumsize;
	fs32_add(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (i << 2)), cnt);
	if (back > 0)
		fs32_sub(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (back << 2)), cnt);
	if (forw > 0)
		fs32_sub(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (forw << 2)), cnt);
}
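/*
 * Fragment tables: indexed by one byte of the free-fragment bitmap,
 * each entry encodes which free-run lengths occur in that byte (for
 * 8 fragments per block, bit n-1 set means a run of n free fragments);
 * ufs_fragtable_other uses a shifted encoding for smaller fragments-
 * per-block values.  These are the 'table' argument of ubh_scanc().
 */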
static unsigned char ufs_fragtable_8fpb[] = {
	0x00, 0x01, 0x01, 0x02, 0x01, 0x01, 0x02, 0x04, 0x01, 0x01, 0x01, 0x03, 0x02, 0x03, 0x04, 0x08,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x02, 0x03, 0x03, 0x02, 0x04, 0x05, 0x08, 0x10,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x04, 0x05, 0x05, 0x06, 0x08, 0x09, 0x10, 0x20,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11,
	0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0A,
	0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, 0x08, 0x09, 0x09, 0x0A, 0x10, 0x11, 0x20, 0x40,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x05, 0x05, 0x05, 0x07, 0x09, 0x09, 0x11, 0x21,
	0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0A,
	0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x02, 0x03, 0x03, 0x02, 0x06, 0x07, 0x0A, 0x12,
	0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, 0x05, 0x05, 0x05, 0x07, 0x06, 0x07, 0x04, 0x0C,
	0x08, 0x09, 0x09, 0x0A, 0x09, 0x09, 0x0A, 0x0C, 0x10, 0x11, 0x11, 0x12, 0x20, 0x21, 0x40, 0x80,
};
static unsigned char ufs_fragtable_other[] = {
	0x00, 0x16, 0x16, 0x2A, 0x16, 0x16, 0x26, 0x4E, 0x16, 0x16, 0x16, 0x3E, 0x2A, 0x3E, 0x4E, 0x8A,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x2A, 0x3E, 0x3E, 0x2A, 0x3E, 0x3E, 0x2E, 0x6E, 0x3E, 0x3E, 0x3E, 0x3E, 0x2A, 0x3E, 0x6E, 0xAA,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x26, 0x36, 0x36, 0x2E, 0x36, 0x36, 0x26, 0x6E, 0x36, 0x36, 0x36, 0x3E, 0x2E, 0x3E, 0x6E, 0xAE,
	0x4E, 0x5E, 0x5E, 0x6E, 0x5E, 0x5E, 0x6E, 0x4E, 0x5E, 0x5E, 0x5E, 0x7E, 0x6E, 0x7E, 0x4E, 0xCE,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0xBE,
	0x2A, 0x3E, 0x3E, 0x2A, 0x3E, 0x3E, 0x2E, 0x6E, 0x3E, 0x3E, 0x3E, 0x3E, 0x2A, 0x3E, 0x6E, 0xAA,
	0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0xBE,
	0x4E, 0x5E, 0x5E, 0x6E, 0x5E, 0x5E, 0x6E, 0x4E, 0x5E, 0x5E, 0x5E, 0x7E, 0x6E, 0x7E, 0x4E, 0xCE,
	0x8A, 0x9E, 0x9E, 0xAA, 0x9E, 0x9E, 0xAE, 0xCE, 0x9E, 0x9E, 0x9E, 0xBE, 0xAA, 0xBE, 0xCE, 0x8A,
};