2 * linux/fs/ufs/balloc.c
5 * Daniel Pirkl <daniel.pirkl@email.cz>
6 * Charles University, Faculty of Mathematics and Physics
10 #include <linux/ufs_fs.h>
11 #include <linux/stat.h>
12 #include <linux/time.h>
13 #include <linux/string.h>
14 #include <linux/quotaops.h>
15 #include <linux/buffer_head.h>
16 #include <linux/capability.h>
17 #include <linux/sched.h>
18 #include <linux/bitops.h>
19 #include <asm/byteorder.h>
24 static unsigned ufs_add_fragments (struct inode *, unsigned, unsigned, unsigned, int *);
25 static unsigned ufs_alloc_fragments (struct inode *, unsigned, unsigned, unsigned, int *);
26 static unsigned ufs_alloccg_block (struct inode *, struct ufs_cg_private_info *, unsigned, int *);
27 static unsigned ufs_bitmap_search (struct super_block *, struct ufs_cg_private_info *, unsigned, unsigned);
28 static unsigned char ufs_fragtable_8fpb[], ufs_fragtable_other[];
29 static void ufs_clusteracct(struct super_block *, struct ufs_cg_private_info *, unsigned, int);
32 * Free 'count' fragments from fragment number 'fragment'
/*
 * Free 'count' fragments starting at fragment number 'fragment':
 * set the matching bits in the cylinder group's free map, update the
 * per-cg / per-sb / summary free-fragment counters, and — if the
 * whole block containing them has become free — re-account it as a
 * free block (cs_nffree -> cs_nbfree) plus cylinder statistics.
 *
 * NOTE(review): this excerpt omits several original lines (braces,
 * sb initialization, locking, returns); control flow shown here is
 * incomplete — do not reason about error paths from it alone.
 */
34 void ufs_free_fragments(struct inode *inode, unsigned fragment, unsigned count)
36 struct super_block * sb;
37 struct ufs_sb_private_info * uspi;
38 struct ufs_super_block_first * usb1;
39 struct ufs_cg_private_info * ucpi;
40 struct ufs_cylinder_group * ucg;
41 unsigned cgno, bit, end_bit, bbase, blkmap, i, blkno, cylno;
44 uspi = UFS_SB(sb)->s_uspi;
45 usb1 = ubh_get_usb_first(uspi);
47 UFSD("ENTER, fragment %u, count %u\n", fragment, count);
/* A fragment run must not cross a cylinder-group boundary. */
49 if (ufs_fragnum(fragment) + count > uspi->s_fpg)
50 ufs_error (sb, "ufs_free_fragments", "internal error");
/* Locate the cylinder group and the bit offset of 'fragment' within it. */
54 cgno = ufs_dtog(fragment);
55 bit = ufs_dtogd(fragment);
56 if (cgno >= uspi->s_ncg) {
57 ufs_panic (sb, "ufs_free_fragments", "freeing blocks are outside device");
61 ucpi = ufs_load_cylinder (sb, cgno);
64 ucg = ubh_get_ucg (UCPI_UBH(ucpi));
65 if (!ufs_cg_chkmagic(sb, ucg)) {
66 ufs_panic (sb, "ufs_free_fragments", "internal error, bad magic number on cg %u", cgno);
70 end_bit = bit + count;
71 bbase = ufs_blknum (bit);
/*
 * Drop the old free-fragment pattern of this block from cg_frsum
 * before flipping bits; it is re-added with the new pattern below.
 */
72 blkmap = ubh_blkmap (UCPI_UBH(ucpi), ucpi->c_freeoff, bbase);
73 ufs_fragacct (sb, blkmap, ucg->cg_frsum, -1);
/* Mark each fragment free; a bit already set means a double free. */
74 for (i = bit; i < end_bit; i++) {
75 if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, i))
76 ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, i);
78 ufs_error (sb, "ufs_free_fragments",
79 "bit already cleared for fragment %u", i);
82 DQUOT_FREE_BLOCK (inode, count);
85 fs32_add(sb, &ucg->cg_cs.cs_nffree, count);
86 fs32_add(sb, &usb1->fs_cstotal.cs_nffree, count);
87 fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
/* Re-account the block's new free-fragment pattern in cg_frsum. */
88 blkmap = ubh_blkmap (UCPI_UBH(ucpi), ucpi->c_freeoff, bbase);
89 ufs_fragacct(sb, blkmap, ucg->cg_frsum, 1);
92 * Trying to reassemble free fragments into block
94 blkno = ufs_fragstoblks (bbase);
95 if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno)) {
/* Whole block is free again: move the count from fragments to blocks. */
96 fs32_sub(sb, &ucg->cg_cs.cs_nffree, uspi->s_fpb);
97 fs32_sub(sb, &usb1->fs_cstotal.cs_nffree, uspi->s_fpb);
98 fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, uspi->s_fpb);
99 if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
100 ufs_clusteracct (sb, ucpi, blkno, 1);
101 fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
102 fs32_add(sb, &usb1->fs_cstotal.cs_nbfree, 1);
103 fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);
/* Rotational-position / cylinder summary bookkeeping. */
104 cylno = ufs_cbtocylno (bbase);
105 fs16_add(sb, &ubh_cg_blks(ucpi, cylno, ufs_cbtorpos(bbase)), 1);
106 fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
109 ubh_mark_buffer_dirty (USPI_UBH(uspi));
110 ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
/* Synchronous mounts: push the cg buffers out before returning. */
111 if (sb->s_flags & MS_SYNCHRONOUS) {
112 ubh_ll_rw_block (SWRITE, 1, (struct ufs_buffer_head **)&ucpi);
113 ubh_wait_on_buffer (UCPI_UBH(ucpi));
123 UFSD("EXIT (FAILED)\n");
128 * Free 'count' fragments from fragment number 'fragment' (free whole blocks)
/*
 * Free 'count' fragments worth of whole blocks starting at 'fragment'.
 * Both 'fragment' and 'count' must be block-aligned (checked against
 * s_fpbmask below). Each full block is marked free in the cg map and
 * the free-block counters and cylinder statistics are updated.
 *
 * NOTE(review): excerpt is missing lines (braces, sb init, the
 * overflow re-loop that splits runs across cylinder groups, returns);
 * 'overflow' is computed but its use is not visible here.
 */
130 void ufs_free_blocks(struct inode *inode, unsigned fragment, unsigned count)
132 struct super_block * sb;
133 struct ufs_sb_private_info * uspi;
134 struct ufs_super_block_first * usb1;
135 struct ufs_cg_private_info * ucpi;
136 struct ufs_cylinder_group * ucg;
137 unsigned overflow, cgno, bit, end_bit, blkno, i, cylno;
140 uspi = UFS_SB(sb)->s_uspi;
141 usb1 = ubh_get_usb_first(uspi);
143 UFSD("ENTER, fragment %u, count %u\n", fragment, count);
/* Only whole blocks may be freed here: start and count block-aligned. */
145 if ((fragment & uspi->s_fpbmask) || (count & uspi->s_fpbmask)) {
146 ufs_error (sb, "ufs_free_blocks", "internal error, "
147 "fragment %u, count %u\n", fragment, count);
155 cgno = ufs_dtog (fragment);
156 bit = ufs_dtogd (fragment);
157 if (cgno >= uspi->s_ncg) {
158 ufs_panic (sb, "ufs_free_blocks", "freeing blocks are outside device");
/* Clamp the run to this cylinder group; the rest is the 'overflow'. */
161 end_bit = bit + count;
162 if (end_bit > uspi->s_fpg) {
163 overflow = bit + count - uspi->s_fpg;
168 ucpi = ufs_load_cylinder (sb, cgno);
171 ucg = ubh_get_ucg (UCPI_UBH(ucpi));
172 if (!ufs_cg_chkmagic(sb, ucg)) {
173 ufs_panic (sb, "ufs_free_blocks", "internal error, bad magic number on cg %u", cgno);
/* Free one block (s_fpb fragments) per iteration. */
177 for (i = bit; i < end_bit; i += uspi->s_fpb) {
178 blkno = ufs_fragstoblks(i);
/* Block bit already set == block already free: double free. */
179 if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno)) {
180 ufs_error(sb, "ufs_free_blocks", "freeing free fragment");
182 ubh_setblock(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
183 if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
184 ufs_clusteracct (sb, ucpi, blkno, 1);
185 DQUOT_FREE_BLOCK(inode, uspi->s_fpb);
187 fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
188 fs32_add(sb, &usb1->fs_cstotal.cs_nbfree, 1);
189 fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);
190 cylno = ufs_cbtocylno(i);
191 fs16_add(sb, &ubh_cg_blks(ucpi, cylno, ufs_cbtorpos(i)), 1);
192 fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
195 ubh_mark_buffer_dirty (USPI_UBH(uspi));
196 ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
/* Synchronous mounts: write the cg buffers before returning. */
197 if (sb->s_flags & MS_SYNCHRONOUS) {
198 ubh_ll_rw_block (SWRITE, 1, (struct ufs_buffer_head **)&ucpi);
199 ubh_wait_on_buffer (UCPI_UBH(ucpi));
215 UFSD("EXIT (FAILED)\n");
/*
 * Return the page at 'index' of 'mapping', locked and with buffers,
 * or an ERR_PTR on failure. Tries the page cache first; if absent,
 * reads the page in via the address_space's ->readpage. Retries from
 * the top ('try_again', label not visible in this excerpt) when the
 * page lost its mapping or buffers while we waited for the lock.
 *
 * NOTE(review): excerpt is missing the opening brace, locals, the
 * lock_page call after read_cache_page, and the final return.
 */
219 static struct page *ufs_get_locked_page(struct address_space *mapping,
/* Fast path: page already cached — returned locked. */
225 page = find_lock_page(mapping, index);
227 page = read_cache_page(mapping, index,
228 (filler_t*)mapping->a_ops->readpage,
231 printk(KERN_ERR "ufs_change_blocknr: "
232 "read_cache_page error: ino %lu, index: %lu\n",
233 mapping->host->i_ino, index);
/* Read completed but the page is bad: fail with -EIO. */
239 if (!PageUptodate(page) || PageError(page)) {
241 page_cache_release(page);
243 printk(KERN_ERR "ufs_change_blocknr: "
244 "can not read page: ino %lu, index: %lu\n",
245 mapping->host->i_ino, index);
247 page = ERR_PTR(-EIO);
/* Truncation raced with us, or buffers were stripped: start over. */
252 if (unlikely(!page->mapping || !page_has_buffers(page))) {
254 page_cache_release(page);
255 goto try_again;/*we really need these buffers*/
262 * Modify inode page cache in such way:
263 * have - blocks with b_blocknr equal to oldb...oldb+count-1
264 * get - blocks with b_blocknr equal to newb...newb+count-1
265 * also we suppose that oldb...oldb+count-1 blocks
266 * situated at the end of file.
268 * We can come here from ufs_writepage or ufs_prepare_write,
269 * locked_page is argument of these functions, so we already lock it.
/*
 * Renumber the buffer heads that back blocks oldb..oldb+count-1 of
 * 'inode' so they point at newb..newb+count-1 instead, marking each
 * moved buffer dirty. Per the header comment above, the old blocks
 * are assumed to sit at the end of the file; 'locked_page' is already
 * locked by the caller (ufs_writepage / ufs_prepare_write).
 *
 * NOTE(review): excerpt omits the opening brace, some locals (i, j,
 * baseblk, page), error handling for ufs_get_locked_page, and the
 * do-loop header of the buffer walk.
 */
271 static void ufs_change_blocknr(struct inode *inode, unsigned int count,
272 unsigned int oldb, unsigned int newb,
273 struct page *locked_page)
275 unsigned int blk_per_page = 1 << (PAGE_CACHE_SHIFT - inode->i_blkbits);
277 struct address_space *mapping = inode->i_mapping;
278 pgoff_t index, cur_index = locked_page->index;
281 struct buffer_head *head, *bh;
/* First file block of the moved tail: last block index minus count-1. */
283 baseblk = ((i_size_read(inode) - 1) >> inode->i_blkbits) + 1 - count;
285 UFSD("ENTER, ino %lu, count %u, oldb %u, newb %u\n",
286 inode->i_ino, count, oldb, newb);
288 BUG_ON(!PageLocked(locked_page));
/* Walk the affected range one page at a time. */
290 for (i = 0; i < count; i += blk_per_page) {
291 index = (baseblk+i) >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
/* Lock each page ourselves, except the one the caller already holds. */
293 if (likely(cur_index != index)) {
294 page = ufs_get_locked_page(mapping, index);
301 head = page_buffers(page);
/* Only retarget buffers whose b_blocknr falls in the old range. */
304 if (likely(bh->b_blocknr == j + oldb && j < count)) {
305 unmap_underlying_metadata(bh->b_bdev,
307 bh->b_blocknr = newb + j++;
308 mark_buffer_dirty(bh);
311 bh = bh->b_this_page;
312 } while (bh != head);
314 set_page_dirty(page);
316 if (likely(cur_index != index)) {
318 page_cache_release(page);
/*
 * Main fragment allocator. Allocate 'count' fragments for 'fragment'
 * of 'inode'; *p is the on-disk pointer (0 if nothing allocated yet).
 * Three strategies, in order:
 *   1. nothing allocated yet (tmp == 0)  -> ufs_alloc_fragments();
 *   2. existing run can grow in place    -> ufs_add_fragments();
 *   3. otherwise allocate a new area, move the data there via
 *      ufs_change_blocknr(), and free the old fragments.
 * Returns the first allocated fragment number, or 0 on failure with
 * *err set. On success, i_lastfrag is advanced to cover the new tail.
 *
 * NOTE(review): excerpt omits braces, locking, several returns and
 * the '*err = -ENOSPC' assignments; the switch cases for fs_optim
 * (UFS_OPTSPACE/UFS_OPTTIME) are also partially elided.
 */
324 unsigned ufs_new_fragments(struct inode * inode, __fs32 * p, unsigned fragment,
325 unsigned goal, unsigned count, int * err, struct page *locked_page)
327 struct super_block * sb;
328 struct ufs_sb_private_info * uspi;
329 struct ufs_super_block_first * usb1;
330 unsigned cgno, oldcount, newcount, tmp, request, result;
332 UFSD("ENTER, ino %lu, fragment %u, goal %u, count %u\n", inode->i_ino, fragment, goal, count);
335 uspi = UFS_SB(sb)->s_uspi;
336 usb1 = ubh_get_usb_first(uspi);
341 tmp = fs32_to_cpu(sb, *p);
/* Clamp a request that would spill past the block boundary. */
342 if (count + ufs_fragnum(fragment) > uspi->s_fpb) {
343 ufs_warning (sb, "ufs_new_fragments", "internal warning"
344 " fragment %u, count %u", fragment, count);
345 count = uspi->s_fpb - ufs_fragnum(fragment);
347 oldcount = ufs_fragnum (fragment);
348 newcount = oldcount + count;
351 * Somebody else has just allocated our fragments
355 ufs_error (sb, "ufs_new_fragments", "internal error, "
356 "fragment %u, tmp %u\n", fragment, tmp);
/* Fragment below i_lastfrag: it was already allocated by someone. */
360 if (fragment < UFS_I(inode)->i_lastfrag) {
361 UFSD("EXIT (ALREADY ALLOCATED)\n");
368 UFSD("EXIT (ALREADY ALLOCATED)\n");
375 * There is not enough space for user on the device
/* Reserve UFS_MINFREE percent for root (CAP_SYS_RESOURCE). */
377 if (!capable(CAP_SYS_RESOURCE) && ufs_freespace(usb1, UFS_MINFREE) <= 0) {
379 UFSD("EXIT (FAILED)\n");
/* Pick the starting cylinder group from goal, or from the inode. */
383 if (goal >= uspi->s_size)
386 cgno = ufs_inotocg (inode->i_ino);
388 cgno = ufs_dtog (goal);
391 * allocate new fragment
/* Case 1: first allocation for this block pointer. */
394 result = ufs_alloc_fragments (inode, cgno, goal, count, err);
396 *p = cpu_to_fs32(sb, result);
398 UFS_I(inode)->i_lastfrag = max_t(u32, UFS_I(inode)->i_lastfrag, fragment + count);
401 UFSD("EXIT, result %u\n", result);
/* Case 2: try to grow the existing run in place. */
408 result = ufs_add_fragments (inode, tmp, oldcount, newcount, err);
411 UFS_I(inode)->i_lastfrag = max_t(u32, UFS_I(inode)->i_lastfrag, fragment + count);
413 UFSD("EXIT, result %u\n", result);
418 * allocate new block and move data
/* fs_optim picks space-vs-time policy for how much to request. */
420 switch (fs32_to_cpu(sb, usb1->fs_optim)) {
423 if (uspi->s_minfree < 5 || fs32_to_cpu(sb, usb1->fs_cstotal.cs_nffree)
424 > uspi->s_dsize * uspi->s_minfree / (2 * 100) )
426 usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
429 usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
432 request = uspi->s_fpb;
433 if (fs32_to_cpu(sb, usb1->fs_cstotal.cs_nffree) < uspi->s_dsize *
434 (uspi->s_minfree - 2) / 100)
436 usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
/* Case 3: allocate elsewhere, retarget the cached buffers, free old. */
439 result = ufs_alloc_fragments (inode, cgno, goal, request, err);
441 ufs_change_blocknr(inode, oldcount, tmp, result, locked_page);
443 *p = cpu_to_fs32(sb, result);
445 UFS_I(inode)->i_lastfrag = max_t(u32, UFS_I(inode)->i_lastfrag, fragment + count);
/* Give back the over-requested tail, then the old fragments. */
447 if (newcount < request)
448 ufs_free_fragments (inode, result + newcount, request - newcount);
449 ufs_free_fragments (inode, tmp, oldcount);
450 UFSD("EXIT, result %u\n", result);
455 UFSD("EXIT (FAILED)\n");
/*
 * Try to extend the fragment run at 'fragment' from 'oldcount' to
 * 'newcount' fragments in place, without moving data. Succeeds only
 * if the fragments immediately after the run are free in this block.
 * Returns the (unchanged) fragment number on success, 0 on failure.
 *
 * NOTE(review): excerpt omits the return-type line, braces, and
 * several returns/error paths; the cg_time line above belongs under
 * a "block can be extended" success path.
 */
460 ufs_add_fragments (struct inode * inode, unsigned fragment,
461 unsigned oldcount, unsigned newcount, int * err)
463 struct super_block * sb;
464 struct ufs_sb_private_info * uspi;
465 struct ufs_super_block_first * usb1;
466 struct ufs_cg_private_info * ucpi;
467 struct ufs_cylinder_group * ucg;
468 unsigned cgno, fragno, fragoff, count, fragsize, i;
470 UFSD("ENTER, fragment %u, oldcount %u, newcount %u\n", fragment, oldcount, newcount);
473 uspi = UFS_SB(sb)->s_uspi;
474 usb1 = ubh_get_usb_first (uspi);
475 count = newcount - oldcount;
/* Quick rejects: cg has too few free fragments, or run would cross a block. */
477 cgno = ufs_dtog(fragment);
478 if (fs32_to_cpu(sb, UFS_SB(sb)->fs_cs(cgno).cs_nffree) < count)
480 if ((ufs_fragnum (fragment) + newcount) > uspi->s_fpb)
482 ucpi = ufs_load_cylinder (sb, cgno);
485 ucg = ubh_get_ucg (UCPI_UBH(ucpi));
486 if (!ufs_cg_chkmagic(sb, ucg)) {
487 ufs_panic (sb, "ufs_add_fragments",
488 "internal error, bad magic number on cg %u", cgno);
492 fragno = ufs_dtogd (fragment);
493 fragoff = ufs_fragnum (fragno);
/* The fragments we want to grow into must all be free. */
494 for (i = oldcount; i < newcount; i++)
495 if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i))
498 * Block can be extended
500 ucg->cg_time = cpu_to_fs32(sb, get_seconds());
/* Measure the free run after the new tail to fix up cg_frsum. */
501 for (i = newcount; i < (uspi->s_fpb - fragoff); i++)
502 if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i))
504 fragsize = i - oldcount;
505 if (!fs32_to_cpu(sb, ucg->cg_frsum[fragsize]))
506 ufs_panic (sb, "ufs_add_fragments",
507 "internal error or corrupted bitmap on cg %u", cgno);
/* The old free-run entry goes away; a shorter one may remain. */
508 fs32_sub(sb, &ucg->cg_frsum[fragsize], 1);
509 if (fragsize != count)
510 fs32_add(sb, &ucg->cg_frsum[fragsize - count], 1);
/* Claim the extension fragments in the free map. */
511 for (i = oldcount; i < newcount; i++)
512 ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i);
513 if(DQUOT_ALLOC_BLOCK(inode, count)) {
518 fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
519 fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
520 fs32_sub(sb, &usb1->fs_cstotal.cs_nffree, count);
522 ubh_mark_buffer_dirty (USPI_UBH(uspi));
523 ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
/* Synchronous mounts: write the cg buffers before returning. */
524 if (sb->s_flags & MS_SYNCHRONOUS) {
525 ubh_ll_rw_block (SWRITE, 1, (struct ufs_buffer_head **)&ucpi);
526 ubh_wait_on_buffer (UCPI_UBH(ucpi));
530 UFSD("EXIT, fragment %u\n", fragment);
/*
 * Check whether cylinder group 'cgno' can satisfy the current request:
 * a whole free block (cs_nbfree), or a free-fragment run of at least
 * 'count' fragments (cg_frsum[k] for k >= count). Expands in
 * ufs_alloc_fragments where sb/uspi/ucg/cgno/count/k are in scope.
 * NOTE(review): excerpt omits continuation lines (the goto targets).
 */
535 #define UFS_TEST_FREE_SPACE_CG \
536 ucg = (struct ufs_cylinder_group *) UFS_SB(sb)->s_ucg[cgno]->b_data; \
537 if (fs32_to_cpu(sb, ucg->cg_cs.cs_nbfree)) \
539 for (k = count; k < uspi->s_fpb; k++) \
540 if (fs32_to_cpu(sb, ucg->cg_frsum[k])) \
/*
 * Allocate 'count' fragments near 'goal', preferring cylinder group
 * 'cgno'. Searches cgs in the classic FFS order: preferred cg,
 * quadratic rehash, then brute force. Within the chosen cg it either
 * takes a whole block (count == s_fpb, or no suitable run exists and
 * the block's tail is freed back) or carves a run out of the bitmap
 * via ufs_bitmap_search. Returns the absolute fragment number, or 0
 * on failure (with *err set on paths elided from this excerpt).
 *
 * NOTE(review): excerpt omits braces, 'oldcg = cgno', cgno stepping
 * in the rehash/brute-force loops, and several returns/labels.
 */
543 static unsigned ufs_alloc_fragments (struct inode * inode, unsigned cgno,
544 unsigned goal, unsigned count, int * err)
546 struct super_block * sb;
547 struct ufs_sb_private_info * uspi;
548 struct ufs_super_block_first * usb1;
549 struct ufs_cg_private_info * ucpi;
550 struct ufs_cylinder_group * ucg;
551 unsigned oldcg, i, j, k, result, allocsize;
553 UFSD("ENTER, ino %lu, cgno %u, goal %u, count %u\n", inode->i_ino, cgno, goal, count);
556 uspi = UFS_SB(sb)->s_uspi;
557 usb1 = ubh_get_usb_first(uspi);
561 * 1. searching on preferred cylinder group
563 UFS_TEST_FREE_SPACE_CG
566 * 2. quadratic rehash
568 for (j = 1; j < uspi->s_ncg; j *= 2) {
570 if (cgno >= uspi->s_ncg)
572 UFS_TEST_FREE_SPACE_CG
576 * 3. brute force search
577 * We start at i = 2 ( 0 is checked at 1.step, 1 at 2.step )
579 cgno = (oldcg + 1) % uspi->s_ncg;
580 for (j = 2; j < uspi->s_ncg; j++) {
582 if (cgno >= uspi->s_ncg)
584 UFS_TEST_FREE_SPACE_CG
587 UFSD("EXIT (FAILED)\n");
/* A cg with space was found; load and validate it. */
591 ucpi = ufs_load_cylinder (sb, cgno);
594 ucg = ubh_get_ucg (UCPI_UBH(ucpi));
595 if (!ufs_cg_chkmagic(sb, ucg))
596 ufs_panic (sb, "ufs_alloc_fragments",
597 "internal error, bad magic number on cg %u", cgno);
598 ucg->cg_time = cpu_to_fs32(sb, get_seconds());
/* Exact block request: allocate a whole block directly. */
600 if (count == uspi->s_fpb) {
601 result = ufs_alloccg_block (inode, ucpi, goal, err);
602 if (result == (unsigned)-1)
/* Smallest available free run that fits the request. */
607 for (allocsize = count; allocsize < uspi->s_fpb; allocsize++)
608 if (fs32_to_cpu(sb, ucg->cg_frsum[allocsize]) != 0)
/* No run fits: take a whole block and free back its unused tail. */
611 if (allocsize == uspi->s_fpb) {
612 result = ufs_alloccg_block (inode, ucpi, goal, err);
613 if (result == (unsigned)-1)
615 goal = ufs_dtogd (result);
616 for (i = count; i < uspi->s_fpb; i++)
617 ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, goal + i);
618 i = uspi->s_fpb - count;
619 DQUOT_FREE_BLOCK(inode, i);
621 fs32_add(sb, &ucg->cg_cs.cs_nffree, i);
622 fs32_add(sb, &usb1->fs_cstotal.cs_nffree, i);
623 fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, i);
624 fs32_add(sb, &ucg->cg_frsum[i], 1);
/* Normal case: carve 'count' fragments out of a run of 'allocsize'. */
628 result = ufs_bitmap_search (sb, ucpi, goal, allocsize);
629 if (result == (unsigned)-1)
631 if(DQUOT_ALLOC_BLOCK(inode, count)) {
635 for (i = 0; i < count; i++)
636 ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, result + i);
638 fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
639 fs32_sub(sb, &usb1->fs_cstotal.cs_nffree, count);
640 fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
641 fs32_sub(sb, &ucg->cg_frsum[allocsize], 1);
/* The leftover part of the run becomes a shorter run in cg_frsum. */
643 if (count != allocsize)
644 fs32_add(sb, &ucg->cg_frsum[allocsize - count], 1);
647 ubh_mark_buffer_dirty (USPI_UBH(uspi));
648 ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
649 if (sb->s_flags & MS_SYNCHRONOUS) {
650 ubh_ll_rw_block (SWRITE, 1, (struct ufs_buffer_head **)&ucpi);
651 ubh_wait_on_buffer (UCPI_UBH(ucpi));
/* Convert the cg-relative fragment number to an absolute one. */
655 result += cgno * uspi->s_fpg;
656 UFSD("EXIT3, result %u\n", result);
/*
 * Allocate one whole block inside cylinder group 'ucpi', as close to
 * 'goal' as possible (falling back to the cg rotor, then a bitmap
 * search). Clears the block in the free map, does quota and counter
 * bookkeeping, and returns the cg-relative fragment number of the
 * block, or (unsigned)-1 on failure (per the caller's checks).
 *
 * NOTE(review): excerpt omits braces, the 'gotit' label after the
 * goal-is-free check, and the quota-failure path body.
 */
660 static unsigned ufs_alloccg_block (struct inode * inode,
661 struct ufs_cg_private_info * ucpi, unsigned goal, int * err)
663 struct super_block * sb;
664 struct ufs_sb_private_info * uspi;
665 struct ufs_super_block_first * usb1;
666 struct ufs_cylinder_group * ucg;
667 unsigned result, cylno, blkno;
669 UFSD("ENTER, goal %u\n", goal);
672 uspi = UFS_SB(sb)->s_uspi;
673 usb1 = ubh_get_usb_first(uspi);
674 ucg = ubh_get_ucg(UCPI_UBH(ucpi));
/* No goal given: continue from where the last search left off. */
677 goal = ucpi->c_rotor;
/* Round the goal down to a block boundary, cg-relative. */
680 goal = ufs_blknum (goal);
681 goal = ufs_dtogd (goal);
684 * If the requested block is available, use it.
686 if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, ufs_fragstoblks(goal))) {
/* Goal block busy: scan the bitmap for any free block (s_fpb run). */
692 result = ufs_bitmap_search (sb, ucpi, goal, uspi->s_fpb);
693 if (result == (unsigned)-1)
695 ucpi->c_rotor = result;
697 blkno = ufs_fragstoblks(result);
698 ubh_clrblock (UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
699 if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
700 ufs_clusteracct (sb, ucpi, blkno, -1);
701 if(DQUOT_ALLOC_BLOCK(inode, uspi->s_fpb)) {
706 fs32_sub(sb, &ucg->cg_cs.cs_nbfree, 1);
707 fs32_sub(sb, &usb1->fs_cstotal.cs_nbfree, 1);
708 fs32_sub(sb, &UFS_SB(sb)->fs_cs(ucpi->c_cgx).cs_nbfree, 1);
/* Cylinder / rotational-position statistics for the taken block. */
709 cylno = ufs_cbtocylno(result);
710 fs16_sub(sb, &ubh_cg_blks(ucpi, cylno, ufs_cbtorpos(result)), 1);
711 fs32_sub(sb, &ubh_cg_blktot(ucpi, cylno), 1);
713 UFSD("EXIT, result %u\n", result);
/*
 * Scan 'size' bytes of the ufs_buffer_head starting at byte offset
 * 'begin' for the first byte whose 'table' entry has any bit of
 * 'mask' set. Handles the data being split across the ubh's
 * underlying buffers (b_data chunks of s_fsize bytes each).
 * Returns the number of bytes left un-scanned (0 == nothing found),
 * mirroring the BSD scanc() convention used by ufs_bitmap_search.
 *
 * NOTE(review): excerpt omits braces, the 'cp' declaration, the outer
 * loop over buffers, and the size/begin advance between chunks.
 */
718 static unsigned ubh_scanc(struct ufs_sb_private_info *uspi,
719 struct ufs_buffer_head *ubh,
720 unsigned begin, unsigned size,
721 unsigned char *table, unsigned char mask)
723 unsigned rest, offset;
/* Split 'begin' into buffer index (>> s_fshift) and byte offset. */
727 offset = begin & ~uspi->s_fmask;
728 begin >>= uspi->s_fshift;
/* 'rest' = how many bytes of this chunk we will scan. */
730 if ((offset + size) < uspi->s_fsize)
733 rest = uspi->s_fsize - offset;
735 cp = ubh->bh[begin]->b_data + offset;
/* Stop at the first byte whose table entry intersects 'mask'. */
736 while ((table[*cp++] & mask) == 0 && --rest)
743 return (size + rest);
747 * Find a block of the specified size in the specified cylinder group.
748 * @sp: pointer to super block
749 * @ucpi: pointer to cylinder group info
750 * @goal: near which block we want find new one
751 * @count: specified size
/*
 * Find a free run of 'count' fragments in cylinder group 'ucpi',
 * near 'goal' (or from the cg's fragment rotor when no goal).
 * Two phases: (1) byte-wise scan of the free map with ubh_scanc and
 * a fragtable to find a byte containing a suitable run, retrying
 * from the map start if the forward scan fails; (2) bit-wise check
 * of that byte's blocks with mask_arr/want_arr to pin the exact
 * position. Returns the cg-relative fragment number, or (unsigned)-1
 * implicitly via the error path (body elided in this excerpt).
 *
 * NOTE(review): excerpt omits braces, the goal test before 'start',
 * the retry branch structure, and the returns.
 */
753 static unsigned ufs_bitmap_search(struct super_block *sb,
754 struct ufs_cg_private_info *ucpi,
755 unsigned goal, unsigned count)
758 * Bit patterns for identifying fragments in the block map
759 * used as ((map & mask_arr) == want_arr)
761 static const int mask_arr[9] = {
762 0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f, 0xff, 0x1ff, 0x3ff
764 static const int want_arr[9] = {
765 0x0, 0x2, 0x6, 0xe, 0x1e, 0x3e, 0x7e, 0xfe, 0x1fe
767 struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
768 struct ufs_super_block_first *usb1;
769 struct ufs_cylinder_group *ucg;
770 unsigned start, length, loc, result;
771 unsigned pos, want, blockmap, mask, end;
773 UFSD("ENTER, cg %u, goal %u, count %u\n", ucpi->c_cgx, goal, count);
775 usb1 = ubh_get_usb_first (uspi);
776 ucg = ubh_get_ucg(UCPI_UBH(ucpi));
/* Start the byte scan at the goal's map byte, else at the rotor. */
779 start = ufs_dtogd(goal) >> 3;
781 start = ucpi->c_frotor >> 3;
783 length = ((uspi->s_fpg + 7) >> 3) - start;
/* Forward scan from 'start' to the end of the map. */
784 loc = ubh_scanc(uspi, UCPI_UBH(ucpi), ucpi->c_freeoff + start, length,
785 (uspi->s_fpb == 8) ? ufs_fragtable_8fpb : ufs_fragtable_other,
786 1 << (count - 1 + (uspi->s_fpb & 7)));
/* Nothing found: retry from the beginning of the map. */
789 loc = ubh_scanc(uspi, UCPI_UBH(ucpi), ucpi->c_freeoff, length,
790 (uspi->s_fpb == 8) ? ufs_fragtable_8fpb :
792 1 << (count - 1 + (uspi->s_fpb & 7)));
794 ufs_error(sb, "ufs_bitmap_search",
795 "bitmap corrupted on cg %u, start %u,"
796 " length %u, count %u, freeoff %u\n",
797 ucpi->c_cgx, start, length, count,
/* ubh_scanc returns bytes left; convert to a fragment number. */
803 result = (start + length - loc) << 3;
804 ucpi->c_frotor = result;
807 * found the byte in the map
/* Bit-level pass: test each block overlapping the found byte. */
810 for (end = result + 8; result < end; result += uspi->s_fpb) {
811 blockmap = ubh_blkmap(UCPI_UBH(ucpi), ucpi->c_freeoff, result);
813 mask = mask_arr[count];
814 want = want_arr[count];
/* Slide the run pattern across the block's fragment positions. */
815 for (pos = 0; pos <= uspi->s_fpb - count; pos++) {
816 if ((blockmap & mask) == want) {
817 UFSD("EXIT, result %u\n", result);
825 ufs_error(sb, "ufs_bitmap_search", "block not in map on cg %u\n",
827 UFSD("EXIT (FAILED)\n");
/*
 * 44BSD cluster accounting: after block 'blkno' is freed (cnt > 0)
 * or allocated (cnt < 0), update the cluster bitmap and the cluster
 * summary array. Measures the contiguous free cluster extending
 * forward and backward from blkno and moves counts between the
 * summary buckets accordingly. No-op when clustering is disabled
 * (s_contigsumsize <= 0).
 *
 * NOTE(review): excerpt omits braces, the cnt sign test around
 * setbit/clrbit, the forw/back computations from the search results,
 * and the merged-cluster bucket index 'i' calculation.
 */
831 static void ufs_clusteracct(struct super_block * sb,
832 struct ufs_cg_private_info * ucpi, unsigned blkno, int cnt)
834 struct ufs_sb_private_info * uspi;
835 int i, start, end, forw, back;
837 uspi = UFS_SB(sb)->s_uspi;
838 if (uspi->s_contigsumsize <= 0)
/* Mark the block free (set) or busy (clear) in the cluster map. */
842 ubh_setbit(UCPI_UBH(ucpi), ucpi->c_clusteroff, blkno);
844 ubh_clrbit(UCPI_UBH(ucpi), ucpi->c_clusteroff, blkno);
847 * Find the size of the cluster going forward.
850 end = start + uspi->s_contigsumsize;
851 if ( end >= ucpi->c_nclusterblks)
852 end = ucpi->c_nclusterblks;
853 i = ubh_find_next_zero_bit (UCPI_UBH(ucpi), ucpi->c_clusteroff, end, start);
859 * Find the size of the cluster going backward.
862 end = start - uspi->s_contigsumsize;
865 i = ubh_find_last_zero_bit (UCPI_UBH(ucpi), ucpi->c_clusteroff, start, end);
871 * Account for old cluster and the possibly new forward and
/* Cap the merged cluster size at the summary's largest bucket. */
875 if (i > uspi->s_contigsumsize)
876 i = uspi->s_contigsumsize;
/* Move counts: add the merged cluster, remove the old sub-clusters. */
877 fs32_add(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (i << 2)), cnt);
879 fs32_sub(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (back << 2)), cnt);
881 fs32_sub(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (forw << 2)), cnt);
/*
 * Free-run lookup table for filesystems with 8 fragments per block:
 * indexed by one free-map byte, each entry is a bitmask of the free
 * run lengths present in that byte (bit k-1 set == a run of k free
 * fragments exists). Selected in ufs_bitmap_search when
 * uspi->s_fpb == 8 and consumed by ubh_scanc's table[*cp] & mask test.
 * NOTE(review): the closing brace line is elided from this excerpt.
 */
885 static unsigned char ufs_fragtable_8fpb[] = {
886 0x00, 0x01, 0x01, 0x02, 0x01, 0x01, 0x02, 0x04, 0x01, 0x01, 0x01, 0x03, 0x02, 0x03, 0x04, 0x08,
887 0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x02, 0x03, 0x03, 0x02, 0x04, 0x05, 0x08, 0x10,
888 0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
889 0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x04, 0x05, 0x05, 0x06, 0x08, 0x09, 0x10, 0x20,
890 0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
891 0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11,
892 0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0A,
893 0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, 0x08, 0x09, 0x09, 0x0A, 0x10, 0x11, 0x20, 0x40,
894 0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
895 0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11,
896 0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
897 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x05, 0x05, 0x05, 0x07, 0x09, 0x09, 0x11, 0x21,
898 0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0A,
899 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x02, 0x03, 0x03, 0x02, 0x06, 0x07, 0x0A, 0x12,
900 0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, 0x05, 0x05, 0x05, 0x07, 0x06, 0x07, 0x04, 0x0C,
901 0x08, 0x09, 0x09, 0x0A, 0x09, 0x09, 0x0A, 0x0C, 0x10, 0x11, 0x11, 0x12, 0x20, 0x21, 0x40, 0x80,
904 static unsigned char ufs_fragtable_other[] = {
905 0x00, 0x16, 0x16, 0x2A, 0x16, 0x16, 0x26, 0x4E, 0x16, 0x16, 0x16, 0x3E, 0x2A, 0x3E, 0x4E, 0x8A,
906 0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
907 0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
908 0x2A, 0x3E, 0x3E, 0x2A, 0x3E, 0x3E, 0x2E, 0x6E, 0x3E, 0x3E, 0x3E, 0x3E, 0x2A, 0x3E, 0x6E, 0xAA,
909 0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
910 0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
911 0x26, 0x36, 0x36, 0x2E, 0x36, 0x36, 0x26, 0x6E, 0x36, 0x36, 0x36, 0x3E, 0x2E, 0x3E, 0x6E, 0xAE,
912 0x4E, 0x5E, 0x5E, 0x6E, 0x5E, 0x5E, 0x6E, 0x4E, 0x5E, 0x5E, 0x5E, 0x7E, 0x6E, 0x7E, 0x4E, 0xCE,
913 0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
914 0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
915 0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
916 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0xBE,
917 0x2A, 0x3E, 0x3E, 0x2A, 0x3E, 0x3E, 0x2E, 0x6E, 0x3E, 0x3E, 0x3E, 0x3E, 0x2A, 0x3E, 0x6E, 0xAA,
918 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0xBE,
919 0x4E, 0x5E, 0x5E, 0x6E, 0x5E, 0x5E, 0x6E, 0x4E, 0x5E, 0x5E, 0x5E, 0x7E, 0x6E, 0x7E, 0x4E, 0xCE,
920 0x8A, 0x9E, 0x9E, 0xAA, 0x9E, 0x9E, 0xAE, 0xCE, 0x9E, 0x9E, 0x9E, 0xBE, 0xAA, 0xBE, 0xCE, 0x8A,