2 * linux/fs/ufs/balloc.c
5 * Daniel Pirkl <daniel.pirkl@email.cz>
6 * Charles University, Faculty of Mathematics and Physics
10 #include <linux/ufs_fs.h>
11 #include <linux/stat.h>
12 #include <linux/time.h>
13 #include <linux/string.h>
14 #include <linux/quotaops.h>
15 #include <linux/buffer_head.h>
16 #include <linux/capability.h>
17 #include <linux/sched.h>
18 #include <linux/bitops.h>
19 #include <asm/byteorder.h>
/* Debug tracing is compiled out by default. */
24 #undef UFS_BALLOC_DEBUG
26 #ifdef UFS_BALLOC_DEBUG
/*
 * UFSD(x): print file/line/function, then the caller-supplied printk args.
 * NOTE(review): two statements without a do { } while (0) wrapper — a
 * classic macro hazard under an unbraced 'if'.  It cannot be wrapped here
 * without breaking call sites, which invoke UFSD((...)) with no trailing
 * semicolon (the macro's own ';' supplies it).
 */
27 #define UFSD(x) printk("(%s, %d), %s:", __FILE__, __LINE__, __FUNCTION__); printk x;
/* Forward declarations for the file-local allocator helpers and the
 * fragment-run lookup tables defined at the bottom of this file. */
32 static unsigned ufs_add_fragments (struct inode *, unsigned, unsigned, unsigned, int *);
33 static unsigned ufs_alloc_fragments (struct inode *, unsigned, unsigned, unsigned, int *);
34 static unsigned ufs_alloccg_block (struct inode *, struct ufs_cg_private_info *, unsigned, int *);
35 static unsigned ufs_bitmap_search (struct super_block *, struct ufs_cg_private_info *, unsigned, unsigned);
36 static unsigned char ufs_fragtable_8fpb[], ufs_fragtable_other[];
37 static void ufs_clusteracct(struct super_block *, struct ufs_cg_private_info *, unsigned, int);
/*
 * Free 'count' fragments starting at 'fragment': set the corresponding bits
 * in the cylinder group's free map, credit the quota and the per-cg /
 * superblock free-fragment counters, then try to coalesce the containing
 * block back into a wholly free block.
 * NOTE(review): interior lines of this function are elided in this excerpt;
 * comments below describe only the visible statements.
 */
40 * Free 'count' fragments from fragment number 'fragment'
42 void ufs_free_fragments (struct inode * inode, unsigned fragment, unsigned count) {
43 struct super_block * sb;
44 struct ufs_sb_private_info * uspi;
45 struct ufs_super_block_first * usb1;
46 struct ufs_cg_private_info * ucpi;
47 struct ufs_cylinder_group * ucg;
48 unsigned cgno, bit, end_bit, bbase, blkmap, i, blkno, cylno;
51 uspi = UFS_SB(sb)->s_uspi;
52 usb1 = ubh_get_usb_first(uspi);
54 UFSD(("ENTER, fragment %u, count %u\n", fragment, count))
/* Sanity check: the run being freed must fit inside the group
 * (s_fpg is presumably fragments-per-group — confirm against ufs_fs.h). */
56 if (ufs_fragnum(fragment) + count > uspi->s_fpg)
57 ufs_error (sb, "ufs_free_fragments", "internal error");
/* Locate the cylinder group and the bit offset of 'fragment' within it. */
61 cgno = ufs_dtog(fragment);
62 bit = ufs_dtogd(fragment);
63 if (cgno >= uspi->s_ncg) {
64 ufs_panic (sb, "ufs_free_fragments", "freeing blocks are outside device");
68 ucpi = ufs_load_cylinder (sb, cgno);
71 ucg = ubh_get_ucg (UCPI_UBH);
72 if (!ufs_cg_chkmagic(sb, ucg)) {
73 ufs_panic (sb, "ufs_free_fragments", "internal error, bad magic number on cg %u", cgno);
/* Un-account the containing block's old fragment map before flipping bits. */
77 end_bit = bit + count;
78 bbase = ufs_blknum (bit);
79 blkmap = ubh_blkmap (UCPI_UBH, ucpi->c_freeoff, bbase);
80 ufs_fragacct (sb, blkmap, ucg->cg_frsum, -1);
/* Mark each fragment free; a bit that is already set means a double free. */
81 for (i = bit; i < end_bit; i++) {
82 if (ubh_isclr (UCPI_UBH, ucpi->c_freeoff, i))
83 ubh_setbit (UCPI_UBH, ucpi->c_freeoff, i);
85 ufs_error (sb, "ufs_free_fragments",
86 "bit already cleared for fragment %u", i);
89 DQUOT_FREE_BLOCK (inode, count);
/* Credit the free-fragment counters, then re-account the new fragment map. */
92 fs32_add(sb, &ucg->cg_cs.cs_nffree, count);
93 fs32_add(sb, &usb1->fs_cstotal.cs_nffree, count);
94 fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
95 blkmap = ubh_blkmap (UCPI_UBH, ucpi->c_freeoff, bbase);
96 ufs_fragacct(sb, blkmap, ucg->cg_frsum, 1);
99 * Trying to reassemble free fragments into block
101 blkno = ufs_fragstoblks (bbase);
102 if (ubh_isblockset(UCPI_UBH, ucpi->c_freeoff, blkno)) {
/* The whole block is now free: convert s_fpb fragments' worth of
 * free-fragment accounting into one free block, and update the
 * per-cylinder rotational-position tallies. */
103 fs32_sub(sb, &ucg->cg_cs.cs_nffree, uspi->s_fpb);
104 fs32_sub(sb, &usb1->fs_cstotal.cs_nffree, uspi->s_fpb);
105 fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, uspi->s_fpb);
106 if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
107 ufs_clusteracct (sb, ucpi, blkno, 1);
108 fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
109 fs32_add(sb, &usb1->fs_cstotal.cs_nbfree, 1);
110 fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);
111 cylno = ufs_cbtocylno (bbase);
112 fs16_add(sb, &ubh_cg_blks(ucpi, cylno, ufs_cbtorpos(bbase)), 1);
113 fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
/* Write back the dirtied cg buffers; synchronously if mounted 'sync'. */
116 ubh_mark_buffer_dirty (USPI_UBH);
117 ubh_mark_buffer_dirty (UCPI_UBH);
118 if (sb->s_flags & MS_SYNCHRONOUS) {
119 ubh_ll_rw_block (SWRITE, 1, (struct ufs_buffer_head **)&ucpi);
120 ubh_wait_on_buffer (UCPI_UBH);
130 UFSD(("EXIT (FAILED)\n"))
/*
 * Free 'count' fragments worth of whole blocks starting at 'fragment'.
 * Both 'fragment' and 'count' must be block-aligned (multiples of s_fpb);
 * each block is returned to the cg free map and the block counters are
 * credited.  A range crossing a group boundary is clamped ('overflow').
 * NOTE(review): interior lines are elided in this excerpt.
 */
135 * Free 'count' fragments from fragment number 'fragment' (free whole blocks)
137 void ufs_free_blocks (struct inode * inode, unsigned fragment, unsigned count) {
138 struct super_block * sb;
139 struct ufs_sb_private_info * uspi;
140 struct ufs_super_block_first * usb1;
141 struct ufs_cg_private_info * ucpi;
142 struct ufs_cylinder_group * ucg;
143 unsigned overflow, cgno, bit, end_bit, blkno, i, cylno;
146 uspi = UFS_SB(sb)->s_uspi;
147 usb1 = ubh_get_usb_first(uspi);
149 UFSD(("ENTER, fragment %u, count %u\n", fragment, count))
/* Reject any range that is not aligned to a whole block. */
151 if ((fragment & uspi->s_fpbmask) || (count & uspi->s_fpbmask)) {
152 ufs_error (sb, "ufs_free_blocks", "internal error, "
153 "fragment %u, count %u\n", fragment, count);
161 cgno = ufs_dtog (fragment);
162 bit = ufs_dtogd (fragment);
163 if (cgno >= uspi->s_ncg) {
164 ufs_panic (sb, "ufs_free_blocks", "freeing blocks are outside device");
/* If the range runs past this group, remember the excess for later. */
167 end_bit = bit + count;
168 if (end_bit > uspi->s_fpg) {
169 overflow = bit + count - uspi->s_fpg;
174 ucpi = ufs_load_cylinder (sb, cgno);
177 ucg = ubh_get_ucg (UCPI_UBH);
178 if (!ufs_cg_chkmagic(sb, ucg)) {
179 ufs_panic (sb, "ufs_free_blocks", "internal error, bad magic number on cg %u", cgno);
/* Free one block per iteration; an already-set block bit is a double free. */
183 for (i = bit; i < end_bit; i += uspi->s_fpb) {
184 blkno = ufs_fragstoblks(i);
185 if (ubh_isblockset(UCPI_UBH, ucpi->c_freeoff, blkno)) {
186 ufs_error(sb, "ufs_free_blocks", "freeing free fragment");
188 ubh_setblock(UCPI_UBH, ucpi->c_freeoff, blkno);
189 if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
190 ufs_clusteracct (sb, ucpi, blkno, 1);
191 DQUOT_FREE_BLOCK(inode, uspi->s_fpb);
/* Credit free-block counters and per-cylinder rotational tallies. */
193 fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
194 fs32_add(sb, &usb1->fs_cstotal.cs_nbfree, 1);
195 fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);
196 cylno = ufs_cbtocylno(i);
197 fs16_add(sb, &ubh_cg_blks(ucpi, cylno, ufs_cbtorpos(i)), 1);
198 fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
/* Write back the dirtied cg buffers; synchronously if mounted 'sync'. */
201 ubh_mark_buffer_dirty (USPI_UBH);
202 ubh_mark_buffer_dirty (UCPI_UBH);
203 if (sb->s_flags & MS_SYNCHRONOUS) {
204 ubh_ll_rw_block (SWRITE, 1, (struct ufs_buffer_head **)&ucpi);
205 ubh_wait_on_buffer (UCPI_UBH);
221 UFSD(("EXIT (FAILED)\n"))
/*
 * Allocate 'count' fragments for 'fragment' of the inode, updating the
 * on-disk block pointer *p (big/little-endian per-sb).  Three paths are
 * visible below: (1) fresh allocation when *p is empty, (2) in-place growth
 * of the existing run via ufs_add_fragments(), (3) allocating a new, larger
 * run elsewhere and relocating the old buffers to it, then freeing the old
 * run and any over-allocation.  Returns the new fragment number, or 0-ish
 * failure paths signalled through *err (elided here).
 * NOTE(review): interior lines are elided in this excerpt.
 */
226 unsigned ufs_new_fragments (struct inode * inode, __fs32 * p, unsigned fragment,
227 unsigned goal, unsigned count, int * err )
229 struct super_block * sb;
230 struct ufs_sb_private_info * uspi;
231 struct ufs_super_block_first * usb1;
232 struct buffer_head * bh;
233 unsigned cgno, oldcount, newcount, tmp, request, i, result;
235 UFSD(("ENTER, ino %lu, fragment %u, goal %u, count %u\n", inode->i_ino, fragment, goal, count))
238 uspi = UFS_SB(sb)->s_uspi;
239 usb1 = ubh_get_usb_first(uspi);
/* tmp = current on-disk pointer value (0 means not yet allocated). */
244 tmp = fs32_to_cpu(sb, *p);
/* Clamp a request that would overflow the block's fragment capacity. */
245 if (count + ufs_fragnum(fragment) > uspi->s_fpb) {
246 ufs_warning (sb, "ufs_new_fragments", "internal warning"
247 " fragment %u, count %u", fragment, count);
248 count = uspi->s_fpb - ufs_fragnum(fragment);
250 oldcount = ufs_fragnum (fragment);
251 newcount = oldcount + count;
254 * Somebody else has just allocated our fragments
258 ufs_error (sb, "ufs_new_fragments", "internal error, "
259 "fragment %u, tmp %u\n", fragment, tmp);
/* Already allocated if the request lies below the inode's last fragment. */
263 if (fragment < UFS_I(inode)->i_lastfrag) {
264 UFSD(("EXIT (ALREADY ALLOCATED)\n"))
271 UFSD(("EXIT (ALREADY ALLOCATED)\n"))
278 * There is not enough space for user on the device
/* Non-privileged writers must leave UFS_MINFREE percent free. */
280 if (!capable(CAP_SYS_RESOURCE) && ufs_freespace(usb1, UFS_MINFREE) <= 0) {
282 UFSD(("EXIT (FAILED)\n"))
/* Pick a starting cylinder group: from the goal if valid, else the inode's. */
286 if (goal >= uspi->s_size)
289 cgno = ufs_inotocg (inode->i_ino);
291 cgno = ufs_dtog (goal);
294 * allocate new fragment
/* Path 1: no prior allocation — grab a fresh run of fragments. */
297 result = ufs_alloc_fragments (inode, cgno, goal, count, err);
299 *p = cpu_to_fs32(sb, result);
301 inode->i_blocks += count << uspi->s_nspfshift;
302 UFS_I(inode)->i_lastfrag = max_t(u32, UFS_I(inode)->i_lastfrag, fragment + count);
305 UFSD(("EXIT, result %u\n", result))
/* Path 2: try to extend the existing run in place. */
312 result = ufs_add_fragments (inode, tmp, oldcount, newcount, err);
315 inode->i_blocks += count << uspi->s_nspfshift;
316 UFS_I(inode)->i_lastfrag = max_t(u32, UFS_I(inode)->i_lastfrag, fragment + count);
318 UFSD(("EXIT, result %u\n", result))
323 * allocate new block and move data
/* Path 3: size the new request per the fs optimization policy
 * (space vs. time), flipping fs_optim when free space runs low. */
325 switch (fs32_to_cpu(sb, usb1->fs_optim)) {
328 if (uspi->s_minfree < 5 || fs32_to_cpu(sb, usb1->fs_cstotal.cs_nffree)
329 > uspi->s_dsize * uspi->s_minfree / (2 * 100) )
331 usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
334 usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
337 request = uspi->s_fpb;
338 if (fs32_to_cpu(sb, usb1->fs_cstotal.cs_nffree) < uspi->s_dsize *
339 (uspi->s_minfree - 2) / 100)
341 usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
344 result = ufs_alloc_fragments (inode, cgno, goal, request, err);
/* Relocate the old fragments' buffers to the new location by retargeting
 * each buffer_head's b_blocknr and writing it out. */
346 for (i = 0; i < oldcount; i++) {
347 bh = sb_bread(sb, tmp + i);
350 clear_buffer_dirty(bh);
351 bh->b_blocknr = result + i;
352 mark_buffer_dirty (bh);
354 sync_dirty_buffer(bh);
359 printk(KERN_ERR "ufs_new_fragments: bread fail\n");
364 *p = cpu_to_fs32(sb, result);
366 inode->i_blocks += count << uspi->s_nspfshift;
367 UFS_I(inode)->i_lastfrag = max_t(u32, UFS_I(inode)->i_lastfrag, fragment + count);
/* Return the unused tail of the over-allocation, then the old run. */
369 if (newcount < request)
370 ufs_free_fragments (inode, result + newcount, request - newcount);
371 ufs_free_fragments (inode, tmp, oldcount);
372 UFSD(("EXIT, result %u\n", result))
377 UFSD(("EXIT (FAILED)\n"))
/*
 * Try to grow an existing run of fragments in place from 'oldcount' to
 * 'newcount' fragments, without moving data.  Succeeds only if the
 * fragments immediately following the run are still free in the cg map;
 * updates the cg fragment-size summary (cg_frsum), quota, and free counts.
 * NOTE(review): interior lines are elided in this excerpt.
 */
382 ufs_add_fragments (struct inode * inode, unsigned fragment,
383 unsigned oldcount, unsigned newcount, int * err)
385 struct super_block * sb;
386 struct ufs_sb_private_info * uspi;
387 struct ufs_super_block_first * usb1;
388 struct ufs_cg_private_info * ucpi;
389 struct ufs_cylinder_group * ucg;
390 unsigned cgno, fragno, fragoff, count, fragsize, i;
392 UFSD(("ENTER, fragment %u, oldcount %u, newcount %u\n", fragment, oldcount, newcount))
395 uspi = UFS_SB(sb)->s_uspi;
396 usb1 = ubh_get_usb_first (uspi);
397 count = newcount - oldcount;
/* Fail fast if the group lacks enough free fragments, or the grown run
 * would cross a block boundary. */
399 cgno = ufs_dtog(fragment);
400 if (fs32_to_cpu(sb, UFS_SB(sb)->fs_cs(cgno).cs_nffree) < count)
402 if ((ufs_fragnum (fragment) + newcount) > uspi->s_fpb)
404 ucpi = ufs_load_cylinder (sb, cgno);
407 ucg = ubh_get_ucg (UCPI_UBH);
408 if (!ufs_cg_chkmagic(sb, ucg)) {
409 ufs_panic (sb, "ufs_add_fragments",
410 "internal error, bad magic number on cg %u", cgno);
/* Every fragment in [oldcount, newcount) must currently be free. */
414 fragno = ufs_dtogd (fragment);
415 fragoff = ufs_fragnum (fragno);
416 for (i = oldcount; i < newcount; i++)
417 if (ubh_isclr (UCPI_UBH, ucpi->c_freeoff, fragno + i))
420 * Block can be extended
422 ucg->cg_time = cpu_to_fs32(sb, get_seconds());
/* Measure the free run following the old fragments so the right
 * cg_frsum bucket can be decremented (and a smaller one incremented). */
423 for (i = newcount; i < (uspi->s_fpb - fragoff); i++)
424 if (ubh_isclr (UCPI_UBH, ucpi->c_freeoff, fragno + i))
426 fragsize = i - oldcount;
427 if (!fs32_to_cpu(sb, ucg->cg_frsum[fragsize]))
428 ufs_panic (sb, "ufs_add_fragments",
429 "internal error or corrupted bitmap on cg %u", cgno);
430 fs32_sub(sb, &ucg->cg_frsum[fragsize], 1);
431 if (fragsize != count)
432 fs32_add(sb, &ucg->cg_frsum[fragsize - count], 1);
/* Claim the new fragments in the free map and charge the quota. */
433 for (i = oldcount; i < newcount; i++)
434 ubh_clrbit (UCPI_UBH, ucpi->c_freeoff, fragno + i);
435 if(DQUOT_ALLOC_BLOCK(inode, count)) {
440 fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
441 fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
442 fs32_sub(sb, &usb1->fs_cstotal.cs_nffree, count);
/* Write back the dirtied cg buffers; synchronously if mounted 'sync'. */
444 ubh_mark_buffer_dirty (USPI_UBH);
445 ubh_mark_buffer_dirty (UCPI_UBH);
446 if (sb->s_flags & MS_SYNCHRONOUS) {
447 ubh_ll_rw_block (SWRITE, 1, (struct ufs_buffer_head **)&ucpi);
448 ubh_wait_on_buffer (UCPI_UBH);
452 UFSD(("EXIT, fragment %u\n", fragment))
/*
 * Helper macro for ufs_alloc_fragments(): test whether cylinder group
 * 'cgno' can satisfy the request — either a whole free block (cs_nbfree)
 * or a free-fragment run of at least 'count' fragments (cg_frsum[k]).
 * Expects sb, cgno, count, uspi, ucg and k in the expansion scope; the
 * success branch lines are elided in this excerpt.
 */
457 #define UFS_TEST_FREE_SPACE_CG \
458 ucg = (struct ufs_cylinder_group *) UFS_SB(sb)->s_ucg[cgno]->b_data; \
459 if (fs32_to_cpu(sb, ucg->cg_cs.cs_nbfree)) \
461 for (k = count; k < uspi->s_fpb; k++) \
462 if (fs32_to_cpu(sb, ucg->cg_frsum[k])) \
/*
 * Allocate 'count' fragments near 'goal', starting the search in cylinder
 * group 'cgno'.  Group selection follows the classic FFS order: preferred
 * group, quadratic rehash, then brute-force scan.  Within the chosen group
 * either a whole block is taken (splitting off and re-freeing the unused
 * tail) or a suitable free-fragment run is found via ufs_bitmap_search().
 * Returns the absolute fragment number, or an error path through *err.
 * NOTE(review): interior lines are elided in this excerpt.
 */
465 static unsigned ufs_alloc_fragments (struct inode * inode, unsigned cgno,
466 unsigned goal, unsigned count, int * err)
468 struct super_block * sb;
469 struct ufs_sb_private_info * uspi;
470 struct ufs_super_block_first * usb1;
471 struct ufs_cg_private_info * ucpi;
472 struct ufs_cylinder_group * ucg;
473 unsigned oldcg, i, j, k, result, allocsize;
475 UFSD(("ENTER, ino %lu, cgno %u, goal %u, count %u\n", inode->i_ino, cgno, goal, count))
478 uspi = UFS_SB(sb)->s_uspi;
479 usb1 = ubh_get_usb_first(uspi);
483 * 1. searching on preferred cylinder group
485 UFS_TEST_FREE_SPACE_CG
488 * 2. quadratic rehash
490 for (j = 1; j < uspi->s_ncg; j *= 2) {
492 if (cgno >= uspi->s_ncg)
494 UFS_TEST_FREE_SPACE_CG
498 * 3. brute force search
499 * We start at i = 2 ( 0 is checked at 1.step, 1 at 2.step )
501 cgno = (oldcg + 1) % uspi->s_ncg;
502 for (j = 2; j < uspi->s_ncg; j++) {
504 if (cgno >= uspi->s_ncg)
506 UFS_TEST_FREE_SPACE_CG
509 UFSD(("EXIT (FAILED)\n"))
/* A group with space was found; load and validate it. */
513 ucpi = ufs_load_cylinder (sb, cgno);
516 ucg = ubh_get_ucg (UCPI_UBH);
517 if (!ufs_cg_chkmagic(sb, ucg))
518 ufs_panic (sb, "ufs_alloc_fragments",
519 "internal error, bad magic number on cg %u", cgno);
520 ucg->cg_time = cpu_to_fs32(sb, get_seconds());
/* Exact-block request: allocate a whole block directly. */
522 if (count == uspi->s_fpb) {
523 result = ufs_alloccg_block (inode, ucpi, goal, err);
524 if (result == (unsigned)-1)
/* Otherwise find the smallest free-fragment run size >= count. */
529 for (allocsize = count; allocsize < uspi->s_fpb; allocsize++)
530 if (fs32_to_cpu(sb, ucg->cg_frsum[allocsize]) != 0)
/* No suitable run: take a whole block, keep 'count' fragments, and hand
 * the remainder back as a free run (bits set, counters credited). */
533 if (allocsize == uspi->s_fpb) {
534 result = ufs_alloccg_block (inode, ucpi, goal, err);
535 if (result == (unsigned)-1)
537 goal = ufs_dtogd (result);
538 for (i = count; i < uspi->s_fpb; i++)
539 ubh_setbit (UCPI_UBH, ucpi->c_freeoff, goal + i);
540 i = uspi->s_fpb - count;
541 DQUOT_FREE_BLOCK(inode, i);
543 fs32_add(sb, &ucg->cg_cs.cs_nffree, i);
544 fs32_add(sb, &usb1->fs_cstotal.cs_nffree, i);
545 fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, i);
546 fs32_add(sb, &ucg->cg_frsum[i], 1);
/* A run of 'allocsize' free fragments exists: locate and claim it. */
550 result = ufs_bitmap_search (sb, ucpi, goal, allocsize);
551 if (result == (unsigned)-1)
553 if(DQUOT_ALLOC_BLOCK(inode, count)) {
557 for (i = 0; i < count; i++)
558 ubh_clrbit (UCPI_UBH, ucpi->c_freeoff, result + i);
560 fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
561 fs32_sub(sb, &usb1->fs_cstotal.cs_nffree, count);
562 fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
563 fs32_sub(sb, &ucg->cg_frsum[allocsize], 1);
/* Leftover part of the run becomes a smaller free run in cg_frsum. */
565 if (count != allocsize)
566 fs32_add(sb, &ucg->cg_frsum[allocsize - count], 1);
/* Write back the dirtied cg buffers; synchronously if mounted 'sync'. */
569 ubh_mark_buffer_dirty (USPI_UBH);
570 ubh_mark_buffer_dirty (UCPI_UBH);
571 if (sb->s_flags & MS_SYNCHRONOUS) {
572 ubh_ll_rw_block (SWRITE, 1, (struct ufs_buffer_head **)&ucpi);
573 ubh_wait_on_buffer (UCPI_UBH);
/* Convert the group-relative result to an absolute fragment number. */
577 result += cgno * uspi->s_fpg;
578 UFSD(("EXIT3, result %u\n", result))
/*
 * Allocate one whole block in the given cylinder group, preferring 'goal'
 * (or the group's rotor when no goal).  Clears the block in the free map,
 * charges the quota, and debits the free-block counters and per-cylinder
 * tallies.  Returns the group-relative fragment number of the block.
 * NOTE(review): interior lines are elided in this excerpt.
 */
582 static unsigned ufs_alloccg_block (struct inode * inode,
583 struct ufs_cg_private_info * ucpi, unsigned goal, int * err)
585 struct super_block * sb;
586 struct ufs_sb_private_info * uspi;
587 struct ufs_super_block_first * usb1;
588 struct ufs_cylinder_group * ucg;
589 unsigned result, cylno, blkno;
591 UFSD(("ENTER, goal %u\n", goal))
594 uspi = UFS_SB(sb)->s_uspi;
595 usb1 = ubh_get_usb_first(uspi);
596 ucg = ubh_get_ucg(UCPI_UBH);
/* No usable goal: resume from this group's allocation rotor. */
599 goal = ucpi->c_rotor;
/* Round the goal down to a block boundary, group-relative. */
602 goal = ufs_blknum (goal);
603 goal = ufs_dtogd (goal);
606 * If the requested block is available, use it.
608 if (ubh_isblockset(UCPI_UBH, ucpi->c_freeoff, ufs_fragstoblks(goal))) {
/* Goal block busy: search the map for any free block (s_fpb run). */
614 result = ufs_bitmap_search (sb, ucpi, goal, uspi->s_fpb);
615 if (result == (unsigned)-1)
617 ucpi->c_rotor = result;
/* Claim the block: clear its map bit, update clustering and quota. */
619 blkno = ufs_fragstoblks(result);
620 ubh_clrblock (UCPI_UBH, ucpi->c_freeoff, blkno);
621 if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
622 ufs_clusteracct (sb, ucpi, blkno, -1);
623 if(DQUOT_ALLOC_BLOCK(inode, uspi->s_fpb)) {
628 fs32_sub(sb, &ucg->cg_cs.cs_nbfree, 1);
629 fs32_sub(sb, &usb1->fs_cstotal.cs_nbfree, 1);
630 fs32_sub(sb, &UFS_SB(sb)->fs_cs(ucpi->c_cgx).cs_nbfree, 1);
631 cylno = ufs_cbtocylno(result);
632 fs16_sub(sb, &ubh_cg_blks(ucpi, cylno, ufs_cbtorpos(result)), 1);
633 fs32_sub(sb, &ubh_cg_blktot(ucpi, cylno), 1);
635 UFSD(("EXIT, result %u\n", result))
/*
 * Scan the cylinder group's free map for a run of 'count' free fragments,
 * starting near 'goal' (or the group's fragment rotor).  The byte-wise
 * scan uses the ufs_fragtable_* lookup tables to detect candidate bytes,
 * then the bit loop pins down the exact run.  Returns the group-relative
 * fragment number of the run, or falls through to the error path.
 * NOTE(review): interior lines are elided in this excerpt; 'possition' is
 * the original's spelling, preserved verbatim.
 */
640 static unsigned ufs_bitmap_search (struct super_block * sb,
641 struct ufs_cg_private_info * ucpi, unsigned goal, unsigned count)
643 struct ufs_sb_private_info * uspi;
644 struct ufs_super_block_first * usb1;
645 struct ufs_cylinder_group * ucg;
646 unsigned start, length, location, result;
647 unsigned possition, fragsize, blockmap, mask;
649 UFSD(("ENTER, cg %u, goal %u, count %u\n", ucpi->c_cgx, goal, count))
651 uspi = UFS_SB(sb)->s_uspi;
652 usb1 = ubh_get_usb_first (uspi);
653 ucg = ubh_get_ucg(UCPI_UBH);
/* Start the byte scan at the goal's byte, else at the rotor's byte. */
656 start = ufs_dtogd(goal) >> 3;
658 start = ucpi->c_frotor >> 3;
/* First pass: scan from 'start' to the end of the map. */
660 length = ((uspi->s_fpg + 7) >> 3) - start;
661 location = ubh_scanc(UCPI_UBH, ucpi->c_freeoff + start, length,
662 (uspi->s_fpb == 8) ? ufs_fragtable_8fpb : ufs_fragtable_other,
663 1 << (count - 1 + (uspi->s_fpb & 7)));
/* Second pass: wrap around and scan from the beginning of the map. */
666 location = ubh_scanc(UCPI_UBH, ucpi->c_freeoff, length,
667 (uspi->s_fpb == 8) ? ufs_fragtable_8fpb : ufs_fragtable_other,
668 1 << (count - 1 + (uspi->s_fpb & 7)));
670 ufs_error (sb, "ufs_bitmap_search",
671 "bitmap corrupted on cg %u, start %u, length %u, count %u, freeoff %u\n",
672 ucpi->c_cgx, start, length, count, ucpi->c_freeoff);
/* Convert the scanc result into a fragment index and advance the rotor. */
677 result = (start + length - location) << 3;
678 ucpi->c_frotor = result;
681 * found the byte in the map
/* Walk the candidate byte bit by bit, tracking the current run length
 * ('fragsize') until a run of exactly 'count' free fragments is found. */
683 blockmap = ubh_blkmap(UCPI_UBH, ucpi->c_freeoff, result);
685 for (possition = 0, mask = 1; possition < 8; possition++, mask <<= 1) {
686 if (blockmap & mask) {
687 if (!(possition & uspi->s_fpbmask))
693 if (fragsize == count) {
694 result += possition - count;
695 UFSD(("EXIT, result %u\n", result))
/* Final check after the loop for a run ending at the byte boundary. */
701 if (fragsize == count) {
702 result += possition - count;
703 UFSD(("EXIT, result %u\n", result))
706 ufs_error (sb, "ufs_bitmap_search", "block not in map on cg %u\n", ucpi->c_cgx);
707 UFSD(("EXIT (FAILED)\n"))
/*
 * Maintain the 44BSD cluster summary counts when block 'blkno' is freed
 * (cnt > 0) or allocated (cnt < 0): flip the cluster-map bit, measure the
 * free cluster extending forward and backward from the block, and adjust
 * the per-size cluster summary array accordingly.  No-op when clustering
 * is disabled (s_contigsumsize <= 0).
 * NOTE(review): interior lines are elided in this excerpt.
 */
711 static void ufs_clusteracct(struct super_block * sb,
712 struct ufs_cg_private_info * ucpi, unsigned blkno, int cnt)
714 struct ufs_sb_private_info * uspi;
715 int i, start, end, forw, back;
717 uspi = UFS_SB(sb)->s_uspi;
718 if (uspi->s_contigsumsize <= 0)
/* Set the bit when freeing, clear it when allocating. */
722 ubh_setbit(UCPI_UBH, ucpi->c_clusteroff, blkno);
724 ubh_clrbit(UCPI_UBH, ucpi->c_clusteroff, blkno);
727 * Find the size of the cluster going forward.
730 end = start + uspi->s_contigsumsize;
731 if ( end >= ucpi->c_nclusterblks)
732 end = ucpi->c_nclusterblks;
733 i = ubh_find_next_zero_bit (UCPI_UBH, ucpi->c_clusteroff, end, start);
739 * Find the size of the cluster going backward.
742 end = start - uspi->s_contigsumsize;
745 i = ubh_find_last_zero_bit (UCPI_UBH, ucpi->c_clusteroff, start, end);
751 * Account for old cluster and the possibly new forward and
/* Clamp the combined cluster size to the summary table's maximum,
 * then credit it and debit the old forward/backward cluster sizes. */
755 if (i > uspi->s_contigsumsize)
756 i = uspi->s_contigsumsize;
757 fs32_add(sb, (__fs32*)ubh_get_addr(UCPI_UBH, ucpi->c_clustersumoff + (i << 2)), cnt);
759 fs32_sub(sb, (__fs32*)ubh_get_addr(UCPI_UBH, ucpi->c_clustersumoff + (back << 2)), cnt);
761 fs32_sub(sb, (__fs32*)ubh_get_addr(UCPI_UBH, ucpi->c_clustersumoff + (forw << 2)), cnt);
/*
 * Fragment lookup table for filesystems with 8 fragments per block:
 * indexed by a free-map byte, used by ufs_bitmap_search() via ubh_scanc()
 * to detect bytes containing a free run of the requested size.
 * NOTE(review): exact encoding of each entry is inferred from its use in
 * ufs_bitmap_search (bit i-1 set => a free run of length i is present,
 * per the '1 << (count - 1 ...)' mask) — confirm against BSD fs sources.
 */
765 static unsigned char ufs_fragtable_8fpb[] = {
766 0x00, 0x01, 0x01, 0x02, 0x01, 0x01, 0x02, 0x04, 0x01, 0x01, 0x01, 0x03, 0x02, 0x03, 0x04, 0x08,
767 0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x02, 0x03, 0x03, 0x02, 0x04, 0x05, 0x08, 0x10,
768 0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
769 0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x04, 0x05, 0x05, 0x06, 0x08, 0x09, 0x10, 0x20,
770 0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
771 0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11,
772 0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0A,
773 0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, 0x08, 0x09, 0x09, 0x0A, 0x10, 0x11, 0x20, 0x40,
774 0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
775 0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11,
776 0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
777 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x05, 0x05, 0x05, 0x07, 0x09, 0x09, 0x11, 0x21,
778 0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0A,
779 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x02, 0x03, 0x03, 0x02, 0x06, 0x07, 0x0A, 0x12,
780 0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, 0x05, 0x05, 0x05, 0x07, 0x06, 0x07, 0x04, 0x0C,
781 0x08, 0x09, 0x09, 0x0A, 0x09, 0x09, 0x0A, 0x0C, 0x10, 0x11, 0x11, 0x12, 0x20, 0x21, 0x40, 0x80,
/*
 * Fragment lookup table for filesystems with other than 8 fragments per
 * block; selected by ufs_bitmap_search() when uspi->s_fpb != 8 and scanned
 * with an extra bias of (s_fpb & 7) in the mask shift.
 * NOTE(review): entry encoding inferred from ufs_bitmap_search usage —
 * confirm against the BSD fs sources.
 */
784 static unsigned char ufs_fragtable_other[] = {
785 0x00, 0x16, 0x16, 0x2A, 0x16, 0x16, 0x26, 0x4E, 0x16, 0x16, 0x16, 0x3E, 0x2A, 0x3E, 0x4E, 0x8A,
786 0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
787 0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
788 0x2A, 0x3E, 0x3E, 0x2A, 0x3E, 0x3E, 0x2E, 0x6E, 0x3E, 0x3E, 0x3E, 0x3E, 0x2A, 0x3E, 0x6E, 0xAA,
789 0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
790 0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
791 0x26, 0x36, 0x36, 0x2E, 0x36, 0x36, 0x26, 0x6E, 0x36, 0x36, 0x36, 0x3E, 0x2E, 0x3E, 0x6E, 0xAE,
792 0x4E, 0x5E, 0x5E, 0x6E, 0x5E, 0x5E, 0x6E, 0x4E, 0x5E, 0x5E, 0x5E, 0x7E, 0x6E, 0x7E, 0x4E, 0xCE,
793 0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
794 0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
795 0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
796 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0xBE,
797 0x2A, 0x3E, 0x3E, 0x2A, 0x3E, 0x3E, 0x2E, 0x6E, 0x3E, 0x3E, 0x3E, 0x3E, 0x2A, 0x3E, 0x6E, 0xAA,
798 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0xBE,
799 0x4E, 0x5E, 0x5E, 0x6E, 0x5E, 0x5E, 0x6E, 0x4E, 0x5E, 0x5E, 0x5E, 0x7E, 0x6E, 0x7E, 0x4E, 0xCE,
800 0x8A, 0x9E, 0x9E, 0xAA, 0x9E, 0x9E, 0xAE, 0xCE, 0x9E, 0x9E, 0x9E, 0xBE, 0xAA, 0xBE, 0xCE, 0x8A,