/*
 * sufile.c - NILFS segment usage file.
 *
 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 *
 * Written by Koji Sato <koji@osrg.net>.
 * Revised by Ryusuke Konishi <ryusuke@osrg.net>.
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/errno.h>
#include <linux/nilfs2_fs.h>
#include "mdt.h"
#include "sufile.h"
34 struct nilfs_sufile_info {
35 struct nilfs_mdt_info mi;
36 unsigned long ncleansegs;/* number of clean segments */
37 __u64 allocmin; /* lower limit of allocatable segment range */
38 __u64 allocmax; /* upper limit of allocatable segment range */
/* Downcast the mdt info of @sufile to the sufile-private info structure. */
static inline struct nilfs_sufile_info *NILFS_SUI(struct inode *sufile)
{
	return (struct nilfs_sufile_info *)NILFS_MDT(sufile);
}
46 static inline unsigned long
47 nilfs_sufile_segment_usages_per_block(const struct inode *sufile)
49 return NILFS_MDT(sufile)->mi_entries_per_block;
53 nilfs_sufile_get_blkoff(const struct inode *sufile, __u64 segnum)
55 __u64 t = segnum + NILFS_MDT(sufile)->mi_first_entry_offset;
56 do_div(t, nilfs_sufile_segment_usages_per_block(sufile));
57 return (unsigned long)t;
61 nilfs_sufile_get_offset(const struct inode *sufile, __u64 segnum)
63 __u64 t = segnum + NILFS_MDT(sufile)->mi_first_entry_offset;
64 return do_div(t, nilfs_sufile_segment_usages_per_block(sufile));
68 nilfs_sufile_segment_usages_in_block(const struct inode *sufile, __u64 curr,
71 return min_t(unsigned long,
72 nilfs_sufile_segment_usages_per_block(sufile) -
73 nilfs_sufile_get_offset(sufile, curr),
77 static struct nilfs_segment_usage *
78 nilfs_sufile_block_get_segment_usage(const struct inode *sufile, __u64 segnum,
79 struct buffer_head *bh, void *kaddr)
81 return kaddr + bh_offset(bh) +
82 nilfs_sufile_get_offset(sufile, segnum) *
83 NILFS_MDT(sufile)->mi_entry_size;
86 static inline int nilfs_sufile_get_header_block(struct inode *sufile,
87 struct buffer_head **bhp)
89 return nilfs_mdt_get_block(sufile, 0, 0, NULL, bhp);
93 nilfs_sufile_get_segment_usage_block(struct inode *sufile, __u64 segnum,
94 int create, struct buffer_head **bhp)
96 return nilfs_mdt_get_block(sufile,
97 nilfs_sufile_get_blkoff(sufile, segnum),
101 static void nilfs_sufile_mod_counter(struct buffer_head *header_bh,
102 u64 ncleanadd, u64 ndirtyadd)
104 struct nilfs_sufile_header *header;
107 kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
108 header = kaddr + bh_offset(header_bh);
109 le64_add_cpu(&header->sh_ncleansegs, ncleanadd);
110 le64_add_cpu(&header->sh_ndirtysegs, ndirtyadd);
111 kunmap_atomic(kaddr, KM_USER0);
113 nilfs_mdt_mark_buffer_dirty(header_bh);
117 * nilfs_sufile_get_ncleansegs - return the number of clean segments
118 * @sufile: inode of segment usage file
120 unsigned long nilfs_sufile_get_ncleansegs(struct inode *sufile)
122 return NILFS_SUI(sufile)->ncleansegs;
126 * nilfs_sufile_updatev - modify multiple segment usages at a time
127 * @sufile: inode of segment usage file
128 * @segnumv: array of segment numbers
129 * @nsegs: size of @segnumv array
130 * @create: creation flag
131 * @ndone: place to store number of modified segments on @segnumv
132 * @dofunc: primitive operation for the update
134 * Description: nilfs_sufile_updatev() repeatedly calls @dofunc
135 * against the given array of segments. The @dofunc is called with
136 * buffers of a header block and the sufile block in which the target
137 * segment usage entry is contained. If @ndone is given, the number
138 * of successfully modified segments from the head is stored in the
139 * place @ndone points to.
141 * Return Value: On success, zero is returned. On error, one of the
142 * following negative error codes is returned.
146 * %-ENOMEM - Insufficient amount of memory available.
148 * %-ENOENT - Given segment usage is in hole block (may be returned if
151 * %-EINVAL - Invalid segment usage number
153 int nilfs_sufile_updatev(struct inode *sufile, __u64 *segnumv, size_t nsegs,
154 int create, size_t *ndone,
155 void (*dofunc)(struct inode *, __u64,
156 struct buffer_head *,
157 struct buffer_head *))
159 struct buffer_head *header_bh, *bh;
160 unsigned long blkoff, prev_blkoff;
162 size_t nerr = 0, n = 0;
165 if (unlikely(nsegs == 0))
168 down_write(&NILFS_MDT(sufile)->mi_sem);
169 for (seg = segnumv; seg < segnumv + nsegs; seg++) {
170 if (unlikely(*seg >= nilfs_sufile_get_nsegments(sufile))) {
172 "%s: invalid segment number: %llu\n", __func__,
173 (unsigned long long)*seg);
182 ret = nilfs_sufile_get_header_block(sufile, &header_bh);
187 blkoff = nilfs_sufile_get_blkoff(sufile, *seg);
188 ret = nilfs_mdt_get_block(sufile, blkoff, create, NULL, &bh);
193 dofunc(sufile, *seg, header_bh, bh);
195 if (++seg >= segnumv + nsegs)
197 prev_blkoff = blkoff;
198 blkoff = nilfs_sufile_get_blkoff(sufile, *seg);
199 if (blkoff == prev_blkoff)
202 /* get different block */
204 ret = nilfs_mdt_get_block(sufile, blkoff, create, NULL, &bh);
205 if (unlikely(ret < 0))
214 up_write(&NILFS_MDT(sufile)->mi_sem);
221 int nilfs_sufile_update(struct inode *sufile, __u64 segnum, int create,
222 void (*dofunc)(struct inode *, __u64,
223 struct buffer_head *,
224 struct buffer_head *))
226 struct buffer_head *header_bh, *bh;
229 if (unlikely(segnum >= nilfs_sufile_get_nsegments(sufile))) {
230 printk(KERN_WARNING "%s: invalid segment number: %llu\n",
231 __func__, (unsigned long long)segnum);
234 down_write(&NILFS_MDT(sufile)->mi_sem);
236 ret = nilfs_sufile_get_header_block(sufile, &header_bh);
240 ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, create, &bh);
242 dofunc(sufile, segnum, header_bh, bh);
248 up_write(&NILFS_MDT(sufile)->mi_sem);
253 * nilfs_sufile_set_alloc_range - limit range of segment to be allocated
254 * @sufile: inode of segment usage file
255 * @start: minimum segment number of allocatable region (inclusive)
256 * @end: maximum segment number of allocatable region (inclusive)
258 * Return Value: On success, 0 is returned. On error, one of the
259 * following negative error codes is returned.
261 * %-ERANGE - invalid segment region
263 int nilfs_sufile_set_alloc_range(struct inode *sufile, __u64 start, __u64 end)
265 struct nilfs_sufile_info *sui = NILFS_SUI(sufile);
269 down_write(&NILFS_MDT(sufile)->mi_sem);
270 nsegs = nilfs_sufile_get_nsegments(sufile);
272 if (start <= end && end < nsegs) {
273 sui->allocmin = start;
277 up_write(&NILFS_MDT(sufile)->mi_sem);
282 * nilfs_sufile_alloc - allocate a segment
283 * @sufile: inode of segment usage file
284 * @segnump: pointer to segment number
286 * Description: nilfs_sufile_alloc() allocates a clean segment.
288 * Return Value: On success, 0 is returned and the segment number of the
289 * allocated segment is stored in the place pointed by @segnump. On error, one
290 * of the following negative error codes is returned.
294 * %-ENOMEM - Insufficient amount of memory available.
296 * %-ENOSPC - No clean segment left.
298 int nilfs_sufile_alloc(struct inode *sufile, __u64 *segnump)
300 struct buffer_head *header_bh, *su_bh;
301 struct nilfs_sufile_header *header;
302 struct nilfs_segment_usage *su;
303 struct nilfs_sufile_info *sui = NILFS_SUI(sufile);
304 size_t susz = NILFS_MDT(sufile)->mi_entry_size;
305 __u64 segnum, maxsegnum, last_alloc;
307 unsigned long nsegments, ncleansegs, nsus, cnt;
310 down_write(&NILFS_MDT(sufile)->mi_sem);
312 ret = nilfs_sufile_get_header_block(sufile, &header_bh);
315 kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
316 header = kaddr + bh_offset(header_bh);
317 ncleansegs = le64_to_cpu(header->sh_ncleansegs);
318 last_alloc = le64_to_cpu(header->sh_last_alloc);
319 kunmap_atomic(kaddr, KM_USER0);
321 nsegments = nilfs_sufile_get_nsegments(sufile);
322 maxsegnum = sui->allocmax;
323 segnum = last_alloc + 1;
324 if (segnum < sui->allocmin || segnum > sui->allocmax)
325 segnum = sui->allocmin;
327 for (cnt = 0; cnt < nsegments; cnt += nsus) {
328 if (segnum > maxsegnum) {
329 if (cnt < sui->allocmax - sui->allocmin + 1) {
331 * wrap around in the limited region.
332 * if allocation started from
333 * sui->allocmin, this never happens.
335 segnum = sui->allocmin;
336 maxsegnum = last_alloc;
337 } else if (segnum > sui->allocmin &&
338 sui->allocmax + 1 < nsegments) {
339 segnum = sui->allocmax + 1;
340 maxsegnum = nsegments - 1;
341 } else if (sui->allocmin > 0) {
343 maxsegnum = sui->allocmin - 1;
345 break; /* never happens */
348 ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 1,
352 kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
353 su = nilfs_sufile_block_get_segment_usage(
354 sufile, segnum, su_bh, kaddr);
356 nsus = nilfs_sufile_segment_usages_in_block(
357 sufile, segnum, maxsegnum);
358 for (j = 0; j < nsus; j++, su = (void *)su + susz, segnum++) {
359 if (!nilfs_segment_usage_clean(su))
361 /* found a clean segment */
362 nilfs_segment_usage_set_dirty(su);
363 kunmap_atomic(kaddr, KM_USER0);
365 kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
366 header = kaddr + bh_offset(header_bh);
367 le64_add_cpu(&header->sh_ncleansegs, -1);
368 le64_add_cpu(&header->sh_ndirtysegs, 1);
369 header->sh_last_alloc = cpu_to_le64(segnum);
370 kunmap_atomic(kaddr, KM_USER0);
373 nilfs_mdt_mark_buffer_dirty(header_bh);
374 nilfs_mdt_mark_buffer_dirty(su_bh);
375 nilfs_mdt_mark_dirty(sufile);
381 kunmap_atomic(kaddr, KM_USER0);
385 /* no segments left */
392 up_write(&NILFS_MDT(sufile)->mi_sem);
396 void nilfs_sufile_do_cancel_free(struct inode *sufile, __u64 segnum,
397 struct buffer_head *header_bh,
398 struct buffer_head *su_bh)
400 struct nilfs_segment_usage *su;
403 kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
404 su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
405 if (unlikely(!nilfs_segment_usage_clean(su))) {
406 printk(KERN_WARNING "%s: segment %llu must be clean\n",
407 __func__, (unsigned long long)segnum);
408 kunmap_atomic(kaddr, KM_USER0);
411 nilfs_segment_usage_set_dirty(su);
412 kunmap_atomic(kaddr, KM_USER0);
414 nilfs_sufile_mod_counter(header_bh, -1, 1);
415 NILFS_SUI(sufile)->ncleansegs--;
417 nilfs_mdt_mark_buffer_dirty(su_bh);
418 nilfs_mdt_mark_dirty(sufile);
421 void nilfs_sufile_do_scrap(struct inode *sufile, __u64 segnum,
422 struct buffer_head *header_bh,
423 struct buffer_head *su_bh)
425 struct nilfs_segment_usage *su;
429 kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
430 su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
431 if (su->su_flags == cpu_to_le32(1UL << NILFS_SEGMENT_USAGE_DIRTY) &&
432 su->su_nblocks == cpu_to_le32(0)) {
433 kunmap_atomic(kaddr, KM_USER0);
436 clean = nilfs_segment_usage_clean(su);
437 dirty = nilfs_segment_usage_dirty(su);
439 /* make the segment garbage */
440 su->su_lastmod = cpu_to_le64(0);
441 su->su_nblocks = cpu_to_le32(0);
442 su->su_flags = cpu_to_le32(1UL << NILFS_SEGMENT_USAGE_DIRTY);
443 kunmap_atomic(kaddr, KM_USER0);
445 nilfs_sufile_mod_counter(header_bh, clean ? (u64)-1 : 0, dirty ? 0 : 1);
446 NILFS_SUI(sufile)->ncleansegs -= clean;
448 nilfs_mdt_mark_buffer_dirty(su_bh);
449 nilfs_mdt_mark_dirty(sufile);
452 void nilfs_sufile_do_free(struct inode *sufile, __u64 segnum,
453 struct buffer_head *header_bh,
454 struct buffer_head *su_bh)
456 struct nilfs_segment_usage *su;
460 kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
461 su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
462 if (nilfs_segment_usage_clean(su)) {
463 printk(KERN_WARNING "%s: segment %llu is already clean\n",
464 __func__, (unsigned long long)segnum);
465 kunmap_atomic(kaddr, KM_USER0);
468 WARN_ON(nilfs_segment_usage_error(su));
469 WARN_ON(!nilfs_segment_usage_dirty(su));
471 sudirty = nilfs_segment_usage_dirty(su);
472 nilfs_segment_usage_set_clean(su);
473 kunmap_atomic(kaddr, KM_USER0);
474 nilfs_mdt_mark_buffer_dirty(su_bh);
476 nilfs_sufile_mod_counter(header_bh, 1, sudirty ? (u64)-1 : 0);
477 NILFS_SUI(sufile)->ncleansegs++;
479 nilfs_mdt_mark_dirty(sufile);
483 * nilfs_sufile_mark_dirty - mark the buffer having a segment usage dirty
484 * @sufile: inode of segment usage file
485 * @segnum: segment number
487 int nilfs_sufile_mark_dirty(struct inode *sufile, __u64 segnum)
489 struct buffer_head *bh;
492 ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0, &bh);
494 nilfs_mdt_mark_buffer_dirty(bh);
495 nilfs_mdt_mark_dirty(sufile);
502 * nilfs_sufile_set_segment_usage - set usage of a segment
503 * @sufile: inode of segment usage file
504 * @segnum: segment number
505 * @nblocks: number of live blocks in the segment
506 * @modtime: modification time (option)
508 int nilfs_sufile_set_segment_usage(struct inode *sufile, __u64 segnum,
509 unsigned long nblocks, time_t modtime)
511 struct buffer_head *bh;
512 struct nilfs_segment_usage *su;
516 down_write(&NILFS_MDT(sufile)->mi_sem);
517 ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0, &bh);
521 kaddr = kmap_atomic(bh->b_page, KM_USER0);
522 su = nilfs_sufile_block_get_segment_usage(sufile, segnum, bh, kaddr);
523 WARN_ON(nilfs_segment_usage_error(su));
525 su->su_lastmod = cpu_to_le64(modtime);
526 su->su_nblocks = cpu_to_le32(nblocks);
527 kunmap_atomic(kaddr, KM_USER0);
529 nilfs_mdt_mark_buffer_dirty(bh);
530 nilfs_mdt_mark_dirty(sufile);
534 up_write(&NILFS_MDT(sufile)->mi_sem);
539 * nilfs_sufile_get_stat - get segment usage statistics
540 * @sufile: inode of segment usage file
541 * @stat: pointer to a structure of segment usage statistics
543 * Description: nilfs_sufile_get_stat() returns information about segment
546 * Return Value: On success, 0 is returned, and segment usage information is
547 * stored in the place pointed by @stat. On error, one of the following
548 * negative error codes is returned.
552 * %-ENOMEM - Insufficient amount of memory available.
554 int nilfs_sufile_get_stat(struct inode *sufile, struct nilfs_sustat *sustat)
556 struct buffer_head *header_bh;
557 struct nilfs_sufile_header *header;
558 struct the_nilfs *nilfs = NILFS_I_NILFS(sufile);
562 down_read(&NILFS_MDT(sufile)->mi_sem);
564 ret = nilfs_sufile_get_header_block(sufile, &header_bh);
568 kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
569 header = kaddr + bh_offset(header_bh);
570 sustat->ss_nsegs = nilfs_sufile_get_nsegments(sufile);
571 sustat->ss_ncleansegs = le64_to_cpu(header->sh_ncleansegs);
572 sustat->ss_ndirtysegs = le64_to_cpu(header->sh_ndirtysegs);
573 sustat->ss_ctime = nilfs->ns_ctime;
574 sustat->ss_nongc_ctime = nilfs->ns_nongc_ctime;
575 spin_lock(&nilfs->ns_last_segment_lock);
576 sustat->ss_prot_seq = nilfs->ns_prot_seq;
577 spin_unlock(&nilfs->ns_last_segment_lock);
578 kunmap_atomic(kaddr, KM_USER0);
582 up_read(&NILFS_MDT(sufile)->mi_sem);
586 void nilfs_sufile_do_set_error(struct inode *sufile, __u64 segnum,
587 struct buffer_head *header_bh,
588 struct buffer_head *su_bh)
590 struct nilfs_segment_usage *su;
594 kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
595 su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
596 if (nilfs_segment_usage_error(su)) {
597 kunmap_atomic(kaddr, KM_USER0);
600 suclean = nilfs_segment_usage_clean(su);
601 nilfs_segment_usage_set_error(su);
602 kunmap_atomic(kaddr, KM_USER0);
605 nilfs_sufile_mod_counter(header_bh, -1, 0);
606 NILFS_SUI(sufile)->ncleansegs--;
608 nilfs_mdt_mark_buffer_dirty(su_bh);
609 nilfs_mdt_mark_dirty(sufile);
613 * nilfs_sufile_get_suinfo -
614 * @sufile: inode of segment usage file
615 * @segnum: segment number to start looking
616 * @buf: array of suinfo
617 * @sisz: byte size of suinfo
618 * @nsi: size of suinfo array
622 * Return Value: On success, 0 is returned and .... On error, one of the
623 * following negative error codes is returned.
627 * %-ENOMEM - Insufficient amount of memory available.
629 ssize_t nilfs_sufile_get_suinfo(struct inode *sufile, __u64 segnum, void *buf,
630 unsigned sisz, size_t nsi)
632 struct buffer_head *su_bh;
633 struct nilfs_segment_usage *su;
634 struct nilfs_suinfo *si = buf;
635 size_t susz = NILFS_MDT(sufile)->mi_entry_size;
636 struct the_nilfs *nilfs = NILFS_I_NILFS(sufile);
638 unsigned long nsegs, segusages_per_block;
642 down_read(&NILFS_MDT(sufile)->mi_sem);
644 segusages_per_block = nilfs_sufile_segment_usages_per_block(sufile);
645 nsegs = min_t(unsigned long,
646 nilfs_sufile_get_nsegments(sufile) - segnum,
648 for (i = 0; i < nsegs; i += n, segnum += n) {
649 n = min_t(unsigned long,
650 segusages_per_block -
651 nilfs_sufile_get_offset(sufile, segnum),
653 ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0,
659 memset(si, 0, sisz * n);
660 si = (void *)si + sisz * n;
664 kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
665 su = nilfs_sufile_block_get_segment_usage(
666 sufile, segnum, su_bh, kaddr);
668 j++, su = (void *)su + susz, si = (void *)si + sisz) {
669 si->sui_lastmod = le64_to_cpu(su->su_lastmod);
670 si->sui_nblocks = le32_to_cpu(su->su_nblocks);
671 si->sui_flags = le32_to_cpu(su->su_flags) &
672 ~(1UL << NILFS_SEGMENT_USAGE_ACTIVE);
673 if (nilfs_segment_is_active(nilfs, segnum + j))
675 (1UL << NILFS_SEGMENT_USAGE_ACTIVE);
677 kunmap_atomic(kaddr, KM_USER0);
683 up_read(&NILFS_MDT(sufile)->mi_sem);
688 * nilfs_sufile_read - read or get sufile inode
689 * @sb: super block instance
690 * @susize: size of a segment usage entry
691 * @raw_inode: on-disk sufile inode
692 * @inodep: buffer to store the inode
694 int nilfs_sufile_read(struct super_block *sb, size_t susize,
695 struct nilfs_inode *raw_inode, struct inode **inodep)
697 struct inode *sufile;
698 struct nilfs_sufile_info *sui;
699 struct buffer_head *header_bh;
700 struct nilfs_sufile_header *header;
704 sufile = nilfs_iget_locked(sb, NULL, NILFS_SUFILE_INO);
705 if (unlikely(!sufile))
707 if (!(sufile->i_state & I_NEW))
710 err = nilfs_mdt_init(sufile, NILFS_MDT_GFP, sizeof(*sui));
714 nilfs_mdt_set_entry_size(sufile, susize,
715 sizeof(struct nilfs_sufile_header));
717 err = nilfs_read_inode_common(sufile, raw_inode);
721 err = nilfs_sufile_get_header_block(sufile, &header_bh);
725 sui = NILFS_SUI(sufile);
726 kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
727 header = kaddr + bh_offset(header_bh);
728 sui->ncleansegs = le64_to_cpu(header->sh_ncleansegs);
729 kunmap_atomic(kaddr, KM_USER0);
732 sui->allocmax = nilfs_sufile_get_nsegments(sufile) - 1;
735 unlock_new_inode(sufile);