1 /*
2  * sufile.c - NILFS segment usage file.
3  *
4  * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License as published by
8  * the Free Software Foundation; either version 2 of the License, or
9  * (at your option) any later version.
10  *
11  * This program is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  * GNU General Public License for more details.
15  *
16  * You should have received a copy of the GNU General Public License
17  * along with this program; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
19  *
20  * Written by Koji Sato <koji@osrg.net>.
21  * Revised by Ryusuke Konishi <ryusuke@osrg.net>.
22  */
23
24 #include <linux/kernel.h>
25 #include <linux/fs.h>
26 #include <linux/string.h>
27 #include <linux/buffer_head.h>
28 #include <linux/errno.h>
29 #include <linux/nilfs2_fs.h>
30 #include "mdt.h"
31 #include "sufile.h"
32
33
/*
 * Per-sufile private state, allocated as the mdt info of the sufile
 * inode.  NILFS_SUI() converts the NILFS_MDT() pointer into this type,
 * so the embedded mdt info must remain the first member.
 */
struct nilfs_sufile_info {
	struct nilfs_mdt_info mi;	/* must stay first: NILFS_SUI() casts from it */
	unsigned long ncleansegs;/* number of clean segments */
	__u64 allocmin;		/* lower limit of allocatable segment range */
	__u64 allocmax;		/* upper limit of allocatable segment range */
};
40
/*
 * NILFS_SUI - obtain the sufile-private info from the sufile inode
 *
 * The mdt info of the sufile inode is embedded as the first member of
 * struct nilfs_sufile_info, so the NILFS_MDT() pointer can simply be
 * re-typed.
 */
static inline struct nilfs_sufile_info *NILFS_SUI(struct inode *sufile)
{
	struct nilfs_mdt_info *mi = NILFS_MDT(sufile);

	return (struct nilfs_sufile_info *)mi;
}
45
46 static inline unsigned long
47 nilfs_sufile_segment_usages_per_block(const struct inode *sufile)
48 {
49         return NILFS_MDT(sufile)->mi_entries_per_block;
50 }
51
52 static unsigned long
53 nilfs_sufile_get_blkoff(const struct inode *sufile, __u64 segnum)
54 {
55         __u64 t = segnum + NILFS_MDT(sufile)->mi_first_entry_offset;
56         do_div(t, nilfs_sufile_segment_usages_per_block(sufile));
57         return (unsigned long)t;
58 }
59
60 static unsigned long
61 nilfs_sufile_get_offset(const struct inode *sufile, __u64 segnum)
62 {
63         __u64 t = segnum + NILFS_MDT(sufile)->mi_first_entry_offset;
64         return do_div(t, nilfs_sufile_segment_usages_per_block(sufile));
65 }
66
67 static unsigned long
68 nilfs_sufile_segment_usages_in_block(const struct inode *sufile, __u64 curr,
69                                      __u64 max)
70 {
71         return min_t(unsigned long,
72                      nilfs_sufile_segment_usages_per_block(sufile) -
73                      nilfs_sufile_get_offset(sufile, curr),
74                      max - curr + 1);
75 }
76
77 static struct nilfs_segment_usage *
78 nilfs_sufile_block_get_segment_usage(const struct inode *sufile, __u64 segnum,
79                                      struct buffer_head *bh, void *kaddr)
80 {
81         return kaddr + bh_offset(bh) +
82                 nilfs_sufile_get_offset(sufile, segnum) *
83                 NILFS_MDT(sufile)->mi_entry_size;
84 }
85
/* Read the sufile header block (block 0); never creates it (create == 0). */
static inline int nilfs_sufile_get_header_block(struct inode *sufile,
						struct buffer_head **bhp)
{
	return nilfs_mdt_get_block(sufile, 0, 0, NULL, bhp);
}
91
/*
 * Read (or create, when @create is nonzero) the sufile block that
 * contains the usage entry of @segnum.
 */
static inline int
nilfs_sufile_get_segment_usage_block(struct inode *sufile, __u64 segnum,
				     int create, struct buffer_head **bhp)
{
	return nilfs_mdt_get_block(sufile,
				   nilfs_sufile_get_blkoff(sufile, segnum),
				   create, NULL, bhp);
}
100
/*
 * nilfs_sufile_mod_counter - adjust the counters in the on-disk header
 * @header_bh: buffer head of the sufile header block
 * @ncleanadd: delta added to sh_ncleansegs (negative deltas are passed
 *             as their two's-complement u64 representation by callers)
 * @ndirtyadd: delta added to sh_ndirtysegs (same convention)
 *
 * Marks the header buffer dirty afterwards.  Callers in this file
 * invoke it under mi_sem held for writing.
 */
static void nilfs_sufile_mod_counter(struct buffer_head *header_bh,
				     u64 ncleanadd, u64 ndirtyadd)
{
	struct nilfs_sufile_header *header;
	void *kaddr;

	kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
	header = kaddr + bh_offset(header_bh);
	le64_add_cpu(&header->sh_ncleansegs, ncleanadd);
	le64_add_cpu(&header->sh_ndirtysegs, ndirtyadd);
	kunmap_atomic(kaddr, KM_USER0);

	nilfs_mdt_mark_buffer_dirty(header_bh);
}
115
116 /**
117  * nilfs_sufile_get_ncleansegs - return the number of clean segments
118  * @sufile: inode of segment usage file
119  */
120 unsigned long nilfs_sufile_get_ncleansegs(struct inode *sufile)
121 {
122         return NILFS_SUI(sufile)->ncleansegs;
123 }
124
/**
 * nilfs_sufile_updatev - modify multiple segment usages at a time
 * @sufile: inode of segment usage file
 * @segnumv: array of segment numbers
 * @nsegs: size of @segnumv array
 * @create: creation flag
 * @ndone: place to store number of modified segments on @segnumv
 * @dofunc: primitive operation for the update
 *
 * Description: nilfs_sufile_updatev() repeatedly calls @dofunc
 * against the given array of segments.  The @dofunc is called with
 * buffers of a header block and the sufile block in which the target
 * segment usage entry is contained.  If @ndone is given, the number
 * of successfully modified segments from the head is stored in the
 * place @ndone points to.
 *
 * Return Value: On success, zero is returned.  On error, one of the
 * following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOENT - Given segment usage is in hole block (may be returned if
 *            @create is zero)
 *
 * %-EINVAL - Invalid segment usage number
 */
int nilfs_sufile_updatev(struct inode *sufile, __u64 *segnumv, size_t nsegs,
			 int create, size_t *ndone,
			 void (*dofunc)(struct inode *, __u64,
					struct buffer_head *,
					struct buffer_head *))
{
	struct buffer_head *header_bh, *bh;
	unsigned long blkoff, prev_blkoff;
	__u64 *seg;
	size_t nerr = 0, n = 0;
	int ret = 0;

	if (unlikely(nsegs == 0))
		goto out;

	down_write(&NILFS_MDT(sufile)->mi_sem);
	/*
	 * Validate every segment number up front so that a bad entry in
	 * the middle of @segnumv cannot leave a partially applied update.
	 */
	for (seg = segnumv; seg < segnumv + nsegs; seg++) {
		if (unlikely(*seg >= nilfs_sufile_get_nsegments(sufile))) {
			printk(KERN_WARNING
			       "%s: invalid segment number: %llu\n", __func__,
			       (unsigned long long)*seg);
			nerr++;
		}
	}
	if (nerr > 0) {
		ret = -EINVAL;
		goto out_sem;
	}

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out_sem;

	seg = segnumv;
	blkoff = nilfs_sufile_get_blkoff(sufile, *seg);
	ret = nilfs_mdt_get_block(sufile, blkoff, create, NULL, &bh);
	if (ret < 0)
		goto out_header;

	for (;;) {
		dofunc(sufile, *seg, header_bh, bh);

		if (++seg >= segnumv + nsegs)
			break;
		prev_blkoff = blkoff;
		blkoff = nilfs_sufile_get_blkoff(sufile, *seg);
		if (blkoff == prev_blkoff)
			continue;	/* next entry lives in the block already held */

		/* get different block */
		brelse(bh);
		ret = nilfs_mdt_get_block(sufile, blkoff, create, NULL, &bh);
		if (unlikely(ret < 0))
			goto out_header;
	}
	brelse(bh);

 out_header:
	/* count of entries processed so far, reported through @ndone */
	n = seg - segnumv;
	brelse(header_bh);
 out_sem:
	up_write(&NILFS_MDT(sufile)->mi_sem);
 out:
	if (ndone)
		*ndone = n;
	return ret;
}
220
/*
 * nilfs_sufile_update - modify a single segment usage through @dofunc
 * @sufile: inode of segment usage file
 * @segnum: segment number to update
 * @create: whether the containing sufile block may be created
 * @dofunc: primitive operation, called with the header block buffer
 *          and the buffer of the block containing @segnum's entry
 *
 * Returns 0 on success or a negative error code (-EINVAL for an
 * out-of-range @segnum, otherwise whatever the block lookups return).
 */
int nilfs_sufile_update(struct inode *sufile, __u64 segnum, int create,
			void (*dofunc)(struct inode *, __u64,
				       struct buffer_head *,
				       struct buffer_head *))
{
	struct buffer_head *header_bh, *bh;
	int ret;

	if (unlikely(segnum >= nilfs_sufile_get_nsegments(sufile))) {
		printk(KERN_WARNING "%s: invalid segment number: %llu\n",
		       __func__, (unsigned long long)segnum);
		return -EINVAL;
	}
	down_write(&NILFS_MDT(sufile)->mi_sem);

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out_sem;

	ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, create, &bh);
	if (!ret) {
		dofunc(sufile, segnum, header_bh, bh);
		brelse(bh);
	}
	brelse(header_bh);

 out_sem:
	up_write(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}
251
252 /**
253  * nilfs_sufile_set_alloc_range - limit range of segment to be allocated
254  * @sufile: inode of segment usage file
255  * @start: minimum segment number of allocatable region (inclusive)
256  * @end: maximum segment number of allocatable region (inclusive)
257  *
258  * Return Value: On success, 0 is returned.  On error, one of the
259  * following negative error codes is returned.
260  *
261  * %-ERANGE - invalid segment region
262  */
263 int nilfs_sufile_set_alloc_range(struct inode *sufile, __u64 start, __u64 end)
264 {
265         struct nilfs_sufile_info *sui = NILFS_SUI(sufile);
266         __u64 nsegs;
267         int ret = -ERANGE;
268
269         down_write(&NILFS_MDT(sufile)->mi_sem);
270         nsegs = nilfs_sufile_get_nsegments(sufile);
271
272         if (start <= end && end < nsegs) {
273                 sui->allocmin = start;
274                 sui->allocmax = end;
275                 ret = 0;
276         }
277         up_write(&NILFS_MDT(sufile)->mi_sem);
278         return ret;
279 }
280
/**
 * nilfs_sufile_alloc - allocate a segment
 * @sufile: inode of segment usage file
 * @segnump: pointer to segment number
 *
 * Description: nilfs_sufile_alloc() allocates a clean segment.
 *
 * Return Value: On success, 0 is returned and the segment number of the
 * allocated segment is stored in the place pointed by @segnump. On error, one
 * of the following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOSPC - No clean segment left.
 */
int nilfs_sufile_alloc(struct inode *sufile, __u64 *segnump)
{
	struct buffer_head *header_bh, *su_bh;
	struct nilfs_sufile_header *header;
	struct nilfs_segment_usage *su;
	struct nilfs_sufile_info *sui = NILFS_SUI(sufile);
	size_t susz = NILFS_MDT(sufile)->mi_entry_size;
	__u64 segnum, maxsegnum, last_alloc;
	void *kaddr;
	unsigned long nsegments, ncleansegs, nsus, cnt;
	int ret, j;

	down_write(&NILFS_MDT(sufile)->mi_sem);

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out_sem;
	kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
	header = kaddr + bh_offset(header_bh);
	ncleansegs = le64_to_cpu(header->sh_ncleansegs);
	last_alloc = le64_to_cpu(header->sh_last_alloc);
	kunmap_atomic(kaddr, KM_USER0);

	/*
	 * Start scanning right after the last allocated segment; fall back
	 * to the lower bound of the allocatable range when that position
	 * lies outside [allocmin, allocmax].
	 */
	nsegments = nilfs_sufile_get_nsegments(sufile);
	maxsegnum = sui->allocmax;
	segnum = last_alloc + 1;
	if (segnum < sui->allocmin || segnum > sui->allocmax)
		segnum = sui->allocmin;

	for (cnt = 0; cnt < nsegments; cnt += nsus) {
		if (segnum > maxsegnum) {
			if (cnt < sui->allocmax - sui->allocmin + 1) {
				/*
				 * wrap around in the limited region.
				 * if allocation started from
				 * sui->allocmin, this never happens.
				 */
				segnum = sui->allocmin;
				maxsegnum = last_alloc;
			} else if (segnum > sui->allocmin &&
				   sui->allocmax + 1 < nsegments) {
				/* then search above the limited region */
				segnum = sui->allocmax + 1;
				maxsegnum = nsegments - 1;
			} else if (sui->allocmin > 0)  {
				/* finally search below the limited region */
				segnum = 0;
				maxsegnum = sui->allocmin - 1;
			} else {
				break; /* never happens */
			}
		}
		ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 1,
							   &su_bh);
		if (ret < 0)
			goto out_header;
		kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
		su = nilfs_sufile_block_get_segment_usage(
			sufile, segnum, su_bh, kaddr);

		/* walk the usage entries of this block, up to maxsegnum */
		nsus = nilfs_sufile_segment_usages_in_block(
			sufile, segnum, maxsegnum);
		for (j = 0; j < nsus; j++, su = (void *)su + susz, segnum++) {
			if (!nilfs_segment_usage_clean(su))
				continue;
			/* found a clean segment */
			nilfs_segment_usage_set_dirty(su);
			kunmap_atomic(kaddr, KM_USER0);

			/* update the on-disk counters and allocation cursor */
			kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
			header = kaddr + bh_offset(header_bh);
			le64_add_cpu(&header->sh_ncleansegs, -1);
			le64_add_cpu(&header->sh_ndirtysegs, 1);
			header->sh_last_alloc = cpu_to_le64(segnum);
			kunmap_atomic(kaddr, KM_USER0);

			/* keep the in-memory counter in sync with the header */
			sui->ncleansegs--;
			nilfs_mdt_mark_buffer_dirty(header_bh);
			nilfs_mdt_mark_buffer_dirty(su_bh);
			nilfs_mdt_mark_dirty(sufile);
			brelse(su_bh);
			*segnump = segnum;
			goto out_header;
		}

		kunmap_atomic(kaddr, KM_USER0);
		brelse(su_bh);
	}

	/* no segments left */
	ret = -ENOSPC;

 out_header:
	brelse(header_bh);

 out_sem:
	up_write(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}
395
/*
 * nilfs_sufile_do_cancel_free - dofunc: revoke the free state of a segment
 * @sufile: inode of segment usage file
 * @segnum: segment number (expected to be clean)
 * @header_bh: buffer head of the sufile header block
 * @su_bh: buffer head of the block holding @segnum's usage entry
 *
 * Marks a clean segment dirty again and moves one count from the clean
 * to the dirty counter.  If the segment is not clean, a warning is
 * printed and nothing is changed.
 */
void nilfs_sufile_do_cancel_free(struct inode *sufile, __u64 segnum,
				 struct buffer_head *header_bh,
				 struct buffer_head *su_bh)
{
	struct nilfs_segment_usage *su;
	void *kaddr;

	kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
	if (unlikely(!nilfs_segment_usage_clean(su))) {
		printk(KERN_WARNING "%s: segment %llu must be clean\n",
		       __func__, (unsigned long long)segnum);
		kunmap_atomic(kaddr, KM_USER0);
		return;
	}
	nilfs_segment_usage_set_dirty(su);
	kunmap_atomic(kaddr, KM_USER0);

	/* clean count -1, dirty count +1 */
	nilfs_sufile_mod_counter(header_bh, -1, 1);
	NILFS_SUI(sufile)->ncleansegs--;

	nilfs_mdt_mark_buffer_dirty(su_bh);
	nilfs_mdt_mark_dirty(sufile);
}
420
/*
 * nilfs_sufile_do_scrap - dofunc: turn a segment into unused garbage
 * @sufile: inode of segment usage file
 * @segnum: segment number
 * @header_bh: buffer head of the sufile header block
 * @su_bh: buffer head of the block holding @segnum's usage entry
 *
 * Resets the usage entry to "dirty with zero blocks and zero lastmod"
 * and fixes up the clean/dirty counters according to the previous
 * state.  Does nothing if the entry is already in that state.
 */
void nilfs_sufile_do_scrap(struct inode *sufile, __u64 segnum,
			   struct buffer_head *header_bh,
			   struct buffer_head *su_bh)
{
	struct nilfs_segment_usage *su;
	void *kaddr;
	int clean, dirty;

	kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
	/* already scrapped: flags == DIRTY only and no blocks recorded */
	if (su->su_flags == cpu_to_le32(1UL << NILFS_SEGMENT_USAGE_DIRTY) &&
	    su->su_nblocks == cpu_to_le32(0)) {
		kunmap_atomic(kaddr, KM_USER0);
		return;
	}
	clean = nilfs_segment_usage_clean(su);
	dirty = nilfs_segment_usage_dirty(su);

	/* make the segment garbage */
	su->su_lastmod = cpu_to_le64(0);
	su->su_nblocks = cpu_to_le32(0);
	su->su_flags = cpu_to_le32(1UL << NILFS_SEGMENT_USAGE_DIRTY);
	kunmap_atomic(kaddr, KM_USER0);

	/* was clean: clean count -1; was not dirty: dirty count +1 */
	nilfs_sufile_mod_counter(header_bh, clean ? (u64)-1 : 0, dirty ? 0 : 1);
	NILFS_SUI(sufile)->ncleansegs -= clean;

	nilfs_mdt_mark_buffer_dirty(su_bh);
	nilfs_mdt_mark_dirty(sufile);
}
451
/*
 * nilfs_sufile_do_free - dofunc: mark a segment clean (freed)
 * @sufile: inode of segment usage file
 * @segnum: segment number (expected to be dirty, not in error state)
 * @header_bh: buffer head of the sufile header block
 * @su_bh: buffer head of the block holding @segnum's usage entry
 *
 * Sets the usage entry clean and adjusts the counters.  An
 * already-clean segment only triggers a warning; error or non-dirty
 * states trigger WARN_ON but the free proceeds.
 */
void nilfs_sufile_do_free(struct inode *sufile, __u64 segnum,
			  struct buffer_head *header_bh,
			  struct buffer_head *su_bh)
{
	struct nilfs_segment_usage *su;
	void *kaddr;
	int sudirty;

	kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
	if (nilfs_segment_usage_clean(su)) {
		printk(KERN_WARNING "%s: segment %llu is already clean\n",
		       __func__, (unsigned long long)segnum);
		kunmap_atomic(kaddr, KM_USER0);
		return;
	}
	WARN_ON(nilfs_segment_usage_error(su));
	WARN_ON(!nilfs_segment_usage_dirty(su));

	sudirty = nilfs_segment_usage_dirty(su);
	nilfs_segment_usage_set_clean(su);
	kunmap_atomic(kaddr, KM_USER0);
	nilfs_mdt_mark_buffer_dirty(su_bh);

	/* clean count +1; dirty count -1 only if it was counted dirty */
	nilfs_sufile_mod_counter(header_bh, 1, sudirty ? (u64)-1 : 0);
	NILFS_SUI(sufile)->ncleansegs++;

	nilfs_mdt_mark_dirty(sufile);
}
481
482 /**
483  * nilfs_sufile_mark_dirty - mark the buffer having a segment usage dirty
484  * @sufile: inode of segment usage file
485  * @segnum: segment number
486  */
487 int nilfs_sufile_mark_dirty(struct inode *sufile, __u64 segnum)
488 {
489         struct buffer_head *bh;
490         int ret;
491
492         ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0, &bh);
493         if (!ret) {
494                 nilfs_mdt_mark_buffer_dirty(bh);
495                 nilfs_mdt_mark_dirty(sufile);
496                 brelse(bh);
497         }
498         return ret;
499 }
500
/**
 * nilfs_sufile_set_segment_usage - set usage of a segment
 * @sufile: inode of segment usage file
 * @segnum: segment number
 * @nblocks: number of live blocks in the segment
 * @modtime: modification time (option; su_lastmod is left unchanged
 *           when zero)
 *
 * Returns 0 on success or a negative error code from the block lookup.
 */
int nilfs_sufile_set_segment_usage(struct inode *sufile, __u64 segnum,
				   unsigned long nblocks, time_t modtime)
{
	struct buffer_head *bh;
	struct nilfs_segment_usage *su;
	void *kaddr;
	int ret;

	down_write(&NILFS_MDT(sufile)->mi_sem);
	ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0, &bh);
	if (ret < 0)
		goto out_sem;

	kaddr = kmap_atomic(bh->b_page, KM_USER0);
	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, bh, kaddr);
	WARN_ON(nilfs_segment_usage_error(su));
	if (modtime)
		su->su_lastmod = cpu_to_le64(modtime);
	su->su_nblocks = cpu_to_le32(nblocks);
	kunmap_atomic(kaddr, KM_USER0);

	nilfs_mdt_mark_buffer_dirty(bh);
	nilfs_mdt_mark_dirty(sufile);
	brelse(bh);

 out_sem:
	up_write(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}
537
/**
 * nilfs_sufile_get_stat - get segment usage statistics
 * @sufile: inode of segment usage file
 * @sustat: pointer to a structure of segment usage statistics
 *
 * Description: nilfs_sufile_get_stat() returns information about segment
 * usage.
 *
 * Return Value: On success, 0 is returned, and segment usage information is
 * stored in the place pointed by @sustat. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 */
int nilfs_sufile_get_stat(struct inode *sufile, struct nilfs_sustat *sustat)
{
	struct buffer_head *header_bh;
	struct nilfs_sufile_header *header;
	struct the_nilfs *nilfs = NILFS_I_NILFS(sufile);
	void *kaddr;
	int ret;

	down_read(&NILFS_MDT(sufile)->mi_sem);

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out_sem;

	kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
	header = kaddr + bh_offset(header_bh);
	sustat->ss_nsegs = nilfs_sufile_get_nsegments(sufile);
	sustat->ss_ncleansegs = le64_to_cpu(header->sh_ncleansegs);
	sustat->ss_ndirtysegs = le64_to_cpu(header->sh_ndirtysegs);
	sustat->ss_ctime = nilfs->ns_ctime;
	sustat->ss_nongc_ctime = nilfs->ns_nongc_ctime;
	/* ns_prot_seq is read under its own spinlock */
	spin_lock(&nilfs->ns_last_segment_lock);
	sustat->ss_prot_seq = nilfs->ns_prot_seq;
	spin_unlock(&nilfs->ns_last_segment_lock);
	kunmap_atomic(kaddr, KM_USER0);
	brelse(header_bh);

 out_sem:
	up_read(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}
585
/*
 * nilfs_sufile_do_set_error - dofunc: flag a segment as erroneous
 * @sufile: inode of segment usage file
 * @segnum: segment number
 * @header_bh: buffer head of the sufile header block
 * @su_bh: buffer head of the block holding @segnum's usage entry
 *
 * Sets the error flag on the usage entry.  A segment that was counted
 * clean has its clean count decremented; an entry already flagged as
 * erroneous is left untouched.
 */
void nilfs_sufile_do_set_error(struct inode *sufile, __u64 segnum,
			       struct buffer_head *header_bh,
			       struct buffer_head *su_bh)
{
	struct nilfs_segment_usage *su;
	void *kaddr;
	int suclean;

	kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
	if (nilfs_segment_usage_error(su)) {
		kunmap_atomic(kaddr, KM_USER0);
		return;
	}
	suclean = nilfs_segment_usage_clean(su);
	nilfs_segment_usage_set_error(su);
	kunmap_atomic(kaddr, KM_USER0);

	if (suclean) {
		nilfs_sufile_mod_counter(header_bh, -1, 0);
		NILFS_SUI(sufile)->ncleansegs--;
	}
	nilfs_mdt_mark_buffer_dirty(su_bh);
	nilfs_mdt_mark_dirty(sufile);
}
611
/**
 * nilfs_sufile_get_suinfo - copy segment usage information into a buffer
 * @sufile: inode of segment usage file
 * @segnum: segment number to start looking
 * @buf: array of suinfo
 * @sisz: byte size of suinfo
 * @nsi: size of suinfo array
 *
 * Description: Fills @buf with up to @nsi suinfo records starting at
 * @segnum.  Segments whose sufile block is a hole are reported as
 * zero-filled entries.  The active flag is recomputed from the
 * run-time state rather than taken from disk.
 *
 * Return Value: On success, the number of suinfo records stored in
 * @buf is returned.  On error, one of the following negative error
 * codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 */
ssize_t nilfs_sufile_get_suinfo(struct inode *sufile, __u64 segnum, void *buf,
				unsigned sisz, size_t nsi)
{
	struct buffer_head *su_bh;
	struct nilfs_segment_usage *su;
	struct nilfs_suinfo *si = buf;
	size_t susz = NILFS_MDT(sufile)->mi_entry_size;
	struct the_nilfs *nilfs = NILFS_I_NILFS(sufile);
	void *kaddr;
	unsigned long nsegs, segusages_per_block;
	ssize_t n;
	int ret, i, j;

	down_read(&NILFS_MDT(sufile)->mi_sem);

	segusages_per_block = nilfs_sufile_segment_usages_per_block(sufile);
	/* clamp the request to the number of remaining segments */
	nsegs = min_t(unsigned long,
		      nilfs_sufile_get_nsegments(sufile) - segnum,
		      nsi);
	for (i = 0; i < nsegs; i += n, segnum += n) {
		/* entries to take from the current sufile block */
		n = min_t(unsigned long,
			  segusages_per_block -
				  nilfs_sufile_get_offset(sufile, segnum),
			  nsegs - i);
		ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0,
							   &su_bh);
		if (ret < 0) {
			if (ret != -ENOENT)
				goto out;
			/* hole */
			memset(si, 0, sisz * n);
			si = (void *)si + sisz * n;
			continue;
		}

		kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
		su = nilfs_sufile_block_get_segment_usage(
			sufile, segnum, su_bh, kaddr);
		for (j = 0; j < n;
		     j++, su = (void *)su + susz, si = (void *)si + sisz) {
			si->sui_lastmod = le64_to_cpu(su->su_lastmod);
			si->sui_nblocks = le32_to_cpu(su->su_nblocks);
			/* report ACTIVE from run-time state, not from disk */
			si->sui_flags = le32_to_cpu(su->su_flags) &
				~(1UL << NILFS_SEGMENT_USAGE_ACTIVE);
			if (nilfs_segment_is_active(nilfs, segnum + j))
				si->sui_flags |=
					(1UL << NILFS_SEGMENT_USAGE_ACTIVE);
		}
		kunmap_atomic(kaddr, KM_USER0);
		brelse(su_bh);
	}
	ret = nsegs;

 out:
	up_read(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}
686
/**
 * nilfs_sufile_read - read or get sufile inode
 * @sb: super block instance
 * @susize: size of a segment usage entry
 * @raw_inode: on-disk sufile inode
 * @inodep: buffer to store the inode
 *
 * Returns 0 and stores the sufile inode in @inodep on success;
 * -ENOMEM when the inode cannot be obtained, or an error code from
 * the mdt/inode initialization steps otherwise.
 */
int nilfs_sufile_read(struct super_block *sb, size_t susize,
		      struct nilfs_inode *raw_inode, struct inode **inodep)
{
	struct inode *sufile;
	struct nilfs_sufile_info *sui;
	struct buffer_head *header_bh;
	struct nilfs_sufile_header *header;
	void *kaddr;
	int err;

	sufile = nilfs_iget_locked(sb, NULL, NILFS_SUFILE_INO);
	if (unlikely(!sufile))
		return -ENOMEM;
	/* a previously initialized inode needs no further setup */
	if (!(sufile->i_state & I_NEW))
		goto out;

	err = nilfs_mdt_init(sufile, NILFS_MDT_GFP, sizeof(*sui));
	if (err)
		goto failed;

	nilfs_mdt_set_entry_size(sufile, susize,
				 sizeof(struct nilfs_sufile_header));

	err = nilfs_read_inode_common(sufile, raw_inode);
	if (err)
		goto failed;

	err = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (err)
		goto failed;

	/* cache the on-disk clean segment count in memory */
	sui = NILFS_SUI(sufile);
	kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
	header = kaddr + bh_offset(header_bh);
	sui->ncleansegs = le64_to_cpu(header->sh_ncleansegs);
	kunmap_atomic(kaddr, KM_USER0);
	brelse(header_bh);

	/* default allocatable range: the whole device */
	sui->allocmax = nilfs_sufile_get_nsegments(sufile) - 1;
	sui->allocmin = 0;

	unlock_new_inode(sufile);
 out:
	*inodep = sufile;
	return 0;
 failed:
	iget_failed(sufile);
	return err;
}