1 /*
2  * fs/f2fs/node.c
3  *
4  * Copyright (c) 2012 Samsung Electronics Co., Ltd.
5  *             http://www.samsung.com/
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License version 2 as
9  * published by the Free Software Foundation.
10  */
11 #include <linux/fs.h>
12 #include <linux/f2fs_fs.h>
13 #include <linux/mpage.h>
14 #include <linux/backing-dev.h>
15 #include <linux/blkdev.h>
16 #include <linux/pagevec.h>
17 #include <linux/swap.h>
18
19 #include "f2fs.h"
20 #include "node.h"
21 #include "segment.h"
22 #include <trace/events/f2fs.h>
23
24 static struct kmem_cache *nat_entry_slab;
25 static struct kmem_cache *free_nid_slab;
26
27 static void clear_node_page_dirty(struct page *page)
28 {
29         struct address_space *mapping = page->mapping;
30         struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);
31         unsigned long flags;
32
33         if (PageDirty(page)) {
34                 spin_lock_irqsave(&mapping->tree_lock, flags);
35                 radix_tree_tag_clear(&mapping->page_tree,
36                                 page_index(page),
37                                 PAGECACHE_TAG_DIRTY);
38                 spin_unlock_irqrestore(&mapping->tree_lock, flags);
39
40                 clear_page_dirty_for_io(page);
41                 dec_page_count(sbi, F2FS_DIRTY_NODES);
42         }
43         ClearPageUptodate(page);
44 }
45
46 static struct page *get_current_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
47 {
48         pgoff_t index = current_nat_addr(sbi, nid);
49         return get_meta_page(sbi, index);
50 }
51
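/*
 * Return the NAT block page that should receive updates for @nid: if
 * the current NAT page is already dirty it is the target; otherwise
 * copy it into the alternate location and flip the version bit with
 * set_to_next_nat().
 */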
52 static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
53 {
54         struct page *src_page;
55         struct page *dst_page;
56         pgoff_t src_off;
57         pgoff_t dst_off;
58         void *src_addr;
59         void *dst_addr;
60         struct f2fs_nm_info *nm_i = NM_I(sbi);
61
62         src_off = current_nat_addr(sbi, nid);
63         dst_off = next_nat_addr(sbi, src_off);
64
65         /* get current nat block page with lock */
66         src_page = get_meta_page(sbi, src_off);
67
68         /* Dirty src_page means that it is already the new target NAT page. */
69         if (PageDirty(src_page))
70                 return src_page;
71
72         dst_page = grab_meta_page(sbi, dst_off);
73
74         src_addr = page_address(src_page);
75         dst_addr = page_address(dst_page);
76         memcpy(dst_addr, src_addr, PAGE_CACHE_SIZE);
77         set_page_dirty(dst_page);
78         f2fs_put_page(src_page, 1);
79
80         set_to_next_nat(nm_i, nid);
81
82         return dst_page;
83 }
84
85 /*
86  * Readahead NAT pages
87  */
88 static void ra_nat_pages(struct f2fs_sb_info *sbi, int nid)
89 {
90         struct address_space *mapping = sbi->meta_inode->i_mapping;
91         struct f2fs_nm_info *nm_i = NM_I(sbi);
92         struct blk_plug plug;
93         struct page *page;
94         pgoff_t index;
95         int i;
96
97         blk_start_plug(&plug);
98
99         for (i = 0; i < FREE_NID_PAGES; i++, nid += NAT_ENTRY_PER_BLOCK) {
100                 if (nid >= nm_i->max_nid)
101                         nid = 0;
102                 index = current_nat_addr(sbi, nid);
103
104                 page = grab_cache_page(mapping, index);
105                 if (!page)
106                         continue;
107                 if (PageUptodate(page)) {
108                         f2fs_put_page(page, 1);
109                         continue;
110                 }
111                 if (f2fs_readpage(sbi, page, index, READ))
112                         continue;
113
114                 f2fs_put_page(page, 0);
115         }
116         blk_finish_plug(&plug);
117 }
118
119 static struct nat_entry *__lookup_nat_cache(struct f2fs_nm_info *nm_i, nid_t n)
120 {
121         return radix_tree_lookup(&nm_i->nat_root, n);
122 }
123
124 static unsigned int __gang_lookup_nat_cache(struct f2fs_nm_info *nm_i,
125                 nid_t start, unsigned int nr, struct nat_entry **ep)
126 {
127         return radix_tree_gang_lookup(&nm_i->nat_root, (void **)ep, start, nr);
128 }
129
130 static void __del_from_nat_cache(struct f2fs_nm_info *nm_i, struct nat_entry *e)
131 {
132         list_del(&e->list);
133         radix_tree_delete(&nm_i->nat_root, nat_get_nid(e));
134         nm_i->nat_cnt--;
135         kmem_cache_free(nat_entry_slab, e);
136 }
137
138 int is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid)
139 {
140         struct f2fs_nm_info *nm_i = NM_I(sbi);
141         struct nat_entry *e;
142         int is_cp = 1;
143
144         read_lock(&nm_i->nat_tree_lock);
145         e = __lookup_nat_cache(nm_i, nid);
146         if (e && !e->checkpointed)
147                 is_cp = 0;
148         read_unlock(&nm_i->nat_tree_lock);
149         return is_cp;
150 }
151
152 static struct nat_entry *grab_nat_entry(struct f2fs_nm_info *nm_i, nid_t nid)
153 {
154         struct nat_entry *new;
155
156         new = kmem_cache_alloc(nat_entry_slab, GFP_ATOMIC);
157         if (!new)
158                 return NULL;
159         if (radix_tree_insert(&nm_i->nat_root, nid, new)) {
160                 kmem_cache_free(nat_entry_slab, new);
161                 return NULL;
162         }
163         memset(new, 0, sizeof(struct nat_entry));
164         nat_set_nid(new, nid);
165         list_add_tail(&new->list, &nm_i->nat_entries);
166         nm_i->nat_cnt++;
167         return new;
168 }
169
170 static void cache_nat_entry(struct f2fs_nm_info *nm_i, nid_t nid,
171                                                 struct f2fs_nat_entry *ne)
172 {
173         struct nat_entry *e;
174 retry:
175         write_lock(&nm_i->nat_tree_lock);
176         e = __lookup_nat_cache(nm_i, nid);
177         if (!e) {
178                 e = grab_nat_entry(nm_i, nid);
179                 if (!e) {
180                         write_unlock(&nm_i->nat_tree_lock);
181                         goto retry;
182                 }
183                 nat_set_blkaddr(e, le32_to_cpu(ne->block_addr));
184                 nat_set_ino(e, le32_to_cpu(ne->ino));
185                 nat_set_version(e, ne->version);
186                 e->checkpointed = true;
187         }
188         write_unlock(&nm_i->nat_tree_lock);
189 }
190
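/*
 * Update the cached NAT entry for @ni->nid to point at @new_blkaddr,
 * allocating the cache entry if it does not exist yet, and mark the
 * entry dirty so it is written back later.
 */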
191 static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
192                         block_t new_blkaddr)
193 {
194         struct f2fs_nm_info *nm_i = NM_I(sbi);
195         struct nat_entry *e;
196 retry:
197         write_lock(&nm_i->nat_tree_lock);
198         e = __lookup_nat_cache(nm_i, ni->nid);
199         if (!e) {
200                 e = grab_nat_entry(nm_i, ni->nid);
201                 if (!e) {
202                         write_unlock(&nm_i->nat_tree_lock);
203                         goto retry;
204                 }
205                 e->ni = *ni;
206                 e->checkpointed = true;
207                 f2fs_bug_on(ni->blk_addr == NEW_ADDR);
208         } else if (new_blkaddr == NEW_ADDR) {
209                 /*
210                  * when a nid is reallocated,
211                  * the previous nat entry can remain in the nat cache.
212                  * So, reinitialize it with new information.
213                  */
214                 e->ni = *ni;
215                 f2fs_bug_on(ni->blk_addr != NULL_ADDR);
216         }
217
218         if (new_blkaddr == NEW_ADDR)
219                 e->checkpointed = false;
220
221         /* sanity check */
222         f2fs_bug_on(nat_get_blkaddr(e) != ni->blk_addr);
223         f2fs_bug_on(nat_get_blkaddr(e) == NULL_ADDR &&
224                         new_blkaddr == NULL_ADDR);
225         f2fs_bug_on(nat_get_blkaddr(e) == NEW_ADDR &&
226                         new_blkaddr == NEW_ADDR);
227         f2fs_bug_on(nat_get_blkaddr(e) != NEW_ADDR &&
228                         nat_get_blkaddr(e) != NULL_ADDR &&
229                         new_blkaddr == NEW_ADDR);
230
231         /* increment the version number as the node is removed */
232         if (nat_get_blkaddr(e) != NEW_ADDR && new_blkaddr == NULL_ADDR) {
233                 unsigned char version = nat_get_version(e);
234                 nat_set_version(e, inc_node_version(version));
235         }
236
237         /* change address */
238         nat_set_blkaddr(e, new_blkaddr);
239         __set_nat_cache_dirty(nm_i, e);
240         write_unlock(&nm_i->nat_tree_lock);
241 }
242
243 int try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
244 {
245         struct f2fs_nm_info *nm_i = NM_I(sbi);
246
247         if (nm_i->nat_cnt <= NM_WOUT_THRESHOLD)
248                 return 0;
249
250         write_lock(&nm_i->nat_tree_lock);
251         while (nr_shrink && !list_empty(&nm_i->nat_entries)) {
252                 struct nat_entry *ne;
253                 ne = list_first_entry(&nm_i->nat_entries,
254                                         struct nat_entry, list);
255                 __del_from_nat_cache(nm_i, ne);
256                 nr_shrink--;
257         }
258         write_unlock(&nm_i->nat_tree_lock);
259         return nr_shrink;
260 }
261
262 /*
263  * This function always returns success.
264  */
265 void get_node_info(struct f2fs_sb_info *sbi, nid_t nid, struct node_info *ni)
266 {
267         struct f2fs_nm_info *nm_i = NM_I(sbi);
268         struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
269         struct f2fs_summary_block *sum = curseg->sum_blk;
270         nid_t start_nid = START_NID(nid);
271         struct f2fs_nat_block *nat_blk;
272         struct page *page = NULL;
273         struct f2fs_nat_entry ne;
274         struct nat_entry *e;
275         int i;
276
277         memset(&ne, 0, sizeof(struct f2fs_nat_entry));
278         ni->nid = nid;
279
280         /* Check nat cache */
281         read_lock(&nm_i->nat_tree_lock);
282         e = __lookup_nat_cache(nm_i, nid);
283         if (e) {
284                 ni->ino = nat_get_ino(e);
285                 ni->blk_addr = nat_get_blkaddr(e);
286                 ni->version = nat_get_version(e);
287         }
288         read_unlock(&nm_i->nat_tree_lock);
289         if (e)
290                 return;
291
292         /* Check current segment summary */
293         mutex_lock(&curseg->curseg_mutex);
294         i = lookup_journal_in_cursum(sum, NAT_JOURNAL, nid, 0);
295         if (i >= 0) {
296                 ne = nat_in_journal(sum, i);
297                 node_info_from_raw_nat(ni, &ne);
298         }
299         mutex_unlock(&curseg->curseg_mutex);
300         if (i >= 0)
301                 goto cache;
302
303         /* Fill node_info from nat page */
304         page = get_current_nat_page(sbi, start_nid);
305         nat_blk = (struct f2fs_nat_block *)page_address(page);
306         ne = nat_blk->entries[nid - start_nid];
307         node_info_from_raw_nat(ni, &ne);
308         f2fs_put_page(page, 1);
309 cache:
310         /* cache nat entry */
311         cache_nat_entry(NM_I(sbi), nid, &ne);
312 }
313
314 /*
315  * The maximum depth is four.
316  * Offset[0] will have the raw inode offset.
317  */
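/*
 * offset[] receives the slot index chosen at each level of the path,
 * and noffset[] receives the ordinal of each node block counted from
 * the inode, which is later used as the node footer offset.
 */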
318 static int get_node_path(struct f2fs_inode_info *fi, long block,
319                                 int offset[4], unsigned int noffset[4])
320 {
321         const long direct_index = ADDRS_PER_INODE(fi);
322         const long direct_blks = ADDRS_PER_BLOCK;
323         const long dptrs_per_blk = NIDS_PER_BLOCK;
324         const long indirect_blks = ADDRS_PER_BLOCK * NIDS_PER_BLOCK;
325         const long dindirect_blks = indirect_blks * NIDS_PER_BLOCK;
326         int n = 0;
327         int level = 0;
328
329         noffset[0] = 0;
330
331         if (block < direct_index) {
332                 offset[n] = block;
333                 goto got;
334         }
335         block -= direct_index;
336         if (block < direct_blks) {
337                 offset[n++] = NODE_DIR1_BLOCK;
338                 noffset[n] = 1;
339                 offset[n] = block;
340                 level = 1;
341                 goto got;
342         }
343         block -= direct_blks;
344         if (block < direct_blks) {
345                 offset[n++] = NODE_DIR2_BLOCK;
346                 noffset[n] = 2;
347                 offset[n] = block;
348                 level = 1;
349                 goto got;
350         }
351         block -= direct_blks;
352         if (block < indirect_blks) {
353                 offset[n++] = NODE_IND1_BLOCK;
354                 noffset[n] = 3;
355                 offset[n++] = block / direct_blks;
356                 noffset[n] = 4 + offset[n - 1];
357                 offset[n] = block % direct_blks;
358                 level = 2;
359                 goto got;
360         }
361         block -= indirect_blks;
362         if (block < indirect_blks) {
363                 offset[n++] = NODE_IND2_BLOCK;
364                 noffset[n] = 4 + dptrs_per_blk;
365                 offset[n++] = block / direct_blks;
366                 noffset[n] = 5 + dptrs_per_blk + offset[n - 1];
367                 offset[n] = block % direct_blks;
368                 level = 2;
369                 goto got;
370         }
371         block -= indirect_blks;
372         if (block < dindirect_blks) {
373                 offset[n++] = NODE_DIND_BLOCK;
374                 noffset[n] = 5 + (dptrs_per_blk * 2);
375                 offset[n++] = block / indirect_blks;
376                 noffset[n] = 6 + (dptrs_per_blk * 2) +
377                               offset[n - 1] * (dptrs_per_blk + 1);
378                 offset[n++] = (block / direct_blks) % dptrs_per_blk;
379                 noffset[n] = 7 + (dptrs_per_blk * 2) +
380                               offset[n - 2] * (dptrs_per_blk + 1) +
381                               offset[n - 1];
382                 offset[n] = block % direct_blks;
383                 level = 3;
384                 goto got;
385         } else {
386                 BUG();
387         }
388 got:
389         return level;
390 }
391
392 /*
393  * Caller should call f2fs_put_dnode(dn).
394  * Also, it should grab and release a mutex by calling mutex_lock_op() and
395  * mutex_unlock_op() only if ro is not set to RDONLY_NODE.
396  * In the case of RDONLY_NODE, we don't need to care about the mutex.
397  */
398 int get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
399 {
400         struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
401         struct page *npage[4];
402         struct page *parent;
403         int offset[4];
404         unsigned int noffset[4];
405         nid_t nids[4];
406         int level, i;
407         int err = 0;
408
409         level = get_node_path(F2FS_I(dn->inode), index, offset, noffset);
410
411         nids[0] = dn->inode->i_ino;
412         npage[0] = dn->inode_page;
413
414         if (!npage[0]) {
415                 npage[0] = get_node_page(sbi, nids[0]);
416                 if (IS_ERR(npage[0]))
417                         return PTR_ERR(npage[0]);
418         }
419         parent = npage[0];
420         if (level != 0)
421                 nids[1] = get_nid(parent, offset[0], true);
422         dn->inode_page = npage[0];
423         dn->inode_page_locked = true;
424
425         /* get indirect or direct nodes */
426         for (i = 1; i <= level; i++) {
427                 bool done = false;
428
429                 if (!nids[i] && mode == ALLOC_NODE) {
430                         /* alloc new node */
431                         if (!alloc_nid(sbi, &(nids[i]))) {
432                                 err = -ENOSPC;
433                                 goto release_pages;
434                         }
435
436                         dn->nid = nids[i];
437                         npage[i] = new_node_page(dn, noffset[i], NULL);
438                         if (IS_ERR(npage[i])) {
439                                 alloc_nid_failed(sbi, nids[i]);
440                                 err = PTR_ERR(npage[i]);
441                                 goto release_pages;
442                         }
443
444                         set_nid(parent, offset[i - 1], nids[i], i == 1);
445                         alloc_nid_done(sbi, nids[i]);
446                         done = true;
447                 } else if (mode == LOOKUP_NODE_RA && i == level && level > 1) {
448                         npage[i] = get_node_page_ra(parent, offset[i - 1]);
449                         if (IS_ERR(npage[i])) {
450                                 err = PTR_ERR(npage[i]);
451                                 goto release_pages;
452                         }
453                         done = true;
454                 }
455                 if (i == 1) {
456                         dn->inode_page_locked = false;
457                         unlock_page(parent);
458                 } else {
459                         f2fs_put_page(parent, 1);
460                 }
461
462                 if (!done) {
463                         npage[i] = get_node_page(sbi, nids[i]);
464                         if (IS_ERR(npage[i])) {
465                                 err = PTR_ERR(npage[i]);
466                                 f2fs_put_page(npage[0], 0);
467                                 goto release_out;
468                         }
469                 }
470                 if (i < level) {
471                         parent = npage[i];
472                         nids[i + 1] = get_nid(parent, offset[i], false);
473                 }
474         }
475         dn->nid = nids[level];
476         dn->ofs_in_node = offset[level];
477         dn->node_page = npage[level];
478         dn->data_blkaddr = datablock_addr(dn->node_page, dn->ofs_in_node);
479         return 0;
480
481 release_pages:
482         f2fs_put_page(parent, 1);
483         if (i > 1)
484                 f2fs_put_page(npage[0], 0);
485 release_out:
486         dn->inode_page = NULL;
487         dn->node_page = NULL;
488         return err;
489 }
490
491 static void truncate_node(struct dnode_of_data *dn)
492 {
493         struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
494         struct node_info ni;
495
496         get_node_info(sbi, dn->nid, &ni);
497         if (dn->inode->i_blocks == 0) {
498                 f2fs_bug_on(ni.blk_addr != NULL_ADDR);
499                 goto invalidate;
500         }
501         f2fs_bug_on(ni.blk_addr == NULL_ADDR);
502
503         /* Deallocate node address */
504         invalidate_blocks(sbi, ni.blk_addr);
505         dec_valid_node_count(sbi, dn->inode, 1);
506         set_node_addr(sbi, &ni, NULL_ADDR);
507
508         if (dn->nid == dn->inode->i_ino) {
509                 remove_orphan_inode(sbi, dn->nid);
510                 dec_valid_inode_count(sbi);
511         } else {
512                 sync_inode_page(dn);
513         }
514 invalidate:
515         clear_node_page_dirty(dn->node_page);
516         F2FS_SET_SB_DIRT(sbi);
517
518         f2fs_put_page(dn->node_page, 1);
519         dn->node_page = NULL;
520         trace_f2fs_truncate_node(dn->inode, dn->nid, ni.blk_addr);
521 }
522
523 static int truncate_dnode(struct dnode_of_data *dn)
524 {
525         struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
526         struct page *page;
527
528         if (dn->nid == 0)
529                 return 1;
530
531         /* get direct node */
532         page = get_node_page(sbi, dn->nid);
533         if (IS_ERR(page) && PTR_ERR(page) == -ENOENT)
534                 return 1;
535         else if (IS_ERR(page))
536                 return PTR_ERR(page);
537
538         /* Make dnode_of_data for parameter */
539         dn->node_page = page;
540         dn->ofs_in_node = 0;
541         truncate_data_blocks(dn);
542         truncate_node(dn);
543         return 1;
544 }
545
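/*
 * Recursively free the node blocks below an indirect node, starting at
 * child slot @ofs.  Returns the number of node blocks freed (including
 * the indirect node itself when @ofs is 0), or a negative error code.
 */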
546 static int truncate_nodes(struct dnode_of_data *dn, unsigned int nofs,
547                                                 int ofs, int depth)
548 {
549         struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
550         struct dnode_of_data rdn = *dn;
551         struct page *page;
552         struct f2fs_node *rn;
553         nid_t child_nid;
554         unsigned int child_nofs;
555         int freed = 0;
556         int i, ret;
557
558         if (dn->nid == 0)
559                 return NIDS_PER_BLOCK + 1;
560
561         trace_f2fs_truncate_nodes_enter(dn->inode, dn->nid, dn->data_blkaddr);
562
563         page = get_node_page(sbi, dn->nid);
564         if (IS_ERR(page)) {
565                 trace_f2fs_truncate_nodes_exit(dn->inode, PTR_ERR(page));
566                 return PTR_ERR(page);
567         }
568
569         rn = F2FS_NODE(page);
570         if (depth < 3) {
571                 for (i = ofs; i < NIDS_PER_BLOCK; i++, freed++) {
572                         child_nid = le32_to_cpu(rn->in.nid[i]);
573                         if (child_nid == 0)
574                                 continue;
575                         rdn.nid = child_nid;
576                         ret = truncate_dnode(&rdn);
577                         if (ret < 0)
578                                 goto out_err;
579                         set_nid(page, i, 0, false);
580                 }
581         } else {
582                 child_nofs = nofs + ofs * (NIDS_PER_BLOCK + 1) + 1;
583                 for (i = ofs; i < NIDS_PER_BLOCK; i++) {
584                         child_nid = le32_to_cpu(rn->in.nid[i]);
585                         if (child_nid == 0) {
586                                 child_nofs += NIDS_PER_BLOCK + 1;
587                                 continue;
588                         }
589                         rdn.nid = child_nid;
590                         ret = truncate_nodes(&rdn, child_nofs, 0, depth - 1);
591                         if (ret == (NIDS_PER_BLOCK + 1)) {
592                                 set_nid(page, i, 0, false);
593                                 child_nofs += ret;
594                         } else if (ret < 0 && ret != -ENOENT) {
595                                 goto out_err;
596                         }
597                 }
598                 freed = child_nofs;
599         }
600
601         if (!ofs) {
602                 /* remove current indirect node */
603                 dn->node_page = page;
604                 truncate_node(dn);
605                 freed++;
606         } else {
607                 f2fs_put_page(page, 1);
608         }
609         trace_f2fs_truncate_nodes_exit(dn->inode, freed);
610         return freed;
611
612 out_err:
613         f2fs_put_page(page, 1);
614         trace_f2fs_truncate_nodes_exit(dn->inode, ret);
615         return ret;
616 }
617
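/*
 * Free the direct nodes hanging off the last indirect node on the
 * truncation path, starting at offset[depth - 1], and free that
 * indirect node as well when the truncation starts at its first slot.
 */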
618 static int truncate_partial_nodes(struct dnode_of_data *dn,
619                         struct f2fs_inode *ri, int *offset, int depth)
620 {
621         struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
622         struct page *pages[2];
623         nid_t nid[3];
624         nid_t child_nid;
625         int err = 0;
626         int i;
627         int idx = depth - 2;
628
629         nid[0] = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]);
630         if (!nid[0])
631                 return 0;
632
633         /* get indirect nodes in the path */
634         for (i = 0; i < depth - 1; i++) {
635                 /* reference count will be increased */
636                 pages[i] = get_node_page(sbi, nid[i]);
637                 if (IS_ERR(pages[i])) {
638                         depth = i + 1;
639                         err = PTR_ERR(pages[i]);
640                         goto fail;
641                 }
642                 nid[i + 1] = get_nid(pages[i], offset[i + 1], false);
643         }
644
645         /* free direct nodes linked to a partial indirect node */
646         for (i = offset[depth - 1]; i < NIDS_PER_BLOCK; i++) {
647                 child_nid = get_nid(pages[idx], i, false);
648                 if (!child_nid)
649                         continue;
650                 dn->nid = child_nid;
651                 err = truncate_dnode(dn);
652                 if (err < 0)
653                         goto fail;
654                 set_nid(pages[idx], i, 0, false);
655         }
656
657         if (offset[depth - 1] == 0) {
658                 dn->node_page = pages[idx];
659                 dn->nid = nid[idx];
660                 truncate_node(dn);
661         } else {
662                 f2fs_put_page(pages[idx], 1);
663         }
664         offset[idx]++;
665         offset[depth - 1] = 0;
666 fail:
667         for (i = depth - 3; i >= 0; i--)
668                 f2fs_put_page(pages[i], 1);
669
670         trace_f2fs_truncate_partial_nodes(dn->inode, nid, depth, err);
671
672         return err;
673 }
674
675 /*
676  * All the block addresses of data and nodes should be nullified.
677  */
678 int truncate_inode_blocks(struct inode *inode, pgoff_t from)
679 {
680         struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
681         struct address_space *node_mapping = sbi->node_inode->i_mapping;
682         int err = 0, cont = 1;
683         int level, offset[4], noffset[4];
684         unsigned int nofs = 0;
685         struct f2fs_node *rn;
686         struct dnode_of_data dn;
687         struct page *page;
688
689         trace_f2fs_truncate_inode_blocks_enter(inode, from);
690
691         level = get_node_path(F2FS_I(inode), from, offset, noffset);
692 restart:
693         page = get_node_page(sbi, inode->i_ino);
694         if (IS_ERR(page)) {
695                 trace_f2fs_truncate_inode_blocks_exit(inode, PTR_ERR(page));
696                 return PTR_ERR(page);
697         }
698
699         set_new_dnode(&dn, inode, page, NULL, 0);
700         unlock_page(page);
701
702         rn = F2FS_NODE(page);
703         switch (level) {
704         case 0:
705         case 1:
706                 nofs = noffset[1];
707                 break;
708         case 2:
709                 nofs = noffset[1];
710                 if (!offset[level - 1])
711                         goto skip_partial;
712                 err = truncate_partial_nodes(&dn, &rn->i, offset, level);
713                 if (err < 0 && err != -ENOENT)
714                         goto fail;
715                 nofs += 1 + NIDS_PER_BLOCK;
716                 break;
717         case 3:
718                 nofs = 5 + 2 * NIDS_PER_BLOCK;
719                 if (!offset[level - 1])
720                         goto skip_partial;
721                 err = truncate_partial_nodes(&dn, &rn->i, offset, level);
722                 if (err < 0 && err != -ENOENT)
723                         goto fail;
724                 break;
725         default:
726                 BUG();
727         }
728
729 skip_partial:
730         while (cont) {
731                 dn.nid = le32_to_cpu(rn->i.i_nid[offset[0] - NODE_DIR1_BLOCK]);
732                 switch (offset[0]) {
733                 case NODE_DIR1_BLOCK:
734                 case NODE_DIR2_BLOCK:
735                         err = truncate_dnode(&dn);
736                         break;
737
738                 case NODE_IND1_BLOCK:
739                 case NODE_IND2_BLOCK:
740                         err = truncate_nodes(&dn, nofs, offset[1], 2);
741                         break;
742
743                 case NODE_DIND_BLOCK:
744                         err = truncate_nodes(&dn, nofs, offset[1], 3);
745                         cont = 0;
746                         break;
747
748                 default:
749                         BUG();
750                 }
751                 if (err < 0 && err != -ENOENT)
752                         goto fail;
753                 if (offset[1] == 0 &&
754                                 rn->i.i_nid[offset[0] - NODE_DIR1_BLOCK]) {
755                         lock_page(page);
756                         if (page->mapping != node_mapping) {
757                                 f2fs_put_page(page, 1);
758                                 goto restart;
759                         }
760                         wait_on_page_writeback(page);
761                         rn->i.i_nid[offset[0] - NODE_DIR1_BLOCK] = 0;
762                         set_page_dirty(page);
763                         unlock_page(page);
764                 }
765                 offset[1] = 0;
766                 offset[0]++;
767                 nofs += err;
768         }
769 fail:
770         f2fs_put_page(page, 0);
771         trace_f2fs_truncate_inode_blocks_exit(inode, err);
772         return err > 0 ? 0 : err;
773 }
774
775 int truncate_xattr_node(struct inode *inode, struct page *page)
776 {
777         struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
778         nid_t nid = F2FS_I(inode)->i_xattr_nid;
779         struct dnode_of_data dn;
780         struct page *npage;
781
782         if (!nid)
783                 return 0;
784
785         npage = get_node_page(sbi, nid);
786         if (IS_ERR(npage))
787                 return PTR_ERR(npage);
788
789         F2FS_I(inode)->i_xattr_nid = 0;
790
791         /* need to do checkpoint during fsync */
792         F2FS_I(inode)->xattr_ver = cur_cp_version(F2FS_CKPT(sbi));
793
794         set_new_dnode(&dn, inode, page, npage, nid);
795
796         if (page)
797                 dn.inode_page_locked = 1;
798         truncate_node(&dn);
799         return 0;
800 }
801
802 /*
803  * Caller should grab and release a mutex by calling mutex_lock_op() and
804  * mutex_unlock_op().
805  */
806 int remove_inode_page(struct inode *inode)
807 {
808         struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
809         struct page *page;
810         nid_t ino = inode->i_ino;
811         struct dnode_of_data dn;
812         int err;
813
814         page = get_node_page(sbi, ino);
815         if (IS_ERR(page))
816                 return PTR_ERR(page);
817
818         err = truncate_xattr_node(inode, page);
819         if (err) {
820                 f2fs_put_page(page, 1);
821                 return err;
822         }
823
824         /* 0 is possible after f2fs_new_inode() has failed */
825         f2fs_bug_on(inode->i_blocks != 0 && inode->i_blocks != 1);
826         set_new_dnode(&dn, inode, page, page, ino);
827         truncate_node(&dn);
828         return 0;
829 }
830
831 struct page *new_inode_page(struct inode *inode, const struct qstr *name)
832 {
833         struct dnode_of_data dn;
834
835         /* allocate inode page for new inode */
836         set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);
837
838         /* caller should f2fs_put_page(page, 1); */
839         return new_node_page(&dn, 0, NULL);
840 }
841
842 struct page *new_node_page(struct dnode_of_data *dn,
843                                 unsigned int ofs, struct page *ipage)
844 {
845         struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
846         struct address_space *mapping = sbi->node_inode->i_mapping;
847         struct node_info old_ni, new_ni;
848         struct page *page;
849         int err;
850
851         if (is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC))
852                 return ERR_PTR(-EPERM);
853
854         page = grab_cache_page(mapping, dn->nid);
855         if (!page)
856                 return ERR_PTR(-ENOMEM);
857
858         if (!inc_valid_node_count(sbi, dn->inode, 1)) {
859                 err = -ENOSPC;
860                 goto fail;
861         }
862
863         get_node_info(sbi, dn->nid, &old_ni);
864
865         /* Reinitialize old_ni with new node page */
866         f2fs_bug_on(old_ni.blk_addr != NULL_ADDR);
867         new_ni = old_ni;
868         new_ni.ino = dn->inode->i_ino;
869         set_node_addr(sbi, &new_ni, NEW_ADDR);
870
871         fill_node_footer(page, dn->nid, dn->inode->i_ino, ofs, true);
872         set_cold_node(dn->inode, page);
873         SetPageUptodate(page);
874         set_page_dirty(page);
875
876         if (ofs == XATTR_NODE_OFFSET)
877                 F2FS_I(dn->inode)->i_xattr_nid = dn->nid;
878
879         dn->node_page = page;
880         if (ipage)
881                 update_inode(dn->inode, ipage);
882         else
883                 sync_inode_page(dn);
884         if (ofs == 0)
885                 inc_valid_inode_count(sbi);
886
887         return page;
888
889 fail:
890         clear_node_page_dirty(page);
891         f2fs_put_page(page, 1);
892         return ERR_PTR(err);
893 }
894
895 /*
896  * The caller should act according to the returned value:
897  * 0: f2fs_put_page(page, 0)
898  * LOCKED_PAGE: f2fs_put_page(page, 1)
899  * error: nothing
900  */
901 static int read_node_page(struct page *page, int type)
902 {
903         struct f2fs_sb_info *sbi = F2FS_SB(page->mapping->host->i_sb);
904         struct node_info ni;
905
906         get_node_info(sbi, page->index, &ni);
907
908         if (ni.blk_addr == NULL_ADDR) {
909                 f2fs_put_page(page, 1);
910                 return -ENOENT;
911         }
912
913         if (PageUptodate(page))
914                 return LOCKED_PAGE;
915
916         return f2fs_readpage(sbi, page, ni.blk_addr, type);
917 }
918
919 /*
920  * Readahead a node page
921  */
922 void ra_node_page(struct f2fs_sb_info *sbi, nid_t nid)
923 {
924         struct address_space *mapping = sbi->node_inode->i_mapping;
925         struct page *apage;
926         int err;
927
928         apage = find_get_page(mapping, nid);
929         if (apage && PageUptodate(apage)) {
930                 f2fs_put_page(apage, 0);
931                 return;
932         }
933         f2fs_put_page(apage, 0);
934
935         apage = grab_cache_page(mapping, nid);
936         if (!apage)
937                 return;
938
939         err = read_node_page(apage, READA);
940         if (err == 0)
941                 f2fs_put_page(apage, 0);
942         else if (err == LOCKED_PAGE)
943                 f2fs_put_page(apage, 1);
944 }
945
946 struct page *get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid)
947 {
948         struct address_space *mapping = sbi->node_inode->i_mapping;
949         struct page *page;
950         int err;
951 repeat:
952         page = grab_cache_page(mapping, nid);
953         if (!page)
954                 return ERR_PTR(-ENOMEM);
955
956         err = read_node_page(page, READ_SYNC);
957         if (err < 0)
958                 return ERR_PTR(err);
959         else if (err == LOCKED_PAGE)
960                 goto got_it;
961
962         lock_page(page);
963         if (!PageUptodate(page)) {
964                 f2fs_put_page(page, 1);
965                 return ERR_PTR(-EIO);
966         }
967         if (page->mapping != mapping) {
968                 f2fs_put_page(page, 1);
969                 goto repeat;
970         }
971 got_it:
972         f2fs_bug_on(nid != nid_of_node(page));
973         mark_page_accessed(page);
974         return page;
975 }
976
977 /*
978  * Return a locked page for the desired node page.
979  * Also, readahead up to MAX_RA_NODE sibling node pages.
980  */
981 struct page *get_node_page_ra(struct page *parent, int start)
982 {
983         struct f2fs_sb_info *sbi = F2FS_SB(parent->mapping->host->i_sb);
984         struct address_space *mapping = sbi->node_inode->i_mapping;
985         struct blk_plug plug;
986         struct page *page;
987         int err, i, end;
988         nid_t nid;
989
990         /* First, try getting the desired direct node. */
991         nid = get_nid(parent, start, false);
992         if (!nid)
993                 return ERR_PTR(-ENOENT);
994 repeat:
995         page = grab_cache_page(mapping, nid);
996         if (!page)
997                 return ERR_PTR(-ENOMEM);
998
999         err = read_node_page(page, READ_SYNC);
1000         if (err < 0)
1001                 return ERR_PTR(err);
1002         else if (err == LOCKED_PAGE)
1003                 goto page_hit;
1004
1005         blk_start_plug(&plug);
1006
1007         /* Then, try readahead for siblings of the desired node */
1008         end = start + MAX_RA_NODE;
1009         end = min(end, NIDS_PER_BLOCK);
1010         for (i = start + 1; i < end; i++) {
1011                 nid = get_nid(parent, i, false);
1012                 if (!nid)
1013                         continue;
1014                 ra_node_page(sbi, nid);
1015         }
1016
1017         blk_finish_plug(&plug);
1018
1019         lock_page(page);
1020         if (page->mapping != mapping) {
1021                 f2fs_put_page(page, 1);
1022                 goto repeat;
1023         }
1024 page_hit:
1025         if (!PageUptodate(page)) {
1026                 f2fs_put_page(page, 1);
1027                 return ERR_PTR(-EIO);
1028         }
1029         mark_page_accessed(page);
1030         return page;
1031 }
1032
1033 void sync_inode_page(struct dnode_of_data *dn)
1034 {
1035         if (IS_INODE(dn->node_page) || dn->inode_page == dn->node_page) {
1036                 update_inode(dn->inode, dn->node_page);
1037         } else if (dn->inode_page) {
1038                 if (!dn->inode_page_locked)
1039                         lock_page(dn->inode_page);
1040                 update_inode(dn->inode, dn->inode_page);
1041                 if (!dn->inode_page_locked)
1042                         unlock_page(dn->inode_page);
1043         } else {
1044                 update_inode_page(dn->inode);
1045         }
1046 }
1047
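/*
 * Write back dirty node pages.  Without @ino, everything is flushed in
 * three passes: indirect nodes, then dentry dnodes, then file dnodes.
 * With @ino (the fsync path), pages of other inodes are skipped and the
 * written dnode pages get fsync/dentry marks.
 */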
1048 int sync_node_pages(struct f2fs_sb_info *sbi, nid_t ino,
1049                                         struct writeback_control *wbc)
1050 {
1051         struct address_space *mapping = sbi->node_inode->i_mapping;
1052         pgoff_t index, end;
1053         struct pagevec pvec;
1054         int step = ino ? 2 : 0;
1055         int nwritten = 0, wrote = 0;
1056
1057         pagevec_init(&pvec, 0);
1058
1059 next_step:
1060         index = 0;
1061         end = LONG_MAX;
1062
1063         while (index <= end) {
1064                 int i, nr_pages;
1065                 nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
1066                                 PAGECACHE_TAG_DIRTY,
1067                                 min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
1068                 if (nr_pages == 0)
1069                         break;
1070
1071                 for (i = 0; i < nr_pages; i++) {
1072                         struct page *page = pvec.pages[i];
1073
1074                         /*
1075                          * flushing sequence with step:
1076                          * 0. indirect nodes
1077                          * 1. dentry dnodes
1078                          * 2. file dnodes
1079                          */
1080                         if (step == 0 && IS_DNODE(page))
1081                                 continue;
1082                         if (step == 1 && (!IS_DNODE(page) ||
1083                                                 is_cold_node(page)))
1084                                 continue;
1085                         if (step == 2 && (!IS_DNODE(page) ||
1086                                                 !is_cold_node(page)))
1087                                 continue;
1088
1089                         /*
1090                          * In fsync mode,
1091                          * we should not skip writing node pages.
1092                          */
1093                         if (ino && ino_of_node(page) == ino)
1094                                 lock_page(page);
1095                         else if (!trylock_page(page))
1096                                 continue;
1097
1098                         if (unlikely(page->mapping != mapping)) {
1099 continue_unlock:
1100                                 unlock_page(page);
1101                                 continue;
1102                         }
1103                         if (ino && ino_of_node(page) != ino)
1104                                 goto continue_unlock;
1105
1106                         if (!PageDirty(page)) {
1107                                 /* someone wrote it for us */
1108                                 goto continue_unlock;
1109                         }
1110
1111                         if (!clear_page_dirty_for_io(page))
1112                                 goto continue_unlock;
1113
1114                         /* called by fsync() */
1115                         if (ino && IS_DNODE(page)) {
1116                                 int mark = !is_checkpointed_node(sbi, ino);
1117                                 set_fsync_mark(page, 1);
1118                                 if (IS_INODE(page))
1119                                         set_dentry_mark(page, mark);
1120                                 nwritten++;
1121                         } else {
1122                                 set_fsync_mark(page, 0);
1123                                 set_dentry_mark(page, 0);
1124                         }
1125                         mapping->a_ops->writepage(page, wbc);
1126                         wrote++;
1127
1128                         if (--wbc->nr_to_write == 0)
1129                                 break;
1130                 }
1131                 pagevec_release(&pvec);
1132                 cond_resched();
1133
1134                 if (wbc->nr_to_write == 0) {
1135                         step = 2;
1136                         break;
1137                 }
1138         }
1139
1140         if (step < 2) {
1141                 step++;
1142                 goto next_step;
1143         }
1144
1145         if (wrote)
1146                 f2fs_submit_bio(sbi, NODE, wbc->sync_mode == WB_SYNC_ALL);
1147
1148         return nwritten;
1149 }
1150
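/*
 * Wait until writeback completes on @ino's node pages and return any
 * I/O or ENOSPC error recorded on the node mapping.
 */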
1151 int wait_on_node_pages_writeback(struct f2fs_sb_info *sbi, nid_t ino)
1152 {
1153         struct address_space *mapping = sbi->node_inode->i_mapping;
1154         pgoff_t index = 0, end = LONG_MAX;
1155         struct pagevec pvec;
1156         int nr_pages;
1157         int ret2 = 0, ret = 0;
1158
1159         pagevec_init(&pvec, 0);
1160         while ((index <= end) &&
1161                         (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
1162                         PAGECACHE_TAG_WRITEBACK,
1163                         min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1)) != 0) {
1164                 unsigned i;
1165
1166                 for (i = 0; i < nr_pages; i++) {
1167                         struct page *page = pvec.pages[i];
1168
1169                         /* until radix tree lookup accepts end_index */
1170                         if (page->index > end)
1171                                 continue;
1172
1173                         if (ino && ino_of_node(page) == ino) {
1174                                 wait_on_page_writeback(page);
1175                                 if (TestClearPageError(page))
1176                                         ret = -EIO;
1177                         }
1178                 }
1179                 pagevec_release(&pvec);
1180                 cond_resched();
1181         }
1182
1183         if (test_and_clear_bit(AS_ENOSPC, &mapping->flags))
1184                 ret2 = -ENOSPC;
1185         if (test_and_clear_bit(AS_EIO, &mapping->flags))
1186                 ret2 = -EIO;
1187         if (!ret)
1188                 ret = ret2;
1189         return ret;
1190 }
1191
1192 static int f2fs_write_node_page(struct page *page,
1193                                 struct writeback_control *wbc)
1194 {
1195         struct f2fs_sb_info *sbi = F2FS_SB(page->mapping->host->i_sb);
1196         nid_t nid;
1197         block_t new_addr;
1198         struct node_info ni;
1199
1200         if (sbi->por_doing)
1201                 goto redirty_out;
1202
1203         wait_on_page_writeback(page);
1204
1205         /* get old block addr of this node page */
1206         nid = nid_of_node(page);
1207         f2fs_bug_on(page->index != nid);
1208
1209         get_node_info(sbi, nid, &ni);
1210
1211         /* This page is already truncated */
1212         if (ni.blk_addr == NULL_ADDR) {
1213                 dec_page_count(sbi, F2FS_DIRTY_NODES);
1214                 unlock_page(page);
1215                 return 0;
1216         }
1217
1218         if (wbc->for_reclaim)
1219                 goto redirty_out;
1220
1221         mutex_lock(&sbi->node_write);
1222         set_page_writeback(page);
1223         write_node_page(sbi, page, nid, ni.blk_addr, &new_addr);
1224         set_node_addr(sbi, &ni, new_addr);
1225         dec_page_count(sbi, F2FS_DIRTY_NODES);
1226         mutex_unlock(&sbi->node_write);
1227         unlock_page(page);
1228         return 0;
1229
1230 redirty_out:
1231         dec_page_count(sbi, F2FS_DIRTY_NODES);
1232         wbc->pages_skipped++;
1233         set_page_dirty(page);
1234         return AOP_WRITEPAGE_ACTIVATE;
1235 }
1236
1237 /*
1238  * It is very important to gather dirty pages and write at once, so that we can
1239  * submit a big bio without interfering with other data writes.
1240  * By default, 512 pages (2MB) * 3 node types is a reasonable amount.
1241  */
1242 #define COLLECT_DIRTY_NODES     1536
1243 static int f2fs_write_node_pages(struct address_space *mapping,
1244                             struct writeback_control *wbc)
1245 {
1246         struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);
1247         long nr_to_write = wbc->nr_to_write;
1248
1249         /* balancing f2fs's metadata in background */
1250         f2fs_balance_fs_bg(sbi);
1251
1252         /* collect a number of dirty node pages and write them together */
1253         if (get_pages(sbi, F2FS_DIRTY_NODES) < COLLECT_DIRTY_NODES)
1254                 return 0;
1255
1256         /* if mounting failed, skip writing node pages */
1257         wbc->nr_to_write = 3 * max_hw_blocks(sbi);
1258         sync_node_pages(sbi, 0, wbc);
1259         wbc->nr_to_write = nr_to_write - (3 * max_hw_blocks(sbi) -
1260                                                 wbc->nr_to_write);
1261         return 0;
1262 }
1263
1264 static int f2fs_set_node_page_dirty(struct page *page)
1265 {
1266         struct address_space *mapping = page->mapping;
1267         struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);
1268
1269         trace_f2fs_set_page_dirty(page, NODE);
1270
1271         SetPageUptodate(page);
1272         if (!PageDirty(page)) {
1273                 __set_page_dirty_nobuffers(page);
1274                 inc_page_count(sbi, F2FS_DIRTY_NODES);
1275                 SetPagePrivate(page);
1276                 return 1;
1277         }
1278         return 0;
1279 }
1280
1281 static void f2fs_invalidate_node_page(struct page *page, unsigned int offset,
1282                                       unsigned int length)
1283 {
1284         struct inode *inode = page->mapping->host;
1285         struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
1286         if (PageDirty(page))
1287                 dec_page_count(sbi, F2FS_DIRTY_NODES);
1288         ClearPagePrivate(page);
1289 }
1290
1291 static int f2fs_release_node_page(struct page *page, gfp_t wait)
1292 {
1293         ClearPagePrivate(page);
1294         return 1;
1295 }
1296
1297 /*
1298  * Structure of the f2fs node operations
1299  */
1300 const struct address_space_operations f2fs_node_aops = {
1301         .writepage      = f2fs_write_node_page,
1302         .writepages     = f2fs_write_node_pages,
1303         .set_page_dirty = f2fs_set_node_page_dirty,
1304         .invalidatepage = f2fs_invalidate_node_page,
1305         .releasepage    = f2fs_release_node_page,
1306 };
1307
1308 static struct free_nid *__lookup_free_nid_list(nid_t n, struct list_head *head)
1309 {
1310         struct list_head *this;
1311         struct free_nid *i;
1312         list_for_each(this, head) {
1313                 i = list_entry(this, struct free_nid, list);
1314                 if (i->nid == n)
1315                         return i;
1316         }
1317         return NULL;
1318 }
1319
1320 static void __del_from_free_nid_list(struct free_nid *i)
1321 {
1322         list_del(&i->list);
1323         kmem_cache_free(free_nid_slab, i);
1324 }
1325
1326 static int add_free_nid(struct f2fs_nm_info *nm_i, nid_t nid, bool build)
1327 {
1328         struct free_nid *i;
1329         struct nat_entry *ne;
1330         bool allocated = false;
1331
1332         if (nm_i->fcnt > 2 * MAX_FREE_NIDS)
1333                 return -1;
1334
1335         /* 0 nid should not be used */
1336         if (nid == 0)
1337                 return 0;
1338
1339         if (build) {
1340                 /* do not add allocated nids */
1341                 read_lock(&nm_i->nat_tree_lock);
1342                 ne = __lookup_nat_cache(nm_i, nid);
1343                 if (ne && nat_get_blkaddr(ne) != NULL_ADDR)
1344                         allocated = true;
1345                 read_unlock(&nm_i->nat_tree_lock);
1346                 if (allocated)
1347                         return 0;
1348         }
1349
1350         i = f2fs_kmem_cache_alloc(free_nid_slab, GFP_NOFS);
1351         i->nid = nid;
1352         i->state = NID_NEW;
1353
1354         spin_lock(&nm_i->free_nid_list_lock);
1355         if (__lookup_free_nid_list(nid, &nm_i->free_nid_list)) {
1356                 spin_unlock(&nm_i->free_nid_list_lock);
1357                 kmem_cache_free(free_nid_slab, i);
1358                 return 0;
1359         }
1360         list_add_tail(&i->list, &nm_i->free_nid_list);
1361         nm_i->fcnt++;
1362         spin_unlock(&nm_i->free_nid_list_lock);
1363         return 1;
1364 }
1365
1366 static void remove_free_nid(struct f2fs_nm_info *nm_i, nid_t nid)
1367 {
1368         struct free_nid *i;
1369         spin_lock(&nm_i->free_nid_list_lock);
1370         i = __lookup_free_nid_list(nid, &nm_i->free_nid_list);
1371         if (i && i->state == NID_NEW) {
1372                 __del_from_free_nid_list(i);
1373                 nm_i->fcnt--;
1374         }
1375         spin_unlock(&nm_i->free_nid_list_lock);
1376 }
1377
1378 static void scan_nat_page(struct f2fs_nm_info *nm_i,
1379                         struct page *nat_page, nid_t start_nid)
1380 {
1381         struct f2fs_nat_block *nat_blk = page_address(nat_page);
1382         block_t blk_addr;
1383         int i;
1384
1385         i = start_nid % NAT_ENTRY_PER_BLOCK;
1386
1387         for (; i < NAT_ENTRY_PER_BLOCK; i++, start_nid++) {
1388
1389                 if (start_nid >= nm_i->max_nid)
1390                         break;
1391
1392                 blk_addr = le32_to_cpu(nat_blk->entries[i].block_addr);
1393                 f2fs_bug_on(blk_addr == NEW_ADDR);
1394                 if (blk_addr == NULL_ADDR) {
1395                         if (add_free_nid(nm_i, start_nid, true) < 0)
1396                                 break;
1397                 }
1398         }
1399 }
1400
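/*
 * Refill the free nid list by scanning FREE_NID_PAGES worth of NAT
 * blocks starting at next_scan_nid, and by checking the NAT entries
 * currently held in the hot data summary journal.
 */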
1401 static void build_free_nids(struct f2fs_sb_info *sbi)
1402 {
1403         struct f2fs_nm_info *nm_i = NM_I(sbi);
1404         struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
1405         struct f2fs_summary_block *sum = curseg->sum_blk;
1406         int i = 0;
1407         nid_t nid = nm_i->next_scan_nid;
1408
1409         /* Enough entries */
1410         if (nm_i->fcnt > NAT_ENTRY_PER_BLOCK)
1411                 return;
1412
1413         /* readahead nat pages to be scanned */
1414         ra_nat_pages(sbi, nid);
1415
1416         while (1) {
1417                 struct page *page = get_current_nat_page(sbi, nid);
1418
1419                 scan_nat_page(nm_i, page, nid);
1420                 f2fs_put_page(page, 1);
1421
1422                 nid += (NAT_ENTRY_PER_BLOCK - (nid % NAT_ENTRY_PER_BLOCK));
1423                 if (nid >= nm_i->max_nid)
1424                         nid = 0;
1425
1426                 if (i++ == FREE_NID_PAGES)
1427                         break;
1428         }
1429
1430         /* go to the next free nat pages in order to find more free nids */
1431         nm_i->next_scan_nid = nid;
1432
1433         /* find free nids from the current summary page */
1434         mutex_lock(&curseg->curseg_mutex);
1435         for (i = 0; i < nats_in_cursum(sum); i++) {
1436                 block_t addr = le32_to_cpu(nat_in_journal(sum, i).block_addr);
1437                 nid = le32_to_cpu(nid_in_journal(sum, i));
1438                 if (addr == NULL_ADDR)
1439                         add_free_nid(nm_i, nid, true);
1440                 else
1441                         remove_free_nid(nm_i, nid);
1442         }
1443         mutex_unlock(&curseg->curseg_mutex);
1444 }
1445
1446 /*
1447  * If this function returns success, caller can obtain a new nid
1448  * from the second parameter of this function.
1449  * The returned nid can be used as an ino as well as a nid when an inode is created.
1450  */
1451 bool alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid)
1452 {
1453         struct f2fs_nm_info *nm_i = NM_I(sbi);
1454         struct free_nid *i = NULL;
1455         struct list_head *this;
1456 retry:
1457         if (sbi->total_valid_node_count + 1 >= nm_i->max_nid)
1458                 return false;
1459
1460         spin_lock(&nm_i->free_nid_list_lock);
1461
1462         /* We should not use stale free nids created by build_free_nids */
1463         if (nm_i->fcnt && !sbi->on_build_free_nids) {
1464                 f2fs_bug_on(list_empty(&nm_i->free_nid_list));
1465                 list_for_each(this, &nm_i->free_nid_list) {
1466                         i = list_entry(this, struct free_nid, list);
1467                         if (i->state == NID_NEW)
1468                                 break;
1469                 }
1470
1471                 f2fs_bug_on(i->state != NID_NEW);
1472                 *nid = i->nid;
1473                 i->state = NID_ALLOC;
1474                 nm_i->fcnt--;
1475                 spin_unlock(&nm_i->free_nid_list_lock);
1476                 return true;
1477         }
1478         spin_unlock(&nm_i->free_nid_list_lock);
1479
1480         /* Let's scan the nat pages and their caches to get free nids */
1481         mutex_lock(&nm_i->build_lock);
1482         sbi->on_build_free_nids = true;
1483         build_free_nids(sbi);
1484         sbi->on_build_free_nids = false;
1485         mutex_unlock(&nm_i->build_lock);
1486         goto retry;
1487 }
1488
1489 /*
1490  * alloc_nid() should be called prior to this function.
1491  */
1492 void alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid)
1493 {
1494         struct f2fs_nm_info *nm_i = NM_I(sbi);
1495         struct free_nid *i;
1496
1497         spin_lock(&nm_i->free_nid_list_lock);
1498         i = __lookup_free_nid_list(nid, &nm_i->free_nid_list);
1499         f2fs_bug_on(!i || i->state != NID_ALLOC);
1500         __del_from_free_nid_list(i);
1501         spin_unlock(&nm_i->free_nid_list_lock);
1502 }
1503
1504 /*
1505  * alloc_nid() should be called prior to this function.
1506  */
1507 void alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid)
1508 {
1509         struct f2fs_nm_info *nm_i = NM_I(sbi);
1510         struct free_nid *i;
1511
1512         if (!nid)
1513                 return;
1514
1515         spin_lock(&nm_i->free_nid_list_lock);
1516         i = __lookup_free_nid_list(nid, &nm_i->free_nid_list);
1517         f2fs_bug_on(!i || i->state != NID_ALLOC);
1518         if (nm_i->fcnt > 2 * MAX_FREE_NIDS) {
1519                 __del_from_free_nid_list(i);
1520         } else {
1521                 i->state = NID_NEW;
1522                 nm_i->fcnt++;
1523         }
1524         spin_unlock(&nm_i->free_nid_list_lock);
1525 }
1526
1527 void recover_node_page(struct f2fs_sb_info *sbi, struct page *page,
1528                 struct f2fs_summary *sum, struct node_info *ni,
1529                 block_t new_blkaddr)
1530 {
1531         rewrite_node_page(sbi, page, sum, ni->blk_addr, new_blkaddr);
1532         set_node_addr(sbi, ni, new_blkaddr);
1533         clear_node_page_dirty(page);
1534 }
1535
1536 int recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
1537 {
1538         struct address_space *mapping = sbi->node_inode->i_mapping;
1539         struct f2fs_node *src, *dst;
1540         nid_t ino = ino_of_node(page);
1541         struct node_info old_ni, new_ni;
1542         struct page *ipage;
1543
1544         ipage = grab_cache_page(mapping, ino);
1545         if (!ipage)
1546                 return -ENOMEM;
1547
1548         /* Should not use this inode from the free nid list */
1549         remove_free_nid(NM_I(sbi), ino);
1550
1551         get_node_info(sbi, ino, &old_ni);
1552         SetPageUptodate(ipage);
1553         fill_node_footer(ipage, ino, ino, 0, true);
1554
1555         src = F2FS_NODE(page);
1556         dst = F2FS_NODE(ipage);
1557
1558         memcpy(dst, src, (unsigned long)&src->i.i_ext - (unsigned long)&src->i);
1559         dst->i.i_size = 0;
1560         dst->i.i_blocks = cpu_to_le64(1);
1561         dst->i.i_links = cpu_to_le32(1);
1562         dst->i.i_xattr_nid = 0;
1563
1564         new_ni = old_ni;
1565         new_ni.ino = ino;
1566
1567         if (!inc_valid_node_count(sbi, NULL, 1))
1568                 WARN_ON(1);
1569         set_node_addr(sbi, &new_ni, NEW_ADDR);
1570         inc_valid_inode_count(sbi);
1571         f2fs_put_page(ipage, 1);
1572         return 0;
1573 }
1574
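/*
 * Rebuild the node summary entries for segment @segno by reading every node
 * block in the segment and recording the nid kept in each block's footer.
 */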
1575 int restore_node_summary(struct f2fs_sb_info *sbi,
1576                         unsigned int segno, struct f2fs_summary_block *sum)
1577 {
1578         struct f2fs_node *rn;
1579         struct f2fs_summary *sum_entry;
1580         struct page *page;
1581         block_t addr;
1582         int i, last_offset;
1583
1584         /* allocate a temporary page for reading node blocks */
1585         page = alloc_page(GFP_NOFS | __GFP_ZERO);
1586         if (!page)
1587                 return -ENOMEM;
1588         lock_page(page);
1589
1590         /* scan the node segment */
1591         last_offset = sbi->blocks_per_seg;
1592         addr = START_BLOCK(sbi, segno);
1593         sum_entry = &sum->entries[0];
1594
1595         for (i = 0; i < last_offset; i++, sum_entry++) {
1596                 /*
1597                  * In order to read the next node page,
1598                  * we must clear the PageUptodate flag.
1599                  */
1600                 ClearPageUptodate(page);
1601
1602                 if (f2fs_readpage(sbi, page, addr, READ_SYNC))
1603                         goto out;
1604
1605                 lock_page(page);
1606                 rn = F2FS_NODE(page);
1607                 sum_entry->nid = rn->footer.nid;
1608                 sum_entry->version = 0;
1609                 sum_entry->ofs_in_node = 0;
1610                 addr++;
1611         }
1612         unlock_page(page);
1613 out:
1614         __free_pages(page, 0);
1615         return 0;
1616 }
1617
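/*
 * If the NAT journal kept in the hot data summary block is full, move all of
 * its entries into the NAT cache as dirty entries so that they are written
 * to NAT blocks instead.  Returns true when the journal has been drained,
 * false when it still has room for more entries.
 */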
1618 static bool flush_nats_in_journal(struct f2fs_sb_info *sbi)
1619 {
1620         struct f2fs_nm_info *nm_i = NM_I(sbi);
1621         struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
1622         struct f2fs_summary_block *sum = curseg->sum_blk;
1623         int i;
1624
1625         mutex_lock(&curseg->curseg_mutex);
1626
1627         if (nats_in_cursum(sum) < NAT_JOURNAL_ENTRIES) {
1628                 mutex_unlock(&curseg->curseg_mutex);
1629                 return false;
1630         }
1631
1632         for (i = 0; i < nats_in_cursum(sum); i++) {
1633                 struct nat_entry *ne;
1634                 struct f2fs_nat_entry raw_ne;
1635                 nid_t nid = le32_to_cpu(nid_in_journal(sum, i));
1636
1637                 raw_ne = nat_in_journal(sum, i);
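                /*
                 * Make sure the journalled entry is present and dirty in the
                 * NAT cache, retrying if a new cache entry cannot be
                 * allocated right away.
                 */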
1638 retry:
1639                 write_lock(&nm_i->nat_tree_lock);
1640                 ne = __lookup_nat_cache(nm_i, nid);
1641                 if (ne) {
1642                         __set_nat_cache_dirty(nm_i, ne);
1643                         write_unlock(&nm_i->nat_tree_lock);
1644                         continue;
1645                 }
1646                 ne = grab_nat_entry(nm_i, nid);
1647                 if (!ne) {
1648                         write_unlock(&nm_i->nat_tree_lock);
1649                         goto retry;
1650                 }
1651                 nat_set_blkaddr(ne, le32_to_cpu(raw_ne.block_addr));
1652                 nat_set_ino(ne, le32_to_cpu(raw_ne.ino));
1653                 nat_set_version(ne, raw_ne.version);
1654                 __set_nat_cache_dirty(nm_i, ne);
1655                 write_unlock(&nm_i->nat_tree_lock);
1656         }
1657         update_nats_in_cursum(sum, -i);
1658         mutex_unlock(&curseg->curseg_mutex);
1659         return true;
1660 }
1661
1662 /*
1663  * This function is called during the checkpoint process to write all dirty NAT cache entries back to the NAT journal or to NAT blocks, and to shrink the NAT cache afterwards.
1664  */
1665 void flush_nat_entries(struct f2fs_sb_info *sbi)
1666 {
1667         struct f2fs_nm_info *nm_i = NM_I(sbi);
1668         struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
1669         struct f2fs_summary_block *sum = curseg->sum_blk;
1670         struct list_head *cur, *n;
1671         struct page *page = NULL;
1672         struct f2fs_nat_block *nat_blk = NULL;
1673         nid_t start_nid = 0, end_nid = 0;
1674         bool flushed;
1675
1676         flushed = flush_nats_in_journal(sbi);
1677
1678         if (!flushed)
1679                 mutex_lock(&curseg->curseg_mutex);
1680
1681         /* 1) flush all dirty nat cache entries */
1682         list_for_each_safe(cur, n, &nm_i->dirty_nat_entries) {
1683                 struct nat_entry *ne;
1684                 nid_t nid;
1685                 struct f2fs_nat_entry raw_ne;
1686                 int offset = -1;
1687                 block_t new_blkaddr;
1688
1689                 ne = list_entry(cur, struct nat_entry, list);
1690                 nid = nat_get_nid(ne);
1691
1692                 if (nat_get_blkaddr(ne) == NEW_ADDR)
1693                         continue;
1694                 if (flushed)
1695                         goto to_nat_page;
1696
1697                 /* if there is room for nat entries in curseg->sum_blk */
1698                 offset = lookup_journal_in_cursum(sum, NAT_JOURNAL, nid, 1);
1699                 if (offset >= 0) {
1700                         raw_ne = nat_in_journal(sum, offset);
1701                         goto flush_now;
1702                 }
1703 to_nat_page:
1704                 if (!page || (start_nid > nid || nid > end_nid)) {
1705                         if (page) {
1706                                 f2fs_put_page(page, 1);
1707                                 page = NULL;
1708                         }
1709                         start_nid = START_NID(nid);
1710                         end_nid = start_nid + NAT_ENTRY_PER_BLOCK - 1;
1711
1712                         /*
1713                          * get the nat block page with the dirty flag set, its
1714                          * reference count increased, mapped and locked
1715                          */
1716                         page = get_next_nat_page(sbi, start_nid);
1717                         nat_blk = page_address(page);
1718                 }
1719
1720                 f2fs_bug_on(!nat_blk);
1721                 raw_ne = nat_blk->entries[nid - start_nid];
1722 flush_now:
1723                 new_blkaddr = nat_get_blkaddr(ne);
1724
1725                 raw_ne.ino = cpu_to_le32(nat_get_ino(ne));
1726                 raw_ne.block_addr = cpu_to_le32(new_blkaddr);
1727                 raw_ne.version = nat_get_version(ne);
1728
1729                 if (offset < 0) {
1730                         nat_blk->entries[nid - start_nid] = raw_ne;
1731                 } else {
1732                         nat_in_journal(sum, offset) = raw_ne;
1733                         nid_in_journal(sum, offset) = cpu_to_le32(nid);
1734                 }
1735
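                /*
                 * If the node was deleted (NULL_ADDR) and its nid could not
                 * be added to the free nid list, drop the NAT cache entry;
                 * otherwise keep the entry, clear its dirty state and mark
                 * it as checkpointed.
                 */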
1736                 if (nat_get_blkaddr(ne) == NULL_ADDR &&
1737                                 add_free_nid(NM_I(sbi), nid, false) <= 0) {
1738                         write_lock(&nm_i->nat_tree_lock);
1739                         __del_from_nat_cache(nm_i, ne);
1740                         write_unlock(&nm_i->nat_tree_lock);
1741                 } else {
1742                         write_lock(&nm_i->nat_tree_lock);
1743                         __clear_nat_cache_dirty(nm_i, ne);
1744                         ne->checkpointed = true;
1745                         write_unlock(&nm_i->nat_tree_lock);
1746                 }
1747         }
1748         if (!flushed)
1749                 mutex_unlock(&curseg->curseg_mutex);
1750         f2fs_put_page(page, 1);
1751
1752         /* 2) shrink the nat cache if necessary */
1753         try_to_free_nats(sbi, nm_i->nat_cnt - NM_WOUT_THRESHOLD);
1754 }
1755
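/*
 * Set up the in-memory node manager from the raw superblock: derive max_nid
 * from the number of NAT blocks, initialize the free nid list, the NAT radix
 * tree with its clean and dirty lists, the locks, and a private copy of the
 * NAT version bitmap taken from the current checkpoint.
 */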
1756 static int init_node_manager(struct f2fs_sb_info *sbi)
1757 {
1758         struct f2fs_super_block *sb_raw = F2FS_RAW_SUPER(sbi);
1759         struct f2fs_nm_info *nm_i = NM_I(sbi);
1760         unsigned char *version_bitmap;
1761         unsigned int nat_segs, nat_blocks;
1762
1763         nm_i->nat_blkaddr = le32_to_cpu(sb_raw->nat_blkaddr);
1764
1765         /* segment_count_nat counts both segments of each NAT pair, so divide by 2. */
1766         nat_segs = le32_to_cpu(sb_raw->segment_count_nat) >> 1;
1767         nat_blocks = nat_segs << le32_to_cpu(sb_raw->log_blocks_per_seg);
1768         nm_i->max_nid = NAT_ENTRY_PER_BLOCK * nat_blocks;
1769         nm_i->fcnt = 0;
1770         nm_i->nat_cnt = 0;
1771
1772         INIT_LIST_HEAD(&nm_i->free_nid_list);
1773         INIT_RADIX_TREE(&nm_i->nat_root, GFP_ATOMIC);
1774         INIT_LIST_HEAD(&nm_i->nat_entries);
1775         INIT_LIST_HEAD(&nm_i->dirty_nat_entries);
1776
1777         mutex_init(&nm_i->build_lock);
1778         spin_lock_init(&nm_i->free_nid_list_lock);
1779         rwlock_init(&nm_i->nat_tree_lock);
1780
1781         nm_i->next_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid);
1782         nm_i->bitmap_size = __bitmap_size(sbi, NAT_BITMAP);
1783         version_bitmap = __bitmap_ptr(sbi, NAT_BITMAP);
1784         if (!version_bitmap)
1785                 return -EFAULT;
1786
1787         nm_i->nat_bitmap = kmemdup(version_bitmap, nm_i->bitmap_size,
1788                                         GFP_KERNEL);
1789         if (!nm_i->nat_bitmap)
1790                 return -ENOMEM;
1791         return 0;
1792 }
1793
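/*
 * Allocate and initialize nm_info for this superblock and pre-build an
 * initial set of free nids.
 */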
1794 int build_node_manager(struct f2fs_sb_info *sbi)
1795 {
1796         int err;
1797
1798         sbi->nm_info = kzalloc(sizeof(struct f2fs_nm_info), GFP_KERNEL);
1799         if (!sbi->nm_info)
1800                 return -ENOMEM;
1801
1802         err = init_node_manager(sbi);
1803         if (err)
1804                 return err;
1805
1806         build_free_nids(sbi);
1807         return 0;
1808 }
1809
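/*
 * Tear down the node manager at unmount time: every remaining free nid must
 * be unallocated, and all cached NAT entries are dropped before the NAT
 * bitmap and nm_info itself are freed.
 */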
1810 void destroy_node_manager(struct f2fs_sb_info *sbi)
1811 {
1812         struct f2fs_nm_info *nm_i = NM_I(sbi);
1813         struct free_nid *i, *next_i;
1814         struct nat_entry *natvec[NATVEC_SIZE];
1815         nid_t nid = 0;
1816         unsigned int found;
1817
1818         if (!nm_i)
1819                 return;
1820
1821         /* destroy free nid list */
1822         spin_lock(&nm_i->free_nid_list_lock);
1823         list_for_each_entry_safe(i, next_i, &nm_i->free_nid_list, list) {
1824                 f2fs_bug_on(i->state == NID_ALLOC);
1825                 __del_from_free_nid_list(i);
1826                 nm_i->fcnt--;
1827         }
1828         f2fs_bug_on(nm_i->fcnt);
1829         spin_unlock(&nm_i->free_nid_list_lock);
1830
1831         /* destroy nat cache */
1832         write_lock(&nm_i->nat_tree_lock);
1833         while ((found = __gang_lookup_nat_cache(nm_i,
1834                                         nid, NATVEC_SIZE, natvec))) {
1835                 unsigned idx;
1836                 for (idx = 0; idx < found; idx++) {
1837                         struct nat_entry *e = natvec[idx];
1838                         nid = nat_get_nid(e) + 1;
1839                         __del_from_nat_cache(nm_i, e);
1840                 }
1841         }
1842         f2fs_bug_on(nm_i->nat_cnt);
1843         write_unlock(&nm_i->nat_tree_lock);
1844
1845         kfree(nm_i->nat_bitmap);
1846         sbi->nm_info = NULL;
1847         kfree(nm_i);
1848 }
1849
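/* slab caches backing the NAT cache entries and the free nid list */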
1850 int __init create_node_manager_caches(void)
1851 {
1852         nat_entry_slab = f2fs_kmem_cache_create("nat_entry",
1853                         sizeof(struct nat_entry), NULL);
1854         if (!nat_entry_slab)
1855                 return -ENOMEM;
1856
1857         free_nid_slab = f2fs_kmem_cache_create("free_nid",
1858                         sizeof(struct free_nid), NULL);
1859         if (!free_nid_slab) {
1860                 kmem_cache_destroy(nat_entry_slab);
1861                 return -ENOMEM;
1862         }
1863         return 0;
1864 }
1865
1866 void destroy_node_manager_caches(void)
1867 {
1868         kmem_cache_destroy(free_nid_slab);
1869         kmem_cache_destroy(nat_entry_slab);
1870 }