1 /*
2  * linux/fs/nfs/write.c
3  *
4  * Write file data over NFS.
5  *
6  * Copyright (C) 1996, 1997, Olaf Kirch <okir@monad.swb.de>
7  */
8
9 #include <linux/types.h>
10 #include <linux/slab.h>
11 #include <linux/mm.h>
12 #include <linux/pagemap.h>
13 #include <linux/file.h>
14 #include <linux/writeback.h>
15 #include <linux/swap.h>
16 #include <linux/migrate.h>
17
18 #include <linux/sunrpc/clnt.h>
19 #include <linux/nfs_fs.h>
20 #include <linux/nfs_mount.h>
21 #include <linux/nfs_page.h>
22 #include <linux/backing-dev.h>
23 #include <linux/export.h>
24
25 #include <asm/uaccess.h>
26
27 #include "delegation.h"
28 #include "internal.h"
29 #include "iostat.h"
30 #include "nfs4_fs.h"
31 #include "fscache.h"
32 #include "pnfs.h"
33
34 #define NFSDBG_FACILITY         NFSDBG_PAGECACHE
35
36 #define MIN_POOL_WRITE          (32)
37 #define MIN_POOL_COMMIT         (4)
38
39 /*
40  * Local function declarations
41  */
42 static void nfs_redirty_request(struct nfs_page *req);
43 static const struct rpc_call_ops nfs_write_common_ops;
44 static const struct rpc_call_ops nfs_commit_ops;
45 static const struct nfs_pgio_completion_ops nfs_async_write_completion_ops;
46 static const struct nfs_commit_completion_ops nfs_commit_completion_ops;
47
48 static struct kmem_cache *nfs_wdata_cachep;
49 static mempool_t *nfs_wdata_mempool;
50 static struct kmem_cache *nfs_cdata_cachep;
51 static mempool_t *nfs_commit_mempool;
52
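/*
 * Allocate a zeroed commit descriptor from the commit mempool. GFP_NOFS is
 * used so the allocation cannot recurse back into NFS writeback.
 */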
53 struct nfs_commit_data *nfs_commitdata_alloc(void)
54 {
55         struct nfs_commit_data *p = mempool_alloc(nfs_commit_mempool, GFP_NOFS);
56
57         if (p) {
58                 memset(p, 0, sizeof(*p));
59                 INIT_LIST_HEAD(&p->pages);
60         }
61         return p;
62 }
63 EXPORT_SYMBOL_GPL(nfs_commitdata_alloc);
64
65 void nfs_commit_free(struct nfs_commit_data *p)
66 {
67         mempool_free(p, nfs_commit_mempool);
68 }
69 EXPORT_SYMBOL_GPL(nfs_commit_free);
70
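/*
 * Allocate a zeroed write header from the write mempool and initialise its
 * page and RPC lists, lock and reference count.
 */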
71 struct nfs_write_header *nfs_writehdr_alloc(void)
72 {
73         struct nfs_write_header *p = mempool_alloc(nfs_wdata_mempool, GFP_NOFS);
74
75         if (p) {
76                 struct nfs_pgio_header *hdr = &p->header;
77
78                 memset(p, 0, sizeof(*p));
79                 INIT_LIST_HEAD(&hdr->pages);
80                 INIT_LIST_HEAD(&hdr->rpc_list);
81                 spin_lock_init(&hdr->lock);
82                 atomic_set(&hdr->refcnt, 0);
83                 hdr->verf = &p->verf;
84         }
85         return p;
86 }
87
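/*
 * Allocate write data for @pagecount pages. The rpc_data embedded in the
 * write header is reused when it is still free; otherwise a fresh structure
 * is allocated. On success a reference is taken on @hdr.
 */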
88 static struct nfs_write_data *nfs_writedata_alloc(struct nfs_pgio_header *hdr,
89                                                   unsigned int pagecount)
90 {
91         struct nfs_write_data *data, *prealloc;
92
93         prealloc = &container_of(hdr, struct nfs_write_header, header)->rpc_data;
94         if (prealloc->header == NULL)
95                 data = prealloc;
96         else
97                 data = kzalloc(sizeof(*data), GFP_KERNEL);
98         if (!data)
99                 goto out;
100
101         if (nfs_pgarray_set(&data->pages, pagecount)) {
102                 data->header = hdr;
103                 atomic_inc(&hdr->refcnt);
104         } else {
105                 if (data != prealloc)
106                         kfree(data);
107                 data = NULL;
108         }
109 out:
110         return data;
111 }
112
113 void nfs_writehdr_free(struct nfs_pgio_header *hdr)
114 {
115         struct nfs_write_header *whdr = container_of(hdr, struct nfs_write_header, header);
116         mempool_free(whdr, nfs_wdata_mempool);
117 }
118
119 void nfs_writedata_release(struct nfs_write_data *wdata)
120 {
121         struct nfs_pgio_header *hdr = wdata->header;
122         struct nfs_write_header *write_header = container_of(hdr, struct nfs_write_header, header);
123
124         put_nfs_open_context(wdata->args.context);
125         if (wdata->pages.pagevec != wdata->pages.page_array)
126                 kfree(wdata->pages.pagevec);
127         if (wdata != &write_header->rpc_data)
128                 kfree(wdata);
129         else
130                 wdata->header = NULL;
131         if (atomic_dec_and_test(&hdr->refcnt))
132                 hdr->completion_ops->completion(hdr);
133 }
134
135 static void nfs_context_set_write_error(struct nfs_open_context *ctx, int error)
136 {
137         ctx->error = error;
138         smp_wmb();
139         set_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags);
140 }
141
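/*
 * Look up the nfs_page attached to @page via page_private(). The caller
 * must hold inode->i_lock; an extra reference is taken on the request.
 */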
142 static struct nfs_page *nfs_page_find_request_locked(struct page *page)
143 {
144         struct nfs_page *req = NULL;
145
146         if (PagePrivate(page)) {
147                 req = (struct nfs_page *)page_private(page);
148                 if (req != NULL)
149                         kref_get(&req->wb_kref);
150         }
151         return req;
152 }
153
154 static struct nfs_page *nfs_page_find_request(struct page *page)
155 {
156         struct inode *inode = page->mapping->host;
157         struct nfs_page *req = NULL;
158
159         spin_lock(&inode->i_lock);
160         req = nfs_page_find_request_locked(page);
161         spin_unlock(&inode->i_lock);
162         return req;
163 }
164
165 /* Adjust the file length if we're writing beyond the end */
166 static void nfs_grow_file(struct page *page, unsigned int offset, unsigned int count)
167 {
168         struct inode *inode = page->mapping->host;
169         loff_t end, i_size;
170         pgoff_t end_index;
171
172         spin_lock(&inode->i_lock);
173         i_size = i_size_read(inode);
174         end_index = (i_size - 1) >> PAGE_CACHE_SHIFT;
175         if (i_size > 0 && page->index < end_index)
176                 goto out;
177         end = ((loff_t)page->index << PAGE_CACHE_SHIFT) + ((loff_t)offset+count);
178         if (i_size >= end)
179                 goto out;
180         i_size_write(inode, end);
181         nfs_inc_stats(inode, NFSIOS_EXTENDWRITE);
182 out:
183         spin_unlock(&inode->i_lock);
184 }
185
186 /* A writeback failed: mark the page as bad, and invalidate the page cache */
187 static void nfs_set_pageerror(struct page *page)
188 {
189         SetPageError(page);
190         nfs_zap_mapping(page->mapping->host, page->mapping);
191 }
192
193 /* We can set the PG_uptodate flag if we see that a write request
194  * covers the full page.
195  */
196 static void nfs_mark_uptodate(struct page *page, unsigned int base, unsigned int count)
197 {
198         if (PageUptodate(page))
199                 return;
200         if (base != 0)
201                 return;
202         if (count != nfs_page_length(page))
203                 return;
204         SetPageUptodate(page);
205 }
206
207 static int wb_priority(struct writeback_control *wbc)
208 {
209         if (wbc->for_reclaim)
210                 return FLUSH_HIGHPRI | FLUSH_STABLE;
211         if (wbc->for_kupdate || wbc->for_background)
212                 return FLUSH_LOWPRI | FLUSH_COND_STABLE;
213         return FLUSH_COND_STABLE;
214 }
215
216 /*
217  * NFS congestion control
218  */
219
220 int nfs_congestion_kb;
221
222 #define NFS_CONGESTION_ON_THRESH        (nfs_congestion_kb >> (PAGE_SHIFT-10))
223 #define NFS_CONGESTION_OFF_THRESH       \
224         (NFS_CONGESTION_ON_THRESH - (NFS_CONGESTION_ON_THRESH >> 2))
225
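/*
 * Mark @page as under writeback and account it against the per-server
 * writeback counter. Once the counter exceeds NFS_CONGESTION_ON_THRESH,
 * the backing device is flagged as congested to throttle further writeback.
 */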
226 static int nfs_set_page_writeback(struct page *page)
227 {
228         int ret = test_set_page_writeback(page);
229
230         if (!ret) {
231                 struct inode *inode = page->mapping->host;
232                 struct nfs_server *nfss = NFS_SERVER(inode);
233
234                 if (atomic_long_inc_return(&nfss->writeback) >
235                                 NFS_CONGESTION_ON_THRESH) {
236                         set_bdi_congested(&nfss->backing_dev_info,
237                                                 BLK_RW_ASYNC);
238                 }
239         }
240         return ret;
241 }
242
243 static void nfs_end_page_writeback(struct page *page)
244 {
245         struct inode *inode = page->mapping->host;
246         struct nfs_server *nfss = NFS_SERVER(inode);
247
248         end_page_writeback(page);
249         if (atomic_long_dec_return(&nfss->writeback) < NFS_CONGESTION_OFF_THRESH)
250                 clear_bdi_congested(&nfss->backing_dev_info, BLK_RW_ASYNC);
251 }
252
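/*
 * Find the write request attached to @page and lock it, waiting for a
 * locked request to complete unless @nonblock is set (in which case
 * -EAGAIN is returned).
 */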
253 static struct nfs_page *nfs_find_and_lock_request(struct page *page, bool nonblock)
254 {
255         struct inode *inode = page->mapping->host;
256         struct nfs_page *req;
257         int ret;
258
259         spin_lock(&inode->i_lock);
260         for (;;) {
261                 req = nfs_page_find_request_locked(page);
262                 if (req == NULL)
263                         break;
264                 if (nfs_lock_request(req))
265                         break;
266                 /* Note: If we hold the page lock, as is the case in nfs_writepage,
267                  *       then the call to nfs_lock_request() will always
268                  *       succeed provided that someone hasn't already marked the
269                  *       request as dirty (in which case we don't care).
270                  */
271                 spin_unlock(&inode->i_lock);
272                 if (!nonblock)
273                         ret = nfs_wait_on_request(req);
274                 else
275                         ret = -EAGAIN;
276                 nfs_release_request(req);
277                 if (ret != 0)
278                         return ERR_PTR(ret);
279                 spin_lock(&inode->i_lock);
280         }
281         spin_unlock(&inode->i_lock);
282         return req;
283 }
284
285 /*
286  * Find an associated nfs write request, and prepare to flush it out
287  * May return an error if the user signalled nfs_wait_on_request().
288  */
289 static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
290                                 struct page *page, bool nonblock)
291 {
292         struct nfs_page *req;
293         int ret = 0;
294
295         req = nfs_find_and_lock_request(page, nonblock);
296         if (!req)
297                 goto out;
298         ret = PTR_ERR(req);
299         if (IS_ERR(req))
300                 goto out;
301
302         ret = nfs_set_page_writeback(page);
303         BUG_ON(ret != 0);
304         BUG_ON(test_bit(PG_CLEAN, &req->wb_flags));
305
306         if (!nfs_pageio_add_request(pgio, req)) {
307                 nfs_redirty_request(req);
308                 ret = pgio->pg_error;
309         }
310 out:
311         return ret;
312 }
313
314 static int nfs_do_writepage(struct page *page, struct writeback_control *wbc, struct nfs_pageio_descriptor *pgio)
315 {
316         struct inode *inode = page->mapping->host;
317         int ret;
318
319         nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE);
320         nfs_add_stats(inode, NFSIOS_WRITEPAGES, 1);
321
322         nfs_pageio_cond_complete(pgio, page->index);
323         ret = nfs_page_async_flush(pgio, page, wbc->sync_mode == WB_SYNC_NONE);
324         if (ret == -EAGAIN) {
325                 redirty_page_for_writepage(wbc, page);
326                 ret = 0;
327         }
328         return ret;
329 }
330
331 /*
332  * Write an mmapped page to the server.
333  */
334 static int nfs_writepage_locked(struct page *page, struct writeback_control *wbc)
335 {
336         struct nfs_pageio_descriptor pgio;
337         int err;
338
339         nfs_pageio_init_write(&pgio, page->mapping->host, wb_priority(wbc),
340                               &nfs_async_write_completion_ops);
341         err = nfs_do_writepage(page, wbc, &pgio);
342         nfs_pageio_complete(&pgio);
343         if (err < 0)
344                 return err;
345         if (pgio.pg_error < 0)
346                 return pgio.pg_error;
347         return 0;
348 }
349
350 int nfs_writepage(struct page *page, struct writeback_control *wbc)
351 {
352         int ret;
353
354         ret = nfs_writepage_locked(page, wbc);
355         unlock_page(page);
356         return ret;
357 }
358
359 static int nfs_writepages_callback(struct page *page, struct writeback_control *wbc, void *data)
360 {
361         int ret;
362
363         ret = nfs_do_writepage(page, wbc, data);
364         unlock_page(page);
365         return ret;
366 }
367
368 int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
369 {
370         struct inode *inode = mapping->host;
371         unsigned long *bitlock = &NFS_I(inode)->flags;
372         struct nfs_pageio_descriptor pgio;
373         int err;
374
375         /* Stop dirtying of new pages while we sync */
376         err = wait_on_bit_lock(bitlock, NFS_INO_FLUSHING,
377                         nfs_wait_bit_killable, TASK_KILLABLE);
378         if (err)
379                 goto out_err;
380
381         nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGES);
382
383         nfs_pageio_init_write(&pgio, inode, wb_priority(wbc),
384                               &nfs_async_write_completion_ops);
385         err = write_cache_pages(mapping, wbc, nfs_writepages_callback, &pgio);
386         nfs_pageio_complete(&pgio);
387
388         clear_bit_unlock(NFS_INO_FLUSHING, bitlock);
389         smp_mb__after_clear_bit();
390         wake_up_bit(bitlock, NFS_INO_FLUSHING);
391
392         if (err < 0)
393                 goto out_err;
394         err = pgio.pg_error;
395         if (err < 0)
396                 goto out_err;
397         return 0;
398 out_err:
399         return err;
400 }
401
402 /*
403  * Insert a write request into an inode
404  */
405 static void nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
406 {
407         struct nfs_inode *nfsi = NFS_I(inode);
408
409         /* Lock the request! */
410         nfs_lock_request(req);
411
412         spin_lock(&inode->i_lock);
413         if (!nfsi->npages && nfs_have_delegation(inode, FMODE_WRITE))
414                 inode->i_version++;
415         set_bit(PG_MAPPED, &req->wb_flags);
416         SetPagePrivate(req->wb_page);
417         set_page_private(req->wb_page, (unsigned long)req);
418         nfsi->npages++;
419         kref_get(&req->wb_kref);
420         spin_unlock(&inode->i_lock);
421 }
422
423 /*
424  * Remove a write request from an inode
425  */
426 static void nfs_inode_remove_request(struct nfs_page *req)
427 {
428         struct inode *inode = req->wb_context->dentry->d_inode;
429         struct nfs_inode *nfsi = NFS_I(inode);
430
431         BUG_ON(!NFS_WBACK_BUSY(req));
432
433         spin_lock(&inode->i_lock);
434         set_page_private(req->wb_page, 0);
435         ClearPagePrivate(req->wb_page);
436         clear_bit(PG_MAPPED, &req->wb_flags);
437         nfsi->npages--;
438         spin_unlock(&inode->i_lock);
439         nfs_release_request(req);
440 }
441
442 static void
443 nfs_mark_request_dirty(struct nfs_page *req)
444 {
445         __set_page_dirty_nobuffers(req->wb_page);
446 }
447
448 #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
449 /**
450  * nfs_request_add_commit_list - add request to a commit list
451  * @req: pointer to a struct nfs_page
452  * @dst: commit list head
453  * @cinfo: holds list lock and accounting info
454  *
455  * This sets the PG_CLEAN bit, updates the cinfo count of
456  * outstanding requests requiring a commit, as well as
457  * the MM page stats.
458  *
459  * The caller must _not_ hold the cinfo->lock, but must be
460  * holding the nfs_page lock.
461  */
462 void
463 nfs_request_add_commit_list(struct nfs_page *req, struct list_head *dst,
464                             struct nfs_commit_info *cinfo)
465 {
466         set_bit(PG_CLEAN, &(req)->wb_flags);
467         spin_lock(cinfo->lock);
468         nfs_list_add_request(req, dst);
469         cinfo->mds->ncommit++;
470         spin_unlock(cinfo->lock);
471         if (!cinfo->dreq) {
472                 inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
473                 inc_bdi_stat(req->wb_page->mapping->backing_dev_info,
474                              BDI_RECLAIMABLE);
475                 __mark_inode_dirty(req->wb_context->dentry->d_inode,
476                                    I_DIRTY_DATASYNC);
477         }
478 }
479 EXPORT_SYMBOL_GPL(nfs_request_add_commit_list);
480
481 /**
482  * nfs_request_remove_commit_list - Remove request from a commit list
483  * @req: pointer to a nfs_page
484  * @cinfo: holds list lock and accounting info
485  *
486  * This clears the PG_CLEAN bit, and updates the cinfo's count of
487  * outstanding requests requiring a commit.
488  * It does not update the MM page stats.
489  *
490  * The caller _must_ hold the cinfo->lock and the nfs_page lock.
491  */
492 void
493 nfs_request_remove_commit_list(struct nfs_page *req,
494                                struct nfs_commit_info *cinfo)
495 {
496         if (!test_and_clear_bit(PG_CLEAN, &(req)->wb_flags))
497                 return;
498         nfs_list_remove_request(req);
499         cinfo->mds->ncommit--;
500 }
501 EXPORT_SYMBOL_GPL(nfs_request_remove_commit_list);
502
503 static void nfs_init_cinfo_from_inode(struct nfs_commit_info *cinfo,
504                                       struct inode *inode)
505 {
506         cinfo->lock = &inode->i_lock;
507         cinfo->mds = &NFS_I(inode)->commit_info;
508         cinfo->ds = pnfs_get_ds_info(inode);
509         cinfo->dreq = NULL;
510         cinfo->completion_ops = &nfs_commit_completion_ops;
511 }
512
513 void nfs_init_cinfo(struct nfs_commit_info *cinfo,
514                     struct inode *inode,
515                     struct nfs_direct_req *dreq)
516 {
517         if (dreq)
518                 nfs_init_cinfo_from_dreq(cinfo, dreq);
519         else
520                 nfs_init_cinfo_from_inode(cinfo, inode);
521 }
522 EXPORT_SYMBOL_GPL(nfs_init_cinfo);
523
524 /*
525  * Add a request to the inode's commit list.
526  */
527 void
528 nfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg,
529                         struct nfs_commit_info *cinfo)
530 {
531         if (pnfs_mark_request_commit(req, lseg, cinfo))
532                 return;
533         nfs_request_add_commit_list(req, &cinfo->mds->list, cinfo);
534 }
535
536 static void
537 nfs_clear_page_commit(struct page *page)
538 {
539         dec_zone_page_state(page, NR_UNSTABLE_NFS);
540         dec_bdi_stat(page->mapping->backing_dev_info, BDI_RECLAIMABLE);
541 }
542
543 static void
544 nfs_clear_request_commit(struct nfs_page *req)
545 {
546         if (test_bit(PG_CLEAN, &req->wb_flags)) {
547                 struct inode *inode = req->wb_context->dentry->d_inode;
548                 struct nfs_commit_info cinfo;
549
550                 nfs_init_cinfo_from_inode(&cinfo, inode);
551                 if (!pnfs_clear_request_commit(req, &cinfo)) {
552                         spin_lock(cinfo.lock);
553                         nfs_request_remove_commit_list(req, &cinfo);
554                         spin_unlock(cinfo.lock);
555                 }
556                 nfs_clear_page_commit(req->wb_page);
557         }
558 }
559
560 static inline
561 int nfs_write_need_commit(struct nfs_write_data *data)
562 {
563         if (data->verf.committed == NFS_DATA_SYNC)
564                 return data->header->lseg == NULL;
565         return data->verf.committed != NFS_FILE_SYNC;
566 }
567
568 #else
569 static void nfs_init_cinfo_from_inode(struct nfs_commit_info *cinfo,
570                                       struct inode *inode)
571 {
572 }
573
574 void nfs_init_cinfo(struct nfs_commit_info *cinfo,
575                     struct inode *inode,
576                     struct nfs_direct_req *dreq)
577 {
578 }
579
580 void
581 nfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg,
582                         struct nfs_commit_info *cinfo)
583 {
584 }
585
586 static void
587 nfs_clear_request_commit(struct nfs_page *req)
588 {
589 }
590
591 static inline
592 int nfs_write_need_commit(struct nfs_write_data *data)
593 {
594         return 0;
595 }
596
597 #endif
598
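/*
 * Per-request completion for a write header: requests that failed are
 * marked in error, those needing a resend are redirtied, those needing a
 * commit are added to the commit list, and the rest are removed from the
 * inode before the header is released.
 */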
599 static void nfs_write_completion(struct nfs_pgio_header *hdr)
600 {
601         struct nfs_commit_info cinfo;
602         unsigned long bytes = 0;
603
604         if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
605                 goto out;
606         nfs_init_cinfo_from_inode(&cinfo, hdr->inode);
607         while (!list_empty(&hdr->pages)) {
608                 struct nfs_page *req = nfs_list_entry(hdr->pages.next);
609
610                 bytes += req->wb_bytes;
611                 nfs_list_remove_request(req);
612                 if (test_bit(NFS_IOHDR_ERROR, &hdr->flags) &&
613                     (hdr->good_bytes < bytes)) {
614                         nfs_set_pageerror(req->wb_page);
615                         nfs_context_set_write_error(req->wb_context, hdr->error);
616                         goto remove_req;
617                 }
618                 if (test_bit(NFS_IOHDR_NEED_RESCHED, &hdr->flags)) {
619                         nfs_mark_request_dirty(req);
620                         goto next;
621                 }
622                 if (test_bit(NFS_IOHDR_NEED_COMMIT, &hdr->flags)) {
623                         memcpy(&req->wb_verf, hdr->verf, sizeof(req->wb_verf));
624                         nfs_mark_request_commit(req, hdr->lseg, &cinfo);
625                         goto next;
626                 }
627 remove_req:
628                 nfs_inode_remove_request(req);
629 next:
630                 nfs_unlock_request(req);
631                 nfs_end_page_writeback(req->wb_page);
632                 nfs_release_request(req);
633         }
634 out:
635         hdr->release(hdr);
636 }
637
638 #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
639 static unsigned long
640 nfs_reqs_to_commit(struct nfs_commit_info *cinfo)
641 {
642         return cinfo->mds->ncommit;
643 }
644
645 /* cinfo->lock held by caller */
646 int
647 nfs_scan_commit_list(struct list_head *src, struct list_head *dst,
648                      struct nfs_commit_info *cinfo, int max)
649 {
650         struct nfs_page *req, *tmp;
651         int ret = 0;
652
653         list_for_each_entry_safe(req, tmp, src, wb_list) {
654                 if (!nfs_lock_request(req))
655                         continue;
656                 kref_get(&req->wb_kref);
657                 if (cond_resched_lock(cinfo->lock))
658                         list_safe_reset_next(req, tmp, wb_list);
659                 nfs_request_remove_commit_list(req, cinfo);
660                 nfs_list_add_request(req, dst);
661                 ret++;
662                 if ((ret == max) && !cinfo->dreq)
663                         break;
664         }
665         return ret;
666 }
667
668 /**
669  * nfs_scan_commit - Scan an inode for commit requests
670  * @inode: NFS inode to scan
671  * @dst: mds destination list
672  * @cinfo: mds and ds lists of reqs ready to commit
673  *
674  * Moves requests from the inode's 'commit' request list.
675  * The requests are *not* checked to ensure that they form a contiguous set.
676  */
677 int
678 nfs_scan_commit(struct inode *inode, struct list_head *dst,
679                 struct nfs_commit_info *cinfo)
680 {
681         int ret = 0;
682
683         spin_lock(cinfo->lock);
684         if (cinfo->mds->ncommit > 0) {
685                 const int max = INT_MAX;
686
687                 ret = nfs_scan_commit_list(&cinfo->mds->list, dst,
688                                            cinfo, max);
689                 ret += pnfs_scan_commit_lists(inode, cinfo, max - ret);
690         }
691         spin_unlock(cinfo->lock);
692         return ret;
693 }
694
695 #else
696 static unsigned long nfs_reqs_to_commit(struct nfs_commit_info *cinfo)
697 {
698         return 0;
699 }
700
701 int nfs_scan_commit(struct inode *inode, struct list_head *dst,
702                     struct nfs_commit_info *cinfo)
703 {
704         return 0;
705 }
706 #endif
707
708 /*
709  * Search for an existing write request, and attempt to update
710  * it to reflect a new dirty region on a given page.
711  *
712  * If the attempt fails, then the existing request is flushed out
713  * to disk.
714  */
715 static struct nfs_page *nfs_try_to_update_request(struct inode *inode,
716                 struct page *page,
717                 unsigned int offset,
718                 unsigned int bytes)
719 {
720         struct nfs_page *req;
721         unsigned int rqend;
722         unsigned int end;
723         int error;
724
725         if (!PagePrivate(page))
726                 return NULL;
727
728         end = offset + bytes;
729         spin_lock(&inode->i_lock);
730
731         for (;;) {
732                 req = nfs_page_find_request_locked(page);
733                 if (req == NULL)
734                         goto out_unlock;
735
736                 rqend = req->wb_offset + req->wb_bytes;
737                 /*
738                  * Tell the caller to flush out the request if
739                  * the offsets are non-contiguous.
740                  * Note: nfs_flush_incompatible() will already
741                  * have flushed out requests having wrong owners.
742                  */
743                 if (offset > rqend
744                     || end < req->wb_offset)
745                         goto out_flushme;
746
747                 if (nfs_lock_request(req))
748                         break;
749
750                 /* The request is locked, so wait and then retry */
751                 spin_unlock(&inode->i_lock);
752                 error = nfs_wait_on_request(req);
753                 nfs_release_request(req);
754                 if (error != 0)
755                         goto out_err;
756                 spin_lock(&inode->i_lock);
757         }
758
759         /* Okay, the request matches. Update the region */
760         if (offset < req->wb_offset) {
761                 req->wb_offset = offset;
762                 req->wb_pgbase = offset;
763         }
764         if (end > rqend)
765                 req->wb_bytes = end - req->wb_offset;
766         else
767                 req->wb_bytes = rqend - req->wb_offset;
768 out_unlock:
769         spin_unlock(&inode->i_lock);
770         if (req)
771                 nfs_clear_request_commit(req);
772         return req;
773 out_flushme:
774         spin_unlock(&inode->i_lock);
775         nfs_release_request(req);
776         error = nfs_wb_page(inode, page);
777 out_err:
778         return ERR_PTR(error);
779 }
780
781 /*
782  * Try to update an existing write request, or create one if there is none.
783  *
784  * Note: Should always be called with the Page Lock held to prevent races
785  * if we have to add a new request. Also assumes that the caller has
786  * already called nfs_flush_incompatible() if necessary.
787  */
788 static struct nfs_page * nfs_setup_write_request(struct nfs_open_context* ctx,
789                 struct page *page, unsigned int offset, unsigned int bytes)
790 {
791         struct inode *inode = page->mapping->host;
792         struct nfs_page *req;
793
794         req = nfs_try_to_update_request(inode, page, offset, bytes);
795         if (req != NULL)
796                 goto out;
797         req = nfs_create_request(ctx, inode, page, offset, bytes);
798         if (IS_ERR(req))
799                 goto out;
800         nfs_inode_add_request(inode, req);
801 out:
802         return req;
803 }
804
805 static int nfs_writepage_setup(struct nfs_open_context *ctx, struct page *page,
806                 unsigned int offset, unsigned int count)
807 {
808         struct nfs_page *req;
809
810         req = nfs_setup_write_request(ctx, page, offset, count);
811         if (IS_ERR(req))
812                 return PTR_ERR(req);
813         /* Update file length */
814         nfs_grow_file(page, offset, count);
815         nfs_mark_uptodate(page, req->wb_pgbase, req->wb_bytes);
816         nfs_mark_request_dirty(req);
817         nfs_unlock_and_release_request(req);
818         return 0;
819 }
820
821 int nfs_flush_incompatible(struct file *file, struct page *page)
822 {
823         struct nfs_open_context *ctx = nfs_file_open_context(file);
824         struct nfs_page *req;
825         int do_flush, status;
826         /*
827          * Look for a request corresponding to this page. If there
828          * is one, and it belongs to another file, we flush it out
829          * before we try to copy anything into the page. Do this
830          * due to the lack of an ACCESS-type call in NFSv2.
831          * Also do the same if we find a request from an existing
832          * dropped page.
833          */
834         do {
835                 req = nfs_page_find_request(page);
836                 if (req == NULL)
837                         return 0;
838                 do_flush = req->wb_page != page || req->wb_context != ctx ||
839                         req->wb_lock_context->lockowner != current->files ||
840                         req->wb_lock_context->pid != current->tgid;
841                 nfs_release_request(req);
842                 if (!do_flush)
843                         return 0;
844                 status = nfs_wb_page(page->mapping->host, page);
845         } while (status == 0);
846         return status;
847 }
848
849 /*
850  * If the page cache is marked as unsafe or invalid, then we can't rely on
851  * the PageUptodate() flag. In this case, we will need to turn off
852  * write optimisations that depend on the page contents being correct.
853  */
854 static bool nfs_write_pageuptodate(struct page *page, struct inode *inode)
855 {
856         if (nfs_have_delegated_attributes(inode))
857                 goto out;
858         if (NFS_I(inode)->cache_validity & NFS_INO_REVAL_PAGECACHE)
859                 return false;
860 out:
861         return PageUptodate(page) != 0;
862 }
863
864 /*
865  * Update and possibly write a cached page of an NFS file.
866  *
867  * XXX: Keep an eye on generic_file_read to make sure it doesn't do bad
868  * things with a page scheduled for an RPC call (e.g. invalidate it).
869  */
870 int nfs_updatepage(struct file *file, struct page *page,
871                 unsigned int offset, unsigned int count)
872 {
873         struct nfs_open_context *ctx = nfs_file_open_context(file);
874         struct inode    *inode = page->mapping->host;
875         int             status = 0;
876
877         nfs_inc_stats(inode, NFSIOS_VFSUPDATEPAGE);
878
879         dprintk("NFS:       nfs_updatepage(%s/%s %d@%lld)\n",
880                 file->f_path.dentry->d_parent->d_name.name,
881                 file->f_path.dentry->d_name.name, count,
882                 (long long)(page_offset(page) + offset));
883
884         /* If we're not using byte range locks, and we know the page
885          * is up to date, it may be more efficient to extend the write
886          * to cover the entire page in order to avoid fragmentation
887          * inefficiencies.
888          */
889         if (nfs_write_pageuptodate(page, inode) &&
890                         inode->i_flock == NULL &&
891                         !(file->f_flags & O_DSYNC)) {
892                 count = max(count + offset, nfs_page_length(page));
893                 offset = 0;
894         }
895
896         status = nfs_writepage_setup(ctx, page, offset, count);
897         if (status < 0)
898                 nfs_set_pageerror(page);
899         else
900                 __set_page_dirty_nobuffers(page);
901
902         dprintk("NFS:       nfs_updatepage returns %d (isize %lld)\n",
903                         status, (long long)i_size_read(inode));
904         return status;
905 }
906
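/* Map the FLUSH_HIGHPRI/FLUSH_LOWPRI flags onto an RPC task priority. */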
907 static int flush_task_priority(int how)
908 {
909         switch (how & (FLUSH_HIGHPRI|FLUSH_LOWPRI)) {
910                 case FLUSH_HIGHPRI:
911                         return RPC_PRIORITY_HIGH;
912                 case FLUSH_LOWPRI:
913                         return RPC_PRIORITY_LOW;
914         }
915         return RPC_PRIORITY_NORMAL;
916 }
917
918 int nfs_initiate_write(struct rpc_clnt *clnt,
919                        struct nfs_write_data *data,
920                        const struct rpc_call_ops *call_ops,
921                        int how, int flags)
922 {
923         struct inode *inode = data->header->inode;
924         int priority = flush_task_priority(how);
925         struct rpc_task *task;
926         struct rpc_message msg = {
927                 .rpc_argp = &data->args,
928                 .rpc_resp = &data->res,
929                 .rpc_cred = data->header->cred,
930         };
931         struct rpc_task_setup task_setup_data = {
932                 .rpc_client = clnt,
933                 .task = &data->task,
934                 .rpc_message = &msg,
935                 .callback_ops = call_ops,
936                 .callback_data = data,
937                 .workqueue = nfsiod_workqueue,
938                 .flags = RPC_TASK_ASYNC | flags,
939                 .priority = priority,
940         };
941         int ret = 0;
942
943         /* Set up the initial task struct.  */
944         NFS_PROTO(inode)->write_setup(data, &msg);
945
946         dprintk("NFS: %5u initiated write call "
947                 "(req %s/%lld, %u bytes @ offset %llu)\n",
948                 data->task.tk_pid,
949                 inode->i_sb->s_id,
950                 (long long)NFS_FILEID(inode),
951                 data->args.count,
952                 (unsigned long long)data->args.offset);
953
954         task = rpc_run_task(&task_setup_data);
955         if (IS_ERR(task)) {
956                 ret = PTR_ERR(task);
957                 goto out;
958         }
959         if (how & FLUSH_SYNC) {
960                 ret = rpc_wait_for_completion_task(task);
961                 if (ret == 0)
962                         ret = task->tk_status;
963         }
964         rpc_put_task(task);
965 out:
966         return ret;
967 }
968 EXPORT_SYMBOL_GPL(nfs_initiate_write);
969
970 /*
971  * Set up the argument/result storage required for the RPC call.
972  */
973 static void nfs_write_rpcsetup(struct nfs_write_data *data,
974                 unsigned int count, unsigned int offset,
975                 int how, struct nfs_commit_info *cinfo)
976 {
977         struct nfs_page *req = data->header->req;
978
979         /* Set up the RPC argument and reply structs
980          * NB: take care not to mess about with data->commit et al. */
981
982         data->args.fh     = NFS_FH(data->header->inode);
983         data->args.offset = req_offset(req) + offset;
984         /* pnfs_set_layoutcommit needs this */
985         data->mds_offset = data->args.offset;
986         data->args.pgbase = req->wb_pgbase + offset;
987         data->args.pages  = data->pages.pagevec;
988         data->args.count  = count;
989         data->args.context = get_nfs_open_context(req->wb_context);
990         data->args.lock_context = req->wb_lock_context;
991         data->args.stable  = NFS_UNSTABLE;
992         switch (how & (FLUSH_STABLE | FLUSH_COND_STABLE)) {
993         case 0:
994                 break;
995         case FLUSH_COND_STABLE:
996                 if (nfs_reqs_to_commit(cinfo))
997                         break;
998         default:
999                 data->args.stable = NFS_FILE_SYNC;
1000         }
1001
1002         data->res.fattr   = &data->fattr;
1003         data->res.count   = count;
1004         data->res.verf    = &data->verf;
1005         nfs_fattr_init(&data->fattr);
1006 }
1007
1008 static int nfs_do_write(struct nfs_write_data *data,
1009                 const struct rpc_call_ops *call_ops,
1010                 int how)
1011 {
1012         struct inode *inode = data->header->inode;
1013
1014         return nfs_initiate_write(NFS_CLIENT(inode), data, call_ops, how, 0);
1015 }
1016
1017 static int nfs_do_multiple_writes(struct list_head *head,
1018                 const struct rpc_call_ops *call_ops,
1019                 int how)
1020 {
1021         struct nfs_write_data *data;
1022         int ret = 0;
1023
1024         while (!list_empty(head)) {
1025                 int ret2;
1026
1027                 data = list_first_entry(head, struct nfs_write_data, list);
1028                 list_del_init(&data->list);
1029
1030                 ret2 = nfs_do_write(data, call_ops, how);
1031                 if (ret == 0)
1032                         ret = ret2;
1033         }
1034         return ret;
1035 }
1036
1037 /* If an nfs_flush_* function fails, it should remove reqs from @head and
1038  * call this on each, which will prepare them to be retried on next
1039  * writeback using standard nfs.
1040  */
1041 static void nfs_redirty_request(struct nfs_page *req)
1042 {
1043         nfs_mark_request_dirty(req);
1044         nfs_unlock_request(req);
1045         nfs_end_page_writeback(req->wb_page);
1046         nfs_release_request(req);
1047 }
1048
1049 static void nfs_async_write_error(struct list_head *head)
1050 {
1051         struct nfs_page *req;
1052
1053         while (!list_empty(head)) {
1054                 req = nfs_list_entry(head->next);
1055                 nfs_list_remove_request(req);
1056                 nfs_redirty_request(req);
1057         }
1058 }
1059
1060 static const struct nfs_pgio_completion_ops nfs_async_write_completion_ops = {
1061         .error_cleanup = nfs_async_write_error,
1062         .completion = nfs_write_completion,
1063 };
1064
1065 static void nfs_flush_error(struct nfs_pageio_descriptor *desc,
1066                 struct nfs_pgio_header *hdr)
1067 {
1068         set_bit(NFS_IOHDR_REDO, &hdr->flags);
1069         while (!list_empty(&hdr->rpc_list)) {
1070                 struct nfs_write_data *data = list_first_entry(&hdr->rpc_list,
1071                                 struct nfs_write_data, list);
1072                 list_del(&data->list);
1073                 nfs_writedata_release(data);
1074         }
1075         desc->pg_completion_ops->error_cleanup(&desc->pg_list);
1076 }
1077
1078 /*
1079  * Generate multiple small requests to write out a single
1080  * contiguous dirty area on one page.
1081  */
1082 static int nfs_flush_multi(struct nfs_pageio_descriptor *desc,
1083                            struct nfs_pgio_header *hdr)
1084 {
1085         struct nfs_page *req = hdr->req;
1086         struct page *page = req->wb_page;
1087         struct nfs_write_data *data;
1088         size_t wsize = desc->pg_bsize, nbytes;
1089         unsigned int offset;
1090         int requests = 0;
1091         struct nfs_commit_info cinfo;
1092
1093         nfs_init_cinfo(&cinfo, desc->pg_inode, desc->pg_dreq);
1094
1095         if ((desc->pg_ioflags & FLUSH_COND_STABLE) &&
1096             (desc->pg_moreio || nfs_reqs_to_commit(&cinfo) ||
1097              desc->pg_count > wsize))
1098                 desc->pg_ioflags &= ~FLUSH_COND_STABLE;
1099
1100
1101         offset = 0;
1102         nbytes = desc->pg_count;
1103         do {
1104                 size_t len = min(nbytes, wsize);
1105
1106                 data = nfs_writedata_alloc(hdr, 1);
1107                 if (!data) {
1108                         nfs_flush_error(desc, hdr);
1109                         return -ENOMEM;
1110                 }
1111                 data->pages.pagevec[0] = page;
1112                 nfs_write_rpcsetup(data, len, offset, desc->pg_ioflags, &cinfo);
1113                 list_add(&data->list, &hdr->rpc_list);
1114                 requests++;
1115                 nbytes -= len;
1116                 offset += len;
1117         } while (nbytes != 0);
1118         nfs_list_remove_request(req);
1119         nfs_list_add_request(req, &hdr->pages);
1120         desc->pg_rpc_callops = &nfs_write_common_ops;
1121         return 0;
1122 }
1123
1124 /*
1125  * Create an RPC task for the given write request and kick it.
1126  * The page must have been locked by the caller.
1127  *
1128  * It may happen that the page we're passed is not marked dirty.
1129  * This is the case if nfs_updatepage detects a conflicting request
1130  * that has been written but not committed.
1131  */
1132 static int nfs_flush_one(struct nfs_pageio_descriptor *desc,
1133                          struct nfs_pgio_header *hdr)
1134 {
1135         struct nfs_page         *req;
1136         struct page             **pages;
1137         struct nfs_write_data   *data;
1138         struct list_head *head = &desc->pg_list;
1139         struct nfs_commit_info cinfo;
1140
1141         data = nfs_writedata_alloc(hdr, nfs_page_array_len(desc->pg_base,
1142                                                            desc->pg_count));
1143         if (!data) {
1144                 nfs_flush_error(desc, hdr);
1145                 return -ENOMEM;
1146         }
1147
1148         nfs_init_cinfo(&cinfo, desc->pg_inode, desc->pg_dreq);
1149         pages = data->pages.pagevec;
1150         while (!list_empty(head)) {
1151                 req = nfs_list_entry(head->next);
1152                 nfs_list_remove_request(req);
1153                 nfs_list_add_request(req, &hdr->pages);
1154                 *pages++ = req->wb_page;
1155         }
1156
1157         if ((desc->pg_ioflags & FLUSH_COND_STABLE) &&
1158             (desc->pg_moreio || nfs_reqs_to_commit(&cinfo)))
1159                 desc->pg_ioflags &= ~FLUSH_COND_STABLE;
1160
1161         /* Set up the argument struct */
1162         nfs_write_rpcsetup(data, desc->pg_count, 0, desc->pg_ioflags, &cinfo);
1163         list_add(&data->list, &hdr->rpc_list);
1164         desc->pg_rpc_callops = &nfs_write_common_ops;
1165         return 0;
1166 }
1167
1168 int nfs_generic_flush(struct nfs_pageio_descriptor *desc,
1169                       struct nfs_pgio_header *hdr)
1170 {
1171         if (desc->pg_bsize < PAGE_CACHE_SIZE)
1172                 return nfs_flush_multi(desc, hdr);
1173         return nfs_flush_one(desc, hdr);
1174 }
1175
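/*
 * Allocate a write header, build the list of WRITE RPCs for the descriptor
 * via nfs_generic_flush(), then send them. The extra reference on the
 * header keeps it alive until the last nfs_write_data is released.
 */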
1176 static int nfs_generic_pg_writepages(struct nfs_pageio_descriptor *desc)
1177 {
1178         struct nfs_write_header *whdr;
1179         struct nfs_pgio_header *hdr;
1180         int ret;
1181
1182         whdr = nfs_writehdr_alloc();
1183         if (!whdr) {
1184                 desc->pg_completion_ops->error_cleanup(&desc->pg_list);
1185                 return -ENOMEM;
1186         }
1187         hdr = &whdr->header;
1188         nfs_pgheader_init(desc, hdr, nfs_writehdr_free);
1189         atomic_inc(&hdr->refcnt);
1190         ret = nfs_generic_flush(desc, hdr);
1191         if (ret == 0)
1192                 ret = nfs_do_multiple_writes(&hdr->rpc_list,
1193                                              desc->pg_rpc_callops,
1194                                              desc->pg_ioflags);
1195         if (atomic_dec_and_test(&hdr->refcnt))
1196                 hdr->completion_ops->completion(hdr);
1197         return ret;
1198 }
1199
1200 static const struct nfs_pageio_ops nfs_pageio_write_ops = {
1201         .pg_test = nfs_generic_pg_test,
1202         .pg_doio = nfs_generic_pg_writepages,
1203 };
1204
1205 void nfs_pageio_init_write_mds(struct nfs_pageio_descriptor *pgio,
1206                                struct inode *inode, int ioflags,
1207                                const struct nfs_pgio_completion_ops *compl_ops)
1208 {
1209         nfs_pageio_init(pgio, inode, &nfs_pageio_write_ops, compl_ops,
1210                                 NFS_SERVER(inode)->wsize, ioflags);
1211 }
1212
1213 void nfs_pageio_reset_write_mds(struct nfs_pageio_descriptor *pgio)
1214 {
1215         pgio->pg_ops = &nfs_pageio_write_ops;
1216         pgio->pg_bsize = NFS_SERVER(pgio->pg_inode)->wsize;
1217 }
1218 EXPORT_SYMBOL_GPL(nfs_pageio_reset_write_mds);
1219
1220 void nfs_pageio_init_write(struct nfs_pageio_descriptor *pgio,
1221                            struct inode *inode, int ioflags,
1222                            const struct nfs_pgio_completion_ops *compl_ops)
1223 {
1224         if (!pnfs_pageio_init_write(pgio, inode, ioflags, compl_ops))
1225                 nfs_pageio_init_write_mds(pgio, inode, ioflags, compl_ops);
1226 }
1227
1228 void nfs_write_prepare(struct rpc_task *task, void *calldata)
1229 {
1230         struct nfs_write_data *data = calldata;
1231         NFS_PROTO(data->header->inode)->write_rpc_prepare(task, data);
1232 }
1233
1234 void nfs_commit_prepare(struct rpc_task *task, void *calldata)
1235 {
1236         struct nfs_commit_data *data = calldata;
1237
1238         NFS_PROTO(data->inode)->commit_rpc_prepare(task, data);
1239 }
1240
1241 /*
1242  * Handle a write reply that flushes a whole page.
1243  *
1244  * FIXME: There is an inherent race with invalidate_inode_pages and
1245  *        writebacks since the page->count is kept > 1 for as long
1246  *        as the page has a write request pending.
1247  */
1248 static void nfs_writeback_done_common(struct rpc_task *task, void *calldata)
1249 {
1250         struct nfs_write_data   *data = calldata;
1251
1252         nfs_writeback_done(task, data);
1253 }
1254
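/*
 * Record the write verifier in the header the first time an unstable reply
 * needs a commit; if a later reply returns a different verifier, mark the
 * header so the affected requests are rescheduled for writing.
 */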
1255 static void nfs_writeback_release_common(void *calldata)
1256 {
1257         struct nfs_write_data   *data = calldata;
1258         struct nfs_pgio_header *hdr = data->header;
1259         int status = data->task.tk_status;
1260
1261         if ((status >= 0) && nfs_write_need_commit(data)) {
1262                 spin_lock(&hdr->lock);
1263                 if (test_bit(NFS_IOHDR_NEED_RESCHED, &hdr->flags))
1264                         ; /* Do nothing */
1265                 else if (!test_and_set_bit(NFS_IOHDR_NEED_COMMIT, &hdr->flags))
1266                         memcpy(hdr->verf, &data->verf, sizeof(*hdr->verf));
1267                 else if (memcmp(hdr->verf, &data->verf, sizeof(*hdr->verf)))
1268                         set_bit(NFS_IOHDR_NEED_RESCHED, &hdr->flags);
1269                 spin_unlock(&hdr->lock);
1270         }
1271         nfs_writedata_release(data);
1272 }
1273
1274 static const struct rpc_call_ops nfs_write_common_ops = {
1275         .rpc_call_prepare = nfs_write_prepare,
1276         .rpc_call_done = nfs_writeback_done_common,
1277         .rpc_release = nfs_writeback_release_common,
1278 };
1279
1280
1281 /*
1282  * This function is called when the WRITE call is complete.
1283  */
1284 void nfs_writeback_done(struct rpc_task *task, struct nfs_write_data *data)
1285 {
1286         struct nfs_writeargs    *argp = &data->args;
1287         struct nfs_writeres     *resp = &data->res;
1288         struct inode            *inode = data->header->inode;
1289         int status;
1290
1291         dprintk("NFS: %5u nfs_writeback_done (status %d)\n",
1292                 task->tk_pid, task->tk_status);
1293
1294         /*
1295          * ->write_done will attempt to use post-op attributes to detect
1296          * conflicting writes by other clients.  A strict interpretation
1297          * of close-to-open would allow us to continue caching even if
1298          * another writer had changed the file, but some applications
1299          * depend on tighter cache coherency when writing.
1300          */
1301         status = NFS_PROTO(inode)->write_done(task, data);
1302         if (status != 0)
1303                 return;
1304         nfs_add_stats(inode, NFSIOS_SERVERWRITTENBYTES, resp->count);
1305
1306 #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
1307         if (resp->verf->committed < argp->stable && task->tk_status >= 0) {
1308                 /* We tried a write call, but the server did not
1309                  * commit data to stable storage even though we
1310                  * requested it.
1311                  * Note: There is a known bug in Tru64 < 5.0 in which
1312                  *       the server reports NFS_DATA_SYNC, but performs
1313                  *       NFS_FILE_SYNC. We therefore implement this checking
1314                  *       as a dprintk() in order to avoid filling syslog.
1315                  */
1316                 static unsigned long    complain;
1317
1318                 /* Note this will print the MDS for a DS write */
1319                 if (time_before(complain, jiffies)) {
1320                         dprintk("NFS:       faulty NFS server %s:"
1321                                 " (committed = %d) != (stable = %d)\n",
1322                                 NFS_SERVER(inode)->nfs_client->cl_hostname,
1323                                 resp->verf->committed, argp->stable);
1324                         complain = jiffies + 300 * HZ;
1325                 }
1326         }
1327 #endif
1328         if (task->tk_status < 0)
1329                 nfs_set_pgio_error(data->header, task->tk_status, argp->offset);
1330         else if (resp->count < argp->count) {
1331                 static unsigned long    complain;
1332
1333                 /* This is a short write! */
1334                 nfs_inc_stats(inode, NFSIOS_SHORTWRITE);
1335
1336                 /* Has the server at least made some progress? */
1337                 if (resp->count == 0) {
1338                         if (time_before(complain, jiffies)) {
1339                                 printk(KERN_WARNING
1340                                        "NFS: Server wrote zero bytes, expected %u.\n",
1341                                        argp->count);
1342                                 complain = jiffies + 300 * HZ;
1343                         }
1344                         nfs_set_pgio_error(data->header, -EIO, argp->offset);
1345                         task->tk_status = -EIO;
1346                         return;
1347                 }
1348                 /* Was this an NFSv2 write or an NFSv3 stable write? */
1349                 if (resp->verf->committed != NFS_UNSTABLE) {
1350                         /* Resend from where the server left off */
1351                         data->mds_offset += resp->count;
1352                         argp->offset += resp->count;
1353                         argp->pgbase += resp->count;
1354                         argp->count -= resp->count;
1355                 } else {
1356                         /* Resend as a stable write in order to avoid
1357                          * headaches in the case of a server crash.
1358                          */
1359                         argp->stable = NFS_FILE_SYNC;
1360                 }
1361                 rpc_restart_call_prepare(task);
1362         }
1363 }
1364
1365
1366 #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
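/*
 * Take the NFS_INO_COMMIT bit lock. Returns 1 once the lock is held,
 * 0 if it is busy and @may_wait is not set, or a negative error if the
 * wait is interrupted by a fatal signal.
 */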
1367 static int nfs_commit_set_lock(struct nfs_inode *nfsi, int may_wait)
1368 {
1369         int ret;
1370
1371         if (!test_and_set_bit(NFS_INO_COMMIT, &nfsi->flags))
1372                 return 1;
1373         if (!may_wait)
1374                 return 0;
1375         ret = out_of_line_wait_on_bit_lock(&nfsi->flags,
1376                                 NFS_INO_COMMIT,
1377                                 nfs_wait_bit_killable,
1378                                 TASK_KILLABLE);
1379         return (ret < 0) ? ret : 1;
1380 }
1381
1382 static void nfs_commit_clear_lock(struct nfs_inode *nfsi)
1383 {
1384         clear_bit(NFS_INO_COMMIT, &nfsi->flags);
1385         smp_mb__after_clear_bit();
1386         wake_up_bit(&nfsi->flags, NFS_INO_COMMIT);
1387 }
1388
1389 void nfs_commitdata_release(struct nfs_commit_data *data)
1390 {
1391         put_nfs_open_context(data->context);
1392         nfs_commit_free(data);
1393 }
1394 EXPORT_SYMBOL_GPL(nfs_commitdata_release);
1395
1396 int nfs_initiate_commit(struct rpc_clnt *clnt, struct nfs_commit_data *data,
1397                         const struct rpc_call_ops *call_ops,
1398                         int how, int flags)
1399 {
1400         struct rpc_task *task;
1401         int priority = flush_task_priority(how);
1402         struct rpc_message msg = {
1403                 .rpc_argp = &data->args,
1404                 .rpc_resp = &data->res,
1405                 .rpc_cred = data->cred,
1406         };
1407         struct rpc_task_setup task_setup_data = {
1408                 .task = &data->task,
1409                 .rpc_client = clnt,
1410                 .rpc_message = &msg,
1411                 .callback_ops = call_ops,
1412                 .callback_data = data,
1413                 .workqueue = nfsiod_workqueue,
1414                 .flags = RPC_TASK_ASYNC | flags,
1415                 .priority = priority,
1416         };
1417         /* Set up the initial task struct.  */
1418         NFS_PROTO(data->inode)->commit_setup(data, &msg);
1419
1420         dprintk("NFS: %5u initiated commit call\n", data->task.tk_pid);
1421
1422         task = rpc_run_task(&task_setup_data);
1423         if (IS_ERR(task))
1424                 return PTR_ERR(task);
1425         if (how & FLUSH_SYNC)
1426                 rpc_wait_for_completion_task(task);
1427         rpc_put_task(task);
1428         return 0;
1429 }
1430 EXPORT_SYMBOL_GPL(nfs_initiate_commit);
1431
1432 /*
1433  * Set up the argument/result storage required for the RPC call.
1434  */
1435 void nfs_init_commit(struct nfs_commit_data *data,
1436                      struct list_head *head,
1437                      struct pnfs_layout_segment *lseg,
1438                      struct nfs_commit_info *cinfo)
1439 {
1440         struct nfs_page *first = nfs_list_entry(head->next);
1441         struct inode *inode = first->wb_context->dentry->d_inode;
1442
1443         /* Set up the RPC argument and reply structs
1444          * NB: take care not to mess about with data->commit et al. */
1445
1446         list_splice_init(head, &data->pages);
1447
1448         data->inode       = inode;
1449         data->cred        = first->wb_context->cred;
1450         data->lseg        = lseg; /* reference transferred */
1451         data->mds_ops     = &nfs_commit_ops;
1452         data->completion_ops = cinfo->completion_ops;
1453         data->dreq        = cinfo->dreq;
1454
1455         data->args.fh     = NFS_FH(data->inode);
1456         /* Note: we always request a commit of the entire inode */
1457         data->args.offset = 0;
1458         data->args.count  = 0;
1459         data->context     = get_nfs_open_context(first->wb_context);
1460         data->res.fattr   = &data->fattr;
1461         data->res.verf    = &data->verf;
1462         nfs_fattr_init(&data->fattr);
1463 }
1464 EXPORT_SYMBOL_GPL(nfs_init_commit);
1465
1466 void nfs_retry_commit(struct list_head *page_list,
1467                       struct pnfs_layout_segment *lseg,
1468                       struct nfs_commit_info *cinfo)
1469 {
1470         struct nfs_page *req;
1471
1472         while (!list_empty(page_list)) {
1473                 req = nfs_list_entry(page_list->next);
1474                 nfs_list_remove_request(req);
1475                 nfs_mark_request_commit(req, lseg, cinfo);
1476                 if (!cinfo->dreq) {
1477                         dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
1478                         dec_bdi_stat(req->wb_page->mapping->backing_dev_info,
1479                                      BDI_RECLAIMABLE);
1480                 }
1481                 nfs_unlock_and_release_request(req);
1482         }
1483 }
1484 EXPORT_SYMBOL_GPL(nfs_retry_commit);
1485
1486 /*
1487  * Commit dirty pages
1488  */
1489 static int
1490 nfs_commit_list(struct inode *inode, struct list_head *head, int how,
1491                 struct nfs_commit_info *cinfo)
1492 {
1493         struct nfs_commit_data  *data;
1494
1495         data = nfs_commitdata_alloc();
1496
1497         if (!data)
1498                 goto out_bad;
1499
1500         /* Set up the argument struct */
1501         nfs_init_commit(data, head, NULL, cinfo);
1502         atomic_inc(&cinfo->mds->rpcs_out);
1503         return nfs_initiate_commit(NFS_CLIENT(inode), data, data->mds_ops,
1504                                    how, 0);
1505  out_bad:
1506         nfs_retry_commit(head, NULL, cinfo);
1507         cinfo->completion_ops->error_cleanup(NFS_I(inode));
1508         return -ENOMEM;
1509 }
1510
1511 /*
1512  * COMMIT call returned
1513  */
1514 static void nfs_commit_done(struct rpc_task *task, void *calldata)
1515 {
1516         struct nfs_commit_data  *data = calldata;
1517
1518         dprintk("NFS: %5u nfs_commit_done (status %d)\n",
1519                                 task->tk_pid, task->tk_status);
1520
1521         /* Call the NFS version-specific code */
1522         NFS_PROTO(data->inode)->commit_done(task, data);
1523 }
1524
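/*
 * Process the pages covered by a completed COMMIT: on error the requests
 * are failed; on success each stored write verifier is compared with the
 * one returned by the server, matches are freed and mismatches are
 * redirtied so the data is written again.
 */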
1525 static void nfs_commit_release_pages(struct nfs_commit_data *data)
1526 {
1527         struct nfs_page *req;
1528         int status = data->task.tk_status;
1529         struct nfs_commit_info cinfo;
1530
1531         while (!list_empty(&data->pages)) {
1532                 req = nfs_list_entry(data->pages.next);
1533                 nfs_list_remove_request(req);
1534                 nfs_clear_page_commit(req->wb_page);
1535
1536                 dprintk("NFS:       commit (%s/%lld %d@%lld)",
1537                         req->wb_context->dentry->d_sb->s_id,
1538                         (long long)NFS_FILEID(req->wb_context->dentry->d_inode),
1539                         req->wb_bytes,
1540                         (long long)req_offset(req));
1541                 if (status < 0) {
1542                         nfs_context_set_write_error(req->wb_context, status);
1543                         nfs_inode_remove_request(req);
1544                         dprintk(", error = %d\n", status);
1545                         goto next;
1546                 }
1547
1548                 /* The COMMIT succeeded. Check the verifier the server returned
1549                  * against each stored verf; a mismatch means the server may have rebooted and lost the unstable writes. */
1550                 if (!memcmp(req->wb_verf.verifier, data->verf.verifier, sizeof(data->verf.verifier))) {
1551                         /* We have a match */
1552                         nfs_inode_remove_request(req);
1553                         dprintk(" OK\n");
1554                         goto next;
1555                 }
1556                 /* We have a mismatch. Write the page again */
1557                 dprintk(" mismatch\n");
1558                 nfs_mark_request_dirty(req);
1559         next:
1560                 nfs_unlock_and_release_request(req);
1561         }
1562         nfs_init_cinfo(&cinfo, data->inode, data->dreq);
1563         if (atomic_dec_and_test(&cinfo.mds->rpcs_out))
1564                 nfs_commit_clear_lock(NFS_I(data->inode));
1565 }
1566
1567 static void nfs_commit_release(void *calldata)
1568 {
1569         struct nfs_commit_data *data = calldata;
1570
1571         data->completion_ops->completion(data);
1572         nfs_commitdata_release(calldata);
1573 }
1574
1575 static const struct rpc_call_ops nfs_commit_ops = {
1576         .rpc_call_prepare = nfs_commit_prepare,
1577         .rpc_call_done = nfs_commit_done,
1578         .rpc_release = nfs_commit_release,
1579 };
1580
1581 static const struct nfs_commit_completion_ops nfs_commit_completion_ops = {
1582         .completion = nfs_commit_release_pages,
1583         .error_cleanup = nfs_commit_clear_lock,
1584 };
1585
1586 int nfs_generic_commit_list(struct inode *inode, struct list_head *head,
1587                             int how, struct nfs_commit_info *cinfo)
1588 {
1589         int status;
1590
1591         status = pnfs_commit_list(inode, head, how, cinfo);
1592         if (status == PNFS_NOT_ATTEMPTED)
1593                 status = nfs_commit_list(inode, head, how, cinfo);
1594         return status;
1595 }
1596
1597 int nfs_commit_inode(struct inode *inode, int how)
1598 {
1599         LIST_HEAD(head);
1600         struct nfs_commit_info cinfo;
1601         int may_wait = how & FLUSH_SYNC;
1602         int res;
1603
1604         res = nfs_commit_set_lock(NFS_I(inode), may_wait);
1605         if (res <= 0)
1606                 goto out_mark_dirty;
1607         nfs_init_cinfo_from_inode(&cinfo, inode);
1608         res = nfs_scan_commit(inode, &head, &cinfo);
1609         if (res) {
1610                 int error;
1611
1612                 error = nfs_generic_commit_list(inode, &head, how, &cinfo);
1613                 if (error < 0)
1614                         return error;
1615                 if (!may_wait)
1616                         goto out_mark_dirty;
1617                 error = wait_on_bit(&NFS_I(inode)->flags,
1618                                 NFS_INO_COMMIT,
1619                                 nfs_wait_bit_killable,
1620                                 TASK_KILLABLE);
1621                 if (error < 0)
1622                         return error;
1623         } else
1624                 nfs_commit_clear_lock(NFS_I(inode));
1625         return res;
1626         /* Note: If we exit without ensuring that the commit is complete,
1627          * we must mark the inode as dirty. Otherwise, future calls to
1628          * sync_inode() with the WB_SYNC_ALL flag set will fail to ensure
1629          * that the data is on the disk.
1630          */
1631 out_mark_dirty:
1632         __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
1633         return res;
1634 }
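
/*
 * Editor's note (usage illustration drawn from this file): callers pass
 * FLUSH_SYNC when they must wait for the COMMIT reply, e.g. nfs_wb_page()
 * below does
 *
 *	ret = nfs_commit_inode(inode, FLUSH_SYNC);
 *
 * while nfs_commit_unstable_pages() passes how == 0 for a fire-and-forget
 * commit when the writeback is WB_SYNC_NONE.
 */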
1635
1636 static int nfs_commit_unstable_pages(struct inode *inode, struct writeback_control *wbc)
1637 {
1638         struct nfs_inode *nfsi = NFS_I(inode);
1639         int flags = FLUSH_SYNC;
1640         int ret = 0;
1641
1642         /* no commits means nothing needs to be done */
1643         if (!nfsi->commit_info.ncommit)
1644                 return ret;
1645
1646         if (wbc->sync_mode == WB_SYNC_NONE) {
1647                 /* Don't commit yet if this is a non-blocking flush and there
1648                  * are a lot of outstanding writes for this mapping.
1649                  */
1650                 if (nfsi->commit_info.ncommit <= (nfsi->npages >> 1))
1651                         goto out_mark_dirty;
1652
1653                 /* don't wait for the COMMIT response */
1654                 flags = 0;
1655         }
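        /*
         * Worked example (editor's note, illustrative numbers): with
         * nfsi->npages == 1000 pages cached for this inode and only
         * ncommit == 300 of them awaiting commit, 300 <= (1000 >> 1), so a
         * WB_SYNC_NONE flush takes the out_mark_dirty path and defers the
         * COMMIT; once more than half of the pages are waiting, an
         * asynchronous COMMIT (flags == 0) is sent instead.
         */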
1656
1657         ret = nfs_commit_inode(inode, flags);
1658         if (ret >= 0) {
1659                 if (wbc->sync_mode == WB_SYNC_NONE) {
1660                         if (ret < wbc->nr_to_write)
1661                                 wbc->nr_to_write -= ret;
1662                         else
1663                                 wbc->nr_to_write = 0;
1664                 }
1665                 return 0;
1666         }
1667 out_mark_dirty:
1668         __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
1669         return ret;
1670 }
1671 #else
1672 static int nfs_commit_unstable_pages(struct inode *inode, struct writeback_control *wbc)
1673 {
1674         return 0;
1675 }
1676 #endif
1677
1678 int nfs_write_inode(struct inode *inode, struct writeback_control *wbc)
1679 {
1680         int ret;
1681
1682         ret = nfs_commit_unstable_pages(inode, wbc);
1683         if (ret >= 0 && test_bit(NFS_INO_LAYOUTCOMMIT, &NFS_I(inode)->flags)) {
1684                 int status;
1685                 bool sync = true;
1686
1687                 if (wbc->sync_mode == WB_SYNC_NONE)
1688                         sync = false;
1689
1690                 status = pnfs_layoutcommit_inode(inode, sync);
1691                 if (status < 0)
1692                         return status;
1693         }
1694         return ret;
1695 }
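
/*
 * Editor's sketch (an assumption about wiring, based on the usual NFS client
 * layout rather than on this file): nfs_write_inode() is installed as the
 * ->write_inode method in the client's super_operations, e.g. in
 * fs/nfs/super.c:
 *
 *	static const struct super_operations nfs_sops = {
 *		...
 *		.write_inode	= nfs_write_inode,
 *		...
 *	};
 *
 * so the VFS writeback path drives the COMMIT and layoutcommit handling
 * above whenever it writes an NFS inode.
 */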
1696
1697 /*
1698  * flush the inode to disk.
1699  */
1700 int nfs_wb_all(struct inode *inode)
1701 {
1702         struct writeback_control wbc = {
1703                 .sync_mode = WB_SYNC_ALL,
1704                 .nr_to_write = LONG_MAX,
1705                 .range_start = 0,
1706                 .range_end = LLONG_MAX,
1707         };
1708
1709         return sync_inode(inode, &wbc);
1710 }
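
/*
 * Editor's sketch (hypothetical caller, for illustration only): any path
 * that must guarantee every dirty page and pending COMMIT for an inode has
 * reached the server before proceeding can simply do:
 *
 *	int err = nfs_wb_all(inode);
 *	if (err)
 *		return err;
 *
 * The WB_SYNC_ALL writeback_control above makes sync_inode() write out and
 * wait on the entire file range.
 */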
1711
1712 int nfs_wb_page_cancel(struct inode *inode, struct page *page)
1713 {
1714         struct nfs_page *req;
1715         int ret = 0;
1716
1717         BUG_ON(!PageLocked(page));
1718         for (;;) {
1719                 wait_on_page_writeback(page);
1720                 req = nfs_page_find_request(page);
1721                 if (req == NULL)
1722                         break;
1723                 if (nfs_lock_request(req)) {
1724                         nfs_clear_request_commit(req);
1725                         nfs_inode_remove_request(req);
1726                         /*
1727                          * In case nfs_inode_remove_request has marked the
1728                          * page as being dirty
1729                          */
1730                         cancel_dirty_page(page, PAGE_CACHE_SIZE);
1731                         nfs_unlock_and_release_request(req);
1732                         break;
1733                 }
1734                 ret = nfs_wait_on_request(req);
1735                 nfs_release_request(req);
1736                 if (ret < 0)
1737                         break;
1738         }
1739         return ret;
1740 }
1741
1742 /*
1743  * Write back all requests on one page - we do this before reading it.
1744  */
1745 int nfs_wb_page(struct inode *inode, struct page *page)
1746 {
1747         loff_t range_start = page_offset(page);
1748         loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
1749         struct writeback_control wbc = {
1750                 .sync_mode = WB_SYNC_ALL,
1751                 .nr_to_write = 0,
1752                 .range_start = range_start,
1753                 .range_end = range_end,
1754         };
1755         int ret;
1756
1757         for (;;) {
1758                 wait_on_page_writeback(page);
1759                 if (clear_page_dirty_for_io(page)) {
1760                         ret = nfs_writepage_locked(page, &wbc);
1761                         if (ret < 0)
1762                                 goto out_error;
1763                         continue;
1764                 }
1765                 if (!PagePrivate(page))
1766                         break;
1767                 ret = nfs_commit_inode(inode, FLUSH_SYNC);
1768                 if (ret < 0)
1769                         goto out_error;
1770         }
1771         return 0;
1772 out_error:
1773         return ret;
1774 }
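
/*
 * Editor's sketch (assumes a ->launder_page style hook such as the one in
 * fs/nfs/file.c): a locked dirty page is flushed with nfs_wb_page() before
 * it may be released or truncated, roughly:
 *
 *	static int nfs_launder_page(struct page *page)
 *	{
 *		struct inode *inode = page->mapping->host;
 *
 *		return nfs_wb_page(inode, page);
 *	}
 */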
1775
1776 #ifdef CONFIG_MIGRATION
1777 int nfs_migrate_page(struct address_space *mapping, struct page *newpage,
1778                 struct page *page, enum migrate_mode mode)
1779 {
1780         /*
1781          * If PagePrivate is set, then the page is currently associated with
1782          * an in-progress read or write request. Don't try to migrate it.
1783          *
1784          * FIXME: we could do this in principle, but we'll need a way to ensure
1785          *        that we can safely release the inode reference while holding
1786          *        the page lock.
1787          */
1788         if (PagePrivate(page))
1789                 return -EBUSY;
1790
1791         nfs_fscache_release_page(page, GFP_KERNEL);
1792
1793         return migrate_page(mapping, newpage, page, mode);
1794 }
1795 #endif
1796
1797 int __init nfs_init_writepagecache(void)
1798 {
1799         nfs_wdata_cachep = kmem_cache_create("nfs_write_data",
1800                                              sizeof(struct nfs_write_header),
1801                                              0, SLAB_HWCACHE_ALIGN,
1802                                              NULL);
1803         if (nfs_wdata_cachep == NULL)
1804                 return -ENOMEM;
1805
1806         nfs_wdata_mempool = mempool_create_slab_pool(MIN_POOL_WRITE,
1807                                                      nfs_wdata_cachep);
1808         if (nfs_wdata_mempool == NULL)
1809                 goto out_destroy_wdata_cache;
1810
1811         nfs_cdata_cachep = kmem_cache_create("nfs_commit_data",
1812                                              sizeof(struct nfs_commit_data),
1813                                              0, SLAB_HWCACHE_ALIGN,
1814                                              NULL);
1815         if (nfs_cdata_cachep == NULL)
1816                 goto out_destroy_wdata_mempool;
1817
1818         nfs_commit_mempool = mempool_create_slab_pool(MIN_POOL_COMMIT,
1819                                                       nfs_cdata_cachep);
1820         if (nfs_commit_mempool == NULL)
1821                 goto out_destroy_cdata_cache;
1822
1823         /*
1824          * NFS congestion size, scale with available memory.
1825          *
1826          *  64MB:    8192k
1827          * 128MB:   11585k
1828          * 256MB:   16384k
1829          * 512MB:   23170k
1830          *   1GB:   32768k
1831          *   2GB:   46340k
1832          *   4GB:   65536k
1833          *   8GB:   92681k
1834          *  16GB:  131072k
1835          *
1836          * This allows larger machines to have larger/more transfers.
1837          * Limit the default to 256M
1838          */
1839         nfs_congestion_kb = (16*int_sqrt(totalram_pages)) << (PAGE_SHIFT-10);
1840         if (nfs_congestion_kb > 256*1024)
1841                 nfs_congestion_kb = 256*1024;
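
        /*
         * Worked example (editor's note): with 1GB of RAM and 4KB pages,
         * totalram_pages == 262144, int_sqrt(262144) == 512 and
         * 16 * 512 == 8192 pages; shifting by (PAGE_SHIFT - 10) == 2
         * converts pages to kilobytes, giving 32768k as in the table above.
         * The clamp above caps nfs_congestion_kb at 256*1024k (256MB) on
         * very large machines.
         */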
1842
1843         return 0;

/* unwind whatever was set up before the failing allocation */
out_destroy_cdata_cache:
        kmem_cache_destroy(nfs_cdata_cachep);
out_destroy_wdata_mempool:
        mempool_destroy(nfs_wdata_mempool);
out_destroy_wdata_cache:
        kmem_cache_destroy(nfs_wdata_cachep);
        return -ENOMEM;
1844 }
1845
1846 void nfs_destroy_writepagecache(void)
1847 {
1848         mempool_destroy(nfs_commit_mempool);
        /* the commit mempool draws from nfs_cdata_cachep, so free the cache after the pool */
        kmem_cache_destroy(nfs_cdata_cachep);
1849         mempool_destroy(nfs_wdata_mempool);
1850         kmem_cache_destroy(nfs_wdata_cachep);
1851 }
1852