/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Implementation of cl_page for VVP layer.
 *
 *   Author: Nikita Danilov <nikita.danilov@sun.com>
 *   Author: Jinshan Xiong <jinshan.xiong@whamcloud.com>
 */

#define DEBUG_SUBSYSTEM S_LLITE

#include <obd.h>
#include <lustre_lite.h>

#include "vvp_internal.h"

/*****************************************************************************
 *
 * Page operations.
 *
 */

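/*
 * Drop the page-cache reference on the VM page that was taken in
 * vvp_page_init(); common tail of both cacheable and transient page
 * finalization.
 */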
static void vvp_page_fini_common(struct ccc_page *cp)
{
        struct page *vmpage = cp->cpg_page;

        LASSERT(vmpage != NULL);
        page_cache_release(vmpage);
}

static void vvp_page_fini(const struct lu_env *env,
                          struct cl_page_slice *slice)
{
        struct ccc_page *cp     = cl2ccc_page(slice);
        struct page     *vmpage = cp->cpg_page;

        /*
         * vmpage->private was already cleared when the page was moved into
         * VPG_FREEING state.
         */
        LASSERT((struct cl_page *)vmpage->private != slice->cpl_page);
        vvp_page_fini_common(cp);
}

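/*
 * Implements cl_page_operations::cpo_own(): lock the VM page and wait for
 * any writeback to finish; in nonblocking mode, fail with -EAGAIN instead
 * of waiting.
 */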
static int vvp_page_own(const struct lu_env *env,
                        const struct cl_page_slice *slice, struct cl_io *io,
                        int nonblock)
{
        struct ccc_page *vpg    = cl2ccc_page(slice);
        struct page     *vmpage = vpg->cpg_page;

        LASSERT(vmpage != NULL);
        if (nonblock) {
                if (!trylock_page(vmpage))
                        return -EAGAIN;

                if (unlikely(PageWriteback(vmpage))) {
                        unlock_page(vmpage);
                        return -EAGAIN;
                }

                return 0;
        }

        lock_page(vmpage);
        wait_on_page_writeback(vmpage);
        return 0;
}

static void vvp_page_assume(const struct lu_env *env,
                            const struct cl_page_slice *slice,
                            struct cl_io *unused)
{
        struct page *vmpage = cl2vm_page(slice);

        LASSERT(vmpage != NULL);
        LASSERT(PageLocked(vmpage));
        wait_on_page_writeback(vmpage);
}

static void vvp_page_unassume(const struct lu_env *env,
                              const struct cl_page_slice *slice,
                              struct cl_io *unused)
{
        struct page *vmpage = cl2vm_page(slice);

        LASSERT(vmpage != NULL);
        LASSERT(PageLocked(vmpage));
}

static void vvp_page_disown(const struct lu_env *env,
                            const struct cl_page_slice *slice, struct cl_io *io)
{
        struct page *vmpage = cl2vm_page(slice);

        LASSERT(vmpage != NULL);
        LASSERT(PageLocked(vmpage));

        unlock_page(vmpage);
}

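/*
 * Implements cl_page_operations::cpo_discard(): count a never-used
 * read-ahead page in the statistics and drop the page from the page cache
 * without writing it back.
 */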
static void vvp_page_discard(const struct lu_env *env,
                             const struct cl_page_slice *slice,
                             struct cl_io *unused)
{
        struct page          *vmpage = cl2vm_page(slice);
        struct address_space *mapping;
        struct ccc_page      *cpg    = cl2ccc_page(slice);

        LASSERT(vmpage != NULL);
        LASSERT(PageLocked(vmpage));

        mapping = vmpage->mapping;

        if (cpg->cpg_defer_uptodate && !cpg->cpg_ra_used)
                ll_ra_stats_inc(mapping, RA_STAT_DISCARDED);

        /*
         * truncate_complete_page() calls
         * a_ops->invalidatepage()->cl_page_delete()->vvp_page_delete().
         */
        truncate_complete_page(mapping, vmpage);
}

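/*
 * Implements cl_page_operations::cpo_unmap(): tear down user-space mappings
 * of the file range covered by this page.
 */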
static int vvp_page_unmap(const struct lu_env *env,
                          const struct cl_page_slice *slice,
                          struct cl_io *unused)
{
        struct page *vmpage = cl2vm_page(slice);
        __u64        offset;

        LASSERT(vmpage != NULL);
        LASSERT(PageLocked(vmpage));

        /* Widen before shifting so the byte offset cannot overflow. */
        offset = (__u64)vmpage->index << PAGE_CACHE_SHIFT;

        /*
         * XXX is it safe to call this with the page lock held?
         */
        ll_teardown_mmaps(vmpage->mapping, offset, offset + PAGE_CACHE_SIZE);
        return 0;
}

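/*
 * Implements cl_page_operations::cpo_delete(): called with the page in
 * VPG_FREEING state to sever the link from the VM page back to the cl_page.
 */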
static void vvp_page_delete(const struct lu_env *env,
                            const struct cl_page_slice *slice)
{
        struct page      *vmpage = cl2vm_page(slice);
        struct inode     *inode  = vmpage->mapping->host;
        struct cl_object *obj    = slice->cpl_obj;

        LASSERT(PageLocked(vmpage));
        LASSERT((struct cl_page *)vmpage->private == slice->cpl_page);
        LASSERT(inode == ccc_object_inode(obj));

        vvp_write_complete(cl2ccc(obj), cl2ccc_page(slice));
        ClearPagePrivate(vmpage);
        vmpage->private = 0;
        /*
         * The reference from vmpage to cl_page is removed, but the reference
         * back is still here. It is removed later in vvp_page_fini().
         */
}

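/*
 * Implements cl_page_operations::cpo_export(): propagate the cl_page
 * up-to-date state to the PG_uptodate bit of the VM page.
 */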
static void vvp_page_export(const struct lu_env *env,
                            const struct cl_page_slice *slice,
                            int uptodate)
{
        struct page *vmpage = cl2vm_page(slice);

        LASSERT(vmpage != NULL);
        LASSERT(PageLocked(vmpage));
        if (uptodate)
                SetPageUptodate(vmpage);
        else
                ClearPageUptodate(vmpage);
}

static int vvp_page_is_vmlocked(const struct lu_env *env,
                                const struct cl_page_slice *slice)
{
        return PageLocked(cl2vm_page(slice)) ? -EBUSY : -ENODATA;
}

static int vvp_page_prep_read(const struct lu_env *env,
                              const struct cl_page_slice *slice,
                              struct cl_io *unused)
{
        ENTRY;
        /* Skip the page already marked as PG_uptodate. */
        RETURN(PageUptodate(cl2vm_page(slice)) ? -EALREADY : 0);
}

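/*
 * Implements cl_page_operations::cpo_prep() for write: flag the VM page as
 * under writeback and record it as write-pending on the object.
 */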
static int vvp_page_prep_write(const struct lu_env *env,
                               const struct cl_page_slice *slice,
                               struct cl_io *unused)
{
        struct page *vmpage = cl2vm_page(slice);

        LASSERT(PageLocked(vmpage));
        LASSERT(!PageDirty(vmpage));

        set_page_writeback(vmpage);
        vvp_write_pending(cl2ccc(slice->cpl_obj), cl2ccc_page(slice));

        return 0;
}

/**
 * Handles page transfer errors at VM level.
 *
 * This takes inode as a separate argument, because the inode on which the
 * error is to be set can be different from the \a vmpage inode in the case
 * of direct-io.
 */
static void vvp_vmpage_error(struct inode *inode, struct page *vmpage,
                             int ioret)
{
        struct ccc_object *obj = cl_inode2ccc(inode);

        if (ioret == 0) {
                ClearPageError(vmpage);
                obj->cob_discard_page_warned = 0;
        } else {
                SetPageError(vmpage);
                if (ioret == -ENOSPC)
                        set_bit(AS_ENOSPC, &inode->i_mapping->flags);
                else
                        set_bit(AS_EIO, &inode->i_mapping->flags);

                if ((ioret == -ESHUTDOWN || ioret == -EINTR) &&
                     obj->cob_discard_page_warned == 0) {
                        obj->cob_discard_page_warned = 1;
                        ll_dirty_page_discard_warn(vmpage, ioret);
                }
        }
}

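/*
 * Implements cl_page_operations::cpo_completion() for read: return the
 * read-ahead credit if one was taken, export the page as up to date on
 * success unless read-ahead has deferred that, and unlock the VM page
 * unless a synchronous IO will do it.
 */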
static void vvp_page_completion_read(const struct lu_env *env,
                                     const struct cl_page_slice *slice,
                                     int ioret)
{
        struct ccc_page *cp     = cl2ccc_page(slice);
        struct page     *vmpage = cp->cpg_page;
        struct cl_page  *page   = cl_page_top(slice->cpl_page);
        struct inode    *inode  = ccc_object_inode(page->cp_obj);
        ENTRY;

        LASSERT(PageLocked(vmpage));
        CL_PAGE_HEADER(D_PAGE, env, page, "completing READ with %d\n", ioret);

        if (cp->cpg_defer_uptodate)
                ll_ra_count_put(ll_i2sbi(inode), 1);

        if (ioret == 0) {
                if (!cp->cpg_defer_uptodate)
                        cl_page_export(env, page, 1);
        } else {
                cp->cpg_defer_uptodate = 0;
        }

        if (page->cp_sync_io == NULL)
                unlock_page(vmpage);

        EXIT;
}

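/*
 * Implements cl_page_operations::cpo_completion() for write: take the page
 * off the write-pending accounting, record any transfer error on the VM
 * page for asynchronous writes, and end the writeback.
 */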
static void vvp_page_completion_write(const struct lu_env *env,
                                      const struct cl_page_slice *slice,
                                      int ioret)
{
        struct ccc_page *cp     = cl2ccc_page(slice);
        struct cl_page  *pg     = slice->cpl_page;
        struct page     *vmpage = cp->cpg_page;
        ENTRY;

        LASSERT(ergo(pg->cp_sync_io != NULL, PageLocked(vmpage)));
        LASSERT(PageWriteback(vmpage));

        CL_PAGE_HEADER(D_PAGE, env, pg, "completing WRITE with %d\n", ioret);

        /*
         * TODO: It would make sense to add the page back into the oap
         * pending list, so that we would not need to take it off the SoM
         * write-pending list when we merely hit a recoverable error such
         * as -ENOMEM. To implement this, return a non-zero value from the
         * ->cpo_completion() method; the underlying transfer would be
         * notified and would then re-add the page to the pending transfer
         * queue.  -jay
         */

        cp->cpg_write_queued = 0;
        vvp_write_complete(cl2ccc(slice->cpl_obj), cp);

        /*
         * Only mark the page as in error when it's an async write, because
         * the application won't wait for the IO to finish.
         */
        if (pg->cp_sync_io == NULL)
                vvp_vmpage_error(ccc_object_inode(pg->cp_obj), vmpage, ioret);

        end_page_writeback(vmpage);
        EXIT;
}

/**
 * Implements cl_page_operations::cpo_make_ready() method.
 *
 * This is called to yank a page from the transfer cache and to send it out
 * as a part of transfer. The function locks the page and bails out when the
 * page is already being transferred by a concurrent IO, in which case it
 * should be skipped (this is bad, but hopefully a rare situation, as it
 * usually results in the transfer being shorter than possible).
 *
 * \retval 0        success, page can be placed into transfer
 *
 * \retval -EALREADY page is either used by a concurrent IO or has been
 * truncated. Skip it.
 */
static int vvp_page_make_ready(const struct lu_env *env,
                               const struct cl_page_slice *slice)
{
        struct page *vmpage = cl2vm_page(slice);
        struct cl_page *pg = slice->cpl_page;
        int result = 0;

        lock_page(vmpage);
        if (clear_page_dirty_for_io(vmpage)) {
                LASSERT(pg->cp_state == CPS_CACHED);
                /* This actually clears the dirty bit in the radix tree. */
                set_page_writeback(vmpage);
                vvp_write_pending(cl2ccc(slice->cpl_obj),
                                  cl2ccc_page(slice));
                CL_PAGE_HEADER(D_PAGE, env, pg, "readied\n");
        } else if (pg->cp_state == CPS_PAGEOUT) {
                /* is it possible for osc_flush_async_page() to already
                 * make it ready? */
                result = -EALREADY;
        } else {
                CL_PAGE_DEBUG(D_ERROR, env, pg, "Unexpected page state %d.\n",
                              pg->cp_state);
                LBUG();
        }
        unlock_page(vmpage);
        RETURN(result);
}

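/*
 * Implements cl_page_operations::cpo_print(): dump the state of the VVP
 * slice and of the underlying VM page for debugging.
 */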
static int vvp_page_print(const struct lu_env *env,
                          const struct cl_page_slice *slice,
                          void *cookie, lu_printer_t printer)
{
        struct ccc_page *vp     = cl2ccc_page(slice);
        struct page     *vmpage = vp->cpg_page;

        (*printer)(env, cookie, LUSTRE_VVP_NAME"-page@%p(%d:%d:%d) vm@%p ",
                   vp, vp->cpg_defer_uptodate, vp->cpg_ra_used,
                   vp->cpg_write_queued, vmpage);
        if (vmpage != NULL) {
                (*printer)(env, cookie, "%lx %d:%d %lx %lu %slru",
                           (long)vmpage->flags, page_count(vmpage),
                           page_mapcount(vmpage), vmpage->private,
                           page_index(vmpage),
                           list_empty(&vmpage->lru) ? "not-" : "");
        }
        (*printer)(env, cookie, "\n");
        return 0;
}

399
400 static const struct cl_page_operations vvp_page_ops = {
401         .cpo_own           = vvp_page_own,
402         .cpo_assume     = vvp_page_assume,
403         .cpo_unassume      = vvp_page_unassume,
404         .cpo_disown     = vvp_page_disown,
405         .cpo_vmpage     = ccc_page_vmpage,
406         .cpo_discard       = vvp_page_discard,
407         .cpo_delete     = vvp_page_delete,
408         .cpo_unmap       = vvp_page_unmap,
409         .cpo_export     = vvp_page_export,
410         .cpo_is_vmlocked   = vvp_page_is_vmlocked,
411         .cpo_fini         = vvp_page_fini,
412         .cpo_print       = vvp_page_print,
413         .cpo_is_under_lock = ccc_page_is_under_lock,
414         .io = {
415                 [CRT_READ] = {
416                         .cpo_prep       = vvp_page_prep_read,
417                         .cpo_completion  = vvp_page_completion_read,
418                         .cpo_make_ready  = ccc_fail,
419                 },
420                 [CRT_WRITE] = {
421                         .cpo_prep       = vvp_page_prep_write,
422                         .cpo_completion  = vvp_page_completion_write,
423                         .cpo_make_ready  = vvp_page_make_ready,
424                 }
425         }
426 };
427
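/*
 * Transient pages (e.g. for direct IO) live under the inode mutex; assert
 * that the caller indeed holds it.
 */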
static void vvp_transient_page_verify(const struct cl_page *page)
{
        struct inode *inode = ccc_object_inode(page->cp_obj);

        LASSERT(!mutex_trylock(&inode->i_mutex));
}

static int vvp_transient_page_own(const struct lu_env *env,
                                  const struct cl_page_slice *slice,
                                  struct cl_io *unused, int nonblock)
{
        vvp_transient_page_verify(slice->cpl_page);
        return 0;
}

static void vvp_transient_page_assume(const struct lu_env *env,
                                      const struct cl_page_slice *slice,
                                      struct cl_io *unused)
{
        vvp_transient_page_verify(slice->cpl_page);
}

static void vvp_transient_page_unassume(const struct lu_env *env,
                                        const struct cl_page_slice *slice,
                                        struct cl_io *unused)
{
        vvp_transient_page_verify(slice->cpl_page);
}

static void vvp_transient_page_disown(const struct lu_env *env,
                                      const struct cl_page_slice *slice,
                                      struct cl_io *unused)
{
        vvp_transient_page_verify(slice->cpl_page);
}

static void vvp_transient_page_discard(const struct lu_env *env,
                                       const struct cl_page_slice *slice,
                                       struct cl_io *unused)
{
        struct cl_page *page = slice->cpl_page;

        vvp_transient_page_verify(slice->cpl_page);

        /*
         * For a transient page, simply remove it from the radix tree.
         */
        cl_page_delete(env, page);
}

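/*
 * A transient page has no VM page to lock; the inode mutex stands in for
 * the page lock, so report -EBUSY while it is held.
 */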
static int vvp_transient_page_is_vmlocked(const struct lu_env *env,
                                          const struct cl_page_slice *slice)
{
        struct inode *inode = ccc_object_inode(slice->cpl_obj);
        int           locked;

        locked = !mutex_trylock(&inode->i_mutex);
        if (!locked)
                mutex_unlock(&inode->i_mutex);
        return locked ? -EBUSY : -ENODATA;
}

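/*
 * Transfer completion is a no-op for transient pages beyond checking that
 * the inode mutex is still held.
 */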
static void
vvp_transient_page_completion(const struct lu_env *env,
                              const struct cl_page_slice *slice,
                              int ioret)
{
        vvp_transient_page_verify(slice->cpl_page);
}

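/*
 * Finalize a transient page: release the VM page and decrement the
 * per-object count of transient pages, still under the inode mutex.
 */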
static void vvp_transient_page_fini(const struct lu_env *env,
                                    struct cl_page_slice *slice)
{
        struct ccc_page   *cp    = cl2ccc_page(slice);
        struct cl_page    *clp   = slice->cpl_page;
        struct ccc_object *clobj = cl2ccc(clp->cp_obj);

        vvp_page_fini_common(cp);
        LASSERT(!mutex_trylock(&clobj->cob_inode->i_mutex));
        clobj->cob_transient_pages--;
}

static const struct cl_page_operations vvp_transient_page_ops = {
        .cpo_own           = vvp_transient_page_own,
        .cpo_assume        = vvp_transient_page_assume,
        .cpo_unassume      = vvp_transient_page_unassume,
        .cpo_disown        = vvp_transient_page_disown,
        .cpo_discard       = vvp_transient_page_discard,
        .cpo_vmpage        = ccc_page_vmpage,
        .cpo_fini          = vvp_transient_page_fini,
        .cpo_is_vmlocked   = vvp_transient_page_is_vmlocked,
        .cpo_print         = vvp_page_print,
        .cpo_is_under_lock = ccc_page_is_under_lock,
        .io = {
                [CRT_READ] = {
                        .cpo_prep       = ccc_transient_page_prep,
                        .cpo_completion = vvp_transient_page_completion,
                },
                [CRT_WRITE] = {
                        .cpo_prep       = ccc_transient_page_prep,
                        .cpo_completion = vvp_transient_page_completion,
                }
        }
};

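/*
 * Initialize the VVP slice of a cl_page: a cacheable page is wired to its
 * VM page through vmpage->private, while a transient (direct IO) page is
 * only counted on the object, under the inode mutex.
 */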
int vvp_page_init(const struct lu_env *env, struct cl_object *obj,
                  struct cl_page *page, struct page *vmpage)
{
        struct ccc_page *cpg = cl_object_page_slice(obj, page);

        CLOBINVRNT(env, obj, ccc_object_invariant(obj));

        cpg->cpg_page = vmpage;
        page_cache_get(vmpage);

        INIT_LIST_HEAD(&cpg->cpg_pending_linkage);
        if (page->cp_type == CPT_CACHEABLE) {
                SetPagePrivate(vmpage);
                vmpage->private = (unsigned long)page;
                cl_page_slice_add(page, &cpg->cpg_cl, obj,
                                  &vvp_page_ops);
        } else {
                struct ccc_object *clobj = cl2ccc(obj);

                LASSERT(!mutex_trylock(&clobj->cob_inode->i_mutex));
                cl_page_slice_add(page, &cpg->cpg_cl, obj,
                                  &vvp_transient_page_ops);
                clobj->cob_transient_pages++;
        }
        return 0;
}