1 /*
2  * Copyright (c) 2000-2006 Silicon Graphics, Inc.
3  * All Rights Reserved.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License as
7  * published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it would be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write the Free Software Foundation,
16  * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
17  */
18 #include "xfs.h"
19 #include <linux/stddef.h>
20 #include <linux/errno.h>
21 #include <linux/gfp.h>
22 #include <linux/pagemap.h>
23 #include <linux/init.h>
24 #include <linux/vmalloc.h>
25 #include <linux/bio.h>
26 #include <linux/sysctl.h>
27 #include <linux/proc_fs.h>
28 #include <linux/workqueue.h>
29 #include <linux/percpu.h>
30 #include <linux/blkdev.h>
31 #include <linux/hash.h>
32 #include <linux/kthread.h>
33 #include <linux/migrate.h>
34 #include <linux/backing-dev.h>
35 #include <linux/freezer.h>
36 #include <linux/list_sort.h>
37
38 #include "xfs_sb.h"
39 #include "xfs_inum.h"
40 #include "xfs_log.h"
41 #include "xfs_ag.h"
42 #include "xfs_mount.h"
43 #include "xfs_trace.h"
44
45 static kmem_zone_t *xfs_buf_zone;
46 STATIC int xfsbufd(void *);
47 STATIC void xfs_buf_delwri_queue(xfs_buf_t *, int);
48
49 static struct workqueue_struct *xfslogd_workqueue;
50 struct workqueue_struct *xfsdatad_workqueue;
51 struct workqueue_struct *xfsconvertd_workqueue;
52
53 #ifdef XFS_BUF_LOCK_TRACKING
54 # define XB_SET_OWNER(bp)       ((bp)->b_last_holder = current->pid)
55 # define XB_CLEAR_OWNER(bp)     ((bp)->b_last_holder = -1)
56 # define XB_GET_OWNER(bp)       ((bp)->b_last_holder)
57 #else
58 # define XB_SET_OWNER(bp)       do { } while (0)
59 # define XB_CLEAR_OWNER(bp)     do { } while (0)
60 # define XB_GET_OWNER(bp)       do { } while (0)
61 #endif
62
63 #define xb_to_gfp(flags) \
64         ((((flags) & XBF_READ_AHEAD) ? __GFP_NORETRY : \
65           ((flags) & XBF_DONT_BLOCK) ? GFP_NOFS : GFP_KERNEL) | __GFP_NOWARN)
66
67 #define xb_to_km(flags) \
68          (((flags) & XBF_DONT_BLOCK) ? KM_NOFS : KM_SLEEP)
69
70 #define xfs_buf_allocate(flags) \
71         kmem_zone_alloc(xfs_buf_zone, xb_to_km(flags))
72 #define xfs_buf_deallocate(bp) \
73         kmem_zone_free(xfs_buf_zone, (bp));
74
75 static inline int
76 xfs_buf_is_vmapped(
77         struct xfs_buf  *bp)
78 {
79         /*
80          * Return true if the buffer is vmapped.
81          *
82          * The XBF_MAPPED flag is set if the buffer should be mapped, but the
83          * code is clever enough to know it doesn't have to map a single page,
84          * so the check has to be both for XBF_MAPPED and bp->b_page_count > 1.
85          */
86         return (bp->b_flags & XBF_MAPPED) && bp->b_page_count > 1;
87 }
88
89 static inline int
90 xfs_buf_vmap_len(
91         struct xfs_buf  *bp)
92 {
93         return (bp->b_page_count * PAGE_SIZE) - bp->b_offset;
94 }
95
96 /*
97  *      Page Region interfaces.
98  *
99  *      For pages in filesystems where the blocksize is smaller than the
100  *      pagesize, we use the page->private field (long) to hold a bitmap
101  *      of uptodate regions within the page.
102  *
103  *      Each such region is "bytes per page / bits per long" bytes long.
104  *
105  *      NBPPR == number-of-bytes-per-page-region
106  *      BTOPR == bytes-to-page-region (rounded up)
107  *      BTOPRT == bytes-to-page-region-truncated (rounded down)
108  */
109 #if (BITS_PER_LONG == 32)
110 #define PRSHIFT         (PAGE_CACHE_SHIFT - 5)  /* (32 == 1<<5) */
111 #elif (BITS_PER_LONG == 64)
112 #define PRSHIFT         (PAGE_CACHE_SHIFT - 6)  /* (64 == 1<<6) */
113 #else
114 #error BITS_PER_LONG must be 32 or 64
115 #endif
116 #define NBPPR           (PAGE_CACHE_SIZE/BITS_PER_LONG)
117 #define BTOPR(b)        (((unsigned int)(b) + (NBPPR - 1)) >> PRSHIFT)
118 #define BTOPRT(b)       (((unsigned int)(b) >> PRSHIFT))
119
120 STATIC unsigned long
121 page_region_mask(
122         size_t          offset,
123         size_t          length)
124 {
125         unsigned long   mask;
126         int             first, final;
127
128         first = BTOPR(offset);
129         final = BTOPRT(offset + length - 1);
130         first = min(first, final);
131
132         mask = ~0UL;
133         mask <<= BITS_PER_LONG - (final - first);
134         mask >>= BITS_PER_LONG - (final);
135
136         ASSERT(offset + length <= PAGE_CACHE_SIZE);
137         ASSERT((final - first) < BITS_PER_LONG && (final - first) >= 0);
138
139         return mask;
140 }
141
142 STATIC void
143 set_page_region(
144         struct page     *page,
145         size_t          offset,
146         size_t          length)
147 {
148         set_page_private(page,
149                 page_private(page) | page_region_mask(offset, length));
150         if (page_private(page) == ~0UL)
151                 SetPageUptodate(page);
152 }
153
154 STATIC int
155 test_page_region(
156         struct page     *page,
157         size_t          offset,
158         size_t          length)
159 {
160         unsigned long   mask = page_region_mask(offset, length);
161
162         return (mask && (page_private(page) & mask) == mask);
163 }
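
/*
 * Illustrative sketch (not part of the original file): how the region
 * bitmap is used for a sub-page block.  When I/O completes on a 512-byte
 * block at the start of a page (see xfs_buf_bio_end_io() below), the
 * completion path marks that region uptodate, and a later lookup (see
 * _xfs_buf_lookup_pages()) can test it without re-reading.  The local
 * values below are assumptions for illustration only.
 *
 *	size_t	offset = 0, nbytes = 512;
 *
 *	set_page_region(page, offset, nbytes);
 *	if (test_page_region(page, offset, nbytes)) {
 *		(this sub-page range is already valid in the page cache)
 *	}
 */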
164
165 /*
166  *      Internal xfs_buf_t object manipulation
167  */
168
169 STATIC void
170 _xfs_buf_initialize(
171         xfs_buf_t               *bp,
172         xfs_buftarg_t           *target,
173         xfs_off_t               range_base,
174         size_t                  range_length,
175         xfs_buf_flags_t         flags)
176 {
177         /*
178          * We don't want certain flags to appear in b_flags.
179          */
180         flags &= ~(XBF_LOCK|XBF_MAPPED|XBF_DONT_BLOCK|XBF_READ_AHEAD);
181
182         memset(bp, 0, sizeof(xfs_buf_t));
183         atomic_set(&bp->b_hold, 1);
184         init_completion(&bp->b_iowait);
185         INIT_LIST_HEAD(&bp->b_list);
186         RB_CLEAR_NODE(&bp->b_rbnode);
187         sema_init(&bp->b_sema, 0); /* held, no waiters */
188         XB_SET_OWNER(bp);
189         bp->b_target = target;
190         bp->b_file_offset = range_base;
191         /*
192          * Set buffer_length and count_desired to the same value initially.
193          * I/O routines should use count_desired, which will be the same in
194          * most cases but may be reset (e.g. XFS recovery).
195          */
196         bp->b_buffer_length = bp->b_count_desired = range_length;
197         bp->b_flags = flags;
198         bp->b_bn = XFS_BUF_DADDR_NULL;
199         atomic_set(&bp->b_pin_count, 0);
200         init_waitqueue_head(&bp->b_waiters);
201
202         XFS_STATS_INC(xb_create);
203
204         trace_xfs_buf_init(bp, _RET_IP_);
205 }
206
207 /*
208  *      Allocate a page array capable of holding a specified number
209  *      of pages, and point the page buf at it.
210  */
211 STATIC int
212 _xfs_buf_get_pages(
213         xfs_buf_t               *bp,
214         int                     page_count,
215         xfs_buf_flags_t         flags)
216 {
217         /* Make sure that we have a page list */
218         if (bp->b_pages == NULL) {
219                 bp->b_offset = xfs_buf_poff(bp->b_file_offset);
220                 bp->b_page_count = page_count;
221                 if (page_count <= XB_PAGES) {
222                         bp->b_pages = bp->b_page_array;
223                 } else {
224                         bp->b_pages = kmem_alloc(sizeof(struct page *) *
225                                         page_count, xb_to_km(flags));
226                         if (bp->b_pages == NULL)
227                                 return -ENOMEM;
228                 }
229                 memset(bp->b_pages, 0, sizeof(struct page *) * page_count);
230         }
231         return 0;
232 }
233
234 /*
235  *      Frees b_pages if it was allocated.
236  */
237 STATIC void
238 _xfs_buf_free_pages(
239         xfs_buf_t       *bp)
240 {
241         if (bp->b_pages != bp->b_page_array) {
242                 kmem_free(bp->b_pages);
243                 bp->b_pages = NULL;
244         }
245 }
246
247 /*
248  *      Releases the specified buffer.
249  *
250  *      The modification state of any associated pages is left unchanged.
251  *      The buffer must not be on any hash - use xfs_buf_rele instead for
252  *      hashed and refcounted buffers.
253  */
254 void
255 xfs_buf_free(
256         xfs_buf_t               *bp)
257 {
258         trace_xfs_buf_free(bp, _RET_IP_);
259
260         if (bp->b_flags & (_XBF_PAGE_CACHE|_XBF_PAGES)) {
261                 uint            i;
262
263                 if (xfs_buf_is_vmapped(bp))
264                         vm_unmap_ram(bp->b_addr - bp->b_offset,
265                                         bp->b_page_count);
266
267                 for (i = 0; i < bp->b_page_count; i++) {
268                         struct page     *page = bp->b_pages[i];
269
270                         if (bp->b_flags & _XBF_PAGE_CACHE)
271                                 ASSERT(!PagePrivate(page));
272                         page_cache_release(page);
273                 }
274         }
275         _xfs_buf_free_pages(bp);
276         xfs_buf_deallocate(bp);
277 }
278
279 /*
280  *      Finds all pages for the buffer in question and builds its page list.
281  */
282 STATIC int
283 _xfs_buf_lookup_pages(
284         xfs_buf_t               *bp,
285         uint                    flags)
286 {
287         struct address_space    *mapping = bp->b_target->bt_mapping;
288         size_t                  blocksize = bp->b_target->bt_bsize;
289         size_t                  size = bp->b_count_desired;
290         size_t                  nbytes, offset;
291         gfp_t                   gfp_mask = xb_to_gfp(flags);
292         unsigned short          page_count, i;
293         pgoff_t                 first;
294         xfs_off_t               end;
295         int                     error;
296
297         end = bp->b_file_offset + bp->b_buffer_length;
298         page_count = xfs_buf_btoc(end) - xfs_buf_btoct(bp->b_file_offset);
299
300         error = _xfs_buf_get_pages(bp, page_count, flags);
301         if (unlikely(error))
302                 return error;
303         bp->b_flags |= _XBF_PAGE_CACHE;
304
305         offset = bp->b_offset;
306         first = bp->b_file_offset >> PAGE_CACHE_SHIFT;
307
308         for (i = 0; i < bp->b_page_count; i++) {
309                 struct page     *page;
310                 uint            retries = 0;
311
312               retry:
313                 page = find_or_create_page(mapping, first + i, gfp_mask);
314                 if (unlikely(page == NULL)) {
315                         if (flags & XBF_READ_AHEAD) {
316                                 bp->b_page_count = i;
317                                 for (i = 0; i < bp->b_page_count; i++)
318                                         unlock_page(bp->b_pages[i]);
319                                 return -ENOMEM;
320                         }
321
322                         /*
323                          * This could deadlock.
324                          *
325                          * But until all the XFS lowlevel code is revamped to
326                          * handle buffer allocation failures we can't do much.
327                          */
328                         if (!(++retries % 100))
329                                 printk(KERN_ERR
330                                         "XFS: possible memory allocation "
331                                         "deadlock in %s (mode:0x%x)\n",
332                                         __func__, gfp_mask);
333
334                         XFS_STATS_INC(xb_page_retries);
335                         congestion_wait(BLK_RW_ASYNC, HZ/50);
336                         goto retry;
337                 }
338
339                 XFS_STATS_INC(xb_page_found);
340
341                 nbytes = min_t(size_t, size, PAGE_CACHE_SIZE - offset);
342                 size -= nbytes;
343
344                 ASSERT(!PagePrivate(page));
345                 if (!PageUptodate(page)) {
346                         page_count--;
347                         if (blocksize >= PAGE_CACHE_SIZE) {
348                                 if (flags & XBF_READ)
349                                         bp->b_flags |= _XBF_PAGE_LOCKED;
350                         } else if (!PagePrivate(page)) {
351                                 if (test_page_region(page, offset, nbytes))
352                                         page_count++;
353                         }
354                 }
355
356                 bp->b_pages[i] = page;
357                 offset = 0;
358         }
359
360         if (!(bp->b_flags & _XBF_PAGE_LOCKED)) {
361                 for (i = 0; i < bp->b_page_count; i++)
362                         unlock_page(bp->b_pages[i]);
363         }
364
365         if (page_count == bp->b_page_count)
366                 bp->b_flags |= XBF_DONE;
367
368         return error;
369 }
370
371 /*
372  *      Map buffer into kernel address-space if necessary.
373  */
374 STATIC int
375 _xfs_buf_map_pages(
376         xfs_buf_t               *bp,
377         uint                    flags)
378 {
379         /* A single page buffer is always mappable */
380         if (bp->b_page_count == 1) {
381                 bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset;
382                 bp->b_flags |= XBF_MAPPED;
383         } else if (flags & XBF_MAPPED) {
384                 bp->b_addr = vm_map_ram(bp->b_pages, bp->b_page_count,
385                                         -1, PAGE_KERNEL);
386                 if (unlikely(bp->b_addr == NULL))
387                         return -ENOMEM;
388                 bp->b_addr += bp->b_offset;
389                 bp->b_flags |= XBF_MAPPED;
390         }
391
392         return 0;
393 }
394
395 /*
396  *      Finding and Reading Buffers
397  */
398
399 /*
400  *      Looks up, and creates if absent, a lockable buffer for
401  *      a given range of an inode.  The buffer is returned
402  *      locked.  If other overlapping buffers exist, they are
403  *      released before the new buffer is created and locked,
404  *      which may imply that this call will block until those buffers
405  *      are unlocked.  No I/O is implied by this call.
406  */
407 xfs_buf_t *
408 _xfs_buf_find(
409         xfs_buftarg_t           *btp,   /* block device target          */
410         xfs_off_t               ioff,   /* starting offset of range     */
411         size_t                  isize,  /* length of range              */
412         xfs_buf_flags_t         flags,
413         xfs_buf_t               *new_bp)
414 {
415         xfs_off_t               range_base;
416         size_t                  range_length;
417         struct xfs_perag        *pag;
418         struct rb_node          **rbp;
419         struct rb_node          *parent;
420         xfs_buf_t               *bp;
421
422         range_base = (ioff << BBSHIFT);
423         range_length = (isize << BBSHIFT);
424
425         /* Check for IOs smaller than the sector size / not sector aligned */
426         ASSERT(!(range_length < (1 << btp->bt_sshift)));
427         ASSERT(!(range_base & (xfs_off_t)btp->bt_smask));
428
429         /* get tree root */
430         pag = xfs_perag_get(btp->bt_mount,
431                                 xfs_daddr_to_agno(btp->bt_mount, ioff));
432
433         /* walk tree */
434         spin_lock(&pag->pag_buf_lock);
435         rbp = &pag->pag_buf_tree.rb_node;
436         parent = NULL;
437         bp = NULL;
438         while (*rbp) {
439                 parent = *rbp;
440                 bp = rb_entry(parent, struct xfs_buf, b_rbnode);
441
442                 if (range_base < bp->b_file_offset)
443                         rbp = &(*rbp)->rb_left;
444                 else if (range_base > bp->b_file_offset)
445                         rbp = &(*rbp)->rb_right;
446                 else {
447                         /*
448                          * found a block offset match. If the range doesn't
449                          * match, the only way this is allowed is if the buffer
450                          * in the cache is stale and the transaction that made
451                          * it stale has not yet committed. i.e. we are
452                          * reallocating a busy extent. Skip this buffer and
453                          * continue searching to the right for an exact match.
454                          */
455                         if (bp->b_buffer_length != range_length) {
456                                 ASSERT(bp->b_flags & XBF_STALE);
457                                 rbp = &(*rbp)->rb_right;
458                                 continue;
459                         }
460                         atomic_inc(&bp->b_hold);
461                         goto found;
462                 }
463         }
464
465         /* No match found */
466         if (new_bp) {
467                 _xfs_buf_initialize(new_bp, btp, range_base,
468                                 range_length, flags);
469                 rb_link_node(&new_bp->b_rbnode, parent, rbp);
470                 rb_insert_color(&new_bp->b_rbnode, &pag->pag_buf_tree);
471                 /* the buffer keeps the perag reference until it is freed */
472                 new_bp->b_pag = pag;
473                 spin_unlock(&pag->pag_buf_lock);
474         } else {
475                 XFS_STATS_INC(xb_miss_locked);
476                 spin_unlock(&pag->pag_buf_lock);
477                 xfs_perag_put(pag);
478         }
479         return new_bp;
480
481 found:
482         spin_unlock(&pag->pag_buf_lock);
483         xfs_perag_put(pag);
484
485         if (xfs_buf_cond_lock(bp)) {
486                 /* failed, so wait for the lock if requested. */
487                 if (!(flags & XBF_TRYLOCK)) {
488                         xfs_buf_lock(bp);
489                         XFS_STATS_INC(xb_get_locked_waited);
490                 } else {
491                         xfs_buf_rele(bp);
492                         XFS_STATS_INC(xb_busy_locked);
493                         return NULL;
494                 }
495         }
496
497         if (bp->b_flags & XBF_STALE) {
498                 ASSERT((bp->b_flags & _XBF_DELWRI_Q) == 0);
499                 bp->b_flags &= XBF_MAPPED;
500         }
501
502         trace_xfs_buf_find(bp, flags, _RET_IP_);
503         XFS_STATS_INC(xb_get_locked);
504         return bp;
505 }
506
507 /*
508  *      Assembles a buffer covering the specified range.
509  *      Storage in memory for all portions of the buffer will be allocated,
510  *      although backing storage may not be.
511  */
512 xfs_buf_t *
513 xfs_buf_get(
514         xfs_buftarg_t           *target,/* target for buffer            */
515         xfs_off_t               ioff,   /* starting offset of range     */
516         size_t                  isize,  /* length of range              */
517         xfs_buf_flags_t         flags)
518 {
519         xfs_buf_t               *bp, *new_bp;
520         int                     error = 0, i;
521
522         new_bp = xfs_buf_allocate(flags);
523         if (unlikely(!new_bp))
524                 return NULL;
525
526         bp = _xfs_buf_find(target, ioff, isize, flags, new_bp);
527         if (bp == new_bp) {
528                 error = _xfs_buf_lookup_pages(bp, flags);
529                 if (error)
530                         goto no_buffer;
531         } else {
532                 xfs_buf_deallocate(new_bp);
533                 if (unlikely(bp == NULL))
534                         return NULL;
535         }
536
537         for (i = 0; i < bp->b_page_count; i++)
538                 mark_page_accessed(bp->b_pages[i]);
539
540         if (!(bp->b_flags & XBF_MAPPED)) {
541                 error = _xfs_buf_map_pages(bp, flags);
542                 if (unlikely(error)) {
543                         printk(KERN_WARNING "%s: failed to map pages\n",
544                                         __func__);
545                         goto no_buffer;
546                 }
547         }
548
549         XFS_STATS_INC(xb_get);
550
551         /*
552          * Always fill in the block number now; the mapped cases can do
553          * their own overlay of this later.
554          */
555         bp->b_bn = ioff;
556         bp->b_count_desired = bp->b_buffer_length;
557
558         trace_xfs_buf_get(bp, flags, _RET_IP_);
559         return bp;
560
561  no_buffer:
562         if (flags & (XBF_LOCK | XBF_TRYLOCK))
563                 xfs_buf_unlock(bp);
564         xfs_buf_rele(bp);
565         return NULL;
566 }
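
/*
 * Illustrative sketch (not part of the original file): a typical cached
 * buffer lookup and release.  The block number and length are in 512-byte
 * basic blocks; the flags, the error handling and the use of the buffer's
 * mapped address are assumptions for illustration only.
 *
 *	xfs_buf_t	*bp;
 *
 *	bp = xfs_buf_get(target, blkno, numblks, XBF_LOCK | XBF_MAPPED);
 *	if (!bp)
 *		return ENOMEM;
 *	memset(bp->b_addr, 0, BBTOB(numblks));
 *	xfs_buf_relse(bp);
 */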
567
568 STATIC int
569 _xfs_buf_read(
570         xfs_buf_t               *bp,
571         xfs_buf_flags_t         flags)
572 {
573         int                     status;
574
575         ASSERT(!(flags & (XBF_DELWRI|XBF_WRITE)));
576         ASSERT(bp->b_bn != XFS_BUF_DADDR_NULL);
577
578         bp->b_flags &= ~(XBF_WRITE | XBF_ASYNC | XBF_DELWRI | \
579                         XBF_READ_AHEAD | _XBF_RUN_QUEUES);
580         bp->b_flags |= flags & (XBF_READ | XBF_ASYNC | \
581                         XBF_READ_AHEAD | _XBF_RUN_QUEUES);
582
583         status = xfs_buf_iorequest(bp);
584         if (status || XFS_BUF_ISERROR(bp) || (flags & XBF_ASYNC))
585                 return status;
586         return xfs_buf_iowait(bp);
587 }
588
589 xfs_buf_t *
590 xfs_buf_read(
591         xfs_buftarg_t           *target,
592         xfs_off_t               ioff,
593         size_t                  isize,
594         xfs_buf_flags_t         flags)
595 {
596         xfs_buf_t               *bp;
597
598         flags |= XBF_READ;
599
600         bp = xfs_buf_get(target, ioff, isize, flags);
601         if (bp) {
602                 trace_xfs_buf_read(bp, flags, _RET_IP_);
603
604                 if (!XFS_BUF_ISDONE(bp)) {
605                         XFS_STATS_INC(xb_get_read);
606                         _xfs_buf_read(bp, flags);
607                 } else if (flags & XBF_ASYNC) {
608                         /*
609                          * Read ahead call which is already satisfied,
610                          * drop the buffer
611                          */
612                         goto no_buffer;
613                 } else {
614                         /* We do not want read in the flags */
615                         bp->b_flags &= ~XBF_READ;
616                 }
617         }
618
619         return bp;
620
621  no_buffer:
622         if (flags & (XBF_LOCK | XBF_TRYLOCK))
623                 xfs_buf_unlock(bp);
624         xfs_buf_rele(bp);
625         return NULL;
626 }
627
628 /*
629  *      If we are not low on memory then do the readahead in a
630  *      deadlock-safe manner.
631  */
632 void
633 xfs_buf_readahead(
634         xfs_buftarg_t           *target,
635         xfs_off_t               ioff,
636         size_t                  isize)
637 {
638         struct backing_dev_info *bdi;
639
640         bdi = target->bt_mapping->backing_dev_info;
641         if (bdi_read_congested(bdi))
642                 return;
643
644         xfs_buf_read(target, ioff, isize,
645                      XBF_TRYLOCK|XBF_ASYNC|XBF_READ_AHEAD|XBF_DONT_BLOCK);
646 }
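
/*
 * Illustrative sketch (not part of the original file): readahead is issued
 * for a range that will probably be needed soon, and a later blocking
 * xfs_buf_read() of the same range finds the buffer already in (or on its
 * way into) the cache.  Block numbers, lengths and flags are illustrative.
 *
 *	xfs_buf_readahead(target, next_blkno, numblks);
 *	...
 *	bp = xfs_buf_read(target, next_blkno, numblks, XBF_LOCK | XBF_MAPPED);
 */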
647
648 /*
649  * Read an uncached buffer from disk. Allocates and returns a locked
650  * buffer containing the disk contents or nothing.
651  */
652 struct xfs_buf *
653 xfs_buf_read_uncached(
654         struct xfs_mount        *mp,
655         struct xfs_buftarg      *target,
656         xfs_daddr_t             daddr,
657         size_t                  length,
658         int                     flags)
659 {
660         xfs_buf_t               *bp;
661         int                     error;
662
663         bp = xfs_buf_get_uncached(target, length, flags);
664         if (!bp)
665                 return NULL;
666
667         /* set up the buffer for a read IO */
668         xfs_buf_lock(bp);
669         XFS_BUF_SET_ADDR(bp, daddr);
670         XFS_BUF_READ(bp);
671         XFS_BUF_BUSY(bp);
672
673         xfsbdstrat(mp, bp);
674         error = xfs_buf_iowait(bp);
675         if (error || bp->b_error) {
676                 xfs_buf_relse(bp);
677                 return NULL;
678         }
679         return bp;
680 }
681
682 xfs_buf_t *
683 xfs_buf_get_empty(
684         size_t                  len,
685         xfs_buftarg_t           *target)
686 {
687         xfs_buf_t               *bp;
688
689         bp = xfs_buf_allocate(0);
690         if (bp)
691                 _xfs_buf_initialize(bp, target, 0, len, 0);
692         return bp;
693 }
694
695 static inline struct page *
696 mem_to_page(
697         void                    *addr)
698 {
699         if ((!is_vmalloc_addr(addr))) {
700                 return virt_to_page(addr);
701         } else {
702                 return vmalloc_to_page(addr);
703         }
704 }
705
706 int
707 xfs_buf_associate_memory(
708         xfs_buf_t               *bp,
709         void                    *mem,
710         size_t                  len)
711 {
712         int                     rval;
713         int                     i = 0;
714         unsigned long           pageaddr;
715         unsigned long           offset;
716         size_t                  buflen;
717         int                     page_count;
718
719         pageaddr = (unsigned long)mem & PAGE_CACHE_MASK;
720         offset = (unsigned long)mem - pageaddr;
721         buflen = PAGE_CACHE_ALIGN(len + offset);
722         page_count = buflen >> PAGE_CACHE_SHIFT;
723
724         /* Free any previous set of page pointers */
725         if (bp->b_pages)
726                 _xfs_buf_free_pages(bp);
727
728         bp->b_pages = NULL;
729         bp->b_addr = mem;
730
731         rval = _xfs_buf_get_pages(bp, page_count, XBF_DONT_BLOCK);
732         if (rval)
733                 return rval;
734
735         bp->b_offset = offset;
736
737         for (i = 0; i < bp->b_page_count; i++) {
738                 bp->b_pages[i] = mem_to_page((void *)pageaddr);
739                 pageaddr += PAGE_CACHE_SIZE;
740         }
741
742         bp->b_count_desired = len;
743         bp->b_buffer_length = buflen;
744         bp->b_flags |= XBF_MAPPED;
745         bp->b_flags &= ~_XBF_PAGE_LOCKED;
746
747         return 0;
748 }
749
750 xfs_buf_t *
751 xfs_buf_get_uncached(
752         struct xfs_buftarg      *target,
753         size_t                  len,
754         int                     flags)
755 {
756         unsigned long           page_count = PAGE_ALIGN(len) >> PAGE_SHIFT;
757         int                     error, i;
758         xfs_buf_t               *bp;
759
760         bp = xfs_buf_allocate(0);
761         if (unlikely(bp == NULL))
762                 goto fail;
763         _xfs_buf_initialize(bp, target, 0, len, 0);
764
765         error = _xfs_buf_get_pages(bp, page_count, 0);
766         if (error)
767                 goto fail_free_buf;
768
769         for (i = 0; i < page_count; i++) {
770                 bp->b_pages[i] = alloc_page(xb_to_gfp(flags));
771                 if (!bp->b_pages[i])
772                         goto fail_free_mem;
773         }
774         bp->b_flags |= _XBF_PAGES;
775
776         error = _xfs_buf_map_pages(bp, XBF_MAPPED);
777         if (unlikely(error)) {
778                 printk(KERN_WARNING "%s: failed to map pages\n",
779                                 __func__);
780                 goto fail_free_mem;
781         }
782
783         xfs_buf_unlock(bp);
784
785         trace_xfs_buf_get_uncached(bp, _RET_IP_);
786         return bp;
787
788  fail_free_mem:
789         while (--i >= 0)
790                 __free_page(bp->b_pages[i]);
791         _xfs_buf_free_pages(bp);
792  fail_free_buf:
793         xfs_buf_deallocate(bp);
794  fail:
795         return NULL;
796 }
797
798 /*
799  *      Increment reference count on buffer, to hold the buffer concurrently
800  *      with another thread which may release (free) the buffer asynchronously.
801  *      Must hold the buffer already to call this function.
802  */
803 void
804 xfs_buf_hold(
805         xfs_buf_t               *bp)
806 {
807         trace_xfs_buf_hold(bp, _RET_IP_);
808         atomic_inc(&bp->b_hold);
809 }
810
811 /*
812  *      Releases a hold on the specified buffer.  If the
813  *      hold count is 1, calls xfs_buf_free.
814  */
815 void
816 xfs_buf_rele(
817         xfs_buf_t               *bp)
818 {
819         struct xfs_perag        *pag = bp->b_pag;
820
821         trace_xfs_buf_rele(bp, _RET_IP_);
822
823         if (!pag) {
824                 ASSERT(!bp->b_relse);
825                 ASSERT(RB_EMPTY_NODE(&bp->b_rbnode));
826                 if (atomic_dec_and_test(&bp->b_hold))
827                         xfs_buf_free(bp);
828                 return;
829         }
830
831         ASSERT(!RB_EMPTY_NODE(&bp->b_rbnode));
832         ASSERT(atomic_read(&bp->b_hold) > 0);
833         if (atomic_dec_and_lock(&bp->b_hold, &pag->pag_buf_lock)) {
834                 if (bp->b_relse) {
835                         atomic_inc(&bp->b_hold);
836                         spin_unlock(&pag->pag_buf_lock);
837                         bp->b_relse(bp);
838                 } else {
839                         ASSERT(!(bp->b_flags & (XBF_DELWRI|_XBF_DELWRI_Q)));
840                         rb_erase(&bp->b_rbnode, &pag->pag_buf_tree);
841                         spin_unlock(&pag->pag_buf_lock);
842                         xfs_perag_put(pag);
843                         xfs_buf_free(bp);
844                 }
845         }
846 }
847
848
849 /*
850  *      Mutual exclusion on buffers.  Locking model:
851  *
852  *      Buffers associated with inodes for which buffer locking
853  *      is not enabled are not protected by semaphores, and are
854  *      assumed to be exclusively owned by the caller.  There is a
855  *      spinlock in the buffer, used by the caller when concurrent
856  *      access is possible.
857  */
858
859 /*
860  *      Locks a buffer object, if it is not already locked.  Note that this in
861  *      no way locks the underlying pages, so it is only useful for
862  *      synchronizing concurrent use of buffer objects, not for synchronizing
863  *      independent access to the underlying pages.
864  *
865  *      If we come across a stale, pinned, locked buffer, we know that we are
866  *      being asked to lock a buffer that has been reallocated. Because it is
867  *      pinned, we know that the log has not been pushed to disk and hence it
868  *      will still be locked.  Rather than continuing to have trylock attempts
869  *      fail until someone else pushes the log, push it ourselves before
870  *      returning.  This means that the xfsaild will not get stuck trying
871  *      to push on stale inode buffers.
872  */
873 int
874 xfs_buf_cond_lock(
875         xfs_buf_t               *bp)
876 {
877         int                     locked;
878
879         locked = down_trylock(&bp->b_sema) == 0;
880         if (locked)
881                 XB_SET_OWNER(bp);
882         else if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE))
883                 xfs_log_force(bp->b_target->bt_mount, 0);
884
885         trace_xfs_buf_cond_lock(bp, _RET_IP_);
886         return locked ? 0 : -EBUSY;
887 }
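
/*
 * Illustrative sketch (not part of the original file): the trylock-or-block
 * pattern used by _xfs_buf_find() above.  The "flags" variable is assumed
 * to carry the caller's XBF_TRYLOCK setting.
 *
 *	if (xfs_buf_cond_lock(bp)) {
 *		if (flags & XBF_TRYLOCK) {
 *			xfs_buf_rele(bp);
 *			return NULL;
 *		}
 *		xfs_buf_lock(bp);
 *	}
 */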
888
889 int
890 xfs_buf_lock_value(
891         xfs_buf_t               *bp)
892 {
893         return bp->b_sema.count;
894 }
895
896 /*
897  *      Locks a buffer object.
898  *      Note that this in no way locks the underlying pages, so it is only
899  *      useful for synchronizing concurrent use of buffer objects, not for
900  *      synchronizing independent access to the underlying pages.
901  *
902  *      If we come across a stale, pinned, locked buffer, we know that we
903  *      are being asked to lock a buffer that has been reallocated. Because
904  *      it is pinned, we know that the log has not been pushed to disk and
905  *      hence it will still be locked. Rather than sleeping until someone
906  *      else pushes the log, push it ourselves before trying to get the lock.
907  */
908 void
909 xfs_buf_lock(
910         xfs_buf_t               *bp)
911 {
912         trace_xfs_buf_lock(bp, _RET_IP_);
913
914         if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE))
915                 xfs_log_force(bp->b_target->bt_mount, 0);
916         if (atomic_read(&bp->b_io_remaining))
917                 blk_run_address_space(bp->b_target->bt_mapping);
918         down(&bp->b_sema);
919         XB_SET_OWNER(bp);
920
921         trace_xfs_buf_lock_done(bp, _RET_IP_);
922 }
923
924 /*
925  *      Releases the lock on the buffer object.
926  *      If the buffer is marked delwri but is not queued, queue it before we
927  *      unlock the buffer as we need to set flags correctly.  We also need to
928  *      take a reference for the delwri queue because the unlocker is going to
929  *      drop theirs and they don't know we just queued it.
930  */
931 void
932 xfs_buf_unlock(
933         xfs_buf_t               *bp)
934 {
935         if ((bp->b_flags & (XBF_DELWRI|_XBF_DELWRI_Q)) == XBF_DELWRI) {
936                 atomic_inc(&bp->b_hold);
937                 bp->b_flags |= XBF_ASYNC;
938                 xfs_buf_delwri_queue(bp, 0);
939         }
940
941         XB_CLEAR_OWNER(bp);
942         up(&bp->b_sema);
943
944         trace_xfs_buf_unlock(bp, _RET_IP_);
945 }
946
947 STATIC void
948 xfs_buf_wait_unpin(
949         xfs_buf_t               *bp)
950 {
951         DECLARE_WAITQUEUE       (wait, current);
952
953         if (atomic_read(&bp->b_pin_count) == 0)
954                 return;
955
956         add_wait_queue(&bp->b_waiters, &wait);
957         for (;;) {
958                 set_current_state(TASK_UNINTERRUPTIBLE);
959                 if (atomic_read(&bp->b_pin_count) == 0)
960                         break;
961                 if (atomic_read(&bp->b_io_remaining))
962                         blk_run_address_space(bp->b_target->bt_mapping);
963                 schedule();
964         }
965         remove_wait_queue(&bp->b_waiters, &wait);
966         set_current_state(TASK_RUNNING);
967 }
968
969 /*
970  *      Buffer Utility Routines
971  */
972
973 STATIC void
974 xfs_buf_iodone_work(
975         struct work_struct      *work)
976 {
977         xfs_buf_t               *bp =
978                 container_of(work, xfs_buf_t, b_iodone_work);
979
980         if (bp->b_iodone)
981                 (*(bp->b_iodone))(bp);
982         else if (bp->b_flags & XBF_ASYNC)
983                 xfs_buf_relse(bp);
984 }
985
986 void
987 xfs_buf_ioend(
988         xfs_buf_t               *bp,
989         int                     schedule)
990 {
991         trace_xfs_buf_iodone(bp, _RET_IP_);
992
993         bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD);
994         if (bp->b_error == 0)
995                 bp->b_flags |= XBF_DONE;
996
997         if ((bp->b_iodone) || (bp->b_flags & XBF_ASYNC)) {
998                 if (schedule) {
999                         INIT_WORK(&bp->b_iodone_work, xfs_buf_iodone_work);
1000                         queue_work(xfslogd_workqueue, &bp->b_iodone_work);
1001                 } else {
1002                         xfs_buf_iodone_work(&bp->b_iodone_work);
1003                 }
1004         } else {
1005                 complete(&bp->b_iowait);
1006         }
1007 }
1008
1009 void
1010 xfs_buf_ioerror(
1011         xfs_buf_t               *bp,
1012         int                     error)
1013 {
1014         ASSERT(error >= 0 && error <= 0xffff);
1015         bp->b_error = (unsigned short)error;
1016         trace_xfs_buf_ioerror(bp, error, _RET_IP_);
1017 }
1018
1019 int
1020 xfs_bwrite(
1021         struct xfs_mount        *mp,
1022         struct xfs_buf          *bp)
1023 {
1024         int                     error;
1025
1026         bp->b_flags |= XBF_WRITE;
1027         bp->b_flags &= ~(XBF_ASYNC | XBF_READ);
1028
1029         xfs_buf_delwri_dequeue(bp);
1030         xfs_bdstrat_cb(bp);
1031
1032         error = xfs_buf_iowait(bp);
1033         if (error)
1034                 xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
1035         xfs_buf_relse(bp);
1036         return error;
1037 }
1038
1039 void
1040 xfs_bdwrite(
1041         void                    *mp,
1042         struct xfs_buf          *bp)
1043 {
1044         trace_xfs_buf_bdwrite(bp, _RET_IP_);
1045
1046         bp->b_flags &= ~XBF_READ;
1047         bp->b_flags |= (XBF_DELWRI | XBF_ASYNC);
1048
1049         xfs_buf_delwri_queue(bp, 1);
1050 }
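
/*
 * Illustrative sketch (not part of the original file): the usual delayed
 * write pattern.  A buffer is obtained locked, modified in place, and then
 * handed to xfs_bdwrite(), which queues it for the xfsbufd and drops the
 * lock.  The target, block number, length and copy are assumptions for
 * illustration only.
 *
 *	bp = xfs_buf_get(mp->m_ddev_targp, blkno, numblks,
 *			 XBF_LOCK | XBF_MAPPED);
 *	if (!bp)
 *		return ENOMEM;
 *	memcpy(bp->b_addr, data, BBTOB(numblks));
 *	xfs_bdwrite(mp, bp);
 */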
1051
1052 /*
1053  * Called when we want to stop a buffer from getting written or read.
1054  * We attach the EIO error, muck with its flags, and call xfs_buf_ioend
1055  * so that the proper iodone callbacks get called.
1056  */
1057 STATIC int
1058 xfs_bioerror(
1059         xfs_buf_t *bp)
1060 {
1061 #ifdef XFSERRORDEBUG
1062         ASSERT(XFS_BUF_ISREAD(bp) || bp->b_iodone);
1063 #endif
1064
1065         /*
1066          * No need to wait until the buffer is unpinned, we aren't flushing it.
1067          */
1068         XFS_BUF_ERROR(bp, EIO);
1069
1070         /*
1071          * We're calling xfs_buf_ioend, so delete XBF_DONE flag.
1072          */
1073         XFS_BUF_UNREAD(bp);
1074         XFS_BUF_UNDELAYWRITE(bp);
1075         XFS_BUF_UNDONE(bp);
1076         XFS_BUF_STALE(bp);
1077
1078         xfs_buf_ioend(bp, 0);
1079
1080         return EIO;
1081 }
1082
1083 /*
1084  * Same as xfs_bioerror, except that we are releasing the buffer
1085  * here ourselves, and avoiding the xfs_buf_ioend call.
1086  * This is meant for userdata errors; metadata bufs come with
1087  * iodone functions attached, so that we can track down errors.
1088  */
1089 STATIC int
1090 xfs_bioerror_relse(
1091         struct xfs_buf  *bp)
1092 {
1093         int64_t         fl = XFS_BUF_BFLAGS(bp);
1094         /*
1095          * No need to wait until the buffer is unpinned.
1096          * We aren't flushing it.
1097          *
1098          * chunkhold expects B_DONE to be set, whether
1099          * we actually finish the I/O or not. We don't want to
1100          * change that interface.
1101          */
1102         XFS_BUF_UNREAD(bp);
1103         XFS_BUF_UNDELAYWRITE(bp);
1104         XFS_BUF_DONE(bp);
1105         XFS_BUF_STALE(bp);
1106         XFS_BUF_CLR_IODONE_FUNC(bp);
1107         if (!(fl & XBF_ASYNC)) {
1108                 /*
1109                  * Mark b_error and B_ERROR _both_.
1110                  * Lots of chunkcache code assumes that.
1111                  * There's no reason to mark error for
1112                  * ASYNC buffers.
1113                  */
1114                 XFS_BUF_ERROR(bp, EIO);
1115                 XFS_BUF_FINISH_IOWAIT(bp);
1116         } else {
1117                 xfs_buf_relse(bp);
1118         }
1119
1120         return EIO;
1121 }
1122
1123
1124 /*
1125  * All xfs metadata buffers except log state machine buffers
1126  * get this attached as their b_bdstrat callback function.
1127  * This is so that we can catch a buffer
1128  * after prematurely unpinning it to forcibly shutdown the filesystem.
1129  */
1130 int
1131 xfs_bdstrat_cb(
1132         struct xfs_buf  *bp)
1133 {
1134         if (XFS_FORCED_SHUTDOWN(bp->b_target->bt_mount)) {
1135                 trace_xfs_bdstrat_shut(bp, _RET_IP_);
1136                 /*
1137                  * Metadata write that didn't get logged but
1138                  * written delayed anyway. These aren't associated
1139                  * with a transaction, and can be ignored.
1140                  */
1141                 if (!bp->b_iodone && !XFS_BUF_ISREAD(bp))
1142                         return xfs_bioerror_relse(bp);
1143                 else
1144                         return xfs_bioerror(bp);
1145         }
1146
1147         xfs_buf_iorequest(bp);
1148         return 0;
1149 }
1150
1151 /*
1152  * Wrapper around bdstrat so that we can stop data from going to disk in case
1153  * we are shutting down the filesystem.  Typically user data goes through this
1154  * path; one of the exceptions is the superblock.
1155  */
1156 void
1157 xfsbdstrat(
1158         struct xfs_mount        *mp,
1159         struct xfs_buf          *bp)
1160 {
1161         if (XFS_FORCED_SHUTDOWN(mp)) {
1162                 trace_xfs_bdstrat_shut(bp, _RET_IP_);
1163                 xfs_bioerror_relse(bp);
1164                 return;
1165         }
1166
1167         xfs_buf_iorequest(bp);
1168 }
1169
1170 STATIC void
1171 _xfs_buf_ioend(
1172         xfs_buf_t               *bp,
1173         int                     schedule)
1174 {
1175         if (atomic_dec_and_test(&bp->b_io_remaining) == 1) {
1176                 bp->b_flags &= ~_XBF_PAGE_LOCKED;
1177                 xfs_buf_ioend(bp, schedule);
1178         }
1179 }
1180
1181 STATIC void
1182 xfs_buf_bio_end_io(
1183         struct bio              *bio,
1184         int                     error)
1185 {
1186         xfs_buf_t               *bp = (xfs_buf_t *)bio->bi_private;
1187         unsigned int            blocksize = bp->b_target->bt_bsize;
1188         struct bio_vec          *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1189
1190         xfs_buf_ioerror(bp, -error);
1191
1192         if (!error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ))
1193                 invalidate_kernel_vmap_range(bp->b_addr, xfs_buf_vmap_len(bp));
1194
1195         do {
1196                 struct page     *page = bvec->bv_page;
1197
1198                 ASSERT(!PagePrivate(page));
1199                 if (unlikely(bp->b_error)) {
1200                         if (bp->b_flags & XBF_READ)
1201                                 ClearPageUptodate(page);
1202                 } else if (blocksize >= PAGE_CACHE_SIZE) {
1203                         SetPageUptodate(page);
1204                 } else if (!PagePrivate(page) &&
1205                                 (bp->b_flags & _XBF_PAGE_CACHE)) {
1206                         set_page_region(page, bvec->bv_offset, bvec->bv_len);
1207                 }
1208
1209                 if (--bvec >= bio->bi_io_vec)
1210                         prefetchw(&bvec->bv_page->flags);
1211
1212                 if (bp->b_flags & _XBF_PAGE_LOCKED)
1213                         unlock_page(page);
1214         } while (bvec >= bio->bi_io_vec);
1215
1216         _xfs_buf_ioend(bp, 1);
1217         bio_put(bio);
1218 }
1219
1220 STATIC void
1221 _xfs_buf_ioapply(
1222         xfs_buf_t               *bp)
1223 {
1224         int                     rw, map_i, total_nr_pages, nr_pages;
1225         struct bio              *bio;
1226         int                     offset = bp->b_offset;
1227         int                     size = bp->b_count_desired;
1228         sector_t                sector = bp->b_bn;
1229         unsigned int            blocksize = bp->b_target->bt_bsize;
1230
1231         total_nr_pages = bp->b_page_count;
1232         map_i = 0;
1233
1234         if (bp->b_flags & XBF_ORDERED) {
1235                 ASSERT(!(bp->b_flags & XBF_READ));
1236                 rw = WRITE_FLUSH_FUA;
1237         } else if (bp->b_flags & XBF_LOG_BUFFER) {
1238                 ASSERT(!(bp->b_flags & XBF_READ_AHEAD));
1239                 bp->b_flags &= ~_XBF_RUN_QUEUES;
1240                 rw = (bp->b_flags & XBF_WRITE) ? WRITE_SYNC : READ_SYNC;
1241         } else if (bp->b_flags & _XBF_RUN_QUEUES) {
1242                 ASSERT(!(bp->b_flags & XBF_READ_AHEAD));
1243                 bp->b_flags &= ~_XBF_RUN_QUEUES;
1244                 rw = (bp->b_flags & XBF_WRITE) ? WRITE_META : READ_META;
1245         } else {
1246                 rw = (bp->b_flags & XBF_WRITE) ? WRITE :
1247                      (bp->b_flags & XBF_READ_AHEAD) ? READA : READ;
1248         }
1249
1250         /* Special code path for reading a sub-page-size buffer --
1251          * we populate the whole page, and hence the other metadata
1252          * in the same page.  This optimization is only valid when the
1253          * filesystem block size is not smaller than the page size.
1254          */
1255         if ((bp->b_buffer_length < PAGE_CACHE_SIZE) &&
1256             ((bp->b_flags & (XBF_READ|_XBF_PAGE_LOCKED)) ==
1257               (XBF_READ|_XBF_PAGE_LOCKED)) &&
1258             (blocksize >= PAGE_CACHE_SIZE)) {
1259                 bio = bio_alloc(GFP_NOIO, 1);
1260
1261                 bio->bi_bdev = bp->b_target->bt_bdev;
1262                 bio->bi_sector = sector - (offset >> BBSHIFT);
1263                 bio->bi_end_io = xfs_buf_bio_end_io;
1264                 bio->bi_private = bp;
1265
1266                 bio_add_page(bio, bp->b_pages[0], PAGE_CACHE_SIZE, 0);
1267                 size = 0;
1268
1269                 atomic_inc(&bp->b_io_remaining);
1270
1271                 goto submit_io;
1272         }
1273
1274 next_chunk:
1275         atomic_inc(&bp->b_io_remaining);
1276         nr_pages = BIO_MAX_SECTORS >> (PAGE_SHIFT - BBSHIFT);
1277         if (nr_pages > total_nr_pages)
1278                 nr_pages = total_nr_pages;
1279
1280         bio = bio_alloc(GFP_NOIO, nr_pages);
1281         bio->bi_bdev = bp->b_target->bt_bdev;
1282         bio->bi_sector = sector;
1283         bio->bi_end_io = xfs_buf_bio_end_io;
1284         bio->bi_private = bp;
1285
1286         for (; size && nr_pages; nr_pages--, map_i++) {
1287                 int     rbytes, nbytes = PAGE_CACHE_SIZE - offset;
1288
1289                 if (nbytes > size)
1290                         nbytes = size;
1291
1292                 rbytes = bio_add_page(bio, bp->b_pages[map_i], nbytes, offset);
1293                 if (rbytes < nbytes)
1294                         break;
1295
1296                 offset = 0;
1297                 sector += nbytes >> BBSHIFT;
1298                 size -= nbytes;
1299                 total_nr_pages--;
1300         }
1301
1302 submit_io:
1303         if (likely(bio->bi_size)) {
1304                 if (xfs_buf_is_vmapped(bp)) {
1305                         flush_kernel_vmap_range(bp->b_addr,
1306                                                 xfs_buf_vmap_len(bp));
1307                 }
1308                 submit_bio(rw, bio);
1309                 if (size)
1310                         goto next_chunk;
1311         } else {
1312                 /*
1313                  * if we get here, no pages were added to the bio. However,
1314                  * we can't just error out here - if the pages are locked then
1315                  * we have to unlock them otherwise we can hang on a later
1316                  * access to the page.
1317                  */
1318                 xfs_buf_ioerror(bp, EIO);
1319                 if (bp->b_flags & _XBF_PAGE_LOCKED) {
1320                         int i;
1321                         for (i = 0; i < bp->b_page_count; i++)
1322                                 unlock_page(bp->b_pages[i]);
1323                 }
1324                 bio_put(bio);
1325         }
1326 }
1327
1328 int
1329 xfs_buf_iorequest(
1330         xfs_buf_t               *bp)
1331 {
1332         trace_xfs_buf_iorequest(bp, _RET_IP_);
1333
1334         if (bp->b_flags & XBF_DELWRI) {
1335                 xfs_buf_delwri_queue(bp, 1);
1336                 return 0;
1337         }
1338
1339         if (bp->b_flags & XBF_WRITE) {
1340                 xfs_buf_wait_unpin(bp);
1341         }
1342
1343         xfs_buf_hold(bp);
1344
1345         /* Set the count to 1 initially; this stops an I/O
1346          * completion callout that happens before we have started
1347          * all the I/O from calling xfs_buf_ioend too early.
1348          */
1349         atomic_set(&bp->b_io_remaining, 1);
1350         _xfs_buf_ioapply(bp);
1351         _xfs_buf_ioend(bp, 0);
1352
1353         xfs_buf_rele(bp);
1354         return 0;
1355 }
1356
1357 /*
1358  *      Waits for I/O to complete on the buffer supplied.
1359  *      It returns immediately if no I/O is pending.
1360  *      It returns the I/O error code, if any, or 0 if there was no error.
1361  */
1362 int
1363 xfs_buf_iowait(
1364         xfs_buf_t               *bp)
1365 {
1366         trace_xfs_buf_iowait(bp, _RET_IP_);
1367
1368         if (atomic_read(&bp->b_io_remaining))
1369                 blk_run_address_space(bp->b_target->bt_mapping);
1370         wait_for_completion(&bp->b_iowait);
1371
1372         trace_xfs_buf_iowait_done(bp, _RET_IP_);
1373         return bp->b_error;
1374 }
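
/*
 * Illustrative sketch (not part of the original file): the synchronous I/O
 * pattern used by _xfs_buf_read() above - submit the request, then wait for
 * completion and pick up any error recorded on the buffer.
 *
 *	bp->b_flags |= XBF_READ;
 *	xfs_buf_iorequest(bp);
 *	error = xfs_buf_iowait(bp);
 */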
1375
1376 xfs_caddr_t
1377 xfs_buf_offset(
1378         xfs_buf_t               *bp,
1379         size_t                  offset)
1380 {
1381         struct page             *page;
1382
1383         if (bp->b_flags & XBF_MAPPED)
1384                 return XFS_BUF_PTR(bp) + offset;
1385
1386         offset += bp->b_offset;
1387         page = bp->b_pages[offset >> PAGE_CACHE_SHIFT];
1388         return (xfs_caddr_t)page_address(page) + (offset & (PAGE_CACHE_SIZE-1));
1389 }
1390
1391 /*
1392  *      Move data into or out of a buffer.
1393  */
1394 void
1395 xfs_buf_iomove(
1396         xfs_buf_t               *bp,    /* buffer to process            */
1397         size_t                  boff,   /* starting buffer offset       */
1398         size_t                  bsize,  /* length to copy               */
1399         void                    *data,  /* data address                 */
1400         xfs_buf_rw_t            mode)   /* read/write/zero flag         */
1401 {
1402         size_t                  bend, cpoff, csize;
1403         struct page             *page;
1404
1405         bend = boff + bsize;
1406         while (boff < bend) {
1407                 page = bp->b_pages[xfs_buf_btoct(boff + bp->b_offset)];
1408                 cpoff = xfs_buf_poff(boff + bp->b_offset);
1409                 csize = min_t(size_t,
1410                               PAGE_CACHE_SIZE-cpoff, bp->b_count_desired-boff);
1411
1412                 ASSERT(((csize + cpoff) <= PAGE_CACHE_SIZE));
1413
1414                 switch (mode) {
1415                 case XBRW_ZERO:
1416                         memset(page_address(page) + cpoff, 0, csize);
1417                         break;
1418                 case XBRW_READ:
1419                         memcpy(data, page_address(page) + cpoff, csize);
1420                         break;
1421                 case XBRW_WRITE:
1422                         memcpy(page_address(page) + cpoff, data, csize);
1423                 }
1424
1425                 boff += csize;
1426                 data += csize;
1427         }
1428 }
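
/*
 * Illustrative sketch (not part of the original file): zeroing and then
 * filling a byte range of a buffer regardless of whether it is mapped.
 * The offsets, length and data pointer are illustrative; a NULL data
 * pointer is fine for XBRW_ZERO since it is not dereferenced.
 *
 *	xfs_buf_iomove(bp, boff, len, NULL, XBRW_ZERO);
 *	xfs_buf_iomove(bp, boff, len, data, XBRW_WRITE);
 */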
1429
1430 /*
1431  *      Handling of buffer targets (buftargs).
1432  */
1433
1434 /*
1435  *      Wait for any bufs with callbacks that have been submitted but
1436  *      have not yet returned... walk the per-AG buffer trees for the target.
1437  */
1438 void
1439 xfs_wait_buftarg(
1440         struct xfs_buftarg      *btp)
1441 {
1442         struct xfs_perag        *pag;
1443         uint                    i;
1444
1445         for (i = 0; i < btp->bt_mount->m_sb.sb_agcount; i++) {
1446                 pag = xfs_perag_get(btp->bt_mount, i);
1447                 spin_lock(&pag->pag_buf_lock);
1448                 while (rb_first(&pag->pag_buf_tree)) {
1449                         spin_unlock(&pag->pag_buf_lock);
1450                         delay(100);
1451                         spin_lock(&pag->pag_buf_lock);
1452                 }
1453                 spin_unlock(&pag->pag_buf_lock);
1454                 xfs_perag_put(pag);
1455         }
1456 }
1457
1458 int
1459 xfs_buftarg_shrink(
1460         struct shrinker         *shrink,
1461         int                     nr_to_scan,
1462         gfp_t                   mask)
1463 {
1464         struct xfs_buftarg      *btp = container_of(shrink,
1465                                         struct xfs_buftarg, bt_shrinker);
1466         if (nr_to_scan) {
1467                 if (test_bit(XBT_FORCE_SLEEP, &btp->bt_flags))
1468                         return -1;
1469                 if (list_empty(&btp->bt_delwrite_queue))
1470                         return -1;
1471                 set_bit(XBT_FORCE_FLUSH, &btp->bt_flags);
1472                 wake_up_process(btp->bt_task);
1473         }
1474         return list_empty(&btp->bt_delwrite_queue) ? -1 : 1;
1475 }
1476
1477 void
1478 xfs_free_buftarg(
1479         struct xfs_mount        *mp,
1480         struct xfs_buftarg      *btp)
1481 {
1482         unregister_shrinker(&btp->bt_shrinker);
1483
1484         xfs_flush_buftarg(btp, 1);
1485         if (mp->m_flags & XFS_MOUNT_BARRIER)
1486                 xfs_blkdev_issue_flush(btp);
1487         iput(btp->bt_mapping->host);
1488
1489         kthread_stop(btp->bt_task);
1490         kmem_free(btp);
1491 }
1492
1493 STATIC int
1494 xfs_setsize_buftarg_flags(
1495         xfs_buftarg_t           *btp,
1496         unsigned int            blocksize,
1497         unsigned int            sectorsize,
1498         int                     verbose)
1499 {
1500         btp->bt_bsize = blocksize;
1501         btp->bt_sshift = ffs(sectorsize) - 1;
1502         btp->bt_smask = sectorsize - 1;
1503
1504         if (set_blocksize(btp->bt_bdev, sectorsize)) {
1505                 printk(KERN_WARNING
1506                         "XFS: Cannot set_blocksize to %u on device %s\n",
1507                         sectorsize, XFS_BUFTARG_NAME(btp));
1508                 return EINVAL;
1509         }
1510
1511         if (verbose &&
1512             (PAGE_CACHE_SIZE / BITS_PER_LONG) > sectorsize) {
1513                 printk(KERN_WARNING
1514                         "XFS: %u byte sectors in use on device %s.  "
1515                         "This is suboptimal; %u or greater is ideal.\n",
1516                         sectorsize, XFS_BUFTARG_NAME(btp),
1517                         (unsigned int)PAGE_CACHE_SIZE / BITS_PER_LONG);
1518         }
1519
1520         return 0;
1521 }
1522
1523 /*
1524  *      When allocating the initial buffer target we have not yet
1525  *      read in the superblock, so we don't know what size sectors
1526  *      are being used at this early stage.  Play safe.
1527  */
1528 STATIC int
1529 xfs_setsize_buftarg_early(
1530         xfs_buftarg_t           *btp,
1531         struct block_device     *bdev)
1532 {
1533         return xfs_setsize_buftarg_flags(btp,
1534                         PAGE_CACHE_SIZE, bdev_logical_block_size(bdev), 0);
1535 }
1536
1537 int
1538 xfs_setsize_buftarg(
1539         xfs_buftarg_t           *btp,
1540         unsigned int            blocksize,
1541         unsigned int            sectorsize)
1542 {
1543         return xfs_setsize_buftarg_flags(btp, blocksize, sectorsize, 1);
1544 }
1545
1546 STATIC int
1547 xfs_mapping_buftarg(
1548         xfs_buftarg_t           *btp,
1549         struct block_device     *bdev)
1550 {
1551         struct backing_dev_info *bdi;
1552         struct inode            *inode;
1553         struct address_space    *mapping;
1554         static const struct address_space_operations mapping_aops = {
1555                 .sync_page = block_sync_page,
1556                 .migratepage = fail_migrate_page,
1557         };
1558
1559         inode = new_inode(bdev->bd_inode->i_sb);
1560         if (!inode) {
1561                 printk(KERN_WARNING
1562                         "XFS: Cannot allocate mapping inode for device %s\n",
1563                         XFS_BUFTARG_NAME(btp));
1564                 return ENOMEM;
1565         }
1566         inode->i_ino = get_next_ino();
1567         inode->i_mode = S_IFBLK;
1568         inode->i_bdev = bdev;
1569         inode->i_rdev = bdev->bd_dev;
1570         bdi = blk_get_backing_dev_info(bdev);
1571         if (!bdi)
1572                 bdi = &default_backing_dev_info;
1573         mapping = &inode->i_data;
1574         mapping->a_ops = &mapping_aops;
1575         mapping->backing_dev_info = bdi;
1576         mapping_set_gfp_mask(mapping, GFP_NOFS);
1577         btp->bt_mapping = mapping;
1578         return 0;
1579 }
1580
1581 STATIC int
1582 xfs_alloc_delwrite_queue(
1583         xfs_buftarg_t           *btp,
1584         const char              *fsname)
1585 {
1586         INIT_LIST_HEAD(&btp->bt_delwrite_queue);
1587         spin_lock_init(&btp->bt_delwrite_lock);
1588         btp->bt_flags = 0;
1589         btp->bt_task = kthread_run(xfsbufd, btp, "xfsbufd/%s", fsname);
1590         if (IS_ERR(btp->bt_task))
1591                 return PTR_ERR(btp->bt_task);
1592         return 0;
1593 }
1594
1595 xfs_buftarg_t *
1596 xfs_alloc_buftarg(
1597         struct xfs_mount        *mp,
1598         struct block_device     *bdev,
1599         int                     external,
1600         const char              *fsname)
1601 {
1602         xfs_buftarg_t           *btp;
1603
1604         btp = kmem_zalloc(sizeof(*btp), KM_SLEEP);
1605
1606         btp->bt_mount = mp;
1607         btp->bt_dev =  bdev->bd_dev;
1608         btp->bt_bdev = bdev;
1609         if (xfs_setsize_buftarg_early(btp, bdev))
1610                 goto error;
1611         if (xfs_mapping_buftarg(btp, bdev))
1612                 goto error;
1613         if (xfs_alloc_delwrite_queue(btp, fsname))
1614                 goto error;
1615         btp->bt_shrinker.shrink = xfs_buftarg_shrink;
1616         btp->bt_shrinker.seeks = DEFAULT_SEEKS;
1617         register_shrinker(&btp->bt_shrinker);
1618         return btp;
1619
1620 error:
1621         kmem_free(btp);
1622         return NULL;
1623 }
1624
1625
1626 /*
1627  *      Delayed write buffer handling
1628  */
1629 STATIC void
1630 xfs_buf_delwri_queue(
1631         xfs_buf_t               *bp,
1632         int                     unlock)
1633 {
1634         struct list_head        *dwq = &bp->b_target->bt_delwrite_queue;
1635         spinlock_t              *dwlk = &bp->b_target->bt_delwrite_lock;
1636
1637         trace_xfs_buf_delwri_queue(bp, _RET_IP_);
1638
1639         ASSERT((bp->b_flags&(XBF_DELWRI|XBF_ASYNC)) == (XBF_DELWRI|XBF_ASYNC));
1640
1641         spin_lock(dwlk);
1642         /* If already in the queue, dequeue and place at tail */
1643         if (!list_empty(&bp->b_list)) {
1644                 ASSERT(bp->b_flags & _XBF_DELWRI_Q);
1645                 if (unlock)
1646                         atomic_dec(&bp->b_hold);
1647                 list_del(&bp->b_list);
1648         }
1649
1650         if (list_empty(dwq)) {
1651                 /* start xfsbufd as it is about to have something to do */
1652                 wake_up_process(bp->b_target->bt_task);
1653         }
1654
1655         bp->b_flags |= _XBF_DELWRI_Q;
1656         list_add_tail(&bp->b_list, dwq);
1657         bp->b_queuetime = jiffies;
1658         spin_unlock(dwlk);
1659
1660         if (unlock)
1661                 xfs_buf_unlock(bp);
1662 }
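/*
 * Illustrative sketch, not part of the original file: based on the ASSERT and
 * the unlock handling above, a hypothetical caller that holds the buffer
 * locked would queue it for delayed write roughly as follows (kept under
 * "#if 0" so it is never compiled).  Note that adding to an empty list wakes
 * the xfsbufd thread, pairing with its MAX_SCHEDULE_TIMEOUT sleep later in
 * this file.
 */
#if 0
        bp->b_flags |= (XBF_DELWRI | XBF_ASYNC); /* both required by the ASSERT */
        xfs_buf_delwri_queue(bp, 1);             /* queue bp and drop its lock */
#endif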
1663
1664 void
1665 xfs_buf_delwri_dequeue(
1666         xfs_buf_t               *bp)
1667 {
1668         spinlock_t              *dwlk = &bp->b_target->bt_delwrite_lock;
1669         int                     dequeued = 0;
1670
1671         spin_lock(dwlk);
1672         if ((bp->b_flags & XBF_DELWRI) && !list_empty(&bp->b_list)) {
1673                 ASSERT(bp->b_flags & _XBF_DELWRI_Q);
1674                 list_del_init(&bp->b_list);
1675                 dequeued = 1;
1676         }
1677         bp->b_flags &= ~(XBF_DELWRI|_XBF_DELWRI_Q);
1678         spin_unlock(dwlk);
1679
1680         if (dequeued)
1681                 xfs_buf_rele(bp);
1682
1683         trace_xfs_buf_delwri_dequeue(bp, _RET_IP_);
1684 }
1685
1686 /*
1687  * If a delwri buffer needs to be pushed before it has aged out, then promote
1688  * it to the head of the delwri queue so that it will be flushed on the next
1689  * xfsbufd run. We do this by resetting the queuetime of the buffer to be older
1690  * than the age currently needed to flush the buffer. Hence the next time the
1691  * xfsbufd sees it is guaranteed to be considered old enough to flush.
1692  */
1693 void
1694 xfs_buf_delwri_promote(
1695         struct xfs_buf  *bp)
1696 {
1697         struct xfs_buftarg *btp = bp->b_target;
1698         long            age = xfs_buf_age_centisecs * msecs_to_jiffies(10) + 1;
1699
1700         ASSERT(bp->b_flags & XBF_DELWRI);
1701         ASSERT(bp->b_flags & _XBF_DELWRI_Q);
1702
1703         /*
1704          * Check the buffer age before locking the delayed write queue as we
1705          * don't need to promote buffers that are already past the flush age.
1706          */
1707         if (bp->b_queuetime < jiffies - age)
1708                 return;
1709         bp->b_queuetime = jiffies - age;
1710         spin_lock(&btp->bt_delwrite_lock);
1711         list_move(&bp->b_list, &btp->bt_delwrite_queue);
1712         spin_unlock(&btp->bt_delwrite_lock);
1713 }
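/*
 * To see why the backdating works: after it, the age test in
 * xfs_buf_delwri_split() below,
 *
 *      time_before(jiffies, bp->b_queuetime + age)
 *
 * evaluates to false (b_queuetime + age is already behind the current
 * jiffies), so the buffer is written out on the next xfsbufd pass even
 * without the XBT_FORCE_FLUSH override.
 */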
1714
1715 STATIC void
1716 xfs_buf_runall_queues(
1717         struct workqueue_struct *queue)
1718 {
1719         flush_workqueue(queue);
1720 }
1721
1722 /*
1723  * Move as many buffers as specified to the supplied list,
1724  * indicating if we skipped any buffers to prevent deadlocks.
1725  */
1726 STATIC int
1727 xfs_buf_delwri_split(
1728         xfs_buftarg_t   *target,
1729         struct list_head *list,
1730         unsigned long   age)
1731 {
1732         xfs_buf_t       *bp, *n;
1733         struct list_head *dwq = &target->bt_delwrite_queue;
1734         spinlock_t      *dwlk = &target->bt_delwrite_lock;
1735         int             skipped = 0;
1736         int             force;
1737
1738         force = test_and_clear_bit(XBT_FORCE_FLUSH, &target->bt_flags);
1739         INIT_LIST_HEAD(list);
1740         spin_lock(dwlk);
1741         list_for_each_entry_safe(bp, n, dwq, b_list) {
1742                 ASSERT(bp->b_flags & XBF_DELWRI);
1743
1744                 if (!XFS_BUF_ISPINNED(bp) && !xfs_buf_cond_lock(bp)) {
1745                         if (!force &&
1746                             time_before(jiffies, bp->b_queuetime + age)) {
1747                                 xfs_buf_unlock(bp);
1748                                 break;
1749                         }
1750
1751                         bp->b_flags &= ~(XBF_DELWRI|_XBF_DELWRI_Q|
1752                                          _XBF_RUN_QUEUES);
1753                         bp->b_flags |= XBF_WRITE;
1754                         list_move_tail(&bp->b_list, list);
1755                         trace_xfs_buf_delwri_split(bp, _RET_IP_);
1756                 } else
1757                         skipped++;
1758         }
1759         spin_unlock(dwlk);
1760
1761         return skipped;
1762
1763 }
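/*
 * Illustrative sketch, not part of the original file: both consumers of this
 * helper (xfsbufd() and xfs_flush_buftarg() below) follow the same
 * split / sort / submit pattern, roughly (kept under "#if 0" so it is never
 * compiled):
 */
#if 0
{
        struct list_head        tmp;
        struct xfs_buf          *bp;

        xfs_buf_delwri_split(target, &tmp, age);        /* grab aged buffers */
        list_sort(NULL, &tmp, xfs_buf_cmp);             /* order by disk address */
        while (!list_empty(&tmp)) {
                bp = list_first_entry(&tmp, struct xfs_buf, b_list);
                list_del_init(&bp->b_list);
                xfs_bdstrat_cb(bp);                     /* issue the async write */
        }
}
#endif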
1764
1765 /*
1766  * The compare function is more complex than it needs to be because
1767  * the return value is only 32 bits wide and we are comparing
1768  * 64-bit values.
1769  */
1770 static int
1771 xfs_buf_cmp(
1772         void            *priv,
1773         struct list_head *a,
1774         struct list_head *b)
1775 {
1776         struct xfs_buf  *ap = container_of(a, struct xfs_buf, b_list);
1777         struct xfs_buf  *bp = container_of(b, struct xfs_buf, b_list);
1778         xfs_daddr_t             diff;
1779
1780         diff = ap->b_bn - bp->b_bn;
1781         if (diff < 0)
1782                 return -1;
1783         if (diff > 0)
1784                 return 1;
1785         return 0;
1786 }
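/*
 * Illustrative sketch, not part of the original file: why not simply
 * "return ap->b_bn - bp->b_bn;"?  The difference is 64 bits wide while the
 * return value is a 32-bit int, so truncation can lose the ordering (kept
 * under "#if 0" so it is never compiled):
 */
#if 0
{
        xfs_daddr_t     a = 0x100000000LL;              /* block 2^32 */
        xfs_daddr_t     b = 0;
        int             truncated = (int)(a - b);       /* 0: looks "equal" */
        int             correct = (a > b) ? 1 : ((a < b) ? -1 : 0);     /* 1 */
}
#endif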
1787
1788 void
1789 xfs_buf_delwri_sort(
1790         xfs_buftarg_t   *target,
1791         struct list_head *list)
1792 {
1793         list_sort(NULL, list, xfs_buf_cmp);
1794 }
1795
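/*
 * The delayed write buffer flushing daemon.  There is one xfsbufd per buffer
 * target; it sleeps until either the flush timer expires or it is woken by
 * xfs_buf_delwri_queue(), then writes out the delwri buffers that have aged
 * sufficiently, sorted by disk address.
 */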
1796 STATIC int
1797 xfsbufd(
1798         void            *data)
1799 {
1800         xfs_buftarg_t   *target = (xfs_buftarg_t *)data;
1801
1802         current->flags |= PF_MEMALLOC;
1803
1804         set_freezable();
1805
1806         do {
1807                 long    age = xfs_buf_age_centisecs * msecs_to_jiffies(10);
1808                 long    tout = xfs_buf_timer_centisecs * msecs_to_jiffies(10);
1809                 int     count = 0;
1810                 struct list_head tmp;
1811
1812                 if (unlikely(freezing(current))) {
1813                         set_bit(XBT_FORCE_SLEEP, &target->bt_flags);
1814                         refrigerator();
1815                 } else {
1816                         clear_bit(XBT_FORCE_SLEEP, &target->bt_flags);
1817                 }
1818
1819                 /* sleep for a long time if there is nothing to do. */
1820                 if (list_empty(&target->bt_delwrite_queue))
1821                         tout = MAX_SCHEDULE_TIMEOUT;
1822                 schedule_timeout_interruptible(tout);
1823
1824                 xfs_buf_delwri_split(target, &tmp, age);
1825                 list_sort(NULL, &tmp, xfs_buf_cmp);
1826                 while (!list_empty(&tmp)) {
1827                         struct xfs_buf *bp;
1828                         bp = list_first_entry(&tmp, struct xfs_buf, b_list);
1829                         list_del_init(&bp->b_list);
1830                         xfs_bdstrat_cb(bp);
1831                         count++;
1832                 }
1833                 if (count)
1834                         blk_run_address_space(target->bt_mapping);
1835
1836         } while (!kthread_should_stop());
1837
1838         return 0;
1839 }
1840
1841 /*
1842  *      Flush all incore delayed-write buffers belonging to the given target,
1843  *      optionally waiting for the I/O to complete. This is used in filesystem
1844  *      error handling to preserve the consistency of the metadata.
1845  */
1846 int
1847 xfs_flush_buftarg(
1848         xfs_buftarg_t   *target,
1849         int             wait)
1850 {
1851         xfs_buf_t       *bp;
1852         int             pincount = 0;
1853         LIST_HEAD(tmp_list);
1854         LIST_HEAD(wait_list);
1855
1856         xfs_buf_runall_queues(xfsconvertd_workqueue);
1857         xfs_buf_runall_queues(xfsdatad_workqueue);
1858         xfs_buf_runall_queues(xfslogd_workqueue);
1859
1860         set_bit(XBT_FORCE_FLUSH, &target->bt_flags);
1861         pincount = xfs_buf_delwri_split(target, &tmp_list, 0);
1862
1863         /*
1864          * We have dropped the delayed write list lock, so now walk the
1865          * temporary list.  All I/O is issued asynchronously; if we need to
1866          * wait for completion we do so after all of the I/O has been issued.
1867          */
1868         list_sort(NULL, &tmp_list, xfs_buf_cmp);
1869         while (!list_empty(&tmp_list)) {
1870                 bp = list_first_entry(&tmp_list, struct xfs_buf, b_list);
1871                 ASSERT(target == bp->b_target);
1872                 list_del_init(&bp->b_list);
1873                 if (wait) {
1874                         bp->b_flags &= ~XBF_ASYNC;
1875                         list_add(&bp->b_list, &wait_list);
1876                 }
1877                 xfs_bdstrat_cb(bp);
1878         }
1879
1880         if (wait) {
1881                 /* Expedite and wait for I/O to complete. */
1882                 blk_run_address_space(target->bt_mapping);
1883                 while (!list_empty(&wait_list)) {
1884                         bp = list_first_entry(&wait_list, struct xfs_buf, b_list);
1885
1886                         list_del_init(&bp->b_list);
1887                         xfs_buf_iowait(bp);
1888                         xfs_buf_relse(bp);
1889                 }
1890         }
1891
1892         return pincount;
1893 }
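/*
 * Illustrative sketch, not part of the original file: the return value above
 * is the number of pinned or locked buffers that had to be skipped, so a
 * hypothetical caller that must leave nothing behind might retry until it
 * reaches zero (kept under "#if 0" so it is never compiled):
 */
#if 0
        while (xfs_flush_buftarg(target, 1) != 0)
                msleep(100);            /* back off briefly before retrying */
#endif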
1894
1895 int __init
1896 xfs_buf_init(void)
1897 {
1898         xfs_buf_zone = kmem_zone_init_flags(sizeof(xfs_buf_t), "xfs_buf",
1899                                                 KM_ZONE_HWALIGN, NULL);
1900         if (!xfs_buf_zone)
1901                 goto out;
1902
1903         xfslogd_workqueue = alloc_workqueue("xfslogd",
1904                                         WQ_MEM_RECLAIM | WQ_HIGHPRI, 1);
1905         if (!xfslogd_workqueue)
1906                 goto out_free_buf_zone;
1907
1908         xfsdatad_workqueue = create_workqueue("xfsdatad");
1909         if (!xfsdatad_workqueue)
1910                 goto out_destroy_xfslogd_workqueue;
1911
1912         xfsconvertd_workqueue = create_workqueue("xfsconvertd");
1913         if (!xfsconvertd_workqueue)
1914                 goto out_destroy_xfsdatad_workqueue;
1915
1916         return 0;
1917
1918  out_destroy_xfsdatad_workqueue:
1919         destroy_workqueue(xfsdatad_workqueue);
1920  out_destroy_xfslogd_workqueue:
1921         destroy_workqueue(xfslogd_workqueue);
1922  out_free_buf_zone:
1923         kmem_zone_destroy(xfs_buf_zone);
1924  out:
1925         return -ENOMEM;
1926 }
1927
1928 void
1929 xfs_buf_terminate(void)
1930 {
1931         destroy_workqueue(xfsconvertd_workqueue);
1932         destroy_workqueue(xfsdatad_workqueue);
1933         destroy_workqueue(xfslogd_workqueue);
1934         kmem_zone_destroy(xfs_buf_zone);
1935 }
1936
1937 #ifdef CONFIG_KDB_MODULES
1938 struct list_head *
1939 xfs_get_buftarg_list(void)
1940 {
1941         return &xfs_buftarg_list;
1942 }
1943 #endif