/*
 *  linux/fs/buffer.c
 *
 *  Copyright (C) 1991, 1992, 2002  Linus Torvalds
 */

/*
 * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
 *
 * Removed a lot of unnecessary code and simplified things now that
 * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
 *
 * Speed up hash, lru, and free list operations.  Use gfp() for allocating
 * hash table, use SLAB cache for buffer heads. SMP threading.  -DaveM
 *
 * Added 32k buffer block sizes - these are required for older ARM systems. - RMK
 *
 * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
 */
#include <linux/kernel.h>
#include <linux/syscalls.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/capability.h>
#include <linux/blkdev.h>
#include <linux/file.h>
#include <linux/quotaops.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/writeback.h>
#include <linux/hash.h>
#include <linux/suspend.h>
#include <linux/buffer_head.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/bio.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/bitops.h>
#include <linux/mpage.h>
#include <linux/bit_spinlock.h>

static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);

#define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)

inline void
init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
{
        bh->b_end_io = handler;
        bh->b_private = private;
}
EXPORT_SYMBOL(init_buffer);

static int sync_buffer(void *word)
{
        struct block_device *bd;
        struct buffer_head *bh
                = container_of(word, struct buffer_head, b_state);

        smp_mb();
        bd = bh->b_bdev;
        if (bd)
                blk_run_address_space(bd->bd_inode->i_mapping);
        io_schedule();
        return 0;
}

void __lock_buffer(struct buffer_head *bh)
{
        wait_on_bit_lock(&bh->b_state, BH_Lock, sync_buffer,
                                                        TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__lock_buffer);

void unlock_buffer(struct buffer_head *bh)
{
        clear_bit_unlock(BH_Lock, &bh->b_state);
        smp_mb__after_clear_bit();
        wake_up_bit(&bh->b_state, BH_Lock);
}
EXPORT_SYMBOL(unlock_buffer);

/*
 * Block until a buffer comes unlocked.  This doesn't stop it
 * from becoming locked again - you have to lock it yourself
 * if you want to preserve its state.
 */
void __wait_on_buffer(struct buffer_head * bh)
{
        wait_on_bit(&bh->b_state, BH_Lock, sync_buffer, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__wait_on_buffer);
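
/*
 * Usage sketch (illustrative, not part of the original file): waiting
 * only guarantees that previously submitted I/O has finished, so a
 * caller that must act on a quiescent buffer takes the lock itself:
 *
 *      lock_buffer(bh);
 *      ... examine or modify the buffer ...
 *      unlock_buffer(bh);
 *
 * while a caller that merely needs the I/O result does:
 *
 *      wait_on_buffer(bh);
 *      if (!buffer_uptodate(bh))
 *              ... the I/O failed ...
 */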

static void
__clear_page_buffers(struct page *page)
{
        ClearPagePrivate(page);
        set_page_private(page, 0);
        page_cache_release(page);
}


static int quiet_error(struct buffer_head *bh)
{
        if (!test_bit(BH_Quiet, &bh->b_state) && printk_ratelimit())
                return 0;
        return 1;
}


static void buffer_io_error(struct buffer_head *bh)
{
        char b[BDEVNAME_SIZE];
        printk(KERN_ERR "Buffer I/O error on device %s, logical block %Lu\n",
                        bdevname(bh->b_bdev, b),
                        (unsigned long long)bh->b_blocknr);
}

/*
 * End-of-IO handler helper function which does not touch the bh after
 * unlocking it.
 * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
 * a race there is benign: unlock_buffer() only uses the bh's address for
 * hashing after unlocking the buffer, so it doesn't actually touch the bh
 * itself.
 */
static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate)
{
        if (uptodate) {
                set_buffer_uptodate(bh);
        } else {
                /* This happens, due to failed READA attempts. */
                clear_buffer_uptodate(bh);
        }
        unlock_buffer(bh);
}

/*
 * Default synchronous end-of-IO handler..  Just mark it up-to-date and
 * unlock the buffer. This is what ll_rw_block uses too.
 */
void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
{
        __end_buffer_read_notouch(bh, uptodate);
        put_bh(bh);
}
EXPORT_SYMBOL(end_buffer_read_sync);

void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
{
        char b[BDEVNAME_SIZE];

        if (uptodate) {
                set_buffer_uptodate(bh);
        } else {
                if (!buffer_eopnotsupp(bh) && !quiet_error(bh)) {
                        buffer_io_error(bh);
                        printk(KERN_WARNING "lost page write due to "
                                        "I/O error on %s\n",
                                       bdevname(bh->b_bdev, b));
                }
                set_buffer_write_io_error(bh);
                clear_buffer_uptodate(bh);
        }
        unlock_buffer(bh);
        put_bh(bh);
}
EXPORT_SYMBOL(end_buffer_write_sync);
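
/*
 * Usage sketch (illustrative): these handlers pair with submit_bh() for
 * simple synchronous I/O.  A hand-rolled read, as __bread_slow() below
 * does it:
 *
 *      lock_buffer(bh);
 *      get_bh(bh);
 *      bh->b_end_io = end_buffer_read_sync;
 *      submit_bh(READ, bh);
 *      wait_on_buffer(bh);
 *      if (!buffer_uptodate(bh))
 *              ... the read failed ...
 *
 * end_buffer_read_sync() unlocks the buffer and drops the reference
 * taken by get_bh() when the I/O completes.
 */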

/*
 * Various filesystems appear to want __find_get_block to be non-blocking.
 * But it's the page lock which protects the buffers.  To get around this,
 * we get exclusion from try_to_free_buffers with the blockdev mapping's
 * private_lock.
 *
 * Hack idea: for the blockdev mapping, i_bufferlist_lock contention
 * may be quite high.  This code could TryLock the page, and if that
 * succeeds, there is no need to take private_lock. (But if
 * private_lock is contended then so is mapping->tree_lock).
 */
static struct buffer_head *
__find_get_block_slow(struct block_device *bdev, sector_t block)
{
        struct inode *bd_inode = bdev->bd_inode;
        struct address_space *bd_mapping = bd_inode->i_mapping;
        struct buffer_head *ret = NULL;
        pgoff_t index;
        struct buffer_head *bh;
        struct buffer_head *head;
        struct page *page;
        int all_mapped = 1;

        index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits);
        page = find_get_page(bd_mapping, index);
        if (!page)
                goto out;

        spin_lock(&bd_mapping->private_lock);
        if (!page_has_buffers(page))
                goto out_unlock;
        head = page_buffers(page);
        bh = head;
        do {
                if (!buffer_mapped(bh))
                        all_mapped = 0;
                else if (bh->b_blocknr == block) {
                        ret = bh;
                        get_bh(bh);
                        goto out_unlock;
                }
                bh = bh->b_this_page;
        } while (bh != head);

        /* We might be here because some of the buffers on this page are
         * not mapped.  This is due to various races between
         * file I/O on the block device and getblk.  It gets dealt with
         * elsewhere; don't buffer_error if we had some unmapped buffers.
         */
        if (all_mapped) {
                printk("__find_get_block_slow() failed. "
                        "block=%llu, b_blocknr=%llu\n",
                        (unsigned long long)block,
                        (unsigned long long)bh->b_blocknr);
                printk("b_state=0x%08lx, b_size=%zu\n",
                        bh->b_state, bh->b_size);
                printk("device blocksize: %d\n", 1 << bd_inode->i_blkbits);
        }
out_unlock:
        spin_unlock(&bd_mapping->private_lock);
        page_cache_release(page);
out:
        return ret;
}

/* If invalidate_buffers() will trash dirty buffers, it means some kind
   of fs corruption is going on. Trashing dirty data always implies losing
   information that was supposed to be just stored on the physical layer
   by the user.

   Thus invalidate_buffers in general usage is not allowed to trash
   dirty buffers. For example, ioctl(BLKFLSBUF) expects dirty data to
   be preserved.  These buffers are simply skipped.

   We also skip buffers which are still in use.  For example this can
   happen if a userspace program is reading the block device.

   NOTE: In the case where the user removed a removable-media disk even if
   there's still dirty data not synced on disk (due to a bug in the device
   driver or due to an error of the user), by not destroying the dirty
   buffers we could generate corruption also on the next media inserted.
   Thus a parameter is necessary to handle this case in the safest way
   possible (trying not to corrupt the newly inserted disk with data
   belonging to the old, now-corrupted disk). Also for the ramdisk, the
   natural thing to do in order to release the ramdisk memory is to
   destroy dirty buffers.

   These are two special cases. Normal usage implies that the device
   driver issues a sync on the device (without waiting for I/O completion)
   and then an invalidate_buffers call that doesn't trash dirty buffers.

   For handling cache coherency with the blkdev pagecache the 'update' case
   has been introduced. It is needed to re-read from disk any pinned
   buffer. NOTE: re-reading from disk is destructive so we can do it only
   when we assume nobody is changing the buffercache under our I/O and when
   we think the disk contains more recent information than the buffercache.
   The update == 1 pass marks the buffers we need to update, the update == 2
   pass does the actual I/O. */
void invalidate_bdev(struct block_device *bdev)
{
        struct address_space *mapping = bdev->bd_inode->i_mapping;

        if (mapping->nrpages == 0)
                return;

        invalidate_bh_lrus();
        lru_add_drain_all();    /* make sure all lru add caches are flushed */
        invalidate_mapping_pages(mapping, 0, -1);
}
EXPORT_SYMBOL(invalidate_bdev);

/*
 * Kick the writeback threads then try to free up some ZONE_NORMAL memory.
 */
static void free_more_memory(void)
{
        struct zone *zone;
        int nid;

        wakeup_flusher_threads(1024);
        yield();

        for_each_online_node(nid) {
                (void)first_zones_zonelist(node_zonelist(nid, GFP_NOFS),
                                                gfp_zone(GFP_NOFS), NULL,
                                                &zone);
                if (zone)
                        try_to_free_pages(node_zonelist(nid, GFP_NOFS), 0,
                                                GFP_NOFS, NULL);
        }
}

/*
 * I/O completion handler for block_read_full_page() - pages
 * which come unlocked at the end of I/O.
 */
static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
{
        unsigned long flags;
        struct buffer_head *first;
        struct buffer_head *tmp;
        struct page *page;
        int page_uptodate = 1;

        BUG_ON(!buffer_async_read(bh));

        page = bh->b_page;
        if (uptodate) {
                set_buffer_uptodate(bh);
        } else {
                clear_buffer_uptodate(bh);
                if (!quiet_error(bh))
                        buffer_io_error(bh);
                SetPageError(page);
        }

        /*
         * Be _very_ careful from here on. Bad things can happen if
         * two buffer heads end IO at almost the same time and both
         * decide that the page is now completely done.
         */
        first = page_buffers(page);
        local_irq_save(flags);
        bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
        clear_buffer_async_read(bh);
        unlock_buffer(bh);
        tmp = bh;
        do {
                if (!buffer_uptodate(tmp))
                        page_uptodate = 0;
                if (buffer_async_read(tmp)) {
                        BUG_ON(!buffer_locked(tmp));
                        goto still_busy;
                }
                tmp = tmp->b_this_page;
        } while (tmp != bh);
        bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
        local_irq_restore(flags);

        /*
         * If none of the buffers had errors and they are all
         * uptodate then we can set the page uptodate.
         */
        if (page_uptodate && !PageError(page))
                SetPageUptodate(page);
        unlock_page(page);
        return;

still_busy:
        bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
        local_irq_restore(flags);
        return;
}

/*
 * Completion handler for block_write_full_page() - pages which are unlocked
 * during I/O, and which have PageWriteback cleared upon I/O completion.
 */
void end_buffer_async_write(struct buffer_head *bh, int uptodate)
{
        char b[BDEVNAME_SIZE];
        unsigned long flags;
        struct buffer_head *first;
        struct buffer_head *tmp;
        struct page *page;

        BUG_ON(!buffer_async_write(bh));

        page = bh->b_page;
        if (uptodate) {
                set_buffer_uptodate(bh);
        } else {
                if (!quiet_error(bh)) {
                        buffer_io_error(bh);
                        printk(KERN_WARNING "lost page write due to "
                                        "I/O error on %s\n",
                               bdevname(bh->b_bdev, b));
                }
                set_bit(AS_EIO, &page->mapping->flags);
                set_buffer_write_io_error(bh);
                clear_buffer_uptodate(bh);
                SetPageError(page);
        }

        first = page_buffers(page);
        local_irq_save(flags);
        bit_spin_lock(BH_Uptodate_Lock, &first->b_state);

        clear_buffer_async_write(bh);
        unlock_buffer(bh);
        tmp = bh->b_this_page;
        while (tmp != bh) {
                if (buffer_async_write(tmp)) {
                        BUG_ON(!buffer_locked(tmp));
                        goto still_busy;
                }
                tmp = tmp->b_this_page;
        }
        bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
        local_irq_restore(flags);
        end_page_writeback(page);
        return;

still_busy:
        bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
        local_irq_restore(flags);
        return;
}
EXPORT_SYMBOL(end_buffer_async_write);

/*
 * If a page's buffers are under async read-in (end_buffer_async_read
 * completion) then there is a possibility that another thread of
 * control could lock one of the buffers after it has completed
 * but while some of the other buffers have not completed.  This
 * locked buffer would confuse end_buffer_async_read() into not unlocking
 * the page.  So the absence of BH_Async_Read tells end_buffer_async_read()
 * that this buffer is not under async I/O.
 *
 * The page comes unlocked when it has no locked buffer_async buffers
 * left.
 *
 * PageLocked prevents anyone from starting new async I/O against any of
 * the buffers.
 *
 * PageWriteback is used to prevent simultaneous writeout of the same
 * page.
 *
 * PageLocked prevents anyone from starting writeback of a page which is
 * under read I/O (PageWriteback is only ever set against a locked page).
 */
static void mark_buffer_async_read(struct buffer_head *bh)
{
        bh->b_end_io = end_buffer_async_read;
        set_buffer_async_read(bh);
}

static void mark_buffer_async_write_endio(struct buffer_head *bh,
                                          bh_end_io_t *handler)
{
        bh->b_end_io = handler;
        set_buffer_async_write(bh);
}

void mark_buffer_async_write(struct buffer_head *bh)
{
        mark_buffer_async_write_endio(bh, end_buffer_async_write);
}
EXPORT_SYMBOL(mark_buffer_async_write);


/*
 * fs/buffer.c contains helper functions for buffer-backed address space's
 * fsync functions.  A common requirement for buffer-based filesystems is
 * that certain data from the backing blockdev needs to be written out for
 * a successful fsync().  For example, ext2 indirect blocks need to be
 * written back and waited upon before fsync() returns.
 *
 * The functions mark_buffer_inode_dirty(), fsync_inode_buffers(),
 * inode_has_buffers() and invalidate_inode_buffers() are provided for the
 * management of a list of dependent buffers at ->i_mapping->private_list.
 *
 * Locking is a little subtle: try_to_free_buffers() will remove buffers
 * from their controlling inode's queue when they are being freed.  But
 * try_to_free_buffers() will be operating against the *blockdev* mapping
 * at the time, not against the S_ISREG file which depends on those buffers.
 * So the locking for private_list is via the private_lock in the address_space
 * which backs the buffers.  Which is different from the address_space
 * against which the buffers are listed.  So for a particular address_space,
 * mapping->private_lock does *not* protect mapping->private_list!  In fact,
 * mapping->private_list will always be protected by the backing blockdev's
 * ->private_lock.
 *
 * Which introduces a requirement: all buffers on an address_space's
 * ->private_list must be from the same address_space: the blockdev's.
 *
 * address_spaces which do not place buffers at ->private_list via these
 * utility functions are free to use private_lock and private_list for
 * whatever they want.  The only requirement is that list_empty(private_list)
 * be true at clear_inode() time.
 *
 * FIXME: clear_inode should not call invalidate_inode_buffers().  The
 * filesystems should do that.  invalidate_inode_buffers() should just go
 * BUG_ON(!list_empty).
 *
 * FIXME: mark_buffer_dirty_inode() is a data-plane operation.  It should
 * take an address_space, not an inode.  And it should be called
 * mark_buffer_dirty_fsync() to clearly define why those buffers are being
 * queued up.
 *
 * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
 * list if it is already on a list.  Because if the buffer is on a list,
 * it *must* already be on the right one.  If not, the filesystem is being
 * silly.  This will save a ton of locking.  But first we have to ensure
 * that buffers are taken *off* the old inode's list when they are freed
 * (presumably in truncate).  That requires careful auditing of all
 * filesystems (do it inside bforget()).  It could also be done by bringing
 * b_inode back.
 */
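
/*
 * Usage sketch (hypothetical filesystem code): the helpers described
 * above are used in pairs.  When dependent metadata such as an indirect
 * block is modified:
 *
 *      mark_buffer_dirty_inode(bh, inode);
 *
 * and later, from the filesystem's fsync method:
 *
 *      err = sync_mapping_buffers(inode->i_mapping);
 *
 * which writes out and waits upon everything queued on
 * ->i_mapping->private_list, under the backing blockdev's private_lock.
 */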

/*
 * The buffer's backing address_space's private_lock must be held
 */
static void __remove_assoc_queue(struct buffer_head *bh)
{
        list_del_init(&bh->b_assoc_buffers);
        WARN_ON(!bh->b_assoc_map);
        if (buffer_write_io_error(bh))
                set_bit(AS_EIO, &bh->b_assoc_map->flags);
        bh->b_assoc_map = NULL;
}

int inode_has_buffers(struct inode *inode)
{
        return !list_empty(&inode->i_data.private_list);
}

/*
 * osync is designed to support O_SYNC I/O.  It waits synchronously for
 * all already-submitted I/O to complete, but does not queue any new
 * writes to the disk.
 *
 * To do O_SYNC writes, just queue the buffer writes with ll_rw_block as
 * you dirty the buffers, and then use osync_inode_buffers to wait for
 * completion.  Any other dirty buffers which are not yet queued for
 * write will not be flushed to disk by the osync.
 */
static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
{
        struct buffer_head *bh;
        struct list_head *p;
        int err = 0;

        spin_lock(lock);
repeat:
        list_for_each_prev(p, list) {
                bh = BH_ENTRY(p);
                if (buffer_locked(bh)) {
                        get_bh(bh);
                        spin_unlock(lock);
                        wait_on_buffer(bh);
                        if (!buffer_uptodate(bh))
                                err = -EIO;
                        brelse(bh);
                        spin_lock(lock);
                        goto repeat;
                }
        }
        spin_unlock(lock);
        return err;
}

static void do_thaw_one(struct super_block *sb, void *unused)
{
        char b[BDEVNAME_SIZE];
        while (sb->s_bdev && !thaw_bdev(sb->s_bdev, sb))
                printk(KERN_WARNING "Emergency Thaw on %s\n",
                       bdevname(sb->s_bdev, b));
}

static void do_thaw_all(struct work_struct *work)
{
        iterate_supers(do_thaw_one, NULL);
        kfree(work);
        printk(KERN_WARNING "Emergency Thaw complete\n");
}

/**
 * emergency_thaw_all -- forcibly thaw every frozen filesystem
 *
 * Used for emergency unfreeze of all filesystems via SysRq
 */
void emergency_thaw_all(void)
{
        struct work_struct *work;

        work = kmalloc(sizeof(*work), GFP_ATOMIC);
        if (work) {
                INIT_WORK(work, do_thaw_all);
                schedule_work(work);
        }
}

/**
 * sync_mapping_buffers - write out & wait upon a mapping's "associated" buffers
 * @mapping: the mapping which wants those buffers written
 *
 * Starts I/O against the buffers at mapping->private_list, and waits upon
 * that I/O.
 *
 * Basically, this is a convenience function for fsync().
 * @mapping is a file or directory which needs those buffers to be written for
 * a successful fsync().
 */
int sync_mapping_buffers(struct address_space *mapping)
{
        struct address_space *buffer_mapping = mapping->assoc_mapping;

        if (buffer_mapping == NULL || list_empty(&mapping->private_list))
                return 0;

        return fsync_buffers_list(&buffer_mapping->private_lock,
                                        &mapping->private_list);
}
EXPORT_SYMBOL(sync_mapping_buffers);

/*
 * Called when we've recently written block `bblock', and it is known that
 * `bblock' was for a buffer_boundary() buffer.  This means that the block at
 * `bblock + 1' is probably a dirty indirect block.  Hunt it down and, if it's
 * dirty, schedule it for IO.  So that indirects merge nicely with their data.
 */
void write_boundary_block(struct block_device *bdev,
                        sector_t bblock, unsigned blocksize)
{
        struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
        if (bh) {
                if (buffer_dirty(bh))
                        ll_rw_block(WRITE, 1, &bh);
                put_bh(bh);
        }
}

void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
{
        struct address_space *mapping = inode->i_mapping;
        struct address_space *buffer_mapping = bh->b_page->mapping;

        mark_buffer_dirty(bh);
        if (!mapping->assoc_mapping) {
                mapping->assoc_mapping = buffer_mapping;
        } else {
                BUG_ON(mapping->assoc_mapping != buffer_mapping);
        }
        if (!bh->b_assoc_map) {
                spin_lock(&buffer_mapping->private_lock);
                list_move_tail(&bh->b_assoc_buffers,
                                &mapping->private_list);
                bh->b_assoc_map = mapping;
                spin_unlock(&buffer_mapping->private_lock);
        }
}
EXPORT_SYMBOL(mark_buffer_dirty_inode);

/*
 * Mark the page dirty, and set it dirty in the radix tree, and mark the inode
 * dirty.
 *
 * If warn is true, then emit a warning if the page is not uptodate and has
 * not been truncated.
 */
static void __set_page_dirty(struct page *page,
                struct address_space *mapping, int warn)
{
        spin_lock_irq(&mapping->tree_lock);
        if (page->mapping) {    /* Race with truncate? */
                WARN_ON_ONCE(warn && !PageUptodate(page));
                account_page_dirtied(page, mapping);
                radix_tree_tag_set(&mapping->page_tree,
                                page_index(page), PAGECACHE_TAG_DIRTY);
        }
        spin_unlock_irq(&mapping->tree_lock);
        __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
}

/*
 * Add a page to the dirty page list.
 *
 * It is a sad fact of life that this function is called from several places
 * deeply under spinlocking.  It may not sleep.
 *
 * If the page has buffers, the uptodate buffers are set dirty, to preserve
 * dirty-state coherency between the page and the buffers.  If the page does
 * not have buffers then when they are later attached they will all be set
 * dirty.
 *
 * The buffers are dirtied before the page is dirtied.  There's a small race
 * window in which a writepage caller may see the page cleanness but not the
 * buffer dirtiness.  That's fine.  If this code were to set the page dirty
 * before the buffers, a concurrent writepage caller could clear the page dirty
 * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
 * page on the dirty page list.
 *
 * We use private_lock to lock against try_to_free_buffers while using the
 * page's buffer list.  Also use this to protect against clean buffers being
 * added to the page after it was set dirty.
 *
 * FIXME: may need to call ->reservepage here as well.  That's rather up to the
 * address_space though.
 */
int __set_page_dirty_buffers(struct page *page)
{
        int newly_dirty;
        struct address_space *mapping = page_mapping(page);

        if (unlikely(!mapping))
                return !TestSetPageDirty(page);

        spin_lock(&mapping->private_lock);
        if (page_has_buffers(page)) {
                struct buffer_head *head = page_buffers(page);
                struct buffer_head *bh = head;

                do {
                        set_buffer_dirty(bh);
                        bh = bh->b_this_page;
                } while (bh != head);
        }
        newly_dirty = !TestSetPageDirty(page);
        spin_unlock(&mapping->private_lock);

        if (newly_dirty)
                __set_page_dirty(page, mapping, 1);
        return newly_dirty;
}
EXPORT_SYMBOL(__set_page_dirty_buffers);
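
/*
 * Usage sketch (ex_aops is a hypothetical name): a buffer-backed
 * filesystem may publish this helper directly in its
 * address_space_operations:
 *
 *      static const struct address_space_operations ex_aops = {
 *              .set_page_dirty = __set_page_dirty_buffers,
 *              ...
 *      };
 *
 * It is also what set_page_dirty() falls back to for block-backed
 * mappings whose aops leave ->set_page_dirty NULL.
 */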

/*
 * Write out and wait upon a list of buffers.
 *
 * We have conflicting pressures: we want to make sure that all
 * initially dirty buffers get waited on, but that any subsequently
 * dirtied buffers don't.  After all, we don't want fsync to last
 * forever if somebody is actively writing to the file.
 *
 * Do this in two main stages: first we copy dirty buffers to a
 * temporary inode list, queueing the writes as we go.  Then we clean
 * up, waiting for those writes to complete.
 *
 * During this second stage, any subsequent updates to the file may end
 * up refiling the buffer on the original inode's dirty list again, so
 * there is a chance we will end up with a buffer queued for write but
 * not yet completed on that list.  So, as a final cleanup we go through
 * the osync code to catch these locked, dirty buffers without requeuing
 * any newly dirty buffers for write.
 */
static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
{
        struct buffer_head *bh;
        struct list_head tmp;
        struct address_space *mapping, *prev_mapping = NULL;
        int err = 0, err2;

        INIT_LIST_HEAD(&tmp);

        spin_lock(lock);
        while (!list_empty(list)) {
                bh = BH_ENTRY(list->next);
                mapping = bh->b_assoc_map;
                __remove_assoc_queue(bh);
                /* Avoid race with mark_buffer_dirty_inode() which does
                 * a lockless check and we rely on seeing the dirty bit */
                smp_mb();
                if (buffer_dirty(bh) || buffer_locked(bh)) {
                        list_add(&bh->b_assoc_buffers, &tmp);
                        bh->b_assoc_map = mapping;
                        if (buffer_dirty(bh)) {
                                get_bh(bh);
                                spin_unlock(lock);
                                /*
                                 * Ensure any pending I/O completes so that
                                 * ll_rw_block() actually writes the current
                                 * contents - it is a noop if I/O is still in
                                 * flight on potentially older contents.
                                 */
                                ll_rw_block(SWRITE_SYNC_PLUG, 1, &bh);

                                /*
                                 * Kick off IO for the previous mapping. Note
                                 * that we will not run the very last mapping,
                                 * wait_on_buffer() will do that for us
                                 * through sync_buffer().
                                 */
                                if (prev_mapping && prev_mapping != mapping)
                                        blk_run_address_space(prev_mapping);
                                prev_mapping = mapping;

                                brelse(bh);
                                spin_lock(lock);
                        }
                }
        }

        while (!list_empty(&tmp)) {
                bh = BH_ENTRY(tmp.prev);
                get_bh(bh);
                mapping = bh->b_assoc_map;
                __remove_assoc_queue(bh);
                /* Avoid race with mark_buffer_dirty_inode() which does
                 * a lockless check and we rely on seeing the dirty bit */
                smp_mb();
                if (buffer_dirty(bh)) {
                        list_add(&bh->b_assoc_buffers,
                                 &mapping->private_list);
                        bh->b_assoc_map = mapping;
                }
                spin_unlock(lock);
                wait_on_buffer(bh);
                if (!buffer_uptodate(bh))
                        err = -EIO;
                brelse(bh);
                spin_lock(lock);
        }

        spin_unlock(lock);
        err2 = osync_buffers_list(lock, list);
        if (err)
                return err;
        else
                return err2;
}

/*
 * Invalidate any and all dirty buffers on a given inode.  We are
 * probably unmounting the fs, but that doesn't mean we have already
 * done a sync().  Just drop the buffers from the inode list.
 *
 * NOTE: we take the inode's blockdev's mapping's private_lock.  Which
 * assumes that all the buffers are against the blockdev.  Not true
 * for reiserfs.
 */
void invalidate_inode_buffers(struct inode *inode)
{
        if (inode_has_buffers(inode)) {
                struct address_space *mapping = &inode->i_data;
                struct list_head *list = &mapping->private_list;
                struct address_space *buffer_mapping = mapping->assoc_mapping;

                spin_lock(&buffer_mapping->private_lock);
                while (!list_empty(list))
                        __remove_assoc_queue(BH_ENTRY(list->next));
                spin_unlock(&buffer_mapping->private_lock);
        }
}
EXPORT_SYMBOL(invalidate_inode_buffers);

/*
 * Remove any clean buffers from the inode's buffer list.  This is called
 * when we're trying to free the inode itself.  Those buffers can pin it.
 *
 * Returns true if all buffers were removed.
 */
int remove_inode_buffers(struct inode *inode)
{
        int ret = 1;

        if (inode_has_buffers(inode)) {
                struct address_space *mapping = &inode->i_data;
                struct list_head *list = &mapping->private_list;
                struct address_space *buffer_mapping = mapping->assoc_mapping;

                spin_lock(&buffer_mapping->private_lock);
                while (!list_empty(list)) {
                        struct buffer_head *bh = BH_ENTRY(list->next);
                        if (buffer_dirty(bh)) {
                                ret = 0;
                                break;
                        }
                        __remove_assoc_queue(bh);
                }
                spin_unlock(&buffer_mapping->private_lock);
        }
        return ret;
}

/*
 * Create the appropriate buffers when given a page for the data area and
 * the size of each buffer.  Use the bh->b_this_page linked list to
 * follow the buffers created.  Return NULL if unable to create more
 * buffers.
 *
 * The retry flag is used to differentiate async IO (paging, swapping),
 * which may not fail, from ordinary buffer allocations.
 */
struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
                int retry)
{
        struct buffer_head *bh, *head;
        long offset;

try_again:
        head = NULL;
        offset = PAGE_SIZE;
        while ((offset -= size) >= 0) {
                bh = alloc_buffer_head(GFP_NOFS);
                if (!bh)
                        goto no_grow;

                bh->b_bdev = NULL;
                bh->b_this_page = head;
                bh->b_blocknr = -1;
                head = bh;

                bh->b_state = 0;
                atomic_set(&bh->b_count, 0);
                bh->b_private = NULL;
                bh->b_size = size;

                /* Link the buffer to its page */
                set_bh_page(bh, page, offset);

                init_buffer(bh, NULL, NULL);
        }
        return head;
/*
 * In case anything failed, we just free everything we got.
 */
no_grow:
        if (head) {
                do {
                        bh = head;
                        head = head->b_this_page;
                        free_buffer_head(bh);
                } while (head);
        }

        /*
         * Return failure for non-async IO requests.  Async IO requests
         * are not allowed to fail, so we have to wait until buffer heads
         * become available.  But we don't want tasks sleeping with
         * partially complete buffers, so all were released above.
         */
        if (!retry)
                return NULL;

        /* We're _really_ low on memory. Now we just
         * wait for old buffer heads to become free due to
         * finishing IO.  Since this is an async request and
         * the reserve list is empty, we're sure there are
         * async buffer heads in use.
         */
        free_more_memory();
        goto try_again;
}
EXPORT_SYMBOL_GPL(alloc_page_buffers);

static inline void
link_dev_buffers(struct page *page, struct buffer_head *head)
{
        struct buffer_head *bh, *tail;

        bh = head;
        do {
                tail = bh;
                bh = bh->b_this_page;
        } while (bh);
        tail->b_this_page = head;
        attach_page_buffers(page, head);
}

/*
 * Initialise the state of a blockdev page's buffers.
 */
static void
init_page_buffers(struct page *page, struct block_device *bdev,
                        sector_t block, int size)
{
        struct buffer_head *head = page_buffers(page);
        struct buffer_head *bh = head;
        int uptodate = PageUptodate(page);

        do {
                if (!buffer_mapped(bh)) {
                        init_buffer(bh, NULL, NULL);
                        bh->b_bdev = bdev;
                        bh->b_blocknr = block;
                        if (uptodate)
                                set_buffer_uptodate(bh);
                        set_buffer_mapped(bh);
                }
                block++;
                bh = bh->b_this_page;
        } while (bh != head);
}

/*
 * Create the page-cache page that contains the requested block.
 *
 * This is used purely for blockdev mappings.
 */
static struct page *
grow_dev_page(struct block_device *bdev, sector_t block,
                pgoff_t index, int size)
{
        struct inode *inode = bdev->bd_inode;
        struct page *page;
        struct buffer_head *bh;

        page = find_or_create_page(inode->i_mapping, index,
                (mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS)|__GFP_MOVABLE);
        if (!page)
                return NULL;

        BUG_ON(!PageLocked(page));

        if (page_has_buffers(page)) {
                bh = page_buffers(page);
                if (bh->b_size == size) {
                        init_page_buffers(page, bdev, block, size);
                        return page;
                }
                if (!try_to_free_buffers(page))
                        goto failed;
        }

        /*
         * Allocate some buffers for this page
         */
        bh = alloc_page_buffers(page, size, 0);
        if (!bh)
                goto failed;

        /*
         * Link the page to the buffers and initialise them.  Take the
         * lock to be atomic wrt __find_get_block(), which does not
         * run under the page lock.
         */
        spin_lock(&inode->i_mapping->private_lock);
        link_dev_buffers(page, bh);
        init_page_buffers(page, bdev, block, size);
        spin_unlock(&inode->i_mapping->private_lock);
        return page;

failed:
        BUG();
        unlock_page(page);
        page_cache_release(page);
        return NULL;
}

/*
 * Create buffers for the specified block device block's page.  If
 * that page was dirty, the buffers are set dirty also.
 */
static int
grow_buffers(struct block_device *bdev, sector_t block, int size)
{
        struct page *page;
        pgoff_t index;
        int sizebits;

        sizebits = -1;
        do {
                sizebits++;
        } while ((size << sizebits) < PAGE_SIZE);

        index = block >> sizebits;

        /*
         * Check for a block which wants to lie outside our maximum possible
         * pagecache index.  (this comparison is done using sector_t types).
         */
        if (unlikely(index != block >> sizebits)) {
                char b[BDEVNAME_SIZE];

                printk(KERN_ERR "%s: requested out-of-range block %llu for "
                        "device %s\n",
                        __func__, (unsigned long long)block,
                        bdevname(bdev, b));
                return -EIO;
        }
        block = index << sizebits;
        /* Create a page with the proper size buffers.. */
        page = grow_dev_page(bdev, block, index, size);
        if (!page)
                return 0;
        unlock_page(page);
        page_cache_release(page);
        return 1;
}
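
/*
 * Worked example of the index arithmetic above (illustrative numbers):
 * with PAGE_SIZE == 4096 and size == 512 the loop terminates with
 * sizebits == 3, since 512 << 3 == 4096.  Eight blocks then share each
 * page:
 *
 *      index = block >> sizebits;      block 21 -> page 2
 *      block = index << sizebits;      first block of page 2 is 16
 *
 * so grow_dev_page() attaches eight 512-byte buffers covering blocks
 * 16..23.
 */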

static struct buffer_head *
__getblk_slow(struct block_device *bdev, sector_t block, int size)
{
        /* Size must be multiple of hard sectorsize */
        if (unlikely(size & (bdev_logical_block_size(bdev)-1) ||
                        (size < 512 || size > PAGE_SIZE))) {
                printk(KERN_ERR "getblk(): invalid block size %d requested\n",
                                        size);
                printk(KERN_ERR "logical block size: %d\n",
                                        bdev_logical_block_size(bdev));

                dump_stack();
                return NULL;
        }

        for (;;) {
                struct buffer_head * bh;
                int ret;

                bh = __find_get_block(bdev, block, size);
                if (bh)
                        return bh;

                ret = grow_buffers(bdev, block, size);
                if (ret < 0)
                        return NULL;
                if (ret == 0)
                        free_more_memory();
        }
}

/*
 * The relationship between dirty buffers and dirty pages:
 *
 * Whenever a page has any dirty buffers, the page's dirty bit is set, and
 * the page is tagged dirty in its radix tree.
 *
 * At all times, the dirtiness of the buffers represents the dirtiness of
 * subsections of the page.  If the page has buffers, the page dirty bit is
 * merely a hint about the true dirty state.
 *
 * When a page is set dirty in its entirety, all its buffers are marked dirty
 * (if the page has buffers).
 *
 * When a buffer is marked dirty, its page is dirtied, but the page's other
 * buffers are not.
 *
 * Also.  When blockdev buffers are explicitly read with bread(), they
 * individually become uptodate.  But their backing page remains not
 * uptodate - even if all of its buffers are uptodate.  A subsequent
 * block_read_full_page() against that page will discover all the uptodate
 * buffers, will set the page uptodate and will perform no I/O.
 */

/**
 * mark_buffer_dirty - mark a buffer_head as needing writeout
 * @bh: the buffer_head to mark dirty
 *
 * mark_buffer_dirty() will set the dirty bit against the buffer, then set its
 * backing page dirty, then tag the page as dirty in its address_space's radix
 * tree and then attach the address_space's inode to its superblock's dirty
 * inode list.
 *
 * mark_buffer_dirty() is atomic.  It takes bh->b_page->mapping->private_lock,
 * mapping->tree_lock and the global inode_lock.
 */
void mark_buffer_dirty(struct buffer_head *bh)
{
        WARN_ON_ONCE(!buffer_uptodate(bh));

        /*
         * Very *carefully* optimize the it-is-already-dirty case.
         *
         * Don't let the final "is it dirty" escape to before we
         * perhaps modified the buffer.
         */
        if (buffer_dirty(bh)) {
                smp_mb();
                if (buffer_dirty(bh))
                        return;
        }

        if (!test_set_buffer_dirty(bh)) {
                struct page *page = bh->b_page;
                if (!TestSetPageDirty(page)) {
                        struct address_space *mapping = page_mapping(page);
                        if (mapping)
                                __set_page_dirty(page, mapping, 0);
                }
        }
}
EXPORT_SYMBOL(mark_buffer_dirty);
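
/*
 * Usage sketch (hypothetical offset/data/len): the usual
 * modify-and-dirty sequence for metadata:
 *
 *      bh = sb_bread(sb, block);
 *      if (!bh)
 *              return -EIO;
 *      memcpy(bh->b_data + offset, data, len);
 *      mark_buffer_dirty(bh);
 *      brelse(bh);
 *
 * If the update must reach disk before the caller proceeds, follow
 * mark_buffer_dirty() with sync_dirty_buffer(bh) before the brelse().
 */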

/*
 * Decrement a buffer_head's reference count.  If all buffers against a page
 * have zero reference count, are clean and unlocked, and if the page is clean
 * and unlocked then try_to_free_buffers() may strip the buffers from the page
 * in preparation for freeing it (sometimes, rarely, buffers are removed from
 * a page but it ends up not being freed, and buffers may later be reattached).
 */
void __brelse(struct buffer_head * buf)
{
        if (atomic_read(&buf->b_count)) {
                put_bh(buf);
                return;
        }
        WARN(1, KERN_ERR "VFS: brelse: Trying to free free buffer\n");
}
EXPORT_SYMBOL(__brelse);

/*
 * bforget() is like brelse(), except it discards any
 * potentially dirty data.
 */
void __bforget(struct buffer_head *bh)
{
        clear_buffer_dirty(bh);
        if (bh->b_assoc_map) {
                struct address_space *buffer_mapping = bh->b_page->mapping;

                spin_lock(&buffer_mapping->private_lock);
                list_del_init(&bh->b_assoc_buffers);
                bh->b_assoc_map = NULL;
                spin_unlock(&buffer_mapping->private_lock);
        }
        __brelse(bh);
}
EXPORT_SYMBOL(__bforget);

static struct buffer_head *__bread_slow(struct buffer_head *bh)
{
        lock_buffer(bh);
        if (buffer_uptodate(bh)) {
                unlock_buffer(bh);
                return bh;
        } else {
                get_bh(bh);
                bh->b_end_io = end_buffer_read_sync;
                submit_bh(READ, bh);
                wait_on_buffer(bh);
                if (buffer_uptodate(bh))
                        return bh;
        }
        brelse(bh);
        return NULL;
}

/*
 * Per-cpu buffer LRU implementation.  To reduce the cost of __find_get_block().
 * The bhs[] array is sorted - newest buffer is at bhs[0].  Buffers have their
 * refcount elevated by one when they're in an LRU.  A buffer can only appear
 * once in a particular CPU's LRU.  A single buffer can be present in multiple
 * CPU's LRUs at the same time.
 *
 * This is a transparent caching front-end to sb_bread(), sb_getblk() and
 * sb_find_get_block().
 *
 * The LRUs themselves only need locking against invalidate_bh_lrus.  We use
 * a local interrupt disable for that.
 */

#define BH_LRU_SIZE     8

struct bh_lru {
        struct buffer_head *bhs[BH_LRU_SIZE];
};

static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};

#ifdef CONFIG_SMP
#define bh_lru_lock()   local_irq_disable()
#define bh_lru_unlock() local_irq_enable()
#else
#define bh_lru_lock()   preempt_disable()
#define bh_lru_unlock() preempt_enable()
#endif

static inline void check_irqs_on(void)
{
#ifdef irqs_disabled
        BUG_ON(irqs_disabled());
#endif
}

/*
 * The LRU management algorithm is dopey-but-simple.  Sorry.
 */
static void bh_lru_install(struct buffer_head *bh)
{
        struct buffer_head *evictee = NULL;
        struct bh_lru *lru;

        check_irqs_on();
        bh_lru_lock();
        lru = &__get_cpu_var(bh_lrus);
        if (lru->bhs[0] != bh) {
                struct buffer_head *bhs[BH_LRU_SIZE];
                int in;
                int out = 0;

                get_bh(bh);
                bhs[out++] = bh;
                for (in = 0; in < BH_LRU_SIZE; in++) {
                        struct buffer_head *bh2 = lru->bhs[in];

                        if (bh2 == bh) {
                                __brelse(bh2);
                        } else {
                                if (out >= BH_LRU_SIZE) {
                                        BUG_ON(evictee != NULL);
                                        evictee = bh2;
                                } else {
                                        bhs[out++] = bh2;
                                }
                        }
                }
                while (out < BH_LRU_SIZE)
                        bhs[out++] = NULL;
                memcpy(lru->bhs, bhs, sizeof(bhs));
        }
        bh_lru_unlock();

        if (evictee)
                __brelse(evictee);
}

/*
 * Look up the bh in this cpu's LRU.  If it's there, move it to the head.
 */
static struct buffer_head *
lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
{
        struct buffer_head *ret = NULL;
        struct bh_lru *lru;
        unsigned int i;

        check_irqs_on();
        bh_lru_lock();
        lru = &__get_cpu_var(bh_lrus);
        for (i = 0; i < BH_LRU_SIZE; i++) {
                struct buffer_head *bh = lru->bhs[i];

                if (bh && bh->b_bdev == bdev &&
                                bh->b_blocknr == block && bh->b_size == size) {
                        if (i) {
                                while (i) {
                                        lru->bhs[i] = lru->bhs[i - 1];
                                        i--;
                                }
                                lru->bhs[0] = bh;
                        }
                        get_bh(bh);
                        ret = bh;
                        break;
                }
        }
        bh_lru_unlock();
        return ret;
}

/*
 * Perform a pagecache lookup for the matching buffer.  If it's there, refresh
 * it in the LRU and mark it as accessed.  If it is not present then return
 * NULL
 */
struct buffer_head *
__find_get_block(struct block_device *bdev, sector_t block, unsigned size)
{
        struct buffer_head *bh = lookup_bh_lru(bdev, block, size);

        if (bh == NULL) {
                bh = __find_get_block_slow(bdev, block);
                if (bh)
                        bh_lru_install(bh);
        }
        if (bh)
                touch_buffer(bh);
        return bh;
}
EXPORT_SYMBOL(__find_get_block);

/*
 * __getblk will locate (and, if necessary, create) the buffer_head
 * which corresponds to the passed block_device, block and size. The
 * returned buffer has its reference count incremented.
 *
 * __getblk() cannot fail - it just keeps trying.  If you pass it an
 * illegal block number, __getblk() will happily return a buffer_head
 * which represents the non-existent block.  Very weird.
 *
 * __getblk() will lock up the machine if grow_dev_page's try_to_free_buffers()
 * attempt is failing.  FIXME, perhaps?
 */
struct buffer_head *
__getblk(struct block_device *bdev, sector_t block, unsigned size)
{
        struct buffer_head *bh = __find_get_block(bdev, block, size);

        might_sleep();
        if (bh == NULL)
                bh = __getblk_slow(bdev, block, size);
        return bh;
}
EXPORT_SYMBOL(__getblk);
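
/*
 * Usage sketch (illustrative): because __getblk() may return stale
 * contents for a block that is about to be overwritten in full, the
 * classic initialise-without-reading pattern is:
 *
 *      bh = __getblk(bdev, block, size);
 *      lock_buffer(bh);
 *      memset(bh->b_data, 0, bh->b_size);
 *      set_buffer_uptodate(bh);
 *      unlock_buffer(bh);
 *      mark_buffer_dirty(bh);
 *      brelse(bh);
 */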

/*
 * Do async read-ahead on a buffer..
 */
void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
{
        struct buffer_head *bh = __getblk(bdev, block, size);
        if (likely(bh)) {
                ll_rw_block(READA, 1, &bh);
                brelse(bh);
        }
}
EXPORT_SYMBOL(__breadahead);
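
/*
 * Usage sketch (illustrative): a filesystem expecting sequential access
 * can hint the next block while reading the current one:
 *
 *      __breadahead(bdev, block + 1, size);
 *      bh = __bread(bdev, block, size);
 *
 * The readahead I/O is fire-and-forget; only the __bread() is waited
 * upon.
 */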

/**
 *  __bread() - reads a specified block and returns the bh
 *  @bdev: the block_device to read from
 *  @block: number of block
 *  @size: size (in bytes) to read
 *
 *  Reads a specified block, and returns buffer head that contains it.
 *  It returns NULL if the block was unreadable.
 */
struct buffer_head *
__bread(struct block_device *bdev, sector_t block, unsigned size)
{
        struct buffer_head *bh = __getblk(bdev, block, size);

        if (likely(bh) && !buffer_uptodate(bh))
                bh = __bread_slow(bh);
        return bh;
}
EXPORT_SYMBOL(__bread);
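
/*
 * Usage sketch (hypothetical caller): unlike __getblk(), __bread() can
 * observe I/O failure, so its result must be checked:
 *
 *      bh = __bread(bdev, block, size);
 *      if (!bh)
 *              return -EIO;
 *      ... consume bh->b_data ...
 *      brelse(bh);
 */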

/*
 * invalidate_bh_lrus() is called rarely - but not only at unmount.
 * This doesn't race because it runs in each cpu either in irq
 * or with preempt disabled.
 */
static void invalidate_bh_lru(void *arg)
{
        struct bh_lru *b = &get_cpu_var(bh_lrus);
        int i;

        for (i = 0; i < BH_LRU_SIZE; i++) {
                brelse(b->bhs[i]);
                b->bhs[i] = NULL;
        }
        put_cpu_var(bh_lrus);
}

void invalidate_bh_lrus(void)
{
        on_each_cpu(invalidate_bh_lru, NULL, 1);
}
EXPORT_SYMBOL_GPL(invalidate_bh_lrus);

void set_bh_page(struct buffer_head *bh,
                struct page *page, unsigned long offset)
{
        bh->b_page = page;
        BUG_ON(offset >= PAGE_SIZE);
        if (PageHighMem(page))
                /*
                 * This catches illegal uses and preserves the offset:
                 */
                bh->b_data = (char *)(0 + offset);
        else
                bh->b_data = page_address(page) + offset;
}
EXPORT_SYMBOL(set_bh_page);

/*
 * Called when truncating a buffer on a page completely.
 */
static void discard_buffer(struct buffer_head * bh)
{
        lock_buffer(bh);
        clear_buffer_dirty(bh);
        bh->b_bdev = NULL;
        clear_buffer_mapped(bh);
        clear_buffer_req(bh);
        clear_buffer_new(bh);
        clear_buffer_delay(bh);
        clear_buffer_unwritten(bh);
        unlock_buffer(bh);
}

/**
 * block_invalidatepage - invalidate part or all of a buffer-backed page
 *
 * @page: the page which is affected
 * @offset: the index of the truncation point
 *
 * block_invalidatepage() is called when all or part of the page has become
 * invalidated by a truncate operation.
 *
 * block_invalidatepage() does not have to release all buffers, but it must
 * ensure that no dirty buffer is left outside @offset and that no I/O
 * is underway against any of the blocks which are outside the truncation
 * point.  Because the caller is about to free (and possibly reuse) those
 * blocks on-disk.
 */
1490 void block_invalidatepage(struct page *page, unsigned long offset)
1491 {
1492         struct buffer_head *head, *bh, *next;
1493         unsigned int curr_off = 0;
1494
1495         BUG_ON(!PageLocked(page));
1496         if (!page_has_buffers(page))
1497                 goto out;
1498
1499         head = page_buffers(page);
1500         bh = head;
1501         do {
1502                 unsigned int next_off = curr_off + bh->b_size;
1503                 next = bh->b_this_page;
1504
1505                 /*
1506                  * is this block fully invalidated?
1507                  */
1508                 if (offset <= curr_off)
1509                         discard_buffer(bh);
1510                 curr_off = next_off;
1511                 bh = next;
1512         } while (bh != head);
1513
1514         /*
1515          * We release buffers only if the entire page is being invalidated.
1516          * The get_block cached value has been unconditionally invalidated,
1517          * so real IO is not possible anymore.
1518          */
1519         if (offset == 0)
1520                 try_to_release_page(page, 0);
1521 out:
1522         return;
1523 }
1524 EXPORT_SYMBOL(block_invalidatepage);
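
/*
 * Wiring sketch for a hypothetical buffer-backed filesystem "myfs".  Note
 * that filesystems which leave ->invalidatepage NULL get this function
 * anyway, via the do_invalidatepage() fallback in mm/truncate.c.
 */
static const struct address_space_operations myfs_invalidate_aops = {
        .invalidatepage = block_invalidatepage,
};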
1525
1526 /*
1527  * We attach and possibly dirty the buffers atomically wrt
1528  * __set_page_dirty_buffers() via private_lock.  try_to_free_buffers
1529  * is already excluded via the page lock.
1530  */
1531 void create_empty_buffers(struct page *page,
1532                         unsigned long blocksize, unsigned long b_state)
1533 {
1534         struct buffer_head *bh, *head, *tail;
1535
1536         head = alloc_page_buffers(page, blocksize, 1);
1537         bh = head;
1538         do {
1539                 bh->b_state |= b_state;
1540                 tail = bh;
1541                 bh = bh->b_this_page;
1542         } while (bh);
1543         tail->b_this_page = head;
1544
1545         spin_lock(&page->mapping->private_lock);
1546         if (PageUptodate(page) || PageDirty(page)) {
1547                 bh = head;
1548                 do {
1549                         if (PageDirty(page))
1550                                 set_buffer_dirty(bh);
1551                         if (PageUptodate(page))
1552                                 set_buffer_uptodate(bh);
1553                         bh = bh->b_this_page;
1554                 } while (bh != head);
1555         }
1556         attach_page_buffers(page, head);
1557         spin_unlock(&page->mapping->private_lock);
1558 }
1559 EXPORT_SYMBOL(create_empty_buffers);
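
/*
 * The common call pattern (used by block_read_full_page() and
 * block_prepare_write() below): make sure the page has buffers before
 * walking them.
 */
static struct buffer_head *example_get_page_buffers(struct page *page,
                                                struct inode *inode)
{
        if (!page_has_buffers(page))
                create_empty_buffers(page, 1 << inode->i_blkbits, 0);
        return page_buffers(page);
}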
1560
1561 /*
1562  * We are taking a block for data and we don't want any output from any
1563  * buffer-cache aliases from the moment this function returns
1564  * until the moment when something explicitly marks the buffer
1565  * dirty (hopefully that will not happen until we free that block ;-)
1566  * We don't even need to mark it not-uptodate - nobody can expect
1567  * anything from a newly allocated buffer anyway. We used to use
1568  * unmap_buffer() for such invalidation, but that was wrong. We definitely
1569  * don't want to mark the alias unmapped, for example - it would confuse
1570  * anyone who might pick it with bread() afterwards...
1571  *
1572  * Also..  Note that bforget() doesn't lock the buffer.  So there can
1573  * be writeout I/O going on against recently-freed buffers.  We don't
1574  * wait on that I/O in bforget() - it's more efficient to wait on the I/O
1575  * only if we really need to.  That happens here.
1576  */
1577 void unmap_underlying_metadata(struct block_device *bdev, sector_t block)
1578 {
1579         struct buffer_head *old_bh;
1580
1581         might_sleep();
1582
1583         old_bh = __find_get_block_slow(bdev, block);
1584         if (old_bh) {
1585                 clear_buffer_dirty(old_bh);
1586                 wait_on_buffer(old_bh);
1587                 clear_buffer_req(old_bh);
1588                 __brelse(old_bh);
1589         }
1590 }
1591 EXPORT_SYMBOL(unmap_underlying_metadata);
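
/*
 * A condensed sketch of the caller pattern seen in
 * __block_write_full_page() and block_prepare_write() below: whenever
 * get_block() reports a freshly allocated block (buffer_new), any stale
 * alias of that block in the blockdev mapping must be shot down.
 */
static int example_map_new_block(struct inode *inode, sector_t block,
                        struct buffer_head *bh, get_block_t *get_block)
{
        int err = get_block(inode, block, bh, 1);       /* create = 1 */

        if (!err && buffer_new(bh))
                unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
        return err;
}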
1592
1593 /*
1594  * NOTE! All mapped/uptodate combinations are valid:
1595  *
1596  *      Mapped  Uptodate        Meaning
1597  *
1598  *      No      No              "unknown" - must do get_block()
1599  *      No      Yes             "hole" - zero-filled
1600  *      Yes     No              "allocated" - allocated on disk, not read in
1601  *      Yes     Yes             "valid" - allocated and up-to-date in memory.
1602  *
1603  * "Dirty" is valid only with the last case (mapped+uptodate).
1604  */
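
/*
 * A condensed sketch of how a reader acts on the table above; this is
 * essentially what block_read_full_page() does below:
 */
static void example_handle_states(struct inode *inode, struct page *page,
                        sector_t iblock, struct buffer_head *bh,
                        get_block_t *get_block)
{
        if (buffer_uptodate(bh))
                return;                                 /* "hole" or "valid" */
        if (!buffer_mapped(bh))
                get_block(inode, iblock, bh, 0);        /* "unknown" */
        if (!buffer_mapped(bh))
                zero_user(page, bh_offset(bh), bh->b_size); /* still a hole */
        else
                ll_rw_block(READ, 1, &bh);      /* "allocated": read it in */
}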
1605
1606 /*
1607  * While block_write_full_page is writing back the dirty buffers under
1608  * the page lock, whoever dirtied the buffers may decide to clean them
1609  * again at any time.  We handle that by only looking at the buffer
1610  * state inside lock_buffer().
1611  *
1612  * If block_write_full_page() is called for regular writeback
1613  * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
1614  * locked buffer.  This can only happen if someone has written the buffer
1615  * directly, with submit_bh().  At the address_space level PageWriteback
1616  * prevents this contention from occurring.
1617  *
1618  * If block_write_full_page() is called with wbc->sync_mode ==
1619  * WB_SYNC_ALL, the writes are posted using WRITE_SYNC_PLUG; this
1620  * causes the writes to be flagged as synchronous writes, but the
1621  * block device queue will NOT be unplugged, since usually many pages
1622  * will be pushed to the queue before the higher-level caller actually
1623  * waits for the writes to be completed.  The various wait functions,
1624  * such as wait_on_writeback_range() will ultimately call sync_page()
1625  * which will ultimately call blk_run_backing_dev(), which will end up
1626  * unplugging the device queue.
1627  */
1628 static int __block_write_full_page(struct inode *inode, struct page *page,
1629                         get_block_t *get_block, struct writeback_control *wbc,
1630                         bh_end_io_t *handler)
1631 {
1632         int err;
1633         sector_t block;
1634         sector_t last_block;
1635         struct buffer_head *bh, *head;
1636         const unsigned blocksize = 1 << inode->i_blkbits;
1637         int nr_underway = 0;
1638         int write_op = (wbc->sync_mode == WB_SYNC_ALL ?
1639                         WRITE_SYNC_PLUG : WRITE);
1640
1641         BUG_ON(!PageLocked(page));
1642
1643         last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
1644
1645         if (!page_has_buffers(page)) {
1646                 create_empty_buffers(page, blocksize,
1647                                         (1 << BH_Dirty)|(1 << BH_Uptodate));
1648         }
1649
1650         /*
1651          * Be very careful.  We have no exclusion from __set_page_dirty_buffers
1652          * here, and the (potentially unmapped) buffers may become dirty at
1653          * any time.  If a buffer becomes dirty here after we've inspected it
1654          * then we just miss that fact, and the page stays dirty.
1655          *
1656          * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
1657          * handle that here by just cleaning them.
1658          */
1659
1660         block = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
1661         head = page_buffers(page);
1662         bh = head;
1663
1664         /*
1665          * Get all the dirty buffers mapped to disk addresses and
1666          * handle any aliases from the underlying blockdev's mapping.
1667          */
1668         do {
1669                 if (block > last_block) {
1670                         /*
1671                          * mapped buffers outside i_size will occur, because
1672                          * this page can be outside i_size when there is a
1673                          * truncate in progress.
1674                          */
1675                         /*
1676                          * The buffer was zeroed by block_write_full_page()
1677                          */
1678                         clear_buffer_dirty(bh);
1679                         set_buffer_uptodate(bh);
1680                 } else if ((!buffer_mapped(bh) || buffer_delay(bh)) &&
1681                            buffer_dirty(bh)) {
1682                         WARN_ON(bh->b_size != blocksize);
1683                         err = get_block(inode, block, bh, 1);
1684                         if (err)
1685                                 goto recover;
1686                         clear_buffer_delay(bh);
1687                         if (buffer_new(bh)) {
1688                                 /* blockdev mappings never come here */
1689                                 clear_buffer_new(bh);
1690                                 unmap_underlying_metadata(bh->b_bdev,
1691                                                         bh->b_blocknr);
1692                         }
1693                 }
1694                 bh = bh->b_this_page;
1695                 block++;
1696         } while (bh != head);
1697
1698         do {
1699                 if (!buffer_mapped(bh))
1700                         continue;
1701                 /*
1702                  * If it's a fully non-blocking write attempt and we cannot
1703                  * lock the buffer then redirty the page.  Note that this can
1704                  * potentially cause a busy-wait loop from writeback threads
1705                  * and kswapd activity, but those code paths have their own
1706                  * higher-level throttling.
1707                  */
1708                 if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) {
1709                         lock_buffer(bh);
1710                 } else if (!trylock_buffer(bh)) {
1711                         redirty_page_for_writepage(wbc, page);
1712                         continue;
1713                 }
1714                 if (test_clear_buffer_dirty(bh)) {
1715                         mark_buffer_async_write_endio(bh, handler);
1716                 } else {
1717                         unlock_buffer(bh);
1718                 }
1719         } while ((bh = bh->b_this_page) != head);
1720
1721         /*
1722          * The page and its buffers are protected by PageWriteback(), so we can
1723          * drop the bh refcounts early.
1724          */
1725         BUG_ON(PageWriteback(page));
1726         set_page_writeback(page);
1727
1728         do {
1729                 struct buffer_head *next = bh->b_this_page;
1730                 if (buffer_async_write(bh)) {
1731                         submit_bh(write_op, bh);
1732                         nr_underway++;
1733                 }
1734                 bh = next;
1735         } while (bh != head);
1736         unlock_page(page);
1737
1738         err = 0;
1739 done:
1740         if (nr_underway == 0) {
1741                 /*
1742                  * The page was marked dirty, but the buffers were
1743                  * clean.  Someone wrote them back by hand with
1744                  * ll_rw_block/submit_bh.  A rare case.
1745                  */
1746                 end_page_writeback(page);
1747
1748                 /*
1749                  * The page and buffer_heads can be released at any time from
1750                  * here on.
1751                  */
1752         }
1753         return err;
1754
1755 recover:
1756         /*
1757          * ENOSPC, or some other error.  We may already have added some
1758          * blocks to the file, so we need to write these out to avoid
1759          * exposing stale data.
1760          * The page is currently locked and not marked for writeback
1761          */
1762         bh = head;
1763         /* Recovery: lock and submit the mapped buffers */
1764         do {
1765                 if (buffer_mapped(bh) && buffer_dirty(bh) &&
1766                     !buffer_delay(bh)) {
1767                         lock_buffer(bh);
1768                         mark_buffer_async_write_endio(bh, handler);
1769                 } else {
1770                         /*
1771                          * The buffer may have been set dirty during
1772                          * attachment to a dirty page.
1773                          */
1774                         clear_buffer_dirty(bh);
1775                 }
1776         } while ((bh = bh->b_this_page) != head);
1777         SetPageError(page);
1778         BUG_ON(PageWriteback(page));
1779         mapping_set_error(page->mapping, err);
1780         set_page_writeback(page);
1781         do {
1782                 struct buffer_head *next = bh->b_this_page;
1783                 if (buffer_async_write(bh)) {
1784                         clear_buffer_dirty(bh);
1785                         submit_bh(write_op, bh);
1786                         nr_underway++;
1787                 }
1788                 bh = next;
1789         } while (bh != head);
1790         unlock_page(page);
1791         goto done;
1792 }
1793
1794 /*
1795  * If a page has any new buffers, zero them out here, and mark them uptodate
1796  * and dirty so they'll be written out (in order to prevent uninitialised
1797  * block data from leaking), and clear the new bit.
1798  */
1799 void page_zero_new_buffers(struct page *page, unsigned from, unsigned to)
1800 {
1801         unsigned int block_start, block_end;
1802         struct buffer_head *head, *bh;
1803
1804         BUG_ON(!PageLocked(page));
1805         if (!page_has_buffers(page))
1806                 return;
1807
1808         bh = head = page_buffers(page);
1809         block_start = 0;
1810         do {
1811                 block_end = block_start + bh->b_size;
1812
1813                 if (buffer_new(bh)) {
1814                         if (block_end > from && block_start < to) {
1815                                 if (!PageUptodate(page)) {
1816                                         unsigned start, size;
1817
1818                                         start = max(from, block_start);
1819                                         size = min(to, block_end) - start;
1820
1821                                         zero_user(page, start, size);
1822                                         set_buffer_uptodate(bh);
1823                                 }
1824
1825                                 clear_buffer_new(bh);
1826                                 mark_buffer_dirty(bh);
1827                         }
1828                 }
1829
1830                 block_start = block_end;
1831                 bh = bh->b_this_page;
1832         } while (bh != head);
1833 }
1834 EXPORT_SYMBOL(page_zero_new_buffers);
1835
1836 int block_prepare_write(struct page *page, unsigned from, unsigned to,
1837                 get_block_t *get_block)
1838 {
1839         struct inode *inode = page->mapping->host;
1840         unsigned block_start, block_end;
1841         sector_t block;
1842         int err = 0;
1843         unsigned blocksize, bbits;
1844         struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
1845
1846         BUG_ON(!PageLocked(page));
1847         BUG_ON(from > PAGE_CACHE_SIZE);
1848         BUG_ON(to > PAGE_CACHE_SIZE);
1849         BUG_ON(from > to);
1850
1851         blocksize = 1 << inode->i_blkbits;
1852         if (!page_has_buffers(page))
1853                 create_empty_buffers(page, blocksize, 0);
1854         head = page_buffers(page);
1855
1856         bbits = inode->i_blkbits;
1857         block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
1858
1859         for(bh = head, block_start = 0; bh != head || !block_start;
1860             block++, block_start=block_end, bh = bh->b_this_page) {
1861                 block_end = block_start + blocksize;
1862                 if (block_end <= from || block_start >= to) {
1863                         if (PageUptodate(page)) {
1864                                 if (!buffer_uptodate(bh))
1865                                         set_buffer_uptodate(bh);
1866                         }
1867                         continue;
1868                 }
1869                 if (buffer_new(bh))
1870                         clear_buffer_new(bh);
1871                 if (!buffer_mapped(bh)) {
1872                         WARN_ON(bh->b_size != blocksize);
1873                         err = get_block(inode, block, bh, 1);
1874                         if (err)
1875                                 break;
1876                         if (buffer_new(bh)) {
1877                                 unmap_underlying_metadata(bh->b_bdev,
1878                                                         bh->b_blocknr);
1879                                 if (PageUptodate(page)) {
1880                                         clear_buffer_new(bh);
1881                                         set_buffer_uptodate(bh);
1882                                         mark_buffer_dirty(bh);
1883                                         continue;
1884                                 }
1885                                 if (block_end > to || block_start < from)
1886                                         zero_user_segments(page,
1887                                                 to, block_end,
1888                                                 block_start, from);
1889                                 continue;
1890                         }
1891                 }
1892                 if (PageUptodate(page)) {
1893                         if (!buffer_uptodate(bh))
1894                                 set_buffer_uptodate(bh);
1895                         continue; 
1896                 }
1897                 if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
1898                     !buffer_unwritten(bh) &&
1899                      (block_start < from || block_end > to)) {
1900                         ll_rw_block(READ, 1, &bh);
1901                         *wait_bh++=bh;
1902                 }
1903         }
1904         /*
1905          * If we issued read requests - let them complete.
1906          */
1907         while(wait_bh > wait) {
1908                 wait_on_buffer(*--wait_bh);
1909                 if (!buffer_uptodate(*wait_bh))
1910                         err = -EIO;
1911         }
1912         if (unlikely(err)) {
1913                 page_zero_new_buffers(page, from, to);
1914                 ClearPageUptodate(page);
1915         }
1916         return err;
1917 }
1918 EXPORT_SYMBOL(block_prepare_write);
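
/*
 * A sketch of the classic prepare/copy/commit sequence on a locked page,
 * assuming a hypothetical filesystem-supplied get_block_t "myfs_get_block"
 * (block_commit_write() is defined further down):
 */
static int example_write_range(struct page *page, unsigned from, unsigned to,
                        get_block_t *myfs_get_block)
{
        int err = block_prepare_write(page, from, to, myfs_get_block);

        if (!err) {
                /* copy the new data into the page between from and to here */
                err = block_commit_write(page, from, to);
        }
        return err;
}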
1919
1920 static int __block_commit_write(struct inode *inode, struct page *page,
1921                 unsigned from, unsigned to)
1922 {
1923         unsigned block_start, block_end;
1924         int partial = 0;
1925         unsigned blocksize;
1926         struct buffer_head *bh, *head;
1927
1928         blocksize = 1 << inode->i_blkbits;
1929
1930         for(bh = head = page_buffers(page), block_start = 0;
1931             bh != head || !block_start;
1932             block_start=block_end, bh = bh->b_this_page) {
1933                 block_end = block_start + blocksize;
1934                 if (block_end <= from || block_start >= to) {
1935                         if (!buffer_uptodate(bh))
1936                                 partial = 1;
1937                 } else {
1938                         set_buffer_uptodate(bh);
1939                         mark_buffer_dirty(bh);
1940                 }
1941                 clear_buffer_new(bh);
1942         }
1943
1944         /*
1945          * If this is a partial write which happened to make all buffers
1946          * uptodate then we can optimize away a bogus readpage() for
1947          * the next read(). Here we 'discover' whether the page went
1948          * uptodate as a result of this (potentially partial) write.
1949          */
1950         if (!partial)
1951                 SetPageUptodate(page);
1952         return 0;
1953 }
1954
1955 int __block_write_begin(struct page *page, loff_t pos, unsigned len,
1956                 get_block_t *get_block)
1957 {
1958         unsigned start = pos & (PAGE_CACHE_SIZE - 1);
1959
1960         return block_prepare_write(page, start, start + len, get_block);
1961 }
1962 EXPORT_SYMBOL(__block_write_begin);
1963
1964 /*
1965  * Filesystems implementing the new truncate sequence should use the
1966  * _newtrunc postfix variant which won't incorrectly call vmtruncate.
1967  * The filesystem needs to handle block truncation upon failure.
1968  */
1969 int block_write_begin_newtrunc(struct file *file, struct address_space *mapping,
1970                         loff_t pos, unsigned len, unsigned flags,
1971                         struct page **pagep, void **fsdata,
1972                         get_block_t *get_block)
1973 {
1974         pgoff_t index = pos >> PAGE_CACHE_SHIFT;
1975         struct page *page;
1976         int status;
1977
1978         page = grab_cache_page_write_begin(mapping, index, flags);
1979         if (!page)
1980                 return -ENOMEM;
1981
1982         status = __block_write_begin(page, pos, len, get_block);
1983         if (unlikely(status)) {
1984                 unlock_page(page);
1985                 page_cache_release(page);
1986                 page = NULL;
1987         }
1988
1989         *pagep = page;
1990         return status;
1991 }
1992 EXPORT_SYMBOL(block_write_begin_newtrunc);
1993
1994 /*
1995  * block_write_begin takes care of the basic task of block allocation and
1996  * bringing partial write blocks uptodate first.
1997  *
1998  * If *pagep is not NULL, then block_write_begin uses the locked page
1999  * at *pagep rather than allocating its own. In this case, the page will
2000  * not be unlocked or deallocated on failure.
2001  */
2002 int block_write_begin(struct file *file, struct address_space *mapping,
2003                         loff_t pos, unsigned len, unsigned flags,
2004                         struct page **pagep, void **fsdata,
2005                         get_block_t *get_block)
2006 {
2007         int ret;
2008
2009         ret = block_write_begin_newtrunc(file, mapping, pos, len, flags,
2010                                         pagep, fsdata, get_block);
2011
2012         /*
2013          * prepare_write() may have instantiated a few blocks
2014          * outside i_size.  Trim these off again. Don't need
2015          * i_size_read because we hold i_mutex.
2016          *
2017          * Filesystems which pass down their own page also cannot
2018          * call into vmtruncate here because it would lead to lock
2019          * inversion problems (*pagep is locked). This is a further
2020          * example of where the old truncate sequence is inadequate.
2021          */
2022         if (unlikely(ret) && *pagep == NULL) {
2023                 loff_t isize = mapping->host->i_size;
2024                 if (pos + len > isize)
2025                         vmtruncate(mapping->host, isize);
2026         }
2027
2028         return ret;
2029 }
2030 EXPORT_SYMBOL(block_write_begin);
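
/*
 * A sketch of a filesystem's ->write_begin built on this helper, in the
 * style of ext2.  "myfs_get_block" is a hypothetical fs-provided block
 * mapper, declared here only for the sake of the sketch:
 */
static int myfs_get_block(struct inode *, sector_t, struct buffer_head *, int);

static int myfs_write_begin(struct file *file, struct address_space *mapping,
                        loff_t pos, unsigned len, unsigned flags,
                        struct page **pagep, void **fsdata)
{
        *pagep = NULL;  /* let block_write_begin allocate the page */
        return block_write_begin(file, mapping, pos, len, flags,
                                pagep, fsdata, myfs_get_block);
}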
2031
2032 int block_write_end(struct file *file, struct address_space *mapping,
2033                         loff_t pos, unsigned len, unsigned copied,
2034                         struct page *page, void *fsdata)
2035 {
2036         struct inode *inode = mapping->host;
2037         unsigned start;
2038
2039         start = pos & (PAGE_CACHE_SIZE - 1);
2040
2041         if (unlikely(copied < len)) {
2042                 /*
2043                  * The buffers that were written will now be uptodate, so we
2044                  * don't have to worry about a readpage reading them and
2045                  * overwriting a partial write. However if we have encountered
2046                  * a short write and only partially written into a buffer, it
2047                  * will not be marked uptodate, so a readpage might come in and
2048                  * destroy our partial write.
2049                  *
2050                  * Do the simplest thing, and just treat any short write to a
2051                  * non uptodate page as a zero-length write, and force the
2052                  * caller to redo the whole thing.
2053                  */
2054                 if (!PageUptodate(page))
2055                         copied = 0;
2056
2057                 page_zero_new_buffers(page, start+copied, start+len);
2058         }
2059         flush_dcache_page(page);
2060
2061         /* This could be a short (even 0-length) commit */
2062         __block_commit_write(inode, page, start, start+copied);
2063
2064         return copied;
2065 }
2066 EXPORT_SYMBOL(block_write_end);
2067
2068 int generic_write_end(struct file *file, struct address_space *mapping,
2069                         loff_t pos, unsigned len, unsigned copied,
2070                         struct page *page, void *fsdata)
2071 {
2072         struct inode *inode = mapping->host;
2073         int i_size_changed = 0;
2074
2075         copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
2076
2077         /*
2078          * No need to use i_size_read() here, the i_size
2079          * cannot change under us because we hold i_mutex.
2080          *
2081          * But it's important to update i_size while still holding page lock:
2082          * page writeout could otherwise come in and zero beyond i_size.
2083          */
2084         if (pos+copied > inode->i_size) {
2085                 i_size_write(inode, pos+copied);
2086                 i_size_changed = 1;
2087         }
2088
2089         unlock_page(page);
2090         page_cache_release(page);
2091
2092         /*
2093          * Don't mark the inode dirty under page lock. First, it unnecessarily
2094          * makes the holding time of page lock longer. Second, it forces lock
2095          * ordering of page lock and transaction start for journaling
2096          * filesystems.
2097          */
2098         if (i_size_changed)
2099                 mark_inode_dirty(inode);
2100
2101         return copied;
2102 }
2103 EXPORT_SYMBOL(generic_write_end);
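
/*
 * ->write_end usually needs no filesystem wrapper at all; pairing it with
 * the write_begin sketch above gives a complete begin/end in the a_ops:
 */
static const struct address_space_operations myfs_write_aops = {
        .write_begin    = myfs_write_begin,
        .write_end      = generic_write_end,
};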
2104
2105 /*
2106  * block_is_partially_uptodate checks whether buffers within a page are
2107  * uptodate or not.
2108  *
2109  * Returns true if all buffers which correspond to the file portion
2110  * we want to read are uptodate.
2111  */
2112 int block_is_partially_uptodate(struct page *page, read_descriptor_t *desc,
2113                                         unsigned long from)
2114 {
2115         struct inode *inode = page->mapping->host;
2116         unsigned block_start, block_end, blocksize;
2117         unsigned to;
2118         struct buffer_head *bh, *head;
2119         int ret = 1;
2120
2121         if (!page_has_buffers(page))
2122                 return 0;
2123
2124         blocksize = 1 << inode->i_blkbits;
2125         to = min_t(unsigned, PAGE_CACHE_SIZE - from, desc->count);
2126         to = from + to;
2127         if (from < blocksize && to > PAGE_CACHE_SIZE - blocksize)
2128                 return 0;
2129
2130         head = page_buffers(page);
2131         bh = head;
2132         block_start = 0;
2133         do {
2134                 block_end = block_start + blocksize;
2135                 if (block_end > from && block_start < to) {
2136                         if (!buffer_uptodate(bh)) {
2137                                 ret = 0;
2138                                 break;
2139                         }
2140                         if (block_end >= to)
2141                                 break;
2142                 }
2143                 block_start = block_end;
2144                 bh = bh->b_this_page;
2145         } while (bh != head);
2146
2147         return ret;
2148 }
2149 EXPORT_SYMBOL(block_is_partially_uptodate);
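
/*
 * This helper is advertised directly in the a_ops; it lets a read of an
 * already-uptodate sub-page range proceed without waiting for the whole
 * page (sketch, continuing the hypothetical "myfs"):
 */
static const struct address_space_operations myfs_partial_aops = {
        .is_partially_uptodate  = block_is_partially_uptodate,
};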
2150
2151 /*
2152  * Generic "read page" function for block devices that have the normal
2153  * get_block functionality. This covers most block device filesystems.
2154  * Reads the page asynchronously --- the unlock_buffer() and
2155  * set/clear_buffer_uptodate() functions propagate buffer state into the
2156  * page struct once IO has completed.
2157  */
2158 int block_read_full_page(struct page *page, get_block_t *get_block)
2159 {
2160         struct inode *inode = page->mapping->host;
2161         sector_t iblock, lblock;
2162         struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
2163         unsigned int blocksize;
2164         int nr, i;
2165         int fully_mapped = 1;
2166
2167         BUG_ON(!PageLocked(page));
2168         blocksize = 1 << inode->i_blkbits;
2169         if (!page_has_buffers(page))
2170                 create_empty_buffers(page, blocksize, 0);
2171         head = page_buffers(page);
2172
2173         iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2174         lblock = (i_size_read(inode)+blocksize-1) >> inode->i_blkbits;
2175         bh = head;
2176         nr = 0;
2177         i = 0;
2178
2179         do {
2180                 if (buffer_uptodate(bh))
2181                         continue;
2182
2183                 if (!buffer_mapped(bh)) {
2184                         int err = 0;
2185
2186                         fully_mapped = 0;
2187                         if (iblock < lblock) {
2188                                 WARN_ON(bh->b_size != blocksize);
2189                                 err = get_block(inode, iblock, bh, 0);
2190                                 if (err)
2191                                         SetPageError(page);
2192                         }
2193                         if (!buffer_mapped(bh)) {
2194                                 zero_user(page, i * blocksize, blocksize);
2195                                 if (!err)
2196                                         set_buffer_uptodate(bh);
2197                                 continue;
2198                         }
2199                         /*
2200                          * get_block() might have updated the buffer
2201                          * synchronously
2202                          */
2203                         if (buffer_uptodate(bh))
2204                                 continue;
2205                 }
2206                 arr[nr++] = bh;
2207         } while (i++, iblock++, (bh = bh->b_this_page) != head);
2208
2209         if (fully_mapped)
2210                 SetPageMappedToDisk(page);
2211
2212         if (!nr) {
2213                 /*
2214                  * All buffers are uptodate - we can set the page uptodate
2215                  * as well. But not if get_block() returned an error.
2216                  */
2217                 if (!PageError(page))
2218                         SetPageUptodate(page);
2219                 unlock_page(page);
2220                 return 0;
2221         }
2222
2223         /* Stage two: lock the buffers */
2224         for (i = 0; i < nr; i++) {
2225                 bh = arr[i];
2226                 lock_buffer(bh);
2227                 mark_buffer_async_read(bh);
2228         }
2229
2230         /*
2231          * Stage 3: start the IO.  Check for uptodateness
2232          * inside the buffer lock in case another process reading
2233          * the underlying blockdev brought it uptodate (the sct fix).
2234          */
2235         for (i = 0; i < nr; i++) {
2236                 bh = arr[i];
2237                 if (buffer_uptodate(bh))
2238                         end_buffer_async_read(bh, 1);
2239                 else
2240                         submit_bh(READ, bh);
2241         }
2242         return 0;
2243 }
2244 EXPORT_SYMBOL(block_read_full_page);
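
/*
 * The usual ->readpage built on this function (ext2-style sketch with the
 * hypothetical "myfs_get_block"):
 */
static int myfs_readpage(struct file *file, struct page *page)
{
        return block_read_full_page(page, myfs_get_block);
}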
2245
2246 /* utility function for filesystems that need to do work on expanding
2247  * truncates.  Uses filesystem pagecache writes to allow the filesystem to
2248  * deal with the hole.  
2249  */
2250 int generic_cont_expand_simple(struct inode *inode, loff_t size)
2251 {
2252         struct address_space *mapping = inode->i_mapping;
2253         struct page *page;
2254         void *fsdata;
2255         int err;
2256
2257         err = inode_newsize_ok(inode, size);
2258         if (err)
2259                 goto out;
2260
2261         err = pagecache_write_begin(NULL, mapping, size, 0,
2262                                 AOP_FLAG_UNINTERRUPTIBLE|AOP_FLAG_CONT_EXPAND,
2263                                 &page, &fsdata);
2264         if (err)
2265                 goto out;
2266
2267         err = pagecache_write_end(NULL, mapping, size, 0, 0, page, fsdata);
2268         BUG_ON(err > 0);
2269
2270 out:
2271         return err;
2272 }
2273 EXPORT_SYMBOL(generic_cont_expand_simple);
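
/*
 * A sketch of the expanding-truncate caller, in the style of fat_setattr():
 * when ->setattr grows the file, zero the gap through the pagecache first.
 */
static int example_expand(struct inode *inode, loff_t new_size)
{
        if (new_size > i_size_read(inode))
                return generic_cont_expand_simple(inode, new_size);
        return 0;
}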
2274
2275 static int cont_expand_zero(struct file *file, struct address_space *mapping,
2276                             loff_t pos, loff_t *bytes)
2277 {
2278         struct inode *inode = mapping->host;
2279         unsigned blocksize = 1 << inode->i_blkbits;
2280         struct page *page;
2281         void *fsdata;
2282         pgoff_t index, curidx;
2283         loff_t curpos;
2284         unsigned zerofrom, offset, len;
2285         int err = 0;
2286
2287         index = pos >> PAGE_CACHE_SHIFT;
2288         offset = pos & ~PAGE_CACHE_MASK;
2289
2290         while (index > (curidx = (curpos = *bytes)>>PAGE_CACHE_SHIFT)) {
2291                 zerofrom = curpos & ~PAGE_CACHE_MASK;
2292                 if (zerofrom & (blocksize-1)) {
2293                         *bytes |= (blocksize-1);
2294                         (*bytes)++;
2295                 }
2296                 len = PAGE_CACHE_SIZE - zerofrom;
2297
2298                 err = pagecache_write_begin(file, mapping, curpos, len,
2299                                                 AOP_FLAG_UNINTERRUPTIBLE,
2300                                                 &page, &fsdata);
2301                 if (err)
2302                         goto out;
2303                 zero_user(page, zerofrom, len);
2304                 err = pagecache_write_end(file, mapping, curpos, len, len,
2305                                                 page, fsdata);
2306                 if (err < 0)
2307                         goto out;
2308                 BUG_ON(err != len);
2309                 err = 0;
2310
2311                 balance_dirty_pages_ratelimited(mapping);
2312         }
2313
2314         /* page covers the boundary, find the boundary offset */
2315         if (index == curidx) {
2316                 zerofrom = curpos & ~PAGE_CACHE_MASK;
2317                 /* if we are going to expand the file, the last block will be filled */
2318                 if (offset <= zerofrom) {
2319                         goto out;
2320                 }
2321                 if (zerofrom & (blocksize-1)) {
2322                         *bytes |= (blocksize-1);
2323                         (*bytes)++;
2324                 }
2325                 len = offset - zerofrom;
2326
2327                 err = pagecache_write_begin(file, mapping, curpos, len,
2328                                                 AOP_FLAG_UNINTERRUPTIBLE,
2329                                                 &page, &fsdata);
2330                 if (err)
2331                         goto out;
2332                 zero_user(page, zerofrom, len);
2333                 err = pagecache_write_end(file, mapping, curpos, len, len,
2334                                                 page, fsdata);
2335                 if (err < 0)
2336                         goto out;
2337                 BUG_ON(err != len);
2338                 err = 0;
2339         }
2340 out:
2341         return err;
2342 }
2343
2344 /*
2345  * For moronic filesystems that do not allow holes in files.
2346  * We may have to extend the file.
2347  */
2348 int cont_write_begin(struct file *file, struct address_space *mapping,
2349                         loff_t pos, unsigned len, unsigned flags,
2350                         struct page **pagep, void **fsdata,
2351                         get_block_t *get_block, loff_t *bytes)
2352 {
2353         struct inode *inode = mapping->host;
2354         unsigned blocksize = 1 << inode->i_blkbits;
2355         unsigned zerofrom;
2356         int err;
2357
2358         err = cont_expand_zero(file, mapping, pos, bytes);
2359         if (err)
2360                 goto out;
2361
2362         zerofrom = *bytes & ~PAGE_CACHE_MASK;
2363         if (pos+len > *bytes && zerofrom & (blocksize-1)) {
2364                 *bytes |= (blocksize-1);
2365                 (*bytes)++;
2366         }
2367
2368         *pagep = NULL;
2369         err = block_write_begin_newtrunc(file, mapping, pos, len,
2370                                 flags, pagep, fsdata, get_block);
2371 out:
2372         return err;
2373 }
2374 EXPORT_SYMBOL(cont_write_begin);
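
/*
 * FAT-style wiring sketch: the filesystem keeps a per-inode count of how
 * far the file has been zeroed/allocated and hands its address to
 * cont_write_begin().  "myfs_i()" and its "i_zeroed_size" field are
 * hypothetical stand-ins for that state (FAT uses mmu_private):
 */
static int myfs_cont_write_begin(struct file *file,
                        struct address_space *mapping,
                        loff_t pos, unsigned len, unsigned flags,
                        struct page **pagep, void **fsdata)
{
        return cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
                                myfs_get_block,
                                &myfs_i(mapping->host)->i_zeroed_size);
}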
2375
2376 int block_commit_write(struct page *page, unsigned from, unsigned to)
2377 {
2378         struct inode *inode = page->mapping->host;
2379         __block_commit_write(inode,page,from,to);
2380         return 0;
2381 }
2382 EXPORT_SYMBOL(block_commit_write);
2383
2384 /*
2385  * block_page_mkwrite() is not allowed to change the file size as it gets
2386  * called from a page fault handler when a page is first dirtied. Hence we must
2387  * be careful to check for EOF conditions here. We set the page up correctly
2388  * for a written page which means we get ENOSPC checking when writing into
2389  * holes and correct delalloc and unwritten extent mapping on filesystems that
2390  * support these features.
2391  *
2392  * We are not allowed to take the i_mutex here so we have to play games to
2393  * protect against truncate races as the page could now be beyond EOF.  Because
2394  * truncate writes the inode size before removing pages, once we have the
2395  * page lock we can determine safely if the page is beyond EOF. If it is not
2396  * beyond EOF, then the page is guaranteed safe against truncation until we
2397  * unlock the page.
2398  */
2399 int
2400 block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
2401                    get_block_t get_block)
2402 {
2403         struct page *page = vmf->page;
2404         struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
2405         unsigned long end;
2406         loff_t size;
2407         int ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */
2408
2409         lock_page(page);
2410         size = i_size_read(inode);
2411         if ((page->mapping != inode->i_mapping) ||
2412             (page_offset(page) > size)) {
2413                 /* page got truncated out from underneath us */
2414                 unlock_page(page);
2415                 goto out;
2416         }
2417
2418         /* page is wholly or partially inside EOF */
2419         if (((page->index + 1) << PAGE_CACHE_SHIFT) > size)
2420                 end = size & ~PAGE_CACHE_MASK;
2421         else
2422                 end = PAGE_CACHE_SIZE;
2423
2424         ret = block_prepare_write(page, 0, end, get_block);
2425         if (!ret)
2426                 ret = block_commit_write(page, 0, end);
2427
2428         if (unlikely(ret)) {
2429                 unlock_page(page);
2430                 if (ret == -ENOMEM)
2431                         ret = VM_FAULT_OOM;
2432                 else /* -ENOSPC, -EIO, etc */
2433                         ret = VM_FAULT_SIGBUS;
2434         } else
2435                 ret = VM_FAULT_LOCKED;
2436
2437 out:
2438         return ret;
2439 }
2440 EXPORT_SYMBOL(block_page_mkwrite);
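
/*
 * Sketch of the mmap-write wiring: the fault path uses the generic
 * filemap_fault(), and first-dirtying goes through this helper (again
 * with the hypothetical "myfs_get_block"):
 */
static int myfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        return block_page_mkwrite(vma, vmf, myfs_get_block);
}

static const struct vm_operations_struct myfs_file_vm_ops = {
        .fault          = filemap_fault,
        .page_mkwrite   = myfs_page_mkwrite,
};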
2441
2442 /*
2443  * nobh_write_begin()'s prereads are special: the buffer_heads are freed
2444  * immediately, while under the page lock.  So it needs a special end_io
2445  * handler which does not touch the bh after unlocking it.
2446  */
2447 static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate)
2448 {
2449         __end_buffer_read_notouch(bh, uptodate);
2450 }
2451
2452 /*
2453  * Attach the singly-linked list of buffers created by nobh_write_begin, to
2454  * the page (converting it to a circular linked list and taking care of page
2455  * dirty races).
2456  */
2457 static void attach_nobh_buffers(struct page *page, struct buffer_head *head)
2458 {
2459         struct buffer_head *bh;
2460
2461         BUG_ON(!PageLocked(page));
2462
2463         spin_lock(&page->mapping->private_lock);
2464         bh = head;
2465         do {
2466                 if (PageDirty(page))
2467                         set_buffer_dirty(bh);
2468                 if (!bh->b_this_page)
2469                         bh->b_this_page = head;
2470                 bh = bh->b_this_page;
2471         } while (bh != head);
2472         attach_page_buffers(page, head);
2473         spin_unlock(&page->mapping->private_lock);
2474 }
2475
2476 /*
2477  * On entry, the page is fully not uptodate.
2478  * On exit the page is fully uptodate in the areas outside (from, to).
2479  * The filesystem needs to handle block truncation upon failure.
2480  */
2481 int nobh_write_begin(struct address_space *mapping,
2482                         loff_t pos, unsigned len, unsigned flags,
2483                         struct page **pagep, void **fsdata,
2484                         get_block_t *get_block)
2485 {
2486         struct inode *inode = mapping->host;
2487         const unsigned blkbits = inode->i_blkbits;
2488         const unsigned blocksize = 1 << blkbits;
2489         struct buffer_head *head, *bh;
2490         struct page *page;
2491         pgoff_t index;
2492         unsigned from, to;
2493         unsigned block_in_page;
2494         unsigned block_start, block_end;
2495         sector_t block_in_file;
2496         int nr_reads = 0;
2497         int ret = 0;
2498         int is_mapped_to_disk = 1;
2499
2500         index = pos >> PAGE_CACHE_SHIFT;
2501         from = pos & (PAGE_CACHE_SIZE - 1);
2502         to = from + len;
2503
2504         page = grab_cache_page_write_begin(mapping, index, flags);
2505         if (!page)
2506                 return -ENOMEM;
2507         *pagep = page;
2508         *fsdata = NULL;
2509
2510         if (page_has_buffers(page)) {
2511                 unlock_page(page);
2512                 page_cache_release(page);
2513                 *pagep = NULL;
2514                 return block_write_begin_newtrunc(NULL, mapping, pos, len,
2515                                         flags, pagep, fsdata, get_block);
2516         }
2517
2518         if (PageMappedToDisk(page))
2519                 return 0;
2520
2521         /*
2522          * Allocate buffers so that we can keep track of state, and potentially
2523          * attach them to the page if an error occurs. In the common case of
2524          * no error, they will just be freed again without ever being attached
2525          * to the page (which is all OK, because we're under the page lock).
2526          *
2527          * Be careful: the buffer linked list is a NULL terminated one, rather
2528          * than the circular one we're used to.
2529          */
2530         head = alloc_page_buffers(page, blocksize, 0);
2531         if (!head) {
2532                 ret = -ENOMEM;
2533                 goto out_release;
2534         }
2535
2536         block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
2537
2538         /*
2539          * We loop across all blocks in the page, whether or not they are
2540          * part of the affected region.  This is so we can discover if the
2541          * page is fully mapped-to-disk.
2542          */
2543         for (block_start = 0, block_in_page = 0, bh = head;
2544                   block_start < PAGE_CACHE_SIZE;
2545                   block_in_page++, block_start += blocksize, bh = bh->b_this_page) {
2546                 int create;
2547
2548                 block_end = block_start + blocksize;
2549                 bh->b_state = 0;
2550                 create = 1;
2551                 if (block_start >= to)
2552                         create = 0;
2553                 ret = get_block(inode, block_in_file + block_in_page,
2554                                         bh, create);
2555                 if (ret)
2556                         goto failed;
2557                 if (!buffer_mapped(bh))
2558                         is_mapped_to_disk = 0;
2559                 if (buffer_new(bh))
2560                         unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
2561                 if (PageUptodate(page)) {
2562                         set_buffer_uptodate(bh);
2563                         continue;
2564                 }
2565                 if (buffer_new(bh) || !buffer_mapped(bh)) {
2566                         zero_user_segments(page, block_start, from,
2567                                                         to, block_end);
2568                         continue;
2569                 }
2570                 if (buffer_uptodate(bh))
2571                         continue;       /* reiserfs does this */
2572                 if (block_start < from || block_end > to) {
2573                         lock_buffer(bh);
2574                         bh->b_end_io = end_buffer_read_nobh;
2575                         submit_bh(READ, bh);
2576                         nr_reads++;
2577                 }
2578         }
2579
2580         if (nr_reads) {
2581                 /*
2582                  * The page is locked, so these buffers are protected from
2583                  * any VM or truncate activity.  Hence we don't need to care
2584                  * for the buffer_head refcounts.
2585                  */
2586                 for (bh = head; bh; bh = bh->b_this_page) {
2587                         wait_on_buffer(bh);
2588                         if (!buffer_uptodate(bh))
2589                                 ret = -EIO;
2590                 }
2591                 if (ret)
2592                         goto failed;
2593         }
2594
2595         if (is_mapped_to_disk)
2596                 SetPageMappedToDisk(page);
2597
2598         *fsdata = head; /* to be released by nobh_write_end */
2599
2600         return 0;
2601
2602 failed:
2603         BUG_ON(!ret);
2604         /*
2605          * Error recovery is a bit difficult. We need to zero out blocks that
2606          * were newly allocated, and dirty them to ensure they get written out.
2607          * Buffers need to be attached to the page at this point, otherwise
2608          * the handling of potential IO errors during writeout would be hard
2609          * (could try doing synchronous writeout, but what if that fails too?)
2610          */
2611         attach_nobh_buffers(page, head);
2612         page_zero_new_buffers(page, from, to);
2613
2614 out_release:
2615         unlock_page(page);
2616         page_cache_release(page);
2617         *pagep = NULL;
2618
2619         return ret;
2620 }
2621 EXPORT_SYMBOL(nobh_write_begin);
2622
2623 int nobh_write_end(struct file *file, struct address_space *mapping,
2624                         loff_t pos, unsigned len, unsigned copied,
2625                         struct page *page, void *fsdata)
2626 {
2627         struct inode *inode = page->mapping->host;
2628         struct buffer_head *head = fsdata;
2629         struct buffer_head *bh;
2630         BUG_ON(fsdata != NULL && page_has_buffers(page));
2631
2632         if (unlikely(copied < len) && head)
2633                 attach_nobh_buffers(page, head);
2634         if (page_has_buffers(page))
2635                 return generic_write_end(file, mapping, pos, len,
2636                                         copied, page, fsdata);
2637
2638         SetPageUptodate(page);
2639         set_page_dirty(page);
2640         if (pos+copied > inode->i_size) {
2641                 i_size_write(inode, pos+copied);
2642                 mark_inode_dirty(inode);
2643         }
2644
2645         unlock_page(page);
2646         page_cache_release(page);
2647
2648         while (head) {
2649                 bh = head;
2650                 head = head->b_this_page;
2651                 free_buffer_head(bh);
2652         }
2653
2654         return copied;
2655 }
2656 EXPORT_SYMBOL(nobh_write_end);
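
/*
 * Sketch of the nobh begin/end pair in an a_ops, ext2's nobh mode being
 * the model (hypothetical "myfs_get_block" again):
 */
static int myfs_nobh_write_begin(struct file *file,
                        struct address_space *mapping,
                        loff_t pos, unsigned len, unsigned flags,
                        struct page **pagep, void **fsdata)
{
        return nobh_write_begin(mapping, pos, len, flags, pagep, fsdata,
                                myfs_get_block);
}

static const struct address_space_operations myfs_nobh_aops = {
        .write_begin    = myfs_nobh_write_begin,
        .write_end      = nobh_write_end,
};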
2657
2658 /*
2659  * nobh_writepage() - based on block_write_full_page() except
2660  * that it tries to operate without attaching bufferheads to
2661  * the page.
2662  */
2663 int nobh_writepage(struct page *page, get_block_t *get_block,
2664                         struct writeback_control *wbc)
2665 {
2666         struct inode * const inode = page->mapping->host;
2667         loff_t i_size = i_size_read(inode);
2668         const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2669         unsigned offset;
2670         int ret;
2671
2672         /* Is the page fully inside i_size? */
2673         if (page->index < end_index)
2674                 goto out;
2675
2676         /* Is the page fully outside i_size? (truncate in progress) */
2677         offset = i_size & (PAGE_CACHE_SIZE-1);
2678         if (page->index >= end_index+1 || !offset) {
2679                 /*
2680                  * The page may have dirty, unmapped buffers.  For example,
2681                  * they may have been added in ext3_writepage().  Make them
2682                  * freeable here, so the page does not leak.
2683                  */
2684 #if 0
2685                 /* Not really sure about this  - do we need this ? */
2686                 if (page->mapping->a_ops->invalidatepage)
2687                         page->mapping->a_ops->invalidatepage(page, offset);
2688 #endif
2689                 unlock_page(page);
2690                 return 0; /* don't care */
2691         }
2692
2693         /*
2694          * The page straddles i_size.  It must be zeroed out on each and every
2695          * writepage invocation because it may be mmapped.  "A file is mapped
2696          * in multiples of the page size.  For a file that is not a multiple of
2697          * the  page size, the remaining memory is zeroed when mapped, and
2698          * writes to that region are not written out to the file."
2699          */
2700         zero_user_segment(page, offset, PAGE_CACHE_SIZE);
2701 out:
2702         ret = mpage_writepage(page, get_block, wbc);
2703         if (ret == -EAGAIN)
2704                 ret = __block_write_full_page(inode, page, get_block, wbc,
2705                                               end_buffer_async_write);
2706         return ret;
2707 }
2708 EXPORT_SYMBOL(nobh_writepage);
2709
2710 int nobh_truncate_page(struct address_space *mapping,
2711                         loff_t from, get_block_t *get_block)
2712 {
2713         pgoff_t index = from >> PAGE_CACHE_SHIFT;
2714         unsigned offset = from & (PAGE_CACHE_SIZE-1);
2715         unsigned blocksize;
2716         sector_t iblock;
2717         unsigned length, pos;
2718         struct inode *inode = mapping->host;
2719         struct page *page;
2720         struct buffer_head map_bh;
2721         int err;
2722
2723         blocksize = 1 << inode->i_blkbits;
2724         length = offset & (blocksize - 1);
2725
2726         /* Block boundary? Nothing to do */
2727         if (!length)
2728                 return 0;
2729
2730         length = blocksize - length;
2731         iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2732
2733         page = grab_cache_page(mapping, index);
2734         err = -ENOMEM;
2735         if (!page)
2736                 goto out;
2737
2738         if (page_has_buffers(page)) {
2739 has_buffers:
2740                 unlock_page(page);
2741                 page_cache_release(page);
2742                 return block_truncate_page(mapping, from, get_block);
2743         }
2744
2745         /* Find the buffer that contains "offset" */
2746         pos = blocksize;
2747         while (offset >= pos) {
2748                 iblock++;
2749                 pos += blocksize;
2750         }
2751
2752         map_bh.b_size = blocksize;
2753         map_bh.b_state = 0;
2754         err = get_block(inode, iblock, &map_bh, 0);
2755         if (err)
2756                 goto unlock;
2757         /* unmapped? It's a hole - nothing to do */
2758         if (!buffer_mapped(&map_bh))
2759                 goto unlock;
2760
2761         /* Ok, it's mapped. Make sure it's up-to-date */
2762         if (!PageUptodate(page)) {
2763                 err = mapping->a_ops->readpage(NULL, page);
2764                 if (err) {
2765                         page_cache_release(page);
2766                         goto out;
2767                 }
2768                 lock_page(page);
2769                 if (!PageUptodate(page)) {
2770                         err = -EIO;
2771                         goto unlock;
2772                 }
2773                 if (page_has_buffers(page))
2774                         goto has_buffers;
2775         }
2776         zero_user(page, offset, length);
2777         set_page_dirty(page);
2778         err = 0;
2779
2780 unlock:
2781         unlock_page(page);
2782         page_cache_release(page);
2783 out:
2784         return err;
2785 }
2786 EXPORT_SYMBOL(nobh_truncate_page);
2787
2788 int block_truncate_page(struct address_space *mapping,
2789                         loff_t from, get_block_t *get_block)
2790 {
2791         pgoff_t index = from >> PAGE_CACHE_SHIFT;
2792         unsigned offset = from & (PAGE_CACHE_SIZE-1);
2793         unsigned blocksize;
2794         sector_t iblock;
2795         unsigned length, pos;
2796         struct inode *inode = mapping->host;
2797         struct page *page;
2798         struct buffer_head *bh;
2799         int err;
2800
2801         blocksize = 1 << inode->i_blkbits;
2802         length = offset & (blocksize - 1);
2803
2804         /* Block boundary? Nothing to do */
2805         if (!length)
2806                 return 0;
2807
2808         length = blocksize - length;
2809         iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2810
2811         page = grab_cache_page(mapping, index);
2812         err = -ENOMEM;
2813         if (!page)
2814                 goto out;
2815
2816         if (!page_has_buffers(page))
2817                 create_empty_buffers(page, blocksize, 0);
2818
2819         /* Find the buffer that contains "offset" */
2820         bh = page_buffers(page);
2821         pos = blocksize;
2822         while (offset >= pos) {
2823                 bh = bh->b_this_page;
2824                 iblock++;
2825                 pos += blocksize;
2826         }
2827
2828         err = 0;
2829         if (!buffer_mapped(bh)) {
2830                 WARN_ON(bh->b_size != blocksize);
2831                 err = get_block(inode, iblock, bh, 0);
2832                 if (err)
2833                         goto unlock;
2834                 /* unmapped? It's a hole - nothing to do */
2835                 if (!buffer_mapped(bh))
2836                         goto unlock;
2837         }
2838
2839         /* Ok, it's mapped. Make sure it's up-to-date */
2840         if (PageUptodate(page))
2841                 set_buffer_uptodate(bh);
2842
2843         if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) {
2844                 err = -EIO;
2845                 ll_rw_block(READ, 1, &bh);
2846                 wait_on_buffer(bh);
2847                 /* Uhhuh. Read error. Complain and punt. */
2848                 if (!buffer_uptodate(bh))
2849                         goto unlock;
2850         }
2851
2852         zero_user(page, offset, length);
2853         mark_buffer_dirty(bh);
2854         err = 0;
2855
2856 unlock:
2857         unlock_page(page);
2858         page_cache_release(page);
2859 out:
2860         return err;
2861 }
2862 EXPORT_SYMBOL(block_truncate_page);
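
/*
 * Sketch of the shrinking-truncate caller (sysv/ext2 style): zero the
 * partial block at the new EOF so stale data never reappears.
 */
static int example_truncate_tail(struct inode *inode)
{
        return block_truncate_page(inode->i_mapping, inode->i_size,
                                myfs_get_block);
}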
2863
2864 /*
2865  * The generic ->writepage function for buffer-backed address_spaces
2866  * this form passes in the end_io handler used to finish the IO.
2867  */
2868 int block_write_full_page_endio(struct page *page, get_block_t *get_block,
2869                         struct writeback_control *wbc, bh_end_io_t *handler)
2870 {
2871         struct inode * const inode = page->mapping->host;
2872         loff_t i_size = i_size_read(inode);
2873         const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2874         unsigned offset;
2875
2876         /* Is the page fully inside i_size? */
2877         if (page->index < end_index)
2878                 return __block_write_full_page(inode, page, get_block, wbc,
2879                                                handler);
2880
2881         /* Is the page fully outside i_size? (truncate in progress) */
2882         offset = i_size & (PAGE_CACHE_SIZE-1);
2883         if (page->index >= end_index+1 || !offset) {
2884                 /*
2885                  * The page may have dirty, unmapped buffers.  For example,
2886                  * they may have been added in ext3_writepage().  Make them
2887                  * freeable here, so the page does not leak.
2888                  */
2889                 do_invalidatepage(page, 0);
2890                 unlock_page(page);
2891                 return 0; /* don't care */
2892         }
2893
2894         /*
2895          * The page straddles i_size.  It must be zeroed out on each and every
2896          * writepage invocation because it may be mmapped.  "A file is mapped
2897          * in multiples of the page size.  For a file that is not a multiple of
2898          * the  page size, the remaining memory is zeroed when mapped, and
2899          * writes to that region are not written out to the file."
2900          */
2901         zero_user_segment(page, offset, PAGE_CACHE_SIZE);
2902         return __block_write_full_page(inode, page, get_block, wbc, handler);
2903 }
2904 EXPORT_SYMBOL(block_write_full_page_endio);
2905
2906 /*
2907  * The generic ->writepage function for buffer-backed address_spaces
2908  */
2909 int block_write_full_page(struct page *page, get_block_t *get_block,
2910                         struct writeback_control *wbc)
2911 {
2912         return block_write_full_page_endio(page, get_block, wbc,
2913                                            end_buffer_async_write);
2914 }
2915 EXPORT_SYMBOL(block_write_full_page);
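/*
 * Illustrative sketch: most block-based filesystems implement ->writepage
 * by handing their block mapper straight to block_write_full_page();
 * foo_get_block and foo_writepage are hypothetical names:
 *
 *	static int foo_writepage(struct page *page,
 *				 struct writeback_control *wbc)
 *	{
 *		return block_write_full_page(page, foo_get_block, wbc);
 *	}
 *
 * A filesystem that needs to post-process the I/O would instead call
 * block_write_full_page_endio() with its own bh_end_io_t in place of
 * end_buffer_async_write.
 */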
2916
2917 sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
2918                             get_block_t *get_block)
2919 {
2920         struct buffer_head tmp;
2921         struct inode *inode = mapping->host;
2922         tmp.b_state = 0;
2923         tmp.b_blocknr = 0;
2924         tmp.b_size = 1 << inode->i_blkbits;
2925         get_block(inode, block, &tmp, 0);
2926         return tmp.b_blocknr;
2927 }
2928 EXPORT_SYMBOL(generic_block_bmap);
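/*
 * Illustrative sketch: a ->bmap implementation is normally just a thin
 * wrapper around generic_block_bmap() (foo_bmap and foo_get_block are
 * hypothetical):
 *
 *	static sector_t foo_bmap(struct address_space *mapping, sector_t block)
 *	{
 *		return generic_block_bmap(mapping, block, foo_get_block);
 *	}
 *
 * Note that the get_block() return value is ignored here: if the lookup
 * fails, tmp.b_blocknr is still 0 and that is what the caller sees.
 */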
2929
2930 static void end_bio_bh_io_sync(struct bio *bio, int err)
2931 {
2932         struct buffer_head *bh = bio->bi_private;
2933
2934         if (err == -EOPNOTSUPP) {
2935                 set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
2936                 set_bit(BH_Eopnotsupp, &bh->b_state);
2937         }
2938
2939         if (unlikely(test_bit(BIO_QUIET, &bio->bi_flags)))
2940                 set_bit(BH_Quiet, &bh->b_state);
2941
2942         bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags));
2943         bio_put(bio);
2944 }
2945
2946 int submit_bh(int rw, struct buffer_head * bh)
2947 {
2948         struct bio *bio;
2949         int ret = 0;
2950
2951         BUG_ON(!buffer_locked(bh));
2952         BUG_ON(!buffer_mapped(bh));
2953         BUG_ON(!bh->b_end_io);
2954         BUG_ON(buffer_delay(bh));
2955         BUG_ON(buffer_unwritten(bh));
2956
2957         /*
2958          * Mask in the barrier bit for a write (could be either a WRITE or a
2959          * WRITE_SYNC).
2960          */
2961         if (buffer_ordered(bh) && (rw & WRITE))
2962                 rw |= WRITE_BARRIER;
2963
2964         /*
2965          * Only clear out a write error when rewriting
2966          */
2967         if (test_set_buffer_req(bh) && (rw & WRITE))
2968                 clear_buffer_write_io_error(bh);
2969
2970         /*
2971          * from here on down, it's all bio -- do the initial mapping,
2972          * submit_bio -> generic_make_request may further map this bio around
2973          */
2974         bio = bio_alloc(GFP_NOIO, 1);
2975
2976         bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
2977         bio->bi_bdev = bh->b_bdev;
2978         bio->bi_io_vec[0].bv_page = bh->b_page;
2979         bio->bi_io_vec[0].bv_len = bh->b_size;
2980         bio->bi_io_vec[0].bv_offset = bh_offset(bh);
2981
2982         bio->bi_vcnt = 1;
2983         bio->bi_idx = 0;
2984         bio->bi_size = bh->b_size;
2985
2986         bio->bi_end_io = end_bio_bh_io_sync;
2987         bio->bi_private = bh;
2988
2989         bio_get(bio);
2990         submit_bio(rw, bio);
2991
2992         if (bio_flagged(bio, BIO_EOPNOTSUPP))
2993                 ret = -EOPNOTSUPP;
2994
2995         bio_put(bio);
2996         return ret;
2997 }
2998 EXPORT_SYMBOL(submit_bh);
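/*
 * Illustrative sketch: a caller reading one buffer synchronously through
 * submit_bh() must hold the buffer lock, install a completion handler and
 * keep a reference across the I/O, e.g.:
 *
 *	lock_buffer(bh);
 *	if (!buffer_uptodate(bh)) {
 *		get_bh(bh);
 *		bh->b_end_io = end_buffer_read_sync;
 *		submit_bh(READ, bh);
 *		wait_on_buffer(bh);
 *		if (!buffer_uptodate(bh))
 *			return -EIO;
 *	} else
 *		unlock_buffer(bh);
 *
 * This is essentially what bh_submit_read() below does.
 */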
2999
3000 /**
3001  * ll_rw_block: low-level access to block devices (DEPRECATED)
3002  * @rw: whether to %READ or %WRITE or %SWRITE or maybe %READA (readahead)
3003  * @nr: number of &struct buffer_heads in the array
3004  * @bhs: array of pointers to &struct buffer_head
3005  *
3006  * ll_rw_block() takes an array of pointers to &struct buffer_heads, and
3007  * requests an I/O operation on them, either a %READ or a %WRITE.  The third
3008  * option, %SWRITE, is like %WRITE except that we make sure the *current* data
3009  * in the buffers is sent to disk.  The fourth option, %READA, is described in
3010  * the documentation for generic_make_request(), which ll_rw_block() calls.
3011  *
3012  * This function drops any buffer that it cannot get a lock on (with the
3013  * BH_Lock state bit) unless SWRITE is required, any buffer that appears to be
3014  * clean when doing a write request, and any buffer that appears to be
3015  * up-to-date when doing a read request.  Further, it marks as clean buffers that
3016  * are processed for writing (the buffer cache won't assume that they are
3017  * actually clean until the buffer gets unlocked).
3018  *
3019  * ll_rw_block sets b_end_io to a simple completion handler that marks
3020  * the buffer up-to-date (if appropriate), unlocks the buffer and wakes
3021  * any waiters.
3022  *
3023  * All of the buffers must be for the same device, and their size must be
3024  * a multiple of the current approved block size for the device.
3025  */
3026 void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
3027 {
3028         int i;
3029
3030         for (i = 0; i < nr; i++) {
3031                 struct buffer_head *bh = bhs[i];
3032
3033                 if (rw == SWRITE || rw == SWRITE_SYNC || rw == SWRITE_SYNC_PLUG)
3034                         lock_buffer(bh);
3035                 else if (!trylock_buffer(bh))
3036                         continue;
3037
3038                 if (rw == WRITE || rw == SWRITE || rw == SWRITE_SYNC ||
3039                     rw == SWRITE_SYNC_PLUG) {
3040                         if (test_clear_buffer_dirty(bh)) {
3041                                 bh->b_end_io = end_buffer_write_sync;
3042                                 get_bh(bh);
3043                                 if (rw == SWRITE_SYNC)
3044                                         submit_bh(WRITE_SYNC, bh);
3045                                 else
3046                                         submit_bh(WRITE, bh);
3047                                 continue;
3048                         }
3049                 } else {
3050                         if (!buffer_uptodate(bh)) {
3051                                 bh->b_end_io = end_buffer_read_sync;
3052                                 get_bh(bh);
3053                                 submit_bh(rw, bh);
3054                                 continue;
3055                         }
3056                 }
3057                 unlock_buffer(bh);
3058         }
3059 }
3060 EXPORT_SYMBOL(ll_rw_block);
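/*
 * Illustrative sketch: a typical ll_rw_block() caller batches the
 * submissions and waits afterwards (bhs, nr, i and err are hypothetical
 * locals):
 *
 *	ll_rw_block(READ, nr, bhs);
 *	for (i = 0; i < nr; i++) {
 *		wait_on_buffer(bhs[i]);
 *		if (!buffer_uptodate(bhs[i]))
 *			err = -EIO;
 *	}
 *
 * Because ll_rw_block() silently skips buffers it cannot lock, or that
 * already look clean/up-to-date, callers needing hard guarantees should
 * drive submit_bh() themselves.
 */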
3061
3062 /*
3063  * For a data-integrity writeout, we need to wait upon any in-progress I/O,
3064  * then start new I/O and wait upon that in turn.  The caller must have a ref on
3065  * the buffer_head.
3066  */
3067 int sync_dirty_buffer(struct buffer_head *bh)
3068 {
3069         int ret = 0;
3070
3071         WARN_ON(atomic_read(&bh->b_count) < 1);
3072         lock_buffer(bh);
3073         if (test_clear_buffer_dirty(bh)) {
3074                 get_bh(bh);
3075                 bh->b_end_io = end_buffer_write_sync;
3076                 ret = submit_bh(WRITE_SYNC, bh);
3077                 wait_on_buffer(bh);
3078                 if (buffer_eopnotsupp(bh)) {
3079                         clear_buffer_eopnotsupp(bh);
3080                         ret = -EOPNOTSUPP;
3081                 }
3082                 if (!ret && !buffer_uptodate(bh))
3083                         ret = -EIO;
3084         } else {
3085                 unlock_buffer(bh);
3086         }
3087         return ret;
3088 }
3089 EXPORT_SYMBOL(sync_dirty_buffer);
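/*
 * Illustrative sketch: a filesystem forcing one piece of metadata to disk
 * might do the following with a modified buffer on which it holds a
 * reference:
 *
 *	mark_buffer_dirty(bh);
 *	err = sync_dirty_buffer(bh);
 *	if (err == -EOPNOTSUPP)
 *		... a barrier write was rejected by the device ...
 *
 * On return the buffer is unlocked; -EIO means the write itself failed.
 */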
3090
3091 /*
3092  * try_to_free_buffers() checks if all the buffers on this particular page
3093  * are unused, and releases them if so.
3094  *
3095  * Exclusion against try_to_free_buffers may be obtained by either
3096  * locking the page or by holding its mapping's private_lock.
3097  *
3098  * If the page is dirty but all the buffers are clean then we need to
3099  * be sure to mark the page clean as well.  This is because the page
3100  * may be against a block device, and a later reattachment of buffers
3101  * to a dirty page will set *all* buffers dirty, which would corrupt
3102  * filesystem data on the same device.
3103  *
3104  * The same applies to regular filesystem pages: if all the buffers are
3105  * clean then we set the page clean and proceed.  To do that, we require
3106  * total exclusion from __set_page_dirty_buffers().  That is obtained with
3107  * private_lock.
3108  *
3109  * try_to_free_buffers() is non-blocking.
3110  */
3111 static inline int buffer_busy(struct buffer_head *bh)
3112 {
3113         return atomic_read(&bh->b_count) |
3114                 (bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
3115 }
3116
3117 static int
3118 drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
3119 {
3120         struct buffer_head *head = page_buffers(page);
3121         struct buffer_head *bh;
3122
3123         bh = head;
3124         do {
3125                 if (buffer_write_io_error(bh) && page->mapping)
3126                         set_bit(AS_EIO, &page->mapping->flags);
3127                 if (buffer_busy(bh))
3128                         goto failed;
3129                 bh = bh->b_this_page;
3130         } while (bh != head);
3131
3132         do {
3133                 struct buffer_head *next = bh->b_this_page;
3134
3135                 if (bh->b_assoc_map)
3136                         __remove_assoc_queue(bh);
3137                 bh = next;
3138         } while (bh != head);
3139         *buffers_to_free = head;
3140         __clear_page_buffers(page);
3141         return 1;
3142 failed:
3143         return 0;
3144 }
3145
3146 int try_to_free_buffers(struct page *page)
3147 {
3148         struct address_space * const mapping = page->mapping;
3149         struct buffer_head *buffers_to_free = NULL;
3150         int ret = 0;
3151
3152         BUG_ON(!PageLocked(page));
3153         if (PageWriteback(page))
3154                 return 0;
3155
3156         if (mapping == NULL) {          /* can this still happen? */
3157                 ret = drop_buffers(page, &buffers_to_free);
3158                 goto out;
3159         }
3160
3161         spin_lock(&mapping->private_lock);
3162         ret = drop_buffers(page, &buffers_to_free);
3163
3164         /*
3165          * If the filesystem writes its buffers by hand (eg ext3)
3166          * then we can have clean buffers against a dirty page.  We
3167          * clean the page here; otherwise the VM will never notice
3168          * that the filesystem did any IO at all.
3169          *
3170          * Also, during truncate, discard_buffer will have marked all
3171          * the page's buffers clean.  We discover that here and clean
3172          * the page also.
3173          *
3174          * private_lock must be held over this entire operation in order
3175          * to synchronise against __set_page_dirty_buffers and prevent the
3176          * dirty bit from being lost.
3177          */
3178         if (ret)
3179                 cancel_dirty_page(page, PAGE_CACHE_SIZE);
3180         spin_unlock(&mapping->private_lock);
3181 out:
3182         if (buffers_to_free) {
3183                 struct buffer_head *bh = buffers_to_free;
3184
3185                 do {
3186                         struct buffer_head *next = bh->b_this_page;
3187                         free_buffer_head(bh);
3188                         bh = next;
3189                 } while (bh != buffers_to_free);
3190         }
3191         return ret;
3192 }
3193 EXPORT_SYMBOL(try_to_free_buffers);
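/*
 * Illustrative sketch: try_to_free_buffers() is what a buffer-backed
 * ->releasepage ultimately relies on; a minimal implementation
 * (foo_releasepage is hypothetical) could be:
 *
 *	static int foo_releasepage(struct page *page, gfp_t gfp)
 *	{
 *		return try_to_free_buffers(page);
 *	}
 *
 * The VM's try_to_release_page() falls back to try_to_free_buffers()
 * itself when no ->releasepage is provided.
 */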
3194
3195 void block_sync_page(struct page *page)
3196 {
3197         struct address_space *mapping;
3198
3199         smp_mb();
3200         mapping = page_mapping(page);
3201         if (mapping)
3202                 blk_run_backing_dev(mapping->backing_dev_info, page);
3203 }
3204 EXPORT_SYMBOL(block_sync_page);
3205
3206 /*
3207  * There are no bdflush tunables left.  But distributions are
3208  * still running obsolete flush daemons, so we terminate them here.
3209  *
3210  * Use of bdflush() is deprecated and will be removed in a future kernel.
3211  * The `flush-X' kernel threads fully replace bdflush daemons and this call.
3212  */
3213 SYSCALL_DEFINE2(bdflush, int, func, long, data)
3214 {
3215         static int msg_count;
3216
3217         if (!capable(CAP_SYS_ADMIN))
3218                 return -EPERM;
3219
3220         if (msg_count < 5) {
3221                 msg_count++;
3222                 printk(KERN_INFO
3223                         "warning: process `%s' used the obsolete bdflush"
3224                         " system call\n", current->comm);
3225                 printk(KERN_INFO "Fix your initscripts?\n");
3226         }
3227
3228         if (func == 1)
3229                 do_exit(0);
3230         return 0;
3231 }
3232
3233 /*
3234  * Buffer-head allocation
3235  */
3236 static struct kmem_cache *bh_cachep;
3237
3238 /*
3239  * Once the number of bh's in the machine exceeds this level, we start
3240  * stripping them in writeback.
3241  */
3242 static int max_buffer_heads;
3243
3244 int buffer_heads_over_limit;
3245
3246 struct bh_accounting {
3247         int nr;                 /* Number of live bh's */
3248         int ratelimit;          /* Limit cacheline bouncing */
3249 };
3250
3251 static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};
3252
3253 static void recalc_bh_state(void)
3254 {
3255         int i;
3256         int tot = 0;
3257
3258         if (__get_cpu_var(bh_accounting).ratelimit++ < 4096)
3259                 return;
3260         __get_cpu_var(bh_accounting).ratelimit = 0;
3261         for_each_online_cpu(i)
3262                 tot += per_cpu(bh_accounting, i).nr;
3263         buffer_heads_over_limit = (tot > max_buffer_heads);
3264 }
3265
3266 struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
3267 {
3268         struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags);
3269         if (ret) {
3270                 INIT_LIST_HEAD(&ret->b_assoc_buffers);
3271                 get_cpu_var(bh_accounting).nr++;
3272                 recalc_bh_state();
3273                 put_cpu_var(bh_accounting);
3274         }
3275         return ret;
3276 }
3277 EXPORT_SYMBOL(alloc_buffer_head);
3278
3279 void free_buffer_head(struct buffer_head *bh)
3280 {
3281         BUG_ON(!list_empty(&bh->b_assoc_buffers));
3282         kmem_cache_free(bh_cachep, bh);
3283         get_cpu_var(bh_accounting).nr--;
3284         recalc_bh_state();
3285         put_cpu_var(bh_accounting);
3286 }
3287 EXPORT_SYMBOL(free_buffer_head);
3288
3289 static void buffer_exit_cpu(int cpu)
3290 {
3291         int i;
3292         struct bh_lru *b = &per_cpu(bh_lrus, cpu);
3293
3294         for (i = 0; i < BH_LRU_SIZE; i++) {
3295                 brelse(b->bhs[i]);
3296                 b->bhs[i] = NULL;
3297         }
3298         get_cpu_var(bh_accounting).nr += per_cpu(bh_accounting, cpu).nr;
3299         per_cpu(bh_accounting, cpu).nr = 0;
3300         put_cpu_var(bh_accounting);
3301 }
3302
3303 static int buffer_cpu_notify(struct notifier_block *self,
3304                               unsigned long action, void *hcpu)
3305 {
3306         if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
3307                 buffer_exit_cpu((unsigned long)hcpu);
3308         return NOTIFY_OK;
3309 }
3310
3311 /**
3312  * bh_uptodate_or_lock - Test whether the buffer is uptodate
3313  * @bh: struct buffer_head
3314  *
3315  * Returns 1 if the buffer is up-to-date.  Returns 0, with the buffer
3316  * locked, if it is not.
3317  */
3318 int bh_uptodate_or_lock(struct buffer_head *bh)
3319 {
3320         if (!buffer_uptodate(bh)) {
3321                 lock_buffer(bh);
3322                 if (!buffer_uptodate(bh))
3323                         return 0;
3324                 unlock_buffer(bh);
3325         }
3326         return 1;
3327 }
3328 EXPORT_SYMBOL(bh_uptodate_or_lock);
3329
3330 /**
3331  * bh_submit_read - Submit a locked buffer for reading
3332  * @bh: struct buffer_head
3333  *
3334  * Returns zero on success and -EIO on error.
3335  */
3336 int bh_submit_read(struct buffer_head *bh)
3337 {
3338         BUG_ON(!buffer_locked(bh));
3339
3340         if (buffer_uptodate(bh)) {
3341                 unlock_buffer(bh);
3342                 return 0;
3343         }
3344
3345         get_bh(bh);
3346         bh->b_end_io = end_buffer_read_sync;
3347         submit_bh(READ, bh);
3348         wait_on_buffer(bh);
3349         if (buffer_uptodate(bh))
3350                 return 0;
3351         return -EIO;
3352 }
3353 EXPORT_SYMBOL(bh_submit_read);
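/*
 * Illustrative sketch: bh_uptodate_or_lock() and bh_submit_read() are
 * designed to pair up, taking the lock only when a read is needed:
 *
 *	if (!bh_uptodate_or_lock(bh)) {
 *		if (bh_submit_read(bh))
 *			return -EIO;
 *	}
 *	... bh is now up-to-date and unlocked ...
 */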
3354
3355 void __init buffer_init(void)
3356 {
3357         int nrpages;
3358
3359         bh_cachep = kmem_cache_create("buffer_head",
3360                         sizeof(struct buffer_head), 0,
3361                                 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
3362                                 SLAB_MEM_SPREAD),
3363                                 NULL);
3364
3365         /*
3366          * Limit the bh occupancy to 10% of ZONE_NORMAL
3367          */
3368         nrpages = (nr_free_buffer_pages() * 10) / 100;
3369         max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
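        /*
         * Worked example (numbers purely illustrative): with 4K pages and
         * a hypothetical 64-byte buffer_head, one page holds 64 heads, so
         * 10% of 1GB worth of pages (~26214) caps us at roughly 1.7
         * million buffer heads before writeback starts stripping them.
         */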
3370         hotcpu_notifier(buffer_cpu_notify, 0);
3371 }