fs/xfs/linux-2.6/xfs_lrw.c
/*
 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_alloc.h"
#include "xfs_dmapi.h"
#include "xfs_quota.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_bmap.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_itable.h"
#include "xfs_rw.h"
#include "xfs_acl.h"
#include "xfs_attr.h"
#include "xfs_inode_item.h"
#include "xfs_buf_item.h"
#include "xfs_utils.h"
#include "xfs_iomap.h"
#include "xfs_vnodeops.h"

#include <linux/capability.h>
#include <linux/mount.h>
#include <linux/writeback.h>


#if defined(XFS_RW_TRACE)
void
xfs_rw_enter_trace(
        int                     tag,
        xfs_inode_t             *ip,
        void                    *data,
        size_t                  segs,
        loff_t                  offset,
        int                     ioflags)
{
        if (ip->i_rwtrace == NULL)
                return;
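        /*
         * ktrace_enter() records a fixed set of sixteen void * slots, so
         * the 64-bit quantities (di_size, offset, i_new_size) are split
         * into high/low 32-bit halves below; presumably this keeps the
         * trace usable on 32-bit kernels, where a pointer cannot hold a
         * full 64-bit value.
         */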
        ktrace_enter(ip->i_rwtrace,
                (void *)(unsigned long)tag,
                (void *)ip,
                (void *)((unsigned long)((ip->i_d.di_size >> 32) & 0xffffffff)),
                (void *)((unsigned long)(ip->i_d.di_size & 0xffffffff)),
                (void *)data,
                (void *)((unsigned long)segs),
                (void *)((unsigned long)((offset >> 32) & 0xffffffff)),
                (void *)((unsigned long)(offset & 0xffffffff)),
                (void *)((unsigned long)ioflags),
                (void *)((unsigned long)((ip->i_new_size >> 32) & 0xffffffff)),
                (void *)((unsigned long)(ip->i_new_size & 0xffffffff)),
                (void *)((unsigned long)current_pid()),
                (void *)NULL,
                (void *)NULL,
                (void *)NULL,
                (void *)NULL);
}

void
xfs_inval_cached_trace(
        xfs_inode_t     *ip,
        xfs_off_t       offset,
        xfs_off_t       len,
        xfs_off_t       first,
        xfs_off_t       last)
{

        if (ip->i_rwtrace == NULL)
                return;
        ktrace_enter(ip->i_rwtrace,
                (void *)(__psint_t)XFS_INVAL_CACHED,
                (void *)ip,
                (void *)((unsigned long)((offset >> 32) & 0xffffffff)),
                (void *)((unsigned long)(offset & 0xffffffff)),
                (void *)((unsigned long)((len >> 32) & 0xffffffff)),
                (void *)((unsigned long)(len & 0xffffffff)),
                (void *)((unsigned long)((first >> 32) & 0xffffffff)),
                (void *)((unsigned long)(first & 0xffffffff)),
                (void *)((unsigned long)((last >> 32) & 0xffffffff)),
                (void *)((unsigned long)(last & 0xffffffff)),
                (void *)((unsigned long)current_pid()),
                (void *)NULL,
                (void *)NULL,
                (void *)NULL,
                (void *)NULL,
                (void *)NULL);
}
#endif

/*
 *      xfs_iozero
 *
 *      xfs_iozero clears the specified range of the supplied buffer
 *      and marks all the affected blocks as valid and modified.  If
 *      an affected block is not allocated, it will be allocated.  If
 *      an affected block is not completely overwritten, and is not
 *      valid before the operation, it will be read from disk before
 *      being partially zeroed.
 */
STATIC int
xfs_iozero(
        struct xfs_inode        *ip,    /* inode                        */
        loff_t                  pos,    /* offset in file               */
        size_t                  count)  /* size of data to zero         */
{
        struct page             *page;
        struct address_space    *mapping;
        int                     status;

        mapping = ip->i_vnode->i_mapping;
        do {
                unsigned offset, bytes;
                void *fsdata;

                offset = (pos & (PAGE_CACHE_SIZE -1)); /* Within page */
                bytes = PAGE_CACHE_SIZE - offset;
                if (bytes > count)
                        bytes = count;

                status = pagecache_write_begin(NULL, mapping, pos, bytes,
                                        AOP_FLAG_UNINTERRUPTIBLE,
                                        &page, &fsdata);
                if (status)
                        break;

                zero_user(page, offset, bytes);

                status = pagecache_write_end(NULL, mapping, pos, bytes, bytes,
                                        page, fsdata);
                WARN_ON(status <= 0); /* can't return less than zero! */
                pos += bytes;
                count -= bytes;
                status = 0;
        } while (count);

        return (-status);
}
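
/*
 * Typical use (an illustrative sketch of the caller below, not extra code
 * in this file): to zero the tail of the block holding the old EOF one does
 * roughly
 *
 *      zero_len = mp->m_sb.sb_blocksize - zero_offset;
 *      error = xfs_iozero(ip, isize, zero_len);
 *
 * which is the pattern xfs_zero_last_block() follows, dropping the ilock
 * around the call so the buffer cache can call back into the filesystem
 * without deadlocking.
 */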

ssize_t                 /* bytes read, or (-)  error */
xfs_read(
        xfs_inode_t             *ip,
        struct kiocb            *iocb,
        const struct iovec      *iovp,
        unsigned int            segs,
        loff_t                  *offset,
        int                     ioflags)
{
        struct file             *file = iocb->ki_filp;
        struct inode            *inode = file->f_mapping->host;
        xfs_mount_t             *mp = ip->i_mount;
        size_t                  size = 0;
        ssize_t                 ret = 0;
        xfs_fsize_t             n;
        unsigned long           seg;


        XFS_STATS_INC(xs_read_calls);

        /* START copy & waste from filemap.c */
        for (seg = 0; seg < segs; seg++) {
                const struct iovec *iv = &iovp[seg];

                /*
                 * If any segment has a negative length, or the cumulative
                 * length ever wraps negative then return -EINVAL.
                 */
                size += iv->iov_len;
                if (unlikely((ssize_t)(size|iv->iov_len) < 0))
                        return XFS_ERROR(-EINVAL);
        }
        /* END copy & waste from filemap.c */

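        /*
         * Direct I/O must be sector aligned: bt_smask is the sector-size
         * mask of the underlying buftarg, so with 512-byte sectors (an
         * assumed size, for illustration only) an offset or length with
         * any of the low 9 bits set fails the check below.  The one
         * exception is a read starting exactly at EOF, which returns 0.
         */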
        if (unlikely(ioflags & IO_ISDIRECT)) {
                xfs_buftarg_t   *target =
                        XFS_IS_REALTIME_INODE(ip) ?
                                mp->m_rtdev_targp : mp->m_ddev_targp;
                if ((*offset & target->bt_smask) ||
                    (size & target->bt_smask)) {
                        if (*offset == ip->i_size) {
                                return (0);
                        }
                        return -XFS_ERROR(EINVAL);
                }
        }

        n = XFS_MAXIOFFSET(mp) - *offset;
        if ((n <= 0) || (size == 0))
                return 0;

        if (n < size)
                size = n;

        if (XFS_FORCED_SHUTDOWN(mp))
                return -EIO;

        if (unlikely(ioflags & IO_ISDIRECT))
                mutex_lock(&inode->i_mutex);
        xfs_ilock(ip, XFS_IOLOCK_SHARED);

        if (DM_EVENT_ENABLED(ip, DM_EVENT_READ) && !(ioflags & IO_INVIS)) {
                int dmflags = FILP_DELAY_FLAG(file) | DM_SEM_FLAG_RD(ioflags);
                int iolock = XFS_IOLOCK_SHARED;

                ret = -XFS_SEND_DATA(mp, DM_EVENT_READ, ip, *offset, size,
                                        dmflags, &iolock);
                if (ret) {
                        xfs_iunlock(ip, XFS_IOLOCK_SHARED);
                        if (unlikely(ioflags & IO_ISDIRECT))
                                mutex_unlock(&inode->i_mutex);
                        return ret;
                }
        }

        if (unlikely(ioflags & IO_ISDIRECT)) {
                if (inode->i_mapping->nrpages)
                        ret = xfs_flushinval_pages(ip, (*offset & PAGE_CACHE_MASK),
                                                    -1, FI_REMAPF_LOCKED);
                mutex_unlock(&inode->i_mutex);
                if (ret) {
                        xfs_iunlock(ip, XFS_IOLOCK_SHARED);
                        return ret;
                }
        }

        xfs_rw_enter_trace(XFS_READ_ENTER, ip,
                                (void *)iovp, segs, *offset, ioflags);

        iocb->ki_pos = *offset;
        ret = generic_file_aio_read(iocb, iovp, segs, *offset);
        if (ret == -EIOCBQUEUED && !(ioflags & IO_ISAIO))
                ret = wait_on_sync_kiocb(iocb);
        if (ret > 0)
                XFS_STATS_ADD(xs_read_bytes, ret);

        xfs_iunlock(ip, XFS_IOLOCK_SHARED);
        return ret;
}

ssize_t
xfs_splice_read(
        xfs_inode_t             *ip,
        struct file             *infilp,
        loff_t                  *ppos,
        struct pipe_inode_info  *pipe,
        size_t                  count,
        int                     flags,
        int                     ioflags)
{
        xfs_mount_t             *mp = ip->i_mount;
        ssize_t                 ret;

        XFS_STATS_INC(xs_read_calls);
        if (XFS_FORCED_SHUTDOWN(ip->i_mount))
                return -EIO;

        xfs_ilock(ip, XFS_IOLOCK_SHARED);

        if (DM_EVENT_ENABLED(ip, DM_EVENT_READ) && !(ioflags & IO_INVIS)) {
                int iolock = XFS_IOLOCK_SHARED;
                int error;

                error = XFS_SEND_DATA(mp, DM_EVENT_READ, ip, *ppos, count,
                                        FILP_DELAY_FLAG(infilp), &iolock);
                if (error) {
                        xfs_iunlock(ip, XFS_IOLOCK_SHARED);
                        return -error;
                }
        }
        xfs_rw_enter_trace(XFS_SPLICE_READ_ENTER, ip,
                           pipe, count, *ppos, ioflags);
        ret = generic_file_splice_read(infilp, ppos, pipe, count, flags);
        if (ret > 0)
                XFS_STATS_ADD(xs_read_bytes, ret);

        xfs_iunlock(ip, XFS_IOLOCK_SHARED);
        return ret;
}

ssize_t
xfs_splice_write(
        xfs_inode_t             *ip,
        struct pipe_inode_info  *pipe,
        struct file             *outfilp,
        loff_t                  *ppos,
        size_t                  count,
        int                     flags,
        int                     ioflags)
{
        xfs_mount_t             *mp = ip->i_mount;
        ssize_t                 ret;
        struct inode            *inode = outfilp->f_mapping->host;
        xfs_fsize_t             isize, new_size;

        XFS_STATS_INC(xs_write_calls);
        if (XFS_FORCED_SHUTDOWN(ip->i_mount))
                return -EIO;

        xfs_ilock(ip, XFS_IOLOCK_EXCL);

        if (DM_EVENT_ENABLED(ip, DM_EVENT_WRITE) && !(ioflags & IO_INVIS)) {
                int iolock = XFS_IOLOCK_EXCL;
                int error;

                error = XFS_SEND_DATA(mp, DM_EVENT_WRITE, ip, *ppos, count,
                                        FILP_DELAY_FLAG(outfilp), &iolock);
                if (error) {
                        xfs_iunlock(ip, XFS_IOLOCK_EXCL);
                        return -error;
                }
        }

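        /*
         * Stash the size this write is expected to reach in i_new_size,
         * presumably so other parts of the write path can see the EOF the
         * I/O is extending towards; it is cleared again after the write,
         * and the on-disk size is trimmed back if the write fell short
         * (xfs_write() below has a fuller comment on that case).
         */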
        new_size = *ppos + count;

        xfs_ilock(ip, XFS_ILOCK_EXCL);
        if (new_size > ip->i_size)
                ip->i_new_size = new_size;
        xfs_iunlock(ip, XFS_ILOCK_EXCL);

        xfs_rw_enter_trace(XFS_SPLICE_WRITE_ENTER, ip,
                           pipe, count, *ppos, ioflags);
        ret = generic_file_splice_write(pipe, outfilp, ppos, count, flags);
        if (ret > 0)
                XFS_STATS_ADD(xs_write_bytes, ret);

        isize = i_size_read(inode);
        if (unlikely(ret < 0 && ret != -EFAULT && *ppos > isize))
                *ppos = isize;

        if (*ppos > ip->i_size) {
                xfs_ilock(ip, XFS_ILOCK_EXCL);
                if (*ppos > ip->i_size)
                        ip->i_size = *ppos;
                xfs_iunlock(ip, XFS_ILOCK_EXCL);
        }

        if (ip->i_new_size) {
                xfs_ilock(ip, XFS_ILOCK_EXCL);
                ip->i_new_size = 0;
                if (ip->i_d.di_size > ip->i_size)
                        ip->i_d.di_size = ip->i_size;
                xfs_iunlock(ip, XFS_ILOCK_EXCL);
        }
        xfs_iunlock(ip, XFS_IOLOCK_EXCL);
        return ret;
}

/*
 * This routine is called to handle zeroing any space in the last
 * block of the file that is beyond the EOF.  We do this since the
 * size is being increased without writing anything to that block
 * and we don't want anyone to read the garbage on the disk.
 */
STATIC int                              /* error (positive) */
xfs_zero_last_block(
        xfs_inode_t     *ip,
        xfs_fsize_t     offset,
        xfs_fsize_t     isize)
{
        xfs_fileoff_t   last_fsb;
        xfs_mount_t     *mp = ip->i_mount;
        int             nimaps;
        int             zero_offset;
        int             zero_len;
        int             error = 0;
        xfs_bmbt_irec_t imap;

        ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE) != 0);

        zero_offset = XFS_B_FSB_OFFSET(mp, isize);
        if (zero_offset == 0) {
                /*
                 * There are no extra bytes in the last block on disk to
                 * zero, so return.
                 */
                return 0;
        }

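        /*
         * For illustration only (assuming a 4 KB block size, which the
         * code does not require): isize == 0x1200 gives zero_offset ==
         * 0x200, so the range zeroed below is [0x1200, 0x2000), cut short
         * at 'offset' if the new write begins before the block boundary.
         */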
        last_fsb = XFS_B_TO_FSBT(mp, isize);
        nimaps = 1;
        error = xfs_bmapi(NULL, ip, last_fsb, 1, 0, NULL, 0, &imap,
                          &nimaps, NULL, NULL);
        if (error) {
                return error;
        }
        ASSERT(nimaps > 0);
        /*
         * If the block underlying isize is just a hole, then there
         * is nothing to zero.
         */
        if (imap.br_startblock == HOLESTARTBLOCK) {
                return 0;
        }
        /*
         * Zero the part of the last block beyond the EOF, and write it
         * out sync.  We need to drop the ilock while we do this so we
         * don't deadlock when the buffer cache calls back to us.
         */
        xfs_iunlock(ip, XFS_ILOCK_EXCL| XFS_EXTSIZE_RD);

        zero_len = mp->m_sb.sb_blocksize - zero_offset;
        if (isize + zero_len > offset)
                zero_len = offset - isize;
        error = xfs_iozero(ip, isize, zero_len);

        xfs_ilock(ip, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);
        ASSERT(error >= 0);
        return error;
}

/*
 * Zero any on disk space between the current EOF and the new,
 * larger EOF.  This handles the normal case of zeroing the remainder
 * of the last block in the file and the unusual case of zeroing blocks
 * out beyond the size of the file.  This second case only happens
 * with fixed size extents and when the system crashes before the inode
 * size was updated but after blocks were allocated.  Holes and
 * unwritten extents in the range are skipped and left as they are.
 */

int                                     /* error (positive) */
xfs_zero_eof(
        xfs_inode_t     *ip,
        xfs_off_t       offset,         /* starting I/O offset */
        xfs_fsize_t     isize)          /* current inode size */
{
        xfs_mount_t     *mp = ip->i_mount;
        xfs_fileoff_t   start_zero_fsb;
        xfs_fileoff_t   end_zero_fsb;
        xfs_fileoff_t   zero_count_fsb;
        xfs_fileoff_t   last_fsb;
        xfs_fileoff_t   zero_off;
        xfs_fsize_t     zero_len;
        int             nimaps;
        int             error = 0;
        xfs_bmbt_irec_t imap;

        ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE));
        ASSERT(ismrlocked(&ip->i_iolock, MR_UPDATE));
        ASSERT(offset > isize);

        /*
         * First handle zeroing the block on which isize resides.
         * We only zero a part of that block so it is handled specially.
         */
        error = xfs_zero_last_block(ip, offset, isize);
        if (error) {
                ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE));
                ASSERT(ismrlocked(&ip->i_iolock, MR_UPDATE));
                return error;
        }

        /*
         * Calculate the range between the new size and the old
         * where blocks needing to be zeroed may exist.  To get the
         * block where the last byte in the file currently resides,
         * we need to subtract one from the size and truncate back
         * to a block boundary.  We subtract 1 in case the size is
         * exactly on a block boundary.
         */
        last_fsb = isize ? XFS_B_TO_FSBT(mp, isize - 1) : (xfs_fileoff_t)-1;
        start_zero_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)isize);
        end_zero_fsb = XFS_B_TO_FSBT(mp, offset - 1);
        ASSERT((xfs_sfiloff_t)last_fsb < (xfs_sfiloff_t)start_zero_fsb);
        if (last_fsb == end_zero_fsb) {
                /*
                 * The size was only incremented on its last block.
                 * We took care of that above, so just return.
                 */
                return 0;
        }

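        /*
         * For illustration only (again assuming 4 KB blocks, which the
         * code does not require): isize == 0x2200 and offset == 0x9000
         * give last_fsb == 2, start_zero_fsb == 3 and end_zero_fsb == 8,
         * so the loop below walks blocks 3..8 and zeroes whichever of
         * them are allocated and written.
         */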
        ASSERT(start_zero_fsb <= end_zero_fsb);
        while (start_zero_fsb <= end_zero_fsb) {
                nimaps = 1;
                zero_count_fsb = end_zero_fsb - start_zero_fsb + 1;
                error = xfs_bmapi(NULL, ip, start_zero_fsb, zero_count_fsb,
                                  0, NULL, 0, &imap, &nimaps, NULL, NULL);
                if (error) {
                        ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE));
                        ASSERT(ismrlocked(&ip->i_iolock, MR_UPDATE));
                        return error;
                }
                ASSERT(nimaps > 0);

                if (imap.br_state == XFS_EXT_UNWRITTEN ||
                    imap.br_startblock == HOLESTARTBLOCK) {
                        /*
                         * The range is a hole or an unwritten extent, so
                         * it already reads back as zeroes and there is
                         * nothing on disk to initialize; skip ahead to
                         * the next mapping.
                         */
                        start_zero_fsb = imap.br_startoff + imap.br_blockcount;
                        ASSERT(start_zero_fsb <= (end_zero_fsb + 1));
                        continue;
                }

                /*
                 * There are blocks we need to zero.
                 * Drop the inode lock while we're doing the I/O.
                 * We'll still have the iolock to protect us.
                 */
                xfs_iunlock(ip, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);

                zero_off = XFS_FSB_TO_B(mp, start_zero_fsb);
                zero_len = XFS_FSB_TO_B(mp, imap.br_blockcount);

                if ((zero_off + zero_len) > offset)
                        zero_len = offset - zero_off;

                error = xfs_iozero(ip, zero_off, zero_len);
                if (error) {
                        goto out_lock;
                }

                start_zero_fsb = imap.br_startoff + imap.br_blockcount;
                ASSERT(start_zero_fsb <= (end_zero_fsb + 1));

                xfs_ilock(ip, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);
        }

        return 0;

out_lock:
        xfs_ilock(ip, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);
        ASSERT(error >= 0);
        return error;
}

ssize_t                         /* bytes written, or (-) error */
xfs_write(
        struct xfs_inode        *xip,
        struct kiocb            *iocb,
        const struct iovec      *iovp,
        unsigned int            nsegs,
        loff_t                  *offset,
        int                     ioflags)
{
        struct file             *file = iocb->ki_filp;
        struct address_space    *mapping = file->f_mapping;
        struct inode            *inode = mapping->host;
        unsigned long           segs = nsegs;
        xfs_mount_t             *mp;
        ssize_t                 ret = 0, error = 0;
        xfs_fsize_t             isize, new_size;
        int                     iolock;
        int                     eventsent = 0;
        size_t                  ocount = 0, count;
        loff_t                  pos;
        int                     need_i_mutex;

        XFS_STATS_INC(xs_write_calls);

        error = generic_segment_checks(iovp, &segs, &ocount, VERIFY_READ);
        if (error)
                return error;

        count = ocount;
        pos = *offset;

        if (count == 0)
                return 0;

        mp = xip->i_mount;

        xfs_wait_for_freeze(mp, SB_FREEZE_WRITE);

        if (XFS_FORCED_SHUTDOWN(mp))
                return -EIO;

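        /*
         * Locking summary for the code below: direct I/O starts with the
         * shared iolock and no i_mutex, while buffered writes take i_mutex
         * plus the exclusive iolock.  A direct write that finds cached
         * pages or extends the file upgrades to the exclusive locks, and
         * one that falls back to buffered I/O jumps back to 'relock' to
         * retake them.
         */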
relock:
        if (ioflags & IO_ISDIRECT) {
                iolock = XFS_IOLOCK_SHARED;
                need_i_mutex = 0;
        } else {
                iolock = XFS_IOLOCK_EXCL;
                need_i_mutex = 1;
                mutex_lock(&inode->i_mutex);
        }

        xfs_ilock(xip, XFS_ILOCK_EXCL|iolock);

start:
        error = -generic_write_checks(file, &pos, &count,
                                        S_ISBLK(inode->i_mode));
        if (error) {
                xfs_iunlock(xip, XFS_ILOCK_EXCL|iolock);
                goto out_unlock_mutex;
        }

        if ((DM_EVENT_ENABLED(xip, DM_EVENT_WRITE) &&
            !(ioflags & IO_INVIS) && !eventsent)) {
                int             dmflags = FILP_DELAY_FLAG(file);

                if (need_i_mutex)
                        dmflags |= DM_FLAGS_IMUX;

                xfs_iunlock(xip, XFS_ILOCK_EXCL);
                error = XFS_SEND_DATA(xip->i_mount, DM_EVENT_WRITE, xip,
                                      pos, count, dmflags, &iolock);
                if (error) {
                        goto out_unlock_internal;
                }
                xfs_ilock(xip, XFS_ILOCK_EXCL);
                eventsent = 1;

                /*
                 * The iolock was dropped and reacquired in XFS_SEND_DATA
                 * so we have to recheck the size when appending.
                 * We will only "goto start;" once, since having sent the
                 * event prevents another call to XFS_SEND_DATA, which is
                 * what allows the size to change in the first place.
                 */
                if ((file->f_flags & O_APPEND) && pos != xip->i_size)
                        goto start;
        }

        if (ioflags & IO_ISDIRECT) {
                xfs_buftarg_t   *target =
                        XFS_IS_REALTIME_INODE(xip) ?
                                mp->m_rtdev_targp : mp->m_ddev_targp;

                if ((pos & target->bt_smask) || (count & target->bt_smask)) {
                        xfs_iunlock(xip, XFS_ILOCK_EXCL|iolock);
                        return XFS_ERROR(-EINVAL);
                }

                if (!need_i_mutex && (mapping->nrpages || pos > xip->i_size)) {
                        xfs_iunlock(xip, XFS_ILOCK_EXCL|iolock);
                        iolock = XFS_IOLOCK_EXCL;
                        need_i_mutex = 1;
                        mutex_lock(&inode->i_mutex);
                        xfs_ilock(xip, XFS_ILOCK_EXCL|iolock);
                        goto start;
                }
        }

        new_size = pos + count;
        if (new_size > xip->i_size)
                xip->i_new_size = new_size;

        /*
         * We're not supposed to change timestamps in readonly-mounted
         * filesystems.  Throw it away if anyone asks us.
         */
        if (likely(!(ioflags & IO_INVIS) &&
                   !mnt_want_write(file->f_path.mnt))) {
                file_update_time(file);
                xfs_ichgtime_fast(xip, inode,
                                  XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
                mnt_drop_write(file->f_path.mnt);
        }

        /*
         * If the offset is beyond the size of the file, we have a couple
         * of things to do. First, if there is already space allocated
         * we need to either create holes or zero the disk or ...
         *
         * If there is a page where the previous size lands, we need
         * to zero it out up to the new size.
         */

        if (pos > xip->i_size) {
                error = xfs_zero_eof(xip, pos, xip->i_size);
                if (error) {
                        xfs_iunlock(xip, XFS_ILOCK_EXCL);
                        goto out_unlock_internal;
                }
        }
        xfs_iunlock(xip, XFS_ILOCK_EXCL);

        /*
         * If we're writing the file then make sure to clear the
         * setuid and setgid bits if the process is not being run
         * by root.  This keeps people from modifying setuid and
         * setgid binaries.
         */

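        /*
         * Note that the S_ISGID test below also requires S_IXGRP: a mode
         * with S_ISGID but no group execute bit denotes mandatory locking
         * rather than a setgid executable, and that flag should survive
         * the write.
         */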
        if (((xip->i_d.di_mode & S_ISUID) ||
            ((xip->i_d.di_mode & (S_ISGID | S_IXGRP)) ==
                (S_ISGID | S_IXGRP))) &&
             !capable(CAP_FSETID)) {
                error = xfs_write_clear_setuid(xip);
                if (likely(!error))
                        error = -remove_suid(file->f_path.dentry);
                if (unlikely(error)) {
                        goto out_unlock_internal;
                }
        }

retry:
        /* We can write back this queue in page reclaim */
        current->backing_dev_info = mapping->backing_dev_info;

        if ((ioflags & IO_ISDIRECT)) {
                if (mapping->nrpages) {
                        WARN_ON(need_i_mutex == 0);
                        xfs_inval_cached_trace(xip, pos, -1,
                                        (pos & PAGE_CACHE_MASK), -1);
                        error = xfs_flushinval_pages(xip,
                                        (pos & PAGE_CACHE_MASK),
                                        -1, FI_REMAPF_LOCKED);
                        if (error)
                                goto out_unlock_internal;
                }

                if (need_i_mutex) {
                        /* demote the lock now the cached pages are gone */
                        xfs_ilock_demote(xip, XFS_IOLOCK_EXCL);
                        mutex_unlock(&inode->i_mutex);

                        iolock = XFS_IOLOCK_SHARED;
                        need_i_mutex = 0;
                }

                xfs_rw_enter_trace(XFS_DIOWR_ENTER, xip, (void *)iovp, segs,
                                *offset, ioflags);
                ret = generic_file_direct_write(iocb, iovp,
                                &segs, pos, offset, count, ocount);

                /*
                 * direct-io write to a hole: fall through to buffered I/O
                 * for completing the rest of the request.
                 */
                if (ret >= 0 && ret != count) {
                        XFS_STATS_ADD(xs_write_bytes, ret);

                        pos += ret;
                        count -= ret;

                        ioflags &= ~IO_ISDIRECT;
                        xfs_iunlock(xip, iolock);
                        goto relock;
                }
        } else {
                xfs_rw_enter_trace(XFS_WRITE_ENTER, xip, (void *)iovp, segs,
                                *offset, ioflags);
                ret = generic_file_buffered_write(iocb, iovp, segs,
                                pos, offset, count, ret);
        }

        current->backing_dev_info = NULL;

        if (ret == -EIOCBQUEUED && !(ioflags & IO_ISAIO))
                ret = wait_on_sync_kiocb(iocb);

        if (ret == -ENOSPC &&
            DM_EVENT_ENABLED(xip, DM_EVENT_NOSPACE) && !(ioflags & IO_INVIS)) {
                xfs_iunlock(xip, iolock);
                if (need_i_mutex)
                        mutex_unlock(&inode->i_mutex);
                error = XFS_SEND_NAMESP(xip->i_mount, DM_EVENT_NOSPACE, xip,
                                DM_RIGHT_NULL, xip, DM_RIGHT_NULL, NULL, NULL,
                                0, 0, 0); /* Delay flag intentionally unused */
                if (need_i_mutex)
                        mutex_lock(&inode->i_mutex);
                xfs_ilock(xip, iolock);
                if (error)
                        goto out_unlock_internal;
                pos = xip->i_size;
                ret = 0;
                goto retry;
        }

        isize = i_size_read(inode);
        if (unlikely(ret < 0 && ret != -EFAULT && *offset > isize))
                *offset = isize;

        if (*offset > xip->i_size) {
                xfs_ilock(xip, XFS_ILOCK_EXCL);
                if (*offset > xip->i_size)
                        xip->i_size = *offset;
                xfs_iunlock(xip, XFS_ILOCK_EXCL);
        }

        error = -ret;
        if (ret <= 0)
                goto out_unlock_internal;

        XFS_STATS_ADD(xs_write_bytes, ret);

        /* Handle various SYNC-type writes */
        if ((file->f_flags & O_SYNC) || IS_SYNC(inode)) {
                int error2;

                xfs_iunlock(xip, iolock);
                if (need_i_mutex)
                        mutex_unlock(&inode->i_mutex);
                error2 = sync_page_range(inode, mapping, pos, ret);
                if (!error)
                        error = error2;
                if (need_i_mutex)
                        mutex_lock(&inode->i_mutex);
                xfs_ilock(xip, iolock);
                error2 = xfs_write_sync_logforce(mp, xip);
                if (!error)
                        error = error2;
        }

 out_unlock_internal:
        if (xip->i_new_size) {
                xfs_ilock(xip, XFS_ILOCK_EXCL);
                xip->i_new_size = 0;
                /*
                 * If this was a direct or synchronous I/O that failed (such
                 * as ENOSPC) then part of the I/O may have been written to
                 * disk before the error occurred.  In this case the on-disk
                 * file size may have been adjusted beyond the in-memory file
                 * size and now needs to be truncated back.
                 */
                if (xip->i_d.di_size > xip->i_size)
                        xip->i_d.di_size = xip->i_size;
                xfs_iunlock(xip, XFS_ILOCK_EXCL);
        }
        xfs_iunlock(xip, iolock);
 out_unlock_mutex:
        if (need_i_mutex)
                mutex_unlock(&inode->i_mutex);
        return -error;
}

/*
 * All xfs metadata buffers except log state machine buffers
 * get this attached as their b_bdstrat callback function.
 * This is so that we can catch a buffer
 * after prematurely unpinning it to forcibly shut down the filesystem.
 */
int
xfs_bdstrat_cb(struct xfs_buf *bp)
{
        xfs_mount_t     *mp;

        mp = XFS_BUF_FSPRIVATE3(bp, xfs_mount_t *);
        if (!XFS_FORCED_SHUTDOWN(mp)) {
                xfs_buf_iorequest(bp);
                return 0;
        } else {
                xfs_buftrace("XFS__BDSTRAT IOERROR", bp);
                /*
                 * Metadata write that didn't get logged but
                 * written delayed anyway. These aren't associated
                 * with a transaction, and can be ignored.
                 */
                if (XFS_BUF_IODONE_FUNC(bp) == NULL &&
                    (XFS_BUF_ISREAD(bp)) == 0)
                        return (xfs_bioerror_relse(bp));
                else
                        return (xfs_bioerror(bp));
        }
}

/*
 * Wrapper around bdstrat so that we can stop data from going to disk in case
 * we are shutting down the filesystem.  Typically user data goes through this
 * path; one of the exceptions is the superblock.
 */
void
xfsbdstrat(
        struct xfs_mount        *mp,
        struct xfs_buf          *bp)
{
        ASSERT(mp);
        if (!XFS_FORCED_SHUTDOWN(mp)) {
                xfs_buf_iorequest(bp);
                return;
        }

        xfs_buftrace("XFSBDSTRAT IOERROR", bp);
        xfs_bioerror_relse(bp);
}

/*
 * If the underlying (data/log/rt) device is readonly, there are some
 * operations that cannot proceed.
 */
int
xfs_dev_is_read_only(
        xfs_mount_t             *mp,
        char                    *message)
{
        if (xfs_readonly_buftarg(mp->m_ddev_targp) ||
            xfs_readonly_buftarg(mp->m_logdev_targp) ||
            (mp->m_rtdev_targp && xfs_readonly_buftarg(mp->m_rtdev_targp))) {
                cmn_err(CE_NOTE,
                        "XFS: %s required on read-only device.", message);
                cmn_err(CE_NOTE,
                        "XFS: write access unavailable, cannot proceed.");
                return EROFS;
        }
        return 0;
}