Merge branch 'for-linus' of git://git.kernel.dk/linux-block
author     Linus Torvalds <torvalds@linux-foundation.org>  Tue, 3 Jul 2012 22:45:10 +0000 (15:45 -0700)
committer  Linus Torvalds <torvalds@linux-foundation.org>  Tue, 3 Jul 2012 22:45:10 +0000 (15:45 -0700)
Pull block bits from Jens Axboe:
 "As vacation is coming up, thought I'd better get rid of my pending
  changes in my for-linus branch for this iteration.  It contains:

   - Two patches for mtip32xx.  Killing a non-compliant sysfs interface
     and moving it to debugfs, where it belongs.

   - A few patches from Asias.  Two legit bug fixes, and one killing an
     interface that is no longer in use.

   - A patch from Jan, making the annoying partition ioctl warning a bit
     less annoying, by restricting it to !CAP_SYS_RAWIO only (a sketch of
     that check follows the shortlog below).

   - Three bug fixes for drbd from Lars Ellenberg.

   - A fix for an old regression for umem, it hasn't really worked since
     the plugging scheme was changed in 3.0.

   - A few fixes from Tejun.

   - A splice fix from Eric Dumazet, fixing an issue with pipe
     resizing."
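
The splice fix in the last item above shows up at every call site in the
combined diff: struct splice_pipe_desc gains an nr_pages_max field and
splice_shrink_spd() drops its pipe argument, so the shrink path frees exactly
what was allocated instead of re-reading pipe->buffers, which another task may
have resized with F_SETPIPE_SZ in the meantime. The fs/splice.c side of the
change is not part of this excerpt; the helpers below are a simplified sketch
of its shape (the splice_grow_spd() counterpart and the ACCESS_ONCE() sampling
are inferred from the call sites, not quoted from this merge):

  int splice_grow_spd(const struct pipe_inode_info *pipe,
                      struct splice_pipe_desc *spd)
  {
          /* Sample pipe->buffers once; remember it for the shrink side. */
          unsigned int buffers = ACCESS_ONCE(pipe->buffers);

          spd->nr_pages_max = buffers;
          if (buffers <= PIPE_DEF_BUFFERS)
                  return 0;        /* on-stack arrays are big enough */

          spd->pages = kmalloc(buffers * sizeof(struct page *), GFP_KERNEL);
          spd->partial = kmalloc(buffers * sizeof(struct partial_page),
                                 GFP_KERNEL);
          if (spd->pages && spd->partial)
                  return 0;

          kfree(spd->pages);
          kfree(spd->partial);
          return -ENOMEM;
  }

  void splice_shrink_spd(struct splice_pipe_desc *spd)
  {
          /* Use the value sampled at grow time, not pipe->buffers,
           * which may have been changed concurrently since. */
          if (spd->nr_pages_max <= PIPE_DEF_BUFFERS)
                  return;
          kfree(spd->pages);
          kfree(spd->partial);
  }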

* 'for-linus' of git://git.kernel.dk/linux-block:
  scsi: Silence unnecessary warnings about ioctl to partition
  block: Drop dead function blk_abort_queue()
  block: Mitigate lock unbalance caused by lock switching
  block: Avoid missed wakeup in request waitqueue
  umem: fix up unplugging
  splice: fix racy pipe->buffers uses
  drbd: fix null pointer dereference with on-congestion policy when diskless
  drbd: fix list corruption by failing but already aborted reads
  drbd: fix access of unallocated pages and kernel panic
  xen/blkfront: Add WARN to deal with misbehaving backends.
  blkcg: drop local variable @q from blkg_destroy()
  mtip32xx: Create debugfs entries for troubleshooting
  mtip32xx: Remove 'registers' and 'flags' from sysfs
  blkcg: fix blkg_alloc() failure path
  block: blkcg_policy_cfq shouldn't be used if !CONFIG_CFQ_GROUP_IOSCHED
  block: fix return value on cfq_init() failure
  mtip32xx: Remove version.h header file inclusion
  xen/blkback: Copy id field when doing BLKIF_DISCARD.
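
For the first entry of the shortlog above (Jan's partition-ioctl patch), the
effect is that the ratelimited "sending ioctl ... to a partition" warning is
only printed when the restriction actually bites, i.e. when the caller lacks
CAP_SYS_RAWIO. That patch touches block/scsi_ioctl.c, which is not part of the
combined diff below; the following is a rough sketch of the resulting check,
assuming the pre-existing scsi_verify_blk_ioctl() helper and with its
whitelist of harmless commands elided:

  int scsi_verify_blk_ioctl(struct block_device *bd, unsigned int cmd)
  {
          if (bd && bd == bd->bd_contains)
                  return 0;        /* whole device: nothing to restrict */

          /* (upstream whitelists a handful of harmless commands here) */

          if (capable(CAP_SYS_RAWIO))
                  return 0;        /* privileged caller: allow, stay quiet */

          /* Only warn when the ioctl is actually being refused. */
          printk_ratelimited(KERN_WARNING
                             "%s: sending ioctl %x to a partition!\n",
                             current->comm, cmd);
          return -ENOIOCTLCMD;
  }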

kernel/trace/trace.c
mm/shmem.c
net/core/skbuff.c

diff --combined kernel/trace/trace.c
index 49249c28690dbb21b6914e2907b16af56e1094c9,288488082224fb60cadd649ef2eb52f84eae34f1..a7fa0702be1cd5b269ed6f15e9624f4b02968b11
@@@ -371,7 -371,7 +371,7 @@@ EXPORT_SYMBOL_GPL(tracing_on)
  void tracing_off(void)
  {
        if (global_trace.buffer)
 -              ring_buffer_record_on(global_trace.buffer);
 +              ring_buffer_record_off(global_trace.buffer);
        /*
         * This flag is only looked at when buffers haven't been
         * allocated yet. We don't really care about the race
@@@ -3609,6 -3609,7 +3609,7 @@@ static ssize_t tracing_splice_read_pipe
                .pages          = pages_def,
                .partial        = partial_def,
                .nr_pages       = 0, /* This gets updated below. */
+               .nr_pages_max   = PIPE_DEF_BUFFERS,
                .flags          = flags,
                .ops            = &tracing_pipe_buf_ops,
                .spd_release    = tracing_spd_release_pipe,
  
        ret = splice_to_pipe(pipe, &spd);
  out:
-       splice_shrink_spd(pipe, &spd);
+       splice_shrink_spd(&spd);
        return ret;
  
  out_err:
@@@ -4231,6 -4232,7 +4232,7 @@@ tracing_buffers_splice_read(struct fil
        struct splice_pipe_desc spd = {
                .pages          = pages_def,
                .partial        = partial_def,
+               .nr_pages_max   = PIPE_DEF_BUFFERS,
                .flags          = flags,
                .ops            = &buffer_pipe_buf_ops,
                .spd_release    = buffer_spd_release,
        }
  
        ret = splice_to_pipe(pipe, &spd);
-       splice_shrink_spd(pipe, &spd);
+       splice_shrink_spd(&spd);
  out:
        return ret;
  }
diff --combined mm/shmem.c
index a15a466d0d1d14b21bd82de2bfd3df31654420db,c244e93a70fa7824248d97c267bf458e820d440b..4ce02e0673db612befb63def38911d2d2b0638af
@@@ -683,21 -683,10 +683,21 @@@ static int shmem_unuse_inode(struct shm
                mutex_lock(&shmem_swaplist_mutex);
                /*
                 * We needed to drop mutex to make that restrictive page
 -               * allocation; but the inode might already be freed by now,
 -               * and we cannot refer to inode or mapping or info to check.
 -               * However, we do hold page lock on the PageSwapCache page,
 -               * so can check if that still has our reference remaining.
 +               * allocation, but the inode might have been freed while we
 +               * dropped it: although a racing shmem_evict_inode() cannot
 +               * complete without emptying the radix_tree, our page lock
 +               * on this swapcache page is not enough to prevent that -
 +               * free_swap_and_cache() of our swap entry will only
 +               * trylock_page(), removing swap from radix_tree whatever.
 +               *
 +               * We must not proceed to shmem_add_to_page_cache() if the
 +               * inode has been freed, but of course we cannot rely on
 +               * inode or mapping or info to check that.  However, we can
 +               * safely check if our swap entry is still in use (and here
 +               * it can't have got reused for another page): if it's still
 +               * in use, then the inode cannot have been freed yet, and we
 +               * can safely proceed (if it's no longer in use, that tells
 +               * nothing about the inode, but we don't need to unuse swap).
                 */
                if (!page_swapcount(*pagep))
                        error = -ENOENT;
@@@ -741,9 -730,9 +741,9 @@@ int shmem_unuse(swp_entry_t swap, struc
  
        /*
         * There's a faint possibility that swap page was replaced before
 -       * caller locked it: it will come back later with the right page.
 +       * caller locked it: caller will come back later with the right page.
         */
 -      if (unlikely(!PageSwapCache(page)))
 +      if (unlikely(!PageSwapCache(page) || page_private(page) != swap.val))
                goto out;
  
        /*
@@@ -1006,15 -995,21 +1006,15 @@@ static int shmem_replace_page(struct pa
        newpage = shmem_alloc_page(gfp, info, index);
        if (!newpage)
                return -ENOMEM;
 -      VM_BUG_ON(shmem_should_replace_page(newpage, gfp));
  
 -      *pagep = newpage;
        page_cache_get(newpage);
        copy_highpage(newpage, oldpage);
 +      flush_dcache_page(newpage);
  
 -      VM_BUG_ON(!PageLocked(oldpage));
        __set_page_locked(newpage);
 -      VM_BUG_ON(!PageUptodate(oldpage));
        SetPageUptodate(newpage);
 -      VM_BUG_ON(!PageSwapBacked(oldpage));
        SetPageSwapBacked(newpage);
 -      VM_BUG_ON(!swap_index);
        set_page_private(newpage, swap_index);
 -      VM_BUG_ON(!PageSwapCache(oldpage));
        SetPageSwapCache(newpage);
  
        /*
        spin_lock_irq(&swap_mapping->tree_lock);
        error = shmem_radix_tree_replace(swap_mapping, swap_index, oldpage,
                                                                   newpage);
 -      __inc_zone_page_state(newpage, NR_FILE_PAGES);
 -      __dec_zone_page_state(oldpage, NR_FILE_PAGES);
 +      if (!error) {
 +              __inc_zone_page_state(newpage, NR_FILE_PAGES);
 +              __dec_zone_page_state(oldpage, NR_FILE_PAGES);
 +      }
        spin_unlock_irq(&swap_mapping->tree_lock);
 -      BUG_ON(error);
  
 -      mem_cgroup_replace_page_cache(oldpage, newpage);
 -      lru_cache_add_anon(newpage);
 +      if (unlikely(error)) {
 +              /*
 +               * Is this possible?  I think not, now that our callers check
 +               * both PageSwapCache and page_private after getting page lock;
 +               * but be defensive.  Reverse old to newpage for clear and free.
 +               */
 +              oldpage = newpage;
 +      } else {
 +              mem_cgroup_replace_page_cache(oldpage, newpage);
 +              lru_cache_add_anon(newpage);
 +              *pagep = newpage;
 +      }
  
        ClearPageSwapCache(oldpage);
        set_page_private(oldpage, 0);
        unlock_page(oldpage);
        page_cache_release(oldpage);
        page_cache_release(oldpage);
 -      return 0;
 +      return error;
  }
  
  /*
@@@ -1123,8 -1107,7 +1123,8 @@@ repeat
  
                /* We have to do this with page locked to prevent races */
                lock_page(page);
 -              if (!PageSwapCache(page) || page->mapping) {
 +              if (!PageSwapCache(page) || page_private(page) != swap.val ||
 +                  page->mapping) {
                        error = -EEXIST;        /* try again */
                        goto failed;
                }
@@@ -1594,6 -1577,7 +1594,7 @@@ static ssize_t shmem_file_splice_read(s
        struct splice_pipe_desc spd = {
                .pages = pages,
                .partial = partial,
+               .nr_pages_max = PIPE_DEF_BUFFERS,
                .flags = flags,
                .ops = &page_cache_pipe_buf_ops,
                .spd_release = spd_release_page,
        if (spd.nr_pages)
                error = splice_to_pipe(pipe, &spd);
  
-       splice_shrink_spd(pipe, &spd);
+       splice_shrink_spd(&spd);
  
        if (error > 0) {
                *ppos += error;
diff --combined net/core/skbuff.c
index d78671e9d545be838f9ab5140c9ee07fb1309c26,bac3c5756d63989c2f578ffaeccfee76310be789..46a3d23d259e51c17140798554874e929145d6bb
@@@ -1755,6 -1755,7 +1755,7 @@@ int skb_splice_bits(struct sk_buff *skb
        struct splice_pipe_desc spd = {
                .pages = pages,
                .partial = partial,
+               .nr_pages_max = MAX_SKB_FRAGS,
                .flags = flags,
                .ops = &sock_pipe_buf_ops,
                .spd_release = sock_spd_release,
@@@ -3361,7 -3362,7 +3362,7 @@@ EXPORT_SYMBOL(kfree_skb_partial)
   * @to: prior buffer
   * @from: buffer to add
   * @fragstolen: pointer to boolean
 - *
 + * @delta_truesize: how much more was allocated than was requested
   */
  bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
                      bool *fragstolen, int *delta_truesize)