Merge tag 'usb-3.11-rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/usb
author Linus Torvalds <torvalds@linux-foundation.org>
Fri, 16 Aug 2013 16:57:38 +0000 (09:57 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Fri, 16 Aug 2013 16:57:38 +0000 (09:57 -0700)
Pull USB fixes from Greg KH:
 "Here are some small USB fixes for 3.11-rc6 that have accumulated.

  Nothing huge, a EHCI fix that solves a much-reported audio USB
  problem, some usb-serial driver endian fixes and other minor fixes, a
  wireless USB oops fix, and two new quirks"

* tag 'usb-3.11-rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/usb:
  USB: keyspan: fix null-deref at disconnect and release
  USB: mos7720: fix broken control requests
  usb: add two quirky touchscreen
  USB: ti_usb_3410_5052: fix big-endian firmware handling
  USB: adutux: fix big-endian device-type reporting
  USB: usbtmc: fix big-endian probe of Rigol devices
  USB: mos7840: fix big-endian probe
  USB-Serial: Fix error handling of usb_wwan
  wusbcore: fix kernel panic when disconnecting a wireless USB->serial device
  USB: EHCI: accept very late isochronous URBs
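
A number of these fixes (usbtmc, mos7840, ti_usb_3410_5052, adutux) share one
root cause: multi-byte fields in USB descriptors arrive little-endian on the
wire, so comparing them against host-order constants only happens to work on
little-endian CPUs. A minimal sketch of the corrected pattern (the
vendor/product values here are hypothetical, not taken from any of these
drivers):

    #include <linux/usb.h>

    #define SKETCH_VENDOR_ID  0x1234    /* hypothetical */
    #define SKETCH_PRODUCT_ID 0x5678    /* hypothetical */

    static int sketch_probe(struct usb_interface *intf,
                            const struct usb_device_id *id)
    {
            struct usb_device *udev = interface_to_usbdev(intf);

            /* Descriptor fields are __le16: convert before comparing,
             * or the match silently fails on big-endian hosts. */
            if (le16_to_cpu(udev->descriptor.idVendor) == SKETCH_VENDOR_ID &&
                le16_to_cpu(udev->descriptor.idProduct) == SKETCH_PRODUCT_ID)
                    return 0;

            return -ENODEV;
    }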

144 files changed:
MAINTAINERS
arch/Kconfig
arch/arm/include/asm/tlb.h
arch/arm/kernel/perf_event.c
arch/arm64/include/asm/tlb.h
arch/hexagon/Kconfig
arch/ia64/include/asm/tlb.h
arch/microblaze/Kconfig
arch/openrisc/Kconfig
arch/s390/include/asm/tlb.h
arch/score/Kconfig
arch/sh/include/asm/tlb.h
arch/um/include/asm/tlb.h
arch/x86/include/asm/pgtable-2level.h
arch/x86/include/asm/pgtable-3level.h
arch/x86/include/asm/pgtable.h
arch/x86/include/asm/pgtable_types.h
arch/x86/include/asm/spinlock.h
arch/x86/kernel/cpu/perf_event_intel.c
arch/x86/kernel/cpu/perf_event_intel_uncore.c
arch/x86/kernel/sys_x86_64.c
arch/x86/mm/mmap.c
drivers/block/aoe/aoecmd.c
drivers/net/bonding/bond_main.c
drivers/net/can/usb/peak_usb/pcan_usb.c
drivers/net/ethernet/arc/emac_main.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
drivers/net/ethernet/broadcom/tg3.c
drivers/net/ethernet/chelsio/cxgb3/sge.c
drivers/net/ethernet/emulex/benet/be_cmds.c
drivers/net/ethernet/emulex/benet/be_cmds.h
drivers/net/ethernet/marvell/skge.c
drivers/net/ethernet/mellanox/mlx5/core/cmd.c
drivers/net/ethernet/mellanox/mlx5/core/eq.c
drivers/net/ethernet/mellanox/mlx5/core/fw.c
drivers/net/ethernet/mellanox/mlx5/core/health.c
drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
drivers/net/ethernet/realtek/8139cp.c
drivers/net/ethernet/stmicro/stmmac/ring_mode.c
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ethernet/via/via-velocity.c
drivers/net/macvlan.c
drivers/net/macvtap.c
drivers/net/tun.c
drivers/net/vxlan.c
drivers/net/wireless/cw1200/sta.c
drivers/net/wireless/iwlegacy/4965-mac.c
drivers/net/wireless/iwlegacy/common.c
drivers/rtc/rtc-stmp3xxx.c
fs/cifs/cifsencrypt.c
fs/cifs/cifsfs.c
fs/cifs/cifsglob.h
fs/cifs/cifsproto.h
fs/cifs/connect.c
fs/cifs/file.c
fs/cifs/link.c
fs/cifs/readdir.c
fs/cifs/sess.c
fs/cifs/smb1ops.c
fs/cifs/smb2transport.c
fs/exec.c
fs/ext4/ioctl.c
fs/ext4/super.c
fs/hugetlbfs/inode.c
fs/ocfs2/aops.c
fs/ocfs2/dir.c
fs/ocfs2/file.c
fs/ocfs2/journal.h
fs/ocfs2/move_extents.c
fs/ocfs2/refcounttree.c
fs/ocfs2/refcounttree.h
fs/proc/task_mmu.c
include/asm-generic/pgtable.h
include/asm-generic/tlb.h
include/linux/mlx5/device.h
include/linux/mlx5/driver.h
include/linux/sched.h
include/linux/spinlock.h
include/linux/swapops.h
include/linux/syscalls.h
include/net/busy_poll.h
include/net/ip_tunnels.h
include/net/sch_generic.h
include/uapi/linux/pkt_sched.h
include/uapi/linux/snmp.h
kernel/fork.c
kernel/mutex.c
kernel/sched/core.c
kernel/sched/cpupri.c
kernel/sched/fair.c
mm/fremap.c
mm/hugetlb.c
mm/memcontrol.c
mm/memory.c
mm/mmap.c
mm/rmap.c
mm/swapfile.c
net/8021q/vlan_core.c
net/batman-adv/bridge_loop_avoidance.c
net/batman-adv/gateway_client.c
net/batman-adv/gateway_client.h
net/batman-adv/soft-interface.c
net/batman-adv/unicast.c
net/bridge/br_multicast.c
net/bridge/br_sysfs_br.c
net/core/flow_dissector.c
net/core/neighbour.c
net/core/rtnetlink.c
net/ipv4/esp4.c
net/ipv4/fib_trie.c
net/ipv4/ip_gre.c
net/ipv4/ip_tunnel_core.c
net/ipv4/proc.c
net/ipv4/tcp_cubic.c
net/ipv6/esp6.c
net/ipv6/ip6_fib.c
net/mac80211/mlme.c
net/netfilter/nf_conntrack_proto_tcp.c
net/netfilter/nfnetlink_log.c
net/netfilter/nfnetlink_queue_core.c
net/netfilter/xt_TCPMSS.c
net/netfilter/xt_TCPOPTSTRIP.c
net/netlink/genetlink.c
net/openvswitch/actions.c
net/openvswitch/datapath.c
net/openvswitch/flow.c
net/sched/sch_api.c
net/sched/sch_generic.c
net/sched/sch_htb.c
net/sctp/associola.c
net/sctp/transport.c
net/tipc/bearer.c
net/vmw_vsock/af_vsock.c
net/wireless/core.c
net/wireless/nl80211.c

diff --git a/MAINTAINERS b/MAINTAINERS
index 7cacc88dc79c56a22c52cf94e7065d66de64646c..b8045c7f78c9360ea9552a7282c503e5fff7dc29 100644
@@ -5581,9 +5581,9 @@ S:        Maintained
 F:     drivers/media/tuners/mxl5007t.*
 
 MYRICOM MYRI-10G 10GbE DRIVER (MYRI10GE)
-M:     Andrew Gallatin <gallatin@myri.com>
+M:     Hyong-Youb Kim <hykim@myri.com>
 L:     netdev@vger.kernel.org
-W:     http://www.myri.com/scs/download-Myri10GE.html
+W:     https://www.myricom.com/support/downloads/myri10ge.html
 S:     Supported
 F:     drivers/net/ethernet/myricom/myri10ge/
 
diff --git a/arch/Kconfig b/arch/Kconfig
index 8d2ae24b9f4a815e253340c75c633be0a18fae9d..1feb169274fe613cc5f0360c797668bf84497c4b 100644
@@ -407,6 +407,12 @@ config CLONE_BACKWARDS2
        help
          Architecture has the first two arguments of clone(2) swapped.
 
+config CLONE_BACKWARDS3
+       bool
+       help
+         Architecture has tls passed as the 3rd argument of clone(2),
+         not the 5th one.
+
 config ODD_RT_SIGACTION
        bool
        help
diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h
index 46e7cfb3e7219d2ac4db9013d5dc673c155167ed..0baf7f0d939484264b089c772112657cb9f15c75 100644
@@ -43,6 +43,7 @@ struct mmu_gather {
        struct mm_struct        *mm;
        unsigned int            fullmm;
        struct vm_area_struct   *vma;
+       unsigned long           start, end;
        unsigned long           range_start;
        unsigned long           range_end;
        unsigned int            nr;
@@ -107,10 +108,12 @@ static inline void tlb_flush_mmu(struct mmu_gather *tlb)
 }
 
 static inline void
-tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int fullmm)
+tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
 {
        tlb->mm = mm;
-       tlb->fullmm = fullmm;
+       tlb->fullmm = !(start | (end+1));
+       tlb->start = start;
+       tlb->end = end;
        tlb->vma = NULL;
        tlb->max = ARRAY_SIZE(tlb->local);
        tlb->pages = tlb->local;
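
Each tlb_gather_mmu() conversion in this series derives the fullmm flag from
the range itself: callers request a full address-space flush by passing
start = 0 and end = -1, and only that pair makes start | (end + 1) collapse
to zero. A stand-alone check of the arithmetic:

    #include <assert.h>

    static int is_fullmm(unsigned long start, unsigned long end)
    {
            /* full-mm flush convention: start == 0, end == ~0UL */
            return !(start | (end + 1));
    }

    int main(void)
    {
            assert(is_fullmm(0, ~0UL));        /* 0 | (~0UL + 1) == 0 */
            assert(!is_fullmm(0, 0x2000));     /* bounded range */
            assert(!is_fullmm(0x1000, ~0UL));  /* bounded start */
            return 0;
    }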
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index d9f5cd4e533fef948f68510e3a6fb3b4996b750b..21f77906602c2e5b0ae9911fb2df65e71430eb9e 100644
@@ -53,7 +53,12 @@ armpmu_map_cache_event(const unsigned (*cache_map)
 static int
 armpmu_map_hw_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
 {
-       int mapping = (*event_map)[config];
+       int mapping;
+
+       if (config >= PERF_COUNT_HW_MAX)
+               return -ENOENT;
+
+       mapping = (*event_map)[config];
        return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
 }
 
diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h
index 46b3beb4b773c85cd381f72301b8f2e9dfdc7a97..717031a762c27966aabc7786f6d2a900034b0b08 100644
@@ -35,6 +35,7 @@ struct mmu_gather {
        struct mm_struct        *mm;
        unsigned int            fullmm;
        struct vm_area_struct   *vma;
+       unsigned long           start, end;
        unsigned long           range_start;
        unsigned long           range_end;
        unsigned int            nr;
@@ -97,10 +98,12 @@ static inline void tlb_flush_mmu(struct mmu_gather *tlb)
 }
 
 static inline void
-tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int fullmm)
+tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
 {
        tlb->mm = mm;
-       tlb->fullmm = fullmm;
+       tlb->fullmm = !(start | (end+1));
+       tlb->start = start;
+       tlb->end = end;
        tlb->vma = NULL;
        tlb->max = ARRAY_SIZE(tlb->local);
        tlb->pages = tlb->local;
diff --git a/arch/hexagon/Kconfig b/arch/hexagon/Kconfig
index 33a97929d055ca9d1131ffce9c517b3a4597bb38..77d442ab28c8625860c1787df12ac9dd74d7f004 100644
@@ -158,6 +158,7 @@ source "kernel/Kconfig.hz"
 endmenu
 
 source "init/Kconfig"
+source "kernel/Kconfig.freezer"
 source "drivers/Kconfig"
 source "fs/Kconfig"
 
diff --git a/arch/ia64/include/asm/tlb.h b/arch/ia64/include/asm/tlb.h
index ef3a9de01954511a352fa5b24285136789425e21..bc5efc7c3f3f8ead3608780ba5e2f5b9e212e20c 100644
@@ -22,7 +22,7 @@
  * unmapping a portion of the virtual address space, these hooks are called according to
  * the following template:
  *
- *     tlb <- tlb_gather_mmu(mm, full_mm_flush);       // start unmap for address space MM
+ *     tlb <- tlb_gather_mmu(mm, start, end);          // start unmap for address space MM
  *     {
  *       for each vma that needs a shootdown do {
  *         tlb_start_vma(tlb, vma);
@@ -58,6 +58,7 @@ struct mmu_gather {
        unsigned int            max;
        unsigned char           fullmm;         /* non-zero means full mm flush */
        unsigned char           need_flush;     /* really unmapped some PTEs? */
+       unsigned long           start, end;
        unsigned long           start_addr;
        unsigned long           end_addr;
        struct page             **pages;
@@ -155,13 +156,15 @@ static inline void __tlb_alloc_page(struct mmu_gather *tlb)
 
 
 static inline void
-tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int full_mm_flush)
+tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
 {
        tlb->mm = mm;
        tlb->max = ARRAY_SIZE(tlb->local);
        tlb->pages = tlb->local;
        tlb->nr = 0;
-       tlb->fullmm = full_mm_flush;
+       tlb->fullmm = !(start | (end+1));
+       tlb->start = start;
+       tlb->end = end;
        tlb->start_addr = ~0UL;
 }
 
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig
index d22a4ecffff422f542e96f81527b45a4e3f9c6c2..4fab52294d9874b5b07558682ec5160a470c1092 100644
@@ -28,7 +28,7 @@ config MICROBLAZE
        select GENERIC_CLOCKEVENTS
        select GENERIC_IDLE_POLL_SETUP
        select MODULES_USE_ELF_RELA
-       select CLONE_BACKWARDS
+       select CLONE_BACKWARDS3
 
 config SWAP
        def_bool n
diff --git a/arch/openrisc/Kconfig b/arch/openrisc/Kconfig
index 99dbab1c59ac2ef78d4f4566da9c47b3e3a6350f..d60bf98fa5cf399430d5beaaed9e3cd9c7188e67 100644
@@ -55,6 +55,7 @@ config GENERIC_CSUM
 
 source "init/Kconfig"
 
+source "kernel/Kconfig.freezer"
 
 menu "Processor type and features"
 
diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h
index b75d7d686684975278e1881a9aaf74590a6949ed..23a64d25f2b1fc441689ea089d0a13cad3e4dde9 100644
@@ -32,6 +32,7 @@ struct mmu_gather {
        struct mm_struct *mm;
        struct mmu_table_batch *batch;
        unsigned int fullmm;
+       unsigned long start, end;
 };
 
 struct mmu_table_batch {
@@ -48,10 +49,13 @@ extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
 
 static inline void tlb_gather_mmu(struct mmu_gather *tlb,
                                  struct mm_struct *mm,
-                                 unsigned int full_mm_flush)
+                                 unsigned long start,
+                                 unsigned long end)
 {
        tlb->mm = mm;
-       tlb->fullmm = full_mm_flush;
+       tlb->start = start;
+       tlb->end = end;
+       tlb->fullmm = !(start | (end+1));
        tlb->batch = NULL;
        if (tlb->fullmm)
                __tlb_flush_mm(mm);
diff --git a/arch/score/Kconfig b/arch/score/Kconfig
index c8def8bc90209578a5a6d5cfaf8f9f258c4ec7fb..5fc237581caf3a6f3b40c41246f4d39d28f8ee55 100644
@@ -87,6 +87,8 @@ config STACKTRACE_SUPPORT
 
 source "init/Kconfig"
 
+source "kernel/Kconfig.freezer"
+
 config MMU
        def_bool y
 
diff --git a/arch/sh/include/asm/tlb.h b/arch/sh/include/asm/tlb.h
index e61d43d9f689c0ef61bc0e1e5e43b3826b66a760..362192ed12fef1789d2c23d21c654f19632006a0 100644
@@ -36,10 +36,12 @@ static inline void init_tlb_gather(struct mmu_gather *tlb)
 }
 
 static inline void
-tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int full_mm_flush)
+tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
 {
        tlb->mm = mm;
-       tlb->fullmm = full_mm_flush;
+       tlb->start = start;
+       tlb->end = end;
+       tlb->fullmm = !(start | (end+1));
 
        init_tlb_gather(tlb);
 }
diff --git a/arch/um/include/asm/tlb.h b/arch/um/include/asm/tlb.h
index 4febacd1a8a1a3dee2c24e3b3447081e7ca94ebb..29b0301c18aab26f2a613d397da9856a8d1eec3b 100644
@@ -45,10 +45,12 @@ static inline void init_tlb_gather(struct mmu_gather *tlb)
 }
 
 static inline void
-tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int full_mm_flush)
+tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
 {
        tlb->mm = mm;
-       tlb->fullmm = full_mm_flush;
+       tlb->start = start;
+       tlb->end = end;
+       tlb->fullmm = !(start | (end+1));
 
        init_tlb_gather(tlb);
 }
diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
index f2b489cf1602349092d26d6ea68109cca3589c4d..3bf2dd0cf61fec9615253fe3b70d3c3764a35e93 100644
@@ -55,9 +55,53 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
 #define native_pmdp_get_and_clear(xp) native_local_pmdp_get_and_clear(xp)
 #endif
 
+#ifdef CONFIG_MEM_SOFT_DIRTY
+
+/*
+ * Bits _PAGE_BIT_PRESENT, _PAGE_BIT_FILE, _PAGE_BIT_SOFT_DIRTY and
+ * _PAGE_BIT_PROTNONE are taken, split up the 28 bits of offset
+ * into this range.
+ */
+#define PTE_FILE_MAX_BITS      28
+#define PTE_FILE_SHIFT1                (_PAGE_BIT_PRESENT + 1)
+#define PTE_FILE_SHIFT2                (_PAGE_BIT_FILE + 1)
+#define PTE_FILE_SHIFT3                (_PAGE_BIT_PROTNONE + 1)
+#define PTE_FILE_SHIFT4                (_PAGE_BIT_SOFT_DIRTY + 1)
+#define PTE_FILE_BITS1         (PTE_FILE_SHIFT2 - PTE_FILE_SHIFT1 - 1)
+#define PTE_FILE_BITS2         (PTE_FILE_SHIFT3 - PTE_FILE_SHIFT2 - 1)
+#define PTE_FILE_BITS3         (PTE_FILE_SHIFT4 - PTE_FILE_SHIFT3 - 1)
+
+#define pte_to_pgoff(pte)                                              \
+       ((((pte).pte_low >> (PTE_FILE_SHIFT1))                          \
+         & ((1U << PTE_FILE_BITS1) - 1)))                              \
+       + ((((pte).pte_low >> (PTE_FILE_SHIFT2))                        \
+           & ((1U << PTE_FILE_BITS2) - 1))                             \
+          << (PTE_FILE_BITS1))                                         \
+       + ((((pte).pte_low >> (PTE_FILE_SHIFT3))                        \
+           & ((1U << PTE_FILE_BITS3) - 1))                             \
+          << (PTE_FILE_BITS1 + PTE_FILE_BITS2))                        \
+       + ((((pte).pte_low >> (PTE_FILE_SHIFT4)))                       \
+           << (PTE_FILE_BITS1 + PTE_FILE_BITS2 + PTE_FILE_BITS3))
+
+#define pgoff_to_pte(off)                                              \
+       ((pte_t) { .pte_low =                                           \
+        ((((off)) & ((1U << PTE_FILE_BITS1) - 1)) << PTE_FILE_SHIFT1)  \
+        + ((((off) >> PTE_FILE_BITS1)                                  \
+            & ((1U << PTE_FILE_BITS2) - 1))                            \
+           << PTE_FILE_SHIFT2)                                         \
+        + ((((off) >> (PTE_FILE_BITS1 + PTE_FILE_BITS2))               \
+            & ((1U << PTE_FILE_BITS3) - 1))                            \
+           << PTE_FILE_SHIFT3)                                         \
+        + ((((off) >>                                                  \
+             (PTE_FILE_BITS1 + PTE_FILE_BITS2 + PTE_FILE_BITS3)))      \
+           << PTE_FILE_SHIFT4)                                         \
+        + _PAGE_FILE })
+
+#else /* CONFIG_MEM_SOFT_DIRTY */
+
 /*
  * Bits _PAGE_BIT_PRESENT, _PAGE_BIT_FILE and _PAGE_BIT_PROTNONE are taken,
- * split up the 29 bits of offset into this range:
+ * split up the 29 bits of offset into this range.
  */
 #define PTE_FILE_MAX_BITS      29
 #define PTE_FILE_SHIFT1                (_PAGE_BIT_PRESENT + 1)
@@ -88,6 +132,8 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
            << PTE_FILE_SHIFT3)                                         \
         + _PAGE_FILE })
 
+#endif /* CONFIG_MEM_SOFT_DIRTY */
+
 /* Encode and de-code a swap entry */
 #if _PAGE_BIT_FILE < _PAGE_BIT_PROTNONE
 #define SWP_TYPE_BITS (_PAGE_BIT_FILE - _PAGE_BIT_PRESENT - 1)
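
The property these macros must preserve is that pte_to_pgoff() exactly
inverts pgoff_to_pte() for every offset below 1 << PTE_FILE_MAX_BITS while
leaving the reserved bits clear. A user-space round-trip sketch with
simplified constants (one reserved bit at 0 and two at 6..7, not the real
x86 layout) shows the split-field idea:

    #include <assert.h>
    #include <stdint.h>

    #define SHIFT1 1        /* field 1 starts after reserved bit 0  */
    #define BITS1  5        /* field 1 occupies bits 1..5           */
    #define SHIFT2 8        /* field 2 starts after reserved 6..7   */

    static uint32_t enc(uint32_t off)
    {
            return ((off & ((1u << BITS1) - 1)) << SHIFT1) |
                   ((off >> BITS1) << SHIFT2);
    }

    static uint32_t dec(uint32_t pte)
    {
            return ((pte >> SHIFT1) & ((1u << BITS1) - 1)) |
                   ((pte >> SHIFT2) << BITS1);
    }

    int main(void)
    {
            uint32_t off;

            for (off = 0; off < (1u << 20); off++) {
                    assert(dec(enc(off)) == off);    /* round-trips */
                    assert((enc(off) & 0xc1) == 0);  /* reserved stay clear */
            }
            return 0;
    }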
diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
index 4cc9f2b7cdc35a854f185b2ab91d49a9fcd1be4f..81bb91b49a88fa22b2c07fae620e76d674692419 100644
@@ -179,6 +179,9 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *pmdp)
 /*
  * Bits 0, 6 and 7 are taken in the low part of the pte,
  * put the 32 bits of offset into the high part.
+ *
+ * For soft-dirty tracking 11 bit is taken from
+ * the low part of pte as well.
  */
 #define pte_to_pgoff(pte) ((pte).pte_high)
 #define pgoff_to_pte(off)                                              \
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 7dc305a46058a230ad5eef2df9f5e54b66ac0017..1c00631164c29c1d2d676f8c384141931483363b 100644
@@ -314,6 +314,36 @@ static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
        return pmd_set_flags(pmd, _PAGE_SOFT_DIRTY);
 }
 
+static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
+{
+       return pte_set_flags(pte, _PAGE_SWP_SOFT_DIRTY);
+}
+
+static inline int pte_swp_soft_dirty(pte_t pte)
+{
+       return pte_flags(pte) & _PAGE_SWP_SOFT_DIRTY;
+}
+
+static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
+{
+       return pte_clear_flags(pte, _PAGE_SWP_SOFT_DIRTY);
+}
+
+static inline pte_t pte_file_clear_soft_dirty(pte_t pte)
+{
+       return pte_clear_flags(pte, _PAGE_SOFT_DIRTY);
+}
+
+static inline pte_t pte_file_mksoft_dirty(pte_t pte)
+{
+       return pte_set_flags(pte, _PAGE_SOFT_DIRTY);
+}
+
+static inline int pte_file_soft_dirty(pte_t pte)
+{
+       return pte_flags(pte) & _PAGE_SOFT_DIRTY;
+}
+
 /*
  * Mask out unsupported bits in a present pgprot.  Non-present pgprots
  * can use those bits for other purposes, so leave them be.
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
index c98ac63aae487580727cac5d9f194ef04d9913cb..f4843e031131d224280c4c0d845def9a094f3082 100644
  * they do not conflict with each other.
  */
 
+#define _PAGE_BIT_SOFT_DIRTY   _PAGE_BIT_HIDDEN
+
 #ifdef CONFIG_MEM_SOFT_DIRTY
-#define _PAGE_SOFT_DIRTY       (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
+#define _PAGE_SOFT_DIRTY       (_AT(pteval_t, 1) << _PAGE_BIT_SOFT_DIRTY)
 #else
 #define _PAGE_SOFT_DIRTY       (_AT(pteval_t, 0))
 #endif
 
+/*
+ * Tracking soft dirty bit when a page goes to a swap is tricky.
+ * We need a bit which can be stored in pte _and_ not conflict
+ * with swap entry format. On x86 bits 6 and 7 are *not* involved
+ * into swap entry computation, but bit 6 is used for nonlinear
+ * file mapping, so we borrow bit 7 for soft dirty tracking.
+ */
+#ifdef CONFIG_MEM_SOFT_DIRTY
+#define _PAGE_SWP_SOFT_DIRTY   _PAGE_PSE
+#else
+#define _PAGE_SWP_SOFT_DIRTY   (_AT(pteval_t, 0))
+#endif
+
 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
 #define _PAGE_NX       (_AT(pteval_t, 1) << _PAGE_BIT_NX)
 #else
diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
index 33692eaabab58619c7481cbfc7c6f01be929e5e9..e3ddd7db723f666a98c1baefce55282632fd711b 100644
@@ -233,8 +233,4 @@ static inline void arch_write_unlock(arch_rwlock_t *rw)
 #define arch_read_relax(lock)  cpu_relax()
 #define arch_write_relax(lock) cpu_relax()
 
-/* The {read|write|spin}_lock() on x86 are full memory barriers. */
-static inline void smp_mb__after_lock(void) { }
-#define ARCH_HAS_SMP_MB_AFTER_LOCK
-
 #endif /* _ASM_X86_SPINLOCK_H */
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index fbc9210b45bcbe62fd2c1b765a46701c2eb21212..a45d8d4ace1016b2ea7903569c6cacb43884df20 100644
@@ -2270,6 +2270,7 @@ __init int intel_pmu_init(void)
        case 70:
        case 71:
        case 63:
+       case 69:
                x86_pmu.late_ack = true;
                memcpy(hw_cache_event_ids, snb_hw_cache_event_ids, sizeof(hw_cache_event_ids));
                memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
index cad791dbde9540840185c3509d04c74f8859598b..1fb6c72717bd9e721a19e634852dc06d0af6f485 100644
@@ -314,8 +314,8 @@ static struct uncore_event_desc snbep_uncore_imc_events[] = {
 static struct uncore_event_desc snbep_uncore_qpi_events[] = {
        INTEL_UNCORE_EVENT_DESC(clockticks,       "event=0x14"),
        INTEL_UNCORE_EVENT_DESC(txl_flits_active, "event=0x00,umask=0x06"),
-       INTEL_UNCORE_EVENT_DESC(drs_data,         "event=0x02,umask=0x08"),
-       INTEL_UNCORE_EVENT_DESC(ncb_data,         "event=0x03,umask=0x04"),
+       INTEL_UNCORE_EVENT_DESC(drs_data,         "event=0x102,umask=0x08"),
+       INTEL_UNCORE_EVENT_DESC(ncb_data,         "event=0x103,umask=0x04"),
        { /* end: all zeroes */ },
 };
 
diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
index dbded5aedb818d511a46967ccb1d26d77233110f..48f8375e4c6b07edfbcefd819a8210f6e0839dfe 100644
@@ -101,7 +101,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
                                *begin = new_begin;
                }
        } else {
-               *begin = TASK_UNMAPPED_BASE;
+               *begin = mmap_legacy_base();
                *end = TASK_SIZE;
        }
 }
diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
index 62c29a5bfe26aeb67bf63fb545294b79765e445f..f63778cb2363981ad8f98d0e068e4d789c2136b0 100644
@@ -98,7 +98,7 @@ static unsigned long mmap_base(void)
  * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
  * does, but not when emulating X86_32
  */
-static unsigned long mmap_legacy_base(void)
+unsigned long mmap_legacy_base(void)
 {
        if (mmap_is_ia32())
                return TASK_UNMAPPED_BASE;
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index 99cb944a002dc4dfd5f2010a6f2de9d75c09ebc8..4d45dba7fb8f9f1c6e97d8fa4192ff4e63d6230d 100644
@@ -906,16 +906,10 @@ bio_pageinc(struct bio *bio)
        int i;
 
        bio_for_each_segment(bv, bio, i) {
-               page = bv->bv_page;
                /* Non-zero page count for non-head members of
-                * compound pages is no longer allowed by the kernel,
-                * but this has never been seen here.
+                * compound pages is no longer allowed by the kernel.
                 */
-               if (unlikely(PageCompound(page)))
-                       if (compound_trans_head(page) != page) {
-                               pr_crit("page tail used for block I/O\n");
-                               BUG();
-                       }
+               page = compound_trans_head(bv->bv_page);
                atomic_inc(&page->_count);
        }
 }
@@ -924,10 +918,13 @@ static void
 bio_pagedec(struct bio *bio)
 {
        struct bio_vec *bv;
+       struct page *page;
        int i;
 
-       bio_for_each_segment(bv, bio, i)
-               atomic_dec(&bv->bv_page->_count);
+       bio_for_each_segment(bv, bio, i) {
+               page = compound_trans_head(bv->bv_page);
+               atomic_dec(&page->_count);
+       }
 }
 
 static void
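
The rewrite makes bio_pageinc() and bio_pagedec() symmetric: page reference
counts live on the head page of a compound page, so both paths resolve the
head before touching _count. Reduced to its shape (a sketch;
compound_trans_head() is the 3.11-era helper, later kernels spell it
compound_head()):

    /* take/drop the reference on the head page, never on a tail page */
    static void sketch_page_get(struct page *page)
    {
            page = compound_trans_head(page);
            atomic_inc(&page->_count);
    }

    static void sketch_page_put(struct page *page)
    {
            page = compound_trans_head(page);  /* must mirror the get side */
            atomic_dec(&page->_count);
    }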
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 07f257d44a1e00a67b84a58685c5a2e490ce5ca8..e48cb339c0c6e5b71eb4ec24bef6d4f8238ea665 100644
@@ -3714,11 +3714,17 @@ static int bond_neigh_init(struct neighbour *n)
  * The bonding ndo_neigh_setup is called at init time beofre any
  * slave exists. So we must declare proxy setup function which will
  * be used at run time to resolve the actual slave neigh param setup.
+ *
+ * It's also called by master devices (such as vlans) to setup their
+ * underlying devices. In that case - do nothing, we're already set up from
+ * our init.
  */
 static int bond_neigh_setup(struct net_device *dev,
                            struct neigh_parms *parms)
 {
-       parms->neigh_setup   = bond_neigh_init;
+       /* modify only our neigh_parms */
+       if (parms->dev == dev)
+               parms->neigh_setup = bond_neigh_init;
 
        return 0;
 }
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb.c b/drivers/net/can/usb/peak_usb/pcan_usb.c
index 25723d8ee20130b19ad95b885a99e5c1ba019517..925ab8ec9329b5bbfadb6e94faa12de0b7f91ceb 100644
@@ -649,7 +649,7 @@ static int pcan_usb_decode_data(struct pcan_usb_msg_context *mc, u8 status_len)
                if ((mc->ptr + rec_len) > mc->end)
                        goto decode_failed;
 
-               memcpy(cf->data, mc->ptr, rec_len);
+               memcpy(cf->data, mc->ptr, cf->can_dlc);
                mc->ptr += rec_len;
        }
 
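The one-line change closes a device-controlled overflow: rec_len comes
straight off the USB wire while cf->data is a fixed eight-byte CAN payload,
so the copy must be bounded by the already-clamped DLC; the parse cursor
still advances by rec_len so framing stays intact. As a generic sketch
(hypothetical struct mirroring struct can_frame):

    #include <string.h>

    #define SKETCH_MAX_DLC 8

    struct sketch_can_frame {
            unsigned char can_dlc;             /* clamped to 8 at decode */
            unsigned char data[SKETCH_MAX_DLC];
    };

    static const unsigned char *decode_rec(struct sketch_can_frame *cf,
                                           const unsigned char *ptr,
                                           unsigned int rec_len)
    {
            memcpy(cf->data, ptr, cf->can_dlc);  /* trusted, bounded length */
            return ptr + rec_len;                /* wire length still consumed */
    }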
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c
index f1b121ee5525bfa974010663da5807e9b4d038e5..55d79cb53a79637bb09297d49f1cf1d1ed533147 100644
@@ -199,7 +199,7 @@ static int arc_emac_rx(struct net_device *ndev, int budget)
        struct arc_emac_priv *priv = netdev_priv(ndev);
        unsigned int work_done;
 
-       for (work_done = 0; work_done <= budget; work_done++) {
+       for (work_done = 0; work_done < budget; work_done++) {
                unsigned int *last_rx_bd = &priv->last_rx_bd;
                struct net_device_stats *stats = &priv->stats;
                struct buffer_state *rx_buff = &priv->rx_buff[*last_rx_bd];
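
The comparison matters because of the NAPI contract: a poll handler may
consume at most budget packets, must never return more than budget, and the
old <= test allowed budget + 1 iterations. A kernel-style sketch of the
expected loop (rx_pending() and rx_one() are hypothetical stand-ins for the
driver's ring handling):

    static int sketch_poll(struct napi_struct *napi, int budget)
    {
            int work_done = 0;

            while (work_done < budget && rx_pending()) {
                    rx_one();                /* consume one packet */
                    work_done++;
            }

            if (work_done < budget)
                    napi_complete(napi);     /* done: re-enable interrupts */

            return work_done;                /* always <= budget */
    }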
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index d80e34b8285ff4457b6660e1e2fe6fe338f2e63e..ce9b387b5a1962949582354f7b0dcfecb621a08d 100644
@@ -1502,6 +1502,7 @@ struct bnx2x {
 #define BC_SUPPORTS_DCBX_MSG_NON_PMF   (1 << 21)
 #define IS_VF_FLAG                     (1 << 22)
 #define INTERRUPTS_ENABLED_FLAG                (1 << 23)
+#define BC_SUPPORTS_RMMOD_CMD          (1 << 24)
 
 #define BP_NOMCP(bp)                   ((bp)->flags & NO_MCP_FLAG)
 
@@ -1830,6 +1831,8 @@ struct bnx2x {
 
        int fp_array_size;
        u32 dump_preset_idx;
+       bool                                    stats_started;
+       struct semaphore                        stats_sema;
 };
 
 /* Tx queues may be less or equal to Rx queues */
@@ -2451,4 +2454,6 @@ enum bnx2x_pci_bus_speed {
        BNX2X_PCI_LINK_SPEED_5000 = 5000,
        BNX2X_PCI_LINK_SPEED_8000 = 8000
 };
+
+void bnx2x_set_local_cmng(struct bnx2x *bp);
 #endif /* bnx2x.h */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
index 0c94df47e0e8ee46d273488413be3d56d9c29ef6..f9122f2d6b657d0e674c7b036635b60e97586609 100644
@@ -753,6 +753,10 @@ void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state)
                bnx2x_pfc_set_pfc(bp);
 
                bnx2x_dcbx_update_ets_params(bp);
+
+               /* ets may affect cmng configuration: reinit it in hw */
+               bnx2x_set_local_cmng(bp);
+
                bnx2x_dcbx_resume_hw_tx(bp);
 
                return;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
index 5018e52ae2ad8fac5a194b0f137b66d5b8aaf0b4..32767f6aa33f473a126259e4877fa608ed51b3bf 100644
@@ -1300,6 +1300,9 @@ struct drv_func_mb {
 
        #define DRV_MSG_CODE_EEE_RESULTS_ACK            0xda000000
 
+       #define DRV_MSG_CODE_RMMOD                      0xdb000000
+       #define REQ_BC_VER_4_RMMOD_CMD                  0x0007080f
+
        #define DRV_MSG_CODE_SET_MF_BW                  0xe0000000
        #define REQ_BC_VER_4_SET_MF_BW                  0x00060202
        #define DRV_MSG_CODE_SET_MF_BW_ACK              0xe1000000
@@ -1372,6 +1375,8 @@ struct drv_func_mb {
 
        #define FW_MSG_CODE_EEE_RESULS_ACK              0xda100000
 
+       #define FW_MSG_CODE_RMMOD_ACK                   0xdb100000
+
        #define FW_MSG_CODE_SET_MF_BW_SENT              0xe0000000
        #define FW_MSG_CODE_SET_MF_BW_DONE              0xe1000000
 
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index e06186c305d8f88d584a184175abcfd829300d4d..955d6cfd9cb7c48179b587a7bf5239a572b7e9e8 100644
@@ -2476,7 +2476,7 @@ static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
 
        input.port_rate = bp->link_vars.line_speed;
 
-       if (cmng_type == CMNG_FNS_MINMAX) {
+       if (cmng_type == CMNG_FNS_MINMAX && input.port_rate) {
                int vn;
 
                /* read mf conf from shmem */
@@ -2533,6 +2533,21 @@ static void storm_memset_cmng(struct bnx2x *bp,
        }
 }
 
+/* init cmng mode in HW according to local configuration */
+void bnx2x_set_local_cmng(struct bnx2x *bp)
+{
+       int cmng_fns = bnx2x_get_cmng_fns_mode(bp);
+
+       if (cmng_fns != CMNG_FNS_NONE) {
+               bnx2x_cmng_fns_init(bp, false, cmng_fns);
+               storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
+       } else {
+               /* rate shaping and fairness are disabled */
+               DP(NETIF_MSG_IFUP,
+                  "single function mode without fairness\n");
+       }
+}
+
 /* This function is called upon link interrupt */
 static void bnx2x_link_attn(struct bnx2x *bp)
 {
@@ -2568,17 +2583,8 @@ static void bnx2x_link_attn(struct bnx2x *bp)
                        bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
        }
 
-       if (bp->link_vars.link_up && bp->link_vars.line_speed) {
-               int cmng_fns = bnx2x_get_cmng_fns_mode(bp);
-
-               if (cmng_fns != CMNG_FNS_NONE) {
-                       bnx2x_cmng_fns_init(bp, false, cmng_fns);
-                       storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
-               } else
-                       /* rate shaping and fairness are disabled */
-                       DP(NETIF_MSG_IFUP,
-                          "single function mode without fairness\n");
-       }
+       if (bp->link_vars.link_up && bp->link_vars.line_speed)
+               bnx2x_set_local_cmng(bp);
 
        __bnx2x_link_report(bp);
 
@@ -10362,6 +10368,10 @@ static void bnx2x_get_common_hwinfo(struct bnx2x *bp)
 
        bp->flags |= (val >= REQ_BC_VER_4_DCBX_ADMIN_MSG_NON_PMF) ?
                        BC_SUPPORTS_DCBX_MSG_NON_PMF : 0;
+
+       bp->flags |= (val >= REQ_BC_VER_4_RMMOD_CMD) ?
+                       BC_SUPPORTS_RMMOD_CMD : 0;
+
        boot_mode = SHMEM_RD(bp,
                        dev_info.port_feature_config[BP_PORT(bp)].mba_config) &
                        PORT_FEATURE_MBA_BOOT_AGENT_TYPE_MASK;
@@ -11524,6 +11534,7 @@ static int bnx2x_init_bp(struct bnx2x *bp)
        mutex_init(&bp->port.phy_mutex);
        mutex_init(&bp->fw_mb_mutex);
        spin_lock_init(&bp->stats_lock);
+       sema_init(&bp->stats_sema, 1);
 
        INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
        INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task);
@@ -12817,13 +12828,17 @@ static void __bnx2x_remove(struct pci_dev *pdev,
        bnx2x_dcbnl_update_applist(bp, true);
 #endif
 
+       if (IS_PF(bp) &&
+           !BP_NOMCP(bp) &&
+           (bp->flags & BC_SUPPORTS_RMMOD_CMD))
+               bnx2x_fw_command(bp, DRV_MSG_CODE_RMMOD, 0);
+
        /* Close the interface - either directly or implicitly */
        if (remove_netdev) {
                unregister_netdev(dev);
        } else {
                rtnl_lock();
-               if (netif_running(dev))
-                       bnx2x_close(dev);
+               dev_close(dev);
                rtnl_unlock();
        }
 
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index 95861efb505187f07bd1a176640e4f44af14a5de..44104fb27947fba144575a33a0be3cf1343fe39d 100644
@@ -3463,7 +3463,7 @@ int bnx2x_vf_pci_alloc(struct bnx2x *bp)
 alloc_mem_err:
        BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping,
                       sizeof(struct bnx2x_vf_mbx_msg));
-       BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping,
+       BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->pf2vf_bulletin_mapping,
                       sizeof(union pf_vf_bulletin));
        return -ENOMEM;
 }
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
index 98366abd02bda520cf1876189cced5926175288f..d63d1327b051e4a80c76027b03e723c332d0a317 100644
@@ -221,7 +221,8 @@ static int bnx2x_stats_comp(struct bnx2x *bp)
  * Statistics service functions
  */
 
-static void bnx2x_stats_pmf_update(struct bnx2x *bp)
+/* should be called under stats_sema */
+static void __bnx2x_stats_pmf_update(struct bnx2x *bp)
 {
        struct dmae_command *dmae;
        u32 opcode;
@@ -518,7 +519,8 @@ static void bnx2x_func_stats_init(struct bnx2x *bp)
        *stats_comp = 0;
 }
 
-static void bnx2x_stats_start(struct bnx2x *bp)
+/* should be called under stats_sema */
+static void __bnx2x_stats_start(struct bnx2x *bp)
 {
        /* vfs travel through here as part of the statistics FSM, but no action
         * is required
@@ -534,13 +536,34 @@ static void bnx2x_stats_start(struct bnx2x *bp)
 
        bnx2x_hw_stats_post(bp);
        bnx2x_storm_stats_post(bp);
+
+       bp->stats_started = true;
+}
+
+static void bnx2x_stats_start(struct bnx2x *bp)
+{
+       if (down_timeout(&bp->stats_sema, HZ/10))
+               BNX2X_ERR("Unable to acquire stats lock\n");
+       __bnx2x_stats_start(bp);
+       up(&bp->stats_sema);
 }
 
 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
 {
+       if (down_timeout(&bp->stats_sema, HZ/10))
+               BNX2X_ERR("Unable to acquire stats lock\n");
        bnx2x_stats_comp(bp);
-       bnx2x_stats_pmf_update(bp);
-       bnx2x_stats_start(bp);
+       __bnx2x_stats_pmf_update(bp);
+       __bnx2x_stats_start(bp);
+       up(&bp->stats_sema);
+}
+
+static void bnx2x_stats_pmf_update(struct bnx2x *bp)
+{
+       if (down_timeout(&bp->stats_sema, HZ/10))
+               BNX2X_ERR("Unable to acquire stats lock\n");
+       __bnx2x_stats_pmf_update(bp);
+       up(&bp->stats_sema);
 }
 
 static void bnx2x_stats_restart(struct bnx2x *bp)
@@ -550,8 +573,11 @@ static void bnx2x_stats_restart(struct bnx2x *bp)
         */
        if (IS_VF(bp))
                return;
+       if (down_timeout(&bp->stats_sema, HZ/10))
+               BNX2X_ERR("Unable to acquire stats lock\n");
        bnx2x_stats_comp(bp);
-       bnx2x_stats_start(bp);
+       __bnx2x_stats_start(bp);
+       up(&bp->stats_sema);
 }
 
 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
@@ -888,9 +914,7 @@ static int bnx2x_storm_stats_validate_counters(struct bnx2x *bp)
        /* Make sure we use the value of the counter
         * used for sending the last stats ramrod.
         */
-       spin_lock_bh(&bp->stats_lock);
        cur_stats_counter = bp->stats_counter - 1;
-       spin_unlock_bh(&bp->stats_lock);
 
        /* are storm stats valid? */
        if (le16_to_cpu(counters->xstats_counter) != cur_stats_counter) {
@@ -1227,12 +1251,18 @@ static void bnx2x_stats_update(struct bnx2x *bp)
 {
        u32 *stats_comp = bnx2x_sp(bp, stats_comp);
 
-       if (bnx2x_edebug_stats_stopped(bp))
+       /* we run update from timer context, so give up
+        * if somebody is in the middle of transition
+        */
+       if (down_trylock(&bp->stats_sema))
                return;
 
+       if (bnx2x_edebug_stats_stopped(bp) || !bp->stats_started)
+               goto out;
+
        if (IS_PF(bp)) {
                if (*stats_comp != DMAE_COMP_VAL)
-                       return;
+                       goto out;
 
                if (bp->port.pmf)
                        bnx2x_hw_stats_update(bp);
@@ -1242,7 +1272,7 @@ static void bnx2x_stats_update(struct bnx2x *bp)
                                BNX2X_ERR("storm stats were not updated for 3 times\n");
                                bnx2x_panic();
                        }
-                       return;
+                       goto out;
                }
        } else {
                /* vf doesn't collect HW statistics, and doesn't get completions
@@ -1256,7 +1286,7 @@ static void bnx2x_stats_update(struct bnx2x *bp)
 
        /* vf is done */
        if (IS_VF(bp))
-               return;
+               goto out;
 
        if (netif_msg_timer(bp)) {
                struct bnx2x_eth_stats *estats = &bp->eth_stats;
@@ -1267,6 +1297,9 @@ static void bnx2x_stats_update(struct bnx2x *bp)
 
        bnx2x_hw_stats_post(bp);
        bnx2x_storm_stats_post(bp);
+
+out:
+       up(&bp->stats_sema);
 }
 
 static void bnx2x_port_stats_stop(struct bnx2x *bp)
@@ -1332,6 +1365,11 @@ static void bnx2x_stats_stop(struct bnx2x *bp)
 {
        int update = 0;
 
+       if (down_timeout(&bp->stats_sema, HZ/10))
+               BNX2X_ERR("Unable to acquire stats lock\n");
+
+       bp->stats_started = false;
+
        bnx2x_stats_comp(bp);
 
        if (bp->port.pmf)
@@ -1348,6 +1386,8 @@ static void bnx2x_stats_stop(struct bnx2x *bp)
                bnx2x_hw_stats_post(bp);
                bnx2x_stats_comp(bp);
        }
+
+       up(&bp->stats_sema);
 }
 
 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
@@ -1376,15 +1416,17 @@ static const struct {
 void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
 {
        enum bnx2x_stats_state state;
+       void (*action)(struct bnx2x *bp);
        if (unlikely(bp->panic))
                return;
 
        spin_lock_bh(&bp->stats_lock);
        state = bp->stats_state;
        bp->stats_state = bnx2x_stats_stm[state][event].next_state;
+       action = bnx2x_stats_stm[state][event].action;
        spin_unlock_bh(&bp->stats_lock);
 
-       bnx2x_stats_stm[state][event].action(bp);
+       action(bp);
 
        if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
                DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
                   state, event, bp->stats_state);
 }
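
Two idioms carry this hunk: state transitions serialize on a semaphore with
a bounded wait, the timer-driven updater only try-acquires and backs off (it
runs in a context that must not sleep), and bnx2x_stats_handle() snapshots
the action pointer under the spinlock so the table lookup and the state
change stay atomic. A condensed kernel-style sketch of the semaphore side
(struct sketch_bp and do_update() are hypothetical):

    /* timer context: must not sleep, so only try-acquire */
    static void sketch_stats_update(struct sketch_bp *bp)
    {
            if (down_trylock(&bp->stats_sema))
                    return;              /* transition in flight: skip */
            if (bp->stats_started)
                    do_update(bp);
            up(&bp->stats_sema);
    }

    /* process context: may sleep, but bound the wait */
    static void sketch_stats_stop(struct sketch_bp *bp)
    {
            /* bounded wait; the driver just logs on timeout and presses on */
            if (down_timeout(&bp->stats_sema, HZ / 10))
                    pr_err("unable to acquire stats lock\n");
            bp->stats_started = false;
            up(&bp->stats_sema);
    }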
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index ddebc7a5dda0d14f32e8df6b7066b2d75c752a72..0da2214ef1b9e895903612809b21ed070cf32fa3 100644
@@ -17796,8 +17796,10 @@ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
 
 done:
        if (state == pci_channel_io_perm_failure) {
-               tg3_napi_enable(tp);
-               dev_close(netdev);
+               if (netdev) {
+                       tg3_napi_enable(tp);
+                       dev_close(netdev);
+               }
                err = PCI_ERS_RESULT_DISCONNECT;
        } else {
                pci_disable_device(pdev);
@@ -17827,7 +17829,8 @@ static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
        rtnl_lock();
 
        if (pci_enable_device(pdev)) {
-               netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
+               dev_err(&pdev->dev,
+                       "Cannot re-enable PCI device after reset.\n");
                goto done;
        }
 
@@ -17835,7 +17838,7 @@ static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
        pci_restore_state(pdev);
        pci_save_state(pdev);
 
-       if (!netif_running(netdev)) {
+       if (!netdev || !netif_running(netdev)) {
                rc = PCI_ERS_RESULT_RECOVERED;
                goto done;
        }
@@ -17847,7 +17850,7 @@ static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
        rc = PCI_ERS_RESULT_RECOVERED;
 
 done:
-       if (rc != PCI_ERS_RESULT_RECOVERED && netif_running(netdev)) {
+       if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) {
                tg3_napi_enable(tp);
                dev_close(netdev);
        }
diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c
index 687ec4a8bb48de001cfa9d39ef233ee7c69a5d96..9c89dc8fe1057dc1c410728bd252f135f9a80879 100644
@@ -455,11 +455,6 @@ static int alloc_pg_chunk(struct adapter *adapter, struct sge_fl *q,
                q->pg_chunk.offset = 0;
                mapping = pci_map_page(adapter->pdev, q->pg_chunk.page,
                                       0, q->alloc_size, PCI_DMA_FROMDEVICE);
-               if (unlikely(pci_dma_mapping_error(adapter->pdev, mapping))) {
-                       __free_pages(q->pg_chunk.page, order);
-                       q->pg_chunk.page = NULL;
-                       return -EIO;
-               }
                q->pg_chunk.mapping = mapping;
        }
        sd->pg_chunk = q->pg_chunk;
@@ -954,75 +949,40 @@ static inline unsigned int calc_tx_descs(const struct sk_buff *skb)
        return flits_to_desc(flits);
 }
 
-
-/*     map_skb - map a packet main body and its page fragments
- *     @pdev: the PCI device
- *     @skb: the packet
- *     @addr: placeholder to save the mapped addresses
- *
- *     map the main body of an sk_buff and its page fragments, if any.
- */
-static int map_skb(struct pci_dev *pdev, const struct sk_buff *skb,
-                  dma_addr_t *addr)
-{
-       const skb_frag_t *fp, *end;
-       const struct skb_shared_info *si;
-
-       *addr = pci_map_single(pdev, skb->data, skb_headlen(skb),
-                              PCI_DMA_TODEVICE);
-       if (pci_dma_mapping_error(pdev, *addr))
-               goto out_err;
-
-       si = skb_shinfo(skb);
-       end = &si->frags[si->nr_frags];
-
-       for (fp = si->frags; fp < end; fp++) {
-               *++addr = skb_frag_dma_map(&pdev->dev, fp, 0, skb_frag_size(fp),
-                                          DMA_TO_DEVICE);
-               if (pci_dma_mapping_error(pdev, *addr))
-                       goto unwind;
-       }
-       return 0;
-
-unwind:
-       while (fp-- > si->frags)
-               dma_unmap_page(&pdev->dev, *--addr, skb_frag_size(fp),
-                              DMA_TO_DEVICE);
-
-       pci_unmap_single(pdev, addr[-1], skb_headlen(skb), PCI_DMA_TODEVICE);
-out_err:
-       return -ENOMEM;
-}
-
 /**
- *     write_sgl - populate a scatter/gather list for a packet
+ *     make_sgl - populate a scatter/gather list for a packet
  *     @skb: the packet
  *     @sgp: the SGL to populate
  *     @start: start address of skb main body data to include in the SGL
  *     @len: length of skb main body data to include in the SGL
- *     @addr: the list of the mapped addresses
+ *     @pdev: the PCI device
  *
- *     Copies the scatter/gather list for the buffers that make up a packet
+ *     Generates a scatter/gather list for the buffers that make up a packet
  *     and returns the SGL size in 8-byte words.  The caller must size the SGL
  *     appropriately.
  */
-static inline unsigned int write_sgl(const struct sk_buff *skb,
+static inline unsigned int make_sgl(const struct sk_buff *skb,
                                    struct sg_ent *sgp, unsigned char *start,
-                                   unsigned int len, const dma_addr_t *addr)
+                                   unsigned int len, struct pci_dev *pdev)
 {
-       unsigned int i, j = 0, k = 0, nfrags;
+       dma_addr_t mapping;
+       unsigned int i, j = 0, nfrags;
 
        if (len) {
+               mapping = pci_map_single(pdev, start, len, PCI_DMA_TODEVICE);
                sgp->len[0] = cpu_to_be32(len);
-               sgp->addr[j++] = cpu_to_be64(addr[k++]);
+               sgp->addr[0] = cpu_to_be64(mapping);
+               j = 1;
        }
 
        nfrags = skb_shinfo(skb)->nr_frags;
        for (i = 0; i < nfrags; i++) {
                const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 
+               mapping = skb_frag_dma_map(&pdev->dev, frag, 0, skb_frag_size(frag),
+                                          DMA_TO_DEVICE);
                sgp->len[j] = cpu_to_be32(skb_frag_size(frag));
-               sgp->addr[j] = cpu_to_be64(addr[k++]);
+               sgp->addr[j] = cpu_to_be64(mapping);
                j ^= 1;
                if (j == 0)
                        ++sgp;
@@ -1178,7 +1138,7 @@ static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
                            const struct port_info *pi,
                            unsigned int pidx, unsigned int gen,
                            struct sge_txq *q, unsigned int ndesc,
-                           unsigned int compl, const dma_addr_t *addr)
+                           unsigned int compl)
 {
        unsigned int flits, sgl_flits, cntrl, tso_info;
        struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1];
@@ -1236,7 +1196,7 @@ static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
        }
 
        sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
-       sgl_flits = write_sgl(skb, sgp, skb->data, skb_headlen(skb), addr);
+       sgl_flits = make_sgl(skb, sgp, skb->data, skb_headlen(skb), adap->pdev);
 
        write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits, gen,
                         htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | compl),
@@ -1267,7 +1227,6 @@ netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
        struct netdev_queue *txq;
        struct sge_qset *qs;
        struct sge_txq *q;
-       dma_addr_t addr[MAX_SKB_FRAGS + 1];
 
        /*
         * The chip min packet length is 9 octets but play safe and reject
@@ -1296,11 +1255,6 @@ netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
                return NETDEV_TX_BUSY;
        }
 
-       if (unlikely(map_skb(adap->pdev, skb, addr) < 0)) {
-               dev_kfree_skb(skb);
-               return NETDEV_TX_OK;
-       }
-
        q->in_use += ndesc;
        if (unlikely(credits - ndesc < q->stop_thres)) {
                t3_stop_tx_queue(txq, qs, q);
@@ -1358,7 +1312,7 @@ netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
        if (likely(!skb_shared(skb)))
                skb_orphan(skb);
 
-       write_tx_pkt_wr(adap, skb, pi, pidx, gen, q, ndesc, compl, addr);
+       write_tx_pkt_wr(adap, skb, pi, pidx, gen, q, ndesc, compl);
        check_ring_tx_db(adap, q);
        return NETDEV_TX_OK;
 }
@@ -1623,8 +1577,7 @@ static void setup_deferred_unmapping(struct sk_buff *skb, struct pci_dev *pdev,
  */
 static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb,
                          struct sge_txq *q, unsigned int pidx,
-                         unsigned int gen, unsigned int ndesc,
-                         const dma_addr_t *addr)
+                         unsigned int gen, unsigned int ndesc)
 {
        unsigned int sgl_flits, flits;
        struct work_request_hdr *from;
@@ -1645,9 +1598,9 @@ static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb,
 
        flits = skb_transport_offset(skb) / 8;
        sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
-       sgl_flits = write_sgl(skb, sgp, skb_transport_header(skb),
-                            skb_tail_pointer(skb) -
-                            skb_transport_header(skb), addr);
+       sgl_flits = make_sgl(skb, sgp, skb_transport_header(skb),
+                            skb->tail - skb->transport_header,
+                            adap->pdev);
        if (need_skb_unmap()) {
                setup_deferred_unmapping(skb, adap->pdev, sgp, sgl_flits);
                skb->destructor = deferred_unmap_destructor;
@@ -1705,11 +1658,6 @@ again:   reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
                goto again;
        }
 
-       if (map_skb(adap->pdev, skb, (dma_addr_t *)skb->head)) {
-               spin_unlock(&q->lock);
-               return NET_XMIT_SUCCESS;
-       }
-
        gen = q->gen;
        q->in_use += ndesc;
        pidx = q->pidx;
@@ -1720,7 +1668,7 @@ again:    reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
        }
        spin_unlock(&q->lock);
 
-       write_ofld_wr(adap, skb, q, pidx, gen, ndesc, (dma_addr_t *)skb->head);
+       write_ofld_wr(adap, skb, q, pidx, gen, ndesc);
        check_ring_tx_db(adap, q);
        return NET_XMIT_SUCCESS;
 }
@@ -1738,7 +1686,6 @@ static void restart_offloadq(unsigned long data)
        struct sge_txq *q = &qs->txq[TXQ_OFLD];
        const struct port_info *pi = netdev_priv(qs->netdev);
        struct adapter *adap = pi->adapter;
-       unsigned int written = 0;
 
        spin_lock(&q->lock);
 again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
@@ -1758,14 +1705,10 @@ again:  reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
                        break;
                }
 
-               if (map_skb(adap->pdev, skb, (dma_addr_t *)skb->head))
-                       break;
-
                gen = q->gen;
                q->in_use += ndesc;
                pidx = q->pidx;
                q->pidx += ndesc;
-               written += ndesc;
                if (q->pidx >= q->size) {
                        q->pidx -= q->size;
                        q->gen ^= 1;
@@ -1773,8 +1716,7 @@ again:    reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
                __skb_unlink(skb, &q->sendq);
                spin_unlock(&q->lock);
 
-               write_ofld_wr(adap, skb, q, pidx, gen, ndesc,
-                            (dma_addr_t *)skb->head);
+               write_ofld_wr(adap, skb, q, pidx, gen, ndesc);
                spin_lock(&q->lock);
        }
        spin_unlock(&q->lock);
@@ -1784,9 +1726,8 @@ again:    reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
        set_bit(TXQ_LAST_PKT_DB, &q->flags);
 #endif
        wmb();
-       if (likely(written))
-               t3_write_reg(adap, A_SG_KDOORBELL,
-                            F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
+       t3_write_reg(adap, A_SG_KDOORBELL,
+                    F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
 }
 
 /**
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 6e6e0a117ee2fe16061ae8e0b77a51aaacd3336b..8ec5d74ad44d75e4fb9d7c871b0ddd95654c74d1 100644
@@ -3048,6 +3048,9 @@ int be_cmd_get_func_config(struct be_adapter *adapter)
 
                adapter->max_event_queues = le16_to_cpu(desc->eq_count);
                adapter->if_cap_flags = le32_to_cpu(desc->cap_flags);
+
+               /* Clear flags that driver is not interested in */
+               adapter->if_cap_flags &=  BE_IF_CAP_FLAGS_WANT;
        }
 err:
        mutex_unlock(&adapter->mbox_lock);
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index 5228d88c5a024e8e20be18e4f9fec69de7865e42..1b3b9e886412ddd689a0abbee536e6ba0d4ad50c 100644
@@ -563,6 +563,12 @@ enum be_if_flags {
        BE_IF_FLAGS_MULTICAST = 0x1000
 };
 
+#define BE_IF_CAP_FLAGS_WANT (BE_IF_FLAGS_RSS | BE_IF_FLAGS_PROMISCUOUS |\
+                        BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_VLAN_PROMISCUOUS |\
+                        BE_IF_FLAGS_VLAN | BE_IF_FLAGS_MCAST_PROMISCUOUS |\
+                        BE_IF_FLAGS_PASS_L3L4_ERRORS | BE_IF_FLAGS_MULTICAST |\
+                        BE_IF_FLAGS_UNTAGGED)
+
 /* An RX interface is an object with one or more MAC addresses and
  * filtering capabilities. */
 struct be_cmd_req_if_create {
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index c896079728e1312893fdf03f0b0e111f2a6676cc..ef94a591f9e550ed31d654c2cc1fa2ba7bc1bef9 100644
@@ -931,17 +931,20 @@ static int skge_ring_alloc(struct skge_ring *ring, void *vaddr, u32 base)
 }
 
 /* Allocate and setup a new buffer for receiving */
-static void skge_rx_setup(struct skge_port *skge, struct skge_element *e,
-                         struct sk_buff *skb, unsigned int bufsize)
+static int skge_rx_setup(struct skge_port *skge, struct skge_element *e,
+                        struct sk_buff *skb, unsigned int bufsize)
 {
        struct skge_rx_desc *rd = e->desc;
-       u64 map;
+       dma_addr_t map;
 
        map = pci_map_single(skge->hw->pdev, skb->data, bufsize,
                             PCI_DMA_FROMDEVICE);
 
-       rd->dma_lo = map;
-       rd->dma_hi = map >> 32;
+       if (pci_dma_mapping_error(skge->hw->pdev, map))
+               return -1;
+
+       rd->dma_lo = lower_32_bits(map);
+       rd->dma_hi = upper_32_bits(map);
        e->skb = skb;
        rd->csum1_start = ETH_HLEN;
        rd->csum2_start = ETH_HLEN;
@@ -953,6 +956,7 @@ static void skge_rx_setup(struct skge_port *skge, struct skge_element *e,
        rd->control = BMU_OWN | BMU_STF | BMU_IRQ_EOF | BMU_TCP_CHECK | bufsize;
        dma_unmap_addr_set(e, mapaddr, map);
        dma_unmap_len_set(e, maplen, bufsize);
+       return 0;
 }
 
 /* Resume receiving using existing skb,
@@ -1014,7 +1018,10 @@ static int skge_rx_fill(struct net_device *dev)
                        return -ENOMEM;
 
                skb_reserve(skb, NET_IP_ALIGN);
-               skge_rx_setup(skge, e, skb, skge->rx_buf_size);
+               if (skge_rx_setup(skge, e, skb, skge->rx_buf_size) < 0) {
+                       dev_kfree_skb(skb);
+                       return -EIO;
+               }
        } while ((e = e->next) != ring->start);
 
        ring->to_clean = ring->start;
@@ -2544,7 +2551,7 @@ static int skge_up(struct net_device *dev)
 
        BUG_ON(skge->dma & 7);
 
-       if ((u64)skge->dma >> 32 != ((u64) skge->dma + skge->mem_size) >> 32) {
+       if (upper_32_bits(skge->dma) != upper_32_bits(skge->dma + skge->mem_size)) {
                dev_err(&hw->pdev->dev, "pci_alloc_consistent region crosses 4G boundary\n");
                err = -EINVAL;
                goto free_pci_mem;
@@ -2729,7 +2736,7 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
        struct skge_tx_desc *td;
        int i;
        u32 control, len;
-       u64 map;
+       dma_addr_t map;
 
        if (skb_padto(skb, ETH_ZLEN))
                return NETDEV_TX_OK;
@@ -2743,11 +2750,14 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
        e->skb = skb;
        len = skb_headlen(skb);
        map = pci_map_single(hw->pdev, skb->data, len, PCI_DMA_TODEVICE);
+       if (pci_dma_mapping_error(hw->pdev, map))
+               goto mapping_error;
+
        dma_unmap_addr_set(e, mapaddr, map);
        dma_unmap_len_set(e, maplen, len);
 
-       td->dma_lo = map;
-       td->dma_hi = map >> 32;
+       td->dma_lo = lower_32_bits(map);
+       td->dma_hi = upper_32_bits(map);
 
        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                const int offset = skb_checksum_start_offset(skb);
@@ -2778,14 +2788,16 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
 
                        map = skb_frag_dma_map(&hw->pdev->dev, frag, 0,
                                               skb_frag_size(frag), DMA_TO_DEVICE);
+                       if (dma_mapping_error(&hw->pdev->dev, map))
+                               goto mapping_unwind;
 
                        e = e->next;
                        e->skb = skb;
                        tf = e->desc;
                        BUG_ON(tf->control & BMU_OWN);
 
-                       tf->dma_lo = map;
-                       tf->dma_hi = (u64) map >> 32;
+                       tf->dma_lo = lower_32_bits(map);
+                       tf->dma_hi = upper_32_bits(map);
                        dma_unmap_addr_set(e, mapaddr, map);
                        dma_unmap_len_set(e, maplen, skb_frag_size(frag));
 
@@ -2815,6 +2827,26 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
        }
 
        return NETDEV_TX_OK;
+
+mapping_unwind:
+       e = skge->tx_ring.to_use;
+       pci_unmap_single(hw->pdev,
+                        dma_unmap_addr(e, mapaddr),
+                        dma_unmap_len(e, maplen),
+                        PCI_DMA_TODEVICE);
+       while (i-- > 0) {
+               e = e->next;
+               pci_unmap_page(hw->pdev,
+                              dma_unmap_addr(e, mapaddr),
+                              dma_unmap_len(e, maplen),
+                              PCI_DMA_TODEVICE);
+       }
+
+mapping_error:
+       if (net_ratelimit())
+               dev_warn(&hw->pdev->dev, "%s: tx mapping error\n", dev->name);
+       dev_kfree_skb(skb);
+       return NETDEV_TX_OK;
 }
 
 
@@ -3045,11 +3077,13 @@ static struct sk_buff *skge_rx_get(struct net_device *dev,
 
                pci_dma_sync_single_for_cpu(skge->hw->pdev,
                                            dma_unmap_addr(e, mapaddr),
-                                           len, PCI_DMA_FROMDEVICE);
+                                           dma_unmap_len(e, maplen),
+                                           PCI_DMA_FROMDEVICE);
                skb_copy_from_linear_data(e->skb, skb->data, len);
                pci_dma_sync_single_for_device(skge->hw->pdev,
                                               dma_unmap_addr(e, mapaddr),
-                                              len, PCI_DMA_FROMDEVICE);
+                                              dma_unmap_len(e, maplen),
+                                              PCI_DMA_FROMDEVICE);
                skge_rx_reuse(e, skge->rx_buf_size);
        } else {
                struct sk_buff *nskb;
@@ -3058,13 +3092,17 @@ static struct sk_buff *skge_rx_get(struct net_device *dev,
                if (!nskb)
                        goto resubmit;
 
+               if (skge_rx_setup(skge, e, nskb, skge->rx_buf_size) < 0) {
+                       dev_kfree_skb(nskb);
+                       goto resubmit;
+               }
+
                pci_unmap_single(skge->hw->pdev,
                                 dma_unmap_addr(e, mapaddr),
                                 dma_unmap_len(e, maplen),
                                 PCI_DMA_FROMDEVICE);
                skb = e->skb;
                prefetch(skb->data);
-               skge_rx_setup(skge, e, nskb, skge->rx_buf_size);
        }
 
        skb_put(skb, len);
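
Beyond checking each map, the tx path now unwinds on a mid-frame failure:
the head is a single mapping, each fragment a page mapping, and the loop
counter i records how many fragments were already mapped. The unwind reduced
to a helper (a sketch over a hypothetical ring-element type, using the
legacy PCI DMA API this driver is written against):

    /* undo i fragment mappings, then the head mapping */
    static void sketch_unwind_tx(struct pci_dev *pdev,
                                 struct sketch_elem *head, int i)
    {
            struct sketch_elem *e = head;

            pci_unmap_single(pdev, dma_unmap_addr(e, mapaddr),
                             dma_unmap_len(e, maplen), PCI_DMA_TODEVICE);
            while (i-- > 0) {
                    e = e->next;
                    pci_unmap_page(pdev, dma_unmap_addr(e, mapaddr),
                                   dma_unmap_len(e, maplen),
                                   PCI_DMA_TODEVICE);
            }
    }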
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index c571de85d0f995bb2c302210cfbd979b9e1bec9e..5472cbd34028d9038539824c4167a0c3010a3a01 100644
@@ -46,7 +46,7 @@
 #include "mlx5_core.h"
 
 enum {
-       CMD_IF_REV = 4,
+       CMD_IF_REV = 5,
 };
 
 enum {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index c02cbcfd0fb83a4b10d198888c88f67fc4670cc9..443cc4d7b024c02d2cc77861868e1c1b17ee2524 100644
@@ -268,7 +268,7 @@ static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
                case MLX5_EVENT_TYPE_PAGE_REQUEST:
                        {
                                u16 func_id = be16_to_cpu(eqe->data.req_pages.func_id);
-                               s16 npages = be16_to_cpu(eqe->data.req_pages.num_pages);
+                               s32 npages = be32_to_cpu(eqe->data.req_pages.num_pages);
 
                                mlx5_core_dbg(dev, "page request for func 0x%x, napges %d\n", func_id, npages);
                                mlx5_core_req_pages_handler(dev, func_id, npages);
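
Note: the widening matters for sign as well as range. With the old __be16/s16 pair, a legitimate request for 0x8000 or more pages would have decoded negative and been misread as a reclaim request, and anything above 65535 would have truncated outright. The decode in isolation, as an illustrative one-liner:

    /* Decode the (now 32-bit) big-endian page count; a negative value
     * means the firmware wants pages back rather than given. */
    s32 npages = (s32)be32_to_cpu(eqe->data.req_pages.num_pages);
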
index 72a5222447f558b3e4a118ccaac70de358af8c64..f012658b6a927baf1fca6a2d39d806346c61a1a6 100644 (file)
@@ -113,7 +113,7 @@ int mlx5_cmd_query_hca_cap(struct mlx5_core_dev *dev,
        caps->log_max_srq = out->hca_cap.log_max_srqs & 0x1f;
        caps->local_ca_ack_delay = out->hca_cap.local_ca_ack_delay & 0x1f;
        caps->log_max_mcg = out->hca_cap.log_max_mcg;
-       caps->max_qp_mcg = be16_to_cpu(out->hca_cap.max_qp_mcg);
+       caps->max_qp_mcg = be32_to_cpu(out->hca_cap.max_qp_mcg) & 0xffffff;
        caps->max_ra_res_qp = 1 << (out->hca_cap.log_max_ra_res_qp & 0x3f);
        caps->max_ra_req_qp = 1 << (out->hca_cap.log_max_ra_req_qp & 0x3f);
        caps->max_srq_wqes = 1 << out->hca_cap.log_max_srq_sz;
index 748f10a155c42e4923850f718df8b2142eeff01b..3e6670c4a7cd215998bf2302f2573acb8540ffb7 100644 (file)
@@ -55,33 +55,9 @@ enum {
 };
 
 static DEFINE_SPINLOCK(health_lock);
-
 static LIST_HEAD(health_list);
 static struct work_struct health_work;
 
-static health_handler_t reg_handler;
-int mlx5_register_health_report_handler(health_handler_t handler)
-{
-       spin_lock_irq(&health_lock);
-       if (reg_handler) {
-               spin_unlock_irq(&health_lock);
-               return -EEXIST;
-       }
-       reg_handler = handler;
-       spin_unlock_irq(&health_lock);
-
-       return 0;
-}
-EXPORT_SYMBOL(mlx5_register_health_report_handler);
-
-void mlx5_unregister_health_report_handler(void)
-{
-       spin_lock_irq(&health_lock);
-       reg_handler = NULL;
-       spin_unlock_irq(&health_lock);
-}
-EXPORT_SYMBOL(mlx5_unregister_health_report_handler);
-
 static void health_care(struct work_struct *work)
 {
        struct mlx5_core_health *health, *n;
@@ -98,11 +74,8 @@ static void health_care(struct work_struct *work)
                priv = container_of(health, struct mlx5_priv, health);
                dev = container_of(priv, struct mlx5_core_dev, priv);
                mlx5_core_warn(dev, "handling bad device here\n");
+               /* nothing yet */
                spin_lock_irq(&health_lock);
-               if (reg_handler)
-                       reg_handler(dev->pdev, health->health,
-                                   sizeof(health->health));
-
                list_del_init(&health->list);
                spin_unlock_irq(&health_lock);
        }
index 4a3e137931a38b898468061ae8334016e4e2f23e..3a2408d448203623754d0aa87e35c16e809da43f 100644 (file)
@@ -43,10 +43,16 @@ enum {
        MLX5_PAGES_TAKE         = 2
 };
 
+enum {
+       MLX5_BOOT_PAGES         = 1,
+       MLX5_INIT_PAGES         = 2,
+       MLX5_POST_INIT_PAGES    = 3
+};
+
 struct mlx5_pages_req {
        struct mlx5_core_dev *dev;
        u32     func_id;
-       s16     npages;
+       s32     npages;
        struct work_struct work;
 };
 
@@ -64,27 +70,23 @@ struct mlx5_query_pages_inbox {
 
 struct mlx5_query_pages_outbox {
        struct mlx5_outbox_hdr  hdr;
-       __be16                  num_boot_pages;
+       __be16                  rsvd;
        __be16                  func_id;
-       __be16                  init_pages;
-       __be16                  num_pages;
+       __be32                  num_pages;
 };
 
 struct mlx5_manage_pages_inbox {
        struct mlx5_inbox_hdr   hdr;
-       __be16                  rsvd0;
+       __be16                  rsvd;
        __be16                  func_id;
-       __be16                  rsvd1;
-       __be16                  num_entries;
-       u8                      rsvd2[16];
+       __be32                  num_entries;
        __be64                  pas[0];
 };
 
 struct mlx5_manage_pages_outbox {
        struct mlx5_outbox_hdr  hdr;
-       u8                      rsvd0[2];
-       __be16                  num_entries;
-       u8                      rsvd1[20];
+       __be32                  num_entries;
+       u8                      rsvd[4];
        __be64                  pas[0];
 };
 
@@ -146,7 +148,7 @@ static struct page *remove_page(struct mlx5_core_dev *dev, u64 addr)
 }
 
 static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id,
-                               s16 *pages, s16 *init_pages, u16 *boot_pages)
+                               s32 *npages, int boot)
 {
        struct mlx5_query_pages_inbox   in;
        struct mlx5_query_pages_outbox  out;
@@ -155,6 +157,8 @@ static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id,
        memset(&in, 0, sizeof(in));
        memset(&out, 0, sizeof(out));
        in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_PAGES);
+       in.hdr.opmod = boot ? cpu_to_be16(MLX5_BOOT_PAGES) : cpu_to_be16(MLX5_INIT_PAGES);
+
        err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
        if (err)
                return err;
@@ -162,15 +166,7 @@ static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id,
        if (out.hdr.status)
                return mlx5_cmd_status_to_err(&out.hdr);
 
-       if (pages)
-               *pages = be16_to_cpu(out.num_pages);
-
-       if (init_pages)
-               *init_pages = be16_to_cpu(out.init_pages);
-
-       if (boot_pages)
-               *boot_pages = be16_to_cpu(out.num_boot_pages);
-
+       *npages = be32_to_cpu(out.num_pages);
        *func_id = be16_to_cpu(out.func_id);
 
        return err;
@@ -224,7 +220,7 @@ static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
        in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES);
        in->hdr.opmod = cpu_to_be16(MLX5_PAGES_GIVE);
        in->func_id = cpu_to_be16(func_id);
-       in->num_entries = cpu_to_be16(npages);
+       in->num_entries = cpu_to_be32(npages);
        err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
        mlx5_core_dbg(dev, "err %d\n", err);
        if (err) {
@@ -292,7 +288,7 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
        in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES);
        in.hdr.opmod = cpu_to_be16(MLX5_PAGES_TAKE);
        in.func_id = cpu_to_be16(func_id);
-       in.num_entries = cpu_to_be16(npages);
+       in.num_entries = cpu_to_be32(npages);
        mlx5_core_dbg(dev, "npages %d, outlen %d\n", npages, outlen);
        err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen);
        if (err) {
@@ -306,7 +302,7 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
                goto out_free;
        }
 
-       num_claimed = be16_to_cpu(out->num_entries);
+       num_claimed = be32_to_cpu(out->num_entries);
        if (nclaimed)
                *nclaimed = num_claimed;
 
@@ -345,7 +341,7 @@ static void pages_work_handler(struct work_struct *work)
 }
 
 void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
-                                s16 npages)
+                                s32 npages)
 {
        struct mlx5_pages_req *req;
 
@@ -364,20 +360,18 @@ void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
 
 int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot)
 {
-       u16 uninitialized_var(boot_pages);
-       s16 uninitialized_var(init_pages);
        u16 uninitialized_var(func_id);
+       s32 uninitialized_var(npages);
        int err;
 
-       err = mlx5_cmd_query_pages(dev, &func_id, NULL, &init_pages,
-                                  &boot_pages);
+       err = mlx5_cmd_query_pages(dev, &func_id, &npages, boot);
        if (err)
                return err;
 
+       mlx5_core_dbg(dev, "requested %d %s pages for func_id 0x%x\n",
+                     npages, boot ? "boot" : "init", func_id);
 
-       mlx5_core_dbg(dev, "requested %d init pages and %d boot pages for func_id 0x%x\n",
-                     init_pages, boot_pages, func_id);
-       return give_pages(dev, func_id, boot ? boot_pages : init_pages, 0);
+       return give_pages(dev, func_id, npages, 0);
 }
 
 static int optimal_reclaimed_pages(void)
index 92da9980a0a0a2b86ed2189313c7105ad693354a..9d4bb7f839041967eaed1ef6fd0fd51b40f6ec6d 100644 (file)
@@ -3266,6 +3266,11 @@ int qlcnic_83xx_interrupt_test(struct net_device *netdev)
        u8 val;
        int ret, max_sds_rings = adapter->max_sds_rings;
 
+       if (test_bit(__QLCNIC_RESETTING, &adapter->state)) {
+               netdev_info(netdev, "Device is resetting\n");
+               return -EBUSY;
+       }
+
        if (qlcnic_get_diag_lock(adapter)) {
                netdev_info(netdev, "Device in diagnostics mode\n");
                return -EBUSY;
index 9f4b8d5f08657d04fff6f0996b46313a1eea75dd..345d987aede491b8d5daf0e1e268b0ed8a9f2a51 100644 (file)
@@ -629,7 +629,8 @@ int qlcnic_83xx_idc_reattach_driver(struct qlcnic_adapter *adapter)
                return -EIO;
        }
 
-       qlcnic_set_drv_version(adapter);
+       if (adapter->portnum == 0)
+               qlcnic_set_drv_version(adapter);
        qlcnic_83xx_idc_attach_driver(adapter);
 
        return 0;
index ee013fcc33220f4d7bfdf8e60600accaefab77cd..bc05d016c85943834b687e0b2221495c88ea7190 100644 (file)
@@ -2165,7 +2165,8 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (err)
                goto err_out_disable_mbx_intr;
 
-       qlcnic_set_drv_version(adapter);
+       if (adapter->portnum == 0)
+               qlcnic_set_drv_version(adapter);
 
        pci_set_drvdata(pdev, adapter);
 
@@ -3085,7 +3086,8 @@ done:
        adapter->fw_fail_cnt = 0;
        adapter->flags &= ~QLCNIC_FW_HANG;
        clear_bit(__QLCNIC_RESETTING, &adapter->state);
-       qlcnic_set_drv_version(adapter);
+       if (adapter->portnum == 0)
+               qlcnic_set_drv_version(adapter);
 
        if (!qlcnic_clr_drv_state(adapter))
                qlcnic_schedule_work(adapter, qlcnic_fw_poll_work,
index 10ed82b3baca0970f65ce9797a18fc35cf76b1e3..660c3f5b22377a956c0caed68e59e6761d282239 100644 (file)
@@ -170,9 +170,9 @@ static int qlcnic_82xx_store_beacon(struct qlcnic_adapter *adapter,
 
        if (ahw->extra_capability[0] & QLCNIC_FW_CAPABILITY_2_BEACON) {
                err = qlcnic_get_beacon_state(adapter, &h_beacon_state);
-               if (!err) {
-                       dev_info(&adapter->pdev->dev,
-                                "Failed to get current beacon state\n");
+               if (err) {
+                       netdev_err(adapter->netdev,
+                                  "Failed to get current beacon state\n");
                } else {
                        if (h_beacon_state == QLCNIC_BEACON_DISABLE)
                                ahw->beacon_state = 0;
index 6f35f8404d68adeffa84e9adbdbe4927176db74e..d2e591955bdde3fd03b40dac1c9c22e3d013c23d 100644 (file)
@@ -524,6 +524,7 @@ rx_status_loop:
                                         PCI_DMA_FROMDEVICE);
                if (dma_mapping_error(&cp->pdev->dev, new_mapping)) {
                        dev->stats.rx_dropped++;
+                       kfree_skb(new_skb);
                        goto rx_next;
                }
 
index c9d942a5c335d022a3eb8bd59ebe5345d655d557..1ef9d8a555aa33100d2103fd0d8e3a48f403a735 100644 (file)
@@ -33,10 +33,15 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
        struct stmmac_priv *priv = (struct stmmac_priv *)p;
        unsigned int txsize = priv->dma_tx_size;
        unsigned int entry = priv->cur_tx % txsize;
-       struct dma_desc *desc = priv->dma_tx + entry;
+       struct dma_desc *desc;
        unsigned int nopaged_len = skb_headlen(skb);
        unsigned int bmax, len;
 
+       if (priv->extend_desc)
+               desc = (struct dma_desc *)(priv->dma_etx + entry);
+       else
+               desc = priv->dma_tx + entry;
+
        if (priv->plat->enh_desc)
                bmax = BUF_SIZE_8KiB;
        else
@@ -54,7 +59,11 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
                                                STMMAC_RING_MODE);
                wmb();
                entry = (++priv->cur_tx) % txsize;
-               desc = priv->dma_tx + entry;
+
+               if (priv->extend_desc)
+                       desc = (struct dma_desc *)(priv->dma_etx + entry);
+               else
+                       desc = priv->dma_tx + entry;
 
                desc->des2 = dma_map_single(priv->device, skb->data + bmax,
                                            len, DMA_TO_DEVICE);
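
Note: both halves of this hunk repeat the same two-way descriptor selection; a small helper would keep them in sync. A hypothetical sketch (not in the driver), valid because struct dma_extended_desc embeds the basic descriptor as its first member:

    /* Hypothetical helper: return the basic descriptor for 'entry',
     * whether or not the ring uses extended descriptors. */
    static inline struct dma_desc *example_tx_desc(struct stmmac_priv *priv,
                                                   unsigned int entry)
    {
            if (priv->extend_desc)
                    return (struct dma_desc *)(priv->dma_etx + entry);
            return priv->dma_tx + entry;
    }
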
index f2ccb36e868590917a4cea295eb93dc20cb9ea8c..0a9bb9d30c3f0b78e28f9233e1775392d260aff4 100644 (file)
@@ -939,15 +939,20 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
 
        skb = __netdev_alloc_skb(priv->dev, priv->dma_buf_sz + NET_IP_ALIGN,
                                 GFP_KERNEL);
-       if (unlikely(skb == NULL)) {
+       if (!skb) {
                pr_err("%s: Rx init fails; skb is NULL\n", __func__);
-               return 1;
+               return -ENOMEM;
        }
        skb_reserve(skb, NET_IP_ALIGN);
        priv->rx_skbuff[i] = skb;
        priv->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
                                                priv->dma_buf_sz,
                                                DMA_FROM_DEVICE);
+       if (dma_mapping_error(priv->device, priv->rx_skbuff_dma[i])) {
+               pr_err("%s: DMA mapping error\n", __func__);
+               dev_kfree_skb_any(skb);
+               return -EINVAL;
+       }
 
        p->des2 = priv->rx_skbuff_dma[i];
 
@@ -958,6 +963,16 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
        return 0;
 }
 
+static void stmmac_free_rx_buffers(struct stmmac_priv *priv, int i)
+{
+       if (priv->rx_skbuff[i]) {
+               dma_unmap_single(priv->device, priv->rx_skbuff_dma[i],
+                                priv->dma_buf_sz, DMA_FROM_DEVICE);
+               dev_kfree_skb_any(priv->rx_skbuff[i]);
+       }
+       priv->rx_skbuff[i] = NULL;
+}
+
 /**
  * init_dma_desc_rings - init the RX/TX descriptor rings
  * @dev: net device structure
@@ -965,13 +980,14 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
  * and allocates the socket buffers. It supports the chained and ring
  * modes.
  */
-static void init_dma_desc_rings(struct net_device *dev)
+static int init_dma_desc_rings(struct net_device *dev)
 {
        int i;
        struct stmmac_priv *priv = netdev_priv(dev);
        unsigned int txsize = priv->dma_tx_size;
        unsigned int rxsize = priv->dma_rx_size;
        unsigned int bfsize = 0;
+       int ret = -ENOMEM;
 
        /* Set the max buffer size according to the DESC mode
         * and the MTU. Note that RING mode allows 16KiB bsize.
@@ -992,34 +1008,60 @@ static void init_dma_desc_rings(struct net_device *dev)
                                                          dma_extended_desc),
                                                   &priv->dma_rx_phy,
                                                   GFP_KERNEL);
+               if (!priv->dma_erx)
+                       goto err_dma;
+
                priv->dma_etx = dma_alloc_coherent(priv->device, txsize *
                                                   sizeof(struct
                                                          dma_extended_desc),
                                                   &priv->dma_tx_phy,
                                                   GFP_KERNEL);
-               if ((!priv->dma_erx) || (!priv->dma_etx))
-                       return;
+               if (!priv->dma_etx) {
+                       dma_free_coherent(priv->device, priv->dma_rx_size *
+                                       sizeof(struct dma_extended_desc),
+                                       priv->dma_erx, priv->dma_rx_phy);
+                       goto err_dma;
+               }
        } else {
                priv->dma_rx = dma_alloc_coherent(priv->device, rxsize *
                                                  sizeof(struct dma_desc),
                                                  &priv->dma_rx_phy,
                                                  GFP_KERNEL);
+               if (!priv->dma_rx)
+                       goto err_dma;
+
                priv->dma_tx = dma_alloc_coherent(priv->device, txsize *
                                                  sizeof(struct dma_desc),
                                                  &priv->dma_tx_phy,
                                                  GFP_KERNEL);
-               if ((!priv->dma_rx) || (!priv->dma_tx))
-                       return;
+               if (!priv->dma_tx) {
+                       dma_free_coherent(priv->device, priv->dma_rx_size *
+                                       sizeof(struct dma_desc),
+                                       priv->dma_rx, priv->dma_rx_phy);
+                       goto err_dma;
+               }
        }
 
        priv->rx_skbuff_dma = kmalloc_array(rxsize, sizeof(dma_addr_t),
                                            GFP_KERNEL);
+       if (!priv->rx_skbuff_dma)
+               goto err_rx_skbuff_dma;
+
        priv->rx_skbuff = kmalloc_array(rxsize, sizeof(struct sk_buff *),
                                        GFP_KERNEL);
+       if (!priv->rx_skbuff)
+               goto err_rx_skbuff;
+
        priv->tx_skbuff_dma = kmalloc_array(txsize, sizeof(dma_addr_t),
                                            GFP_KERNEL);
+       if (!priv->tx_skbuff_dma)
+               goto err_tx_skbuff_dma;
+
        priv->tx_skbuff = kmalloc_array(txsize, sizeof(struct sk_buff *),
                                        GFP_KERNEL);
+       if (!priv->tx_skbuff)
+               goto err_tx_skbuff;
+
        if (netif_msg_probe(priv)) {
                pr_debug("(%s) dma_rx_phy=0x%08x dma_tx_phy=0x%08x\n", __func__,
                         (u32) priv->dma_rx_phy, (u32) priv->dma_tx_phy);
@@ -1034,8 +1076,9 @@ static void init_dma_desc_rings(struct net_device *dev)
                else
                        p = priv->dma_rx + i;
 
-               if (stmmac_init_rx_buffers(priv, p, i))
-                       break;
+               ret = stmmac_init_rx_buffers(priv, p, i);
+               if (ret)
+                       goto err_init_rx_buffers;
 
                if (netif_msg_probe(priv))
                        pr_debug("[%p]\t[%p]\t[%x]\n", priv->rx_skbuff[i],
@@ -1081,20 +1124,44 @@ static void init_dma_desc_rings(struct net_device *dev)
 
        if (netif_msg_hw(priv))
                stmmac_display_rings(priv);
+
+       return 0;
+err_init_rx_buffers:
+       while (--i >= 0)
+               stmmac_free_rx_buffers(priv, i);
+       kfree(priv->tx_skbuff);
+err_tx_skbuff:
+       kfree(priv->tx_skbuff_dma);
+err_tx_skbuff_dma:
+       kfree(priv->rx_skbuff);
+err_rx_skbuff:
+       kfree(priv->rx_skbuff_dma);
+err_rx_skbuff_dma:
+       if (priv->extend_desc) {
+               dma_free_coherent(priv->device, priv->dma_tx_size *
+                                 sizeof(struct dma_extended_desc),
+                                 priv->dma_etx, priv->dma_tx_phy);
+               dma_free_coherent(priv->device, priv->dma_rx_size *
+                                 sizeof(struct dma_extended_desc),
+                                 priv->dma_erx, priv->dma_rx_phy);
+       } else {
+               dma_free_coherent(priv->device,
+                               priv->dma_tx_size * sizeof(struct dma_desc),
+                               priv->dma_tx, priv->dma_tx_phy);
+               dma_free_coherent(priv->device,
+                               priv->dma_rx_size * sizeof(struct dma_desc),
+                               priv->dma_rx, priv->dma_rx_phy);
+       }
+err_dma:
+       return ret;
 }
 
 static void dma_free_rx_skbufs(struct stmmac_priv *priv)
 {
        int i;
 
-       for (i = 0; i < priv->dma_rx_size; i++) {
-               if (priv->rx_skbuff[i]) {
-                       dma_unmap_single(priv->device, priv->rx_skbuff_dma[i],
-                                        priv->dma_buf_sz, DMA_FROM_DEVICE);
-                       dev_kfree_skb_any(priv->rx_skbuff[i]);
-               }
-               priv->rx_skbuff[i] = NULL;
-       }
+       for (i = 0; i < priv->dma_rx_size; i++)
+               stmmac_free_rx_buffers(priv, i);
 }
 
 static void dma_free_tx_skbufs(struct stmmac_priv *priv)
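
Note: the error-path restructuring above is the canonical kernel goto-unwind. Allocations happen in a fixed order, each failure jumps to a label that frees exactly what was already allocated, and the labels fall through in reverse order. Stripped to its skeleton (plain userspace C for brevity):

    #include <stdlib.h>

    int example_init(void **a, void **b, void **c)
    {
            *a = malloc(64);
            if (!*a)
                    goto err;               /* nothing allocated yet */
            *b = malloc(64);
            if (!*b)
                    goto err_a;             /* undo a only */
            *c = malloc(64);
            if (!*c)
                    goto err_b;             /* undo b, fall into err_a */
            return 0;

    err_b:
            free(*b);
    err_a:
            free(*a);
    err:
            return -1;
    }
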
@@ -1560,12 +1627,17 @@ static int stmmac_open(struct net_device *dev)
        priv->dma_tx_size = STMMAC_ALIGN(dma_txsize);
        priv->dma_rx_size = STMMAC_ALIGN(dma_rxsize);
        priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
-       init_dma_desc_rings(dev);
+
+       ret = init_dma_desc_rings(dev);
+       if (ret < 0) {
+               pr_err("%s: DMA descriptors initialization failed\n", __func__);
+               goto dma_desc_error;
+       }
 
        /* DMA initialization and SW reset */
        ret = stmmac_init_dma_engine(priv);
        if (ret < 0) {
-               pr_err("%s: DMA initialization failed\n", __func__);
+               pr_err("%s: DMA engine initialization failed\n", __func__);
                goto init_error;
        }
 
@@ -1672,6 +1744,7 @@ wolirq_error:
 
 init_error:
        free_dma_desc_resources(priv);
+dma_desc_error:
        if (priv->phydev)
                phy_disconnect(priv->phydev);
 phy_error:
index 1d6dc41f755dba00edfbc83d07c1d898c05020cf..d01cacf8a7c279ee892b703715e84707214c186e 100644 (file)
@@ -2100,7 +2100,7 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx)
 
                __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
        }
-       netif_rx(skb);
+       netif_receive_skb(skb);
 
        stats->rx_bytes += pkt_len;
        stats->rx_packets++;
@@ -2884,6 +2884,7 @@ out:
        return ret;
 
 err_iounmap:
+       netif_napi_del(&vptr->napi);
        iounmap(regs);
 err_free_dev:
        free_netdev(netdev);
@@ -2904,6 +2905,7 @@ static int velocity_remove(struct device *dev)
        struct velocity_info *vptr = netdev_priv(netdev);
 
        unregister_netdev(netdev);
+       netif_napi_del(&vptr->napi);
        iounmap(vptr->mac_regs);
        free_netdev(netdev);
        velocity_nics--;
index d0f9c2fd1d4fdaa06110d394411f48bb5f8874bb..16b43bf544b74dd3ee72599f56f429ca2dea8851 100644 (file)
@@ -739,6 +739,10 @@ static int macvlan_validate(struct nlattr *tb[], struct nlattr *data[])
                        return -EADDRNOTAVAIL;
        }
 
+       if (data && data[IFLA_MACVLAN_FLAGS] &&
+           nla_get_u16(data[IFLA_MACVLAN_FLAGS]) & ~MACVLAN_FLAG_NOPROMISC)
+               return -EINVAL;
+
        if (data && data[IFLA_MACVLAN_MODE]) {
                switch (nla_get_u32(data[IFLA_MACVLAN_MODE])) {
                case MACVLAN_MODE_PRIVATE:
index a98fb0ed6aef7b9be00c4ed21ecc1b79793f9459..b51db2abfe442cd95fdbcb97e17aec702ec0d2c5 100644 (file)
@@ -818,10 +818,13 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
                skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
                skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
        }
-       if (vlan)
+       if (vlan) {
+               local_bh_disable();
                macvlan_start_xmit(skb, vlan->dev);
-       else
+               local_bh_enable();
+       } else {
                kfree_skb(skb);
+       }
        rcu_read_unlock();
 
        return total_len;
@@ -912,8 +915,11 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q,
 done:
        rcu_read_lock();
        vlan = rcu_dereference(q->vlan);
-       if (vlan)
+       if (vlan) {
+               preempt_disable();
                macvlan_count_rx(vlan, copied - vnet_hdr_len, ret == 0, 0);
+               preempt_enable();
+       }
        rcu_read_unlock();
 
        return ret ? ret : copied;
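
Note: both macvtap changes guard per-CPU accounting. macvlan's counters are also updated from softirq context, so a process-context caller must keep bottom halves (transmit side) or at least preemption (RX count) disabled for the duration of the update. A sketch of the invariant being protected; the stats layout below only approximates macvlan's per-CPU counters and is not copied from it:

    struct example_pcpu_stats {
            u64 rx_packets;
            u64 rx_bytes;
            struct u64_stats_sync syncp;
    };

    static void example_count_rx(struct example_pcpu_stats __percpu *pcpu,
                                 unsigned int len)
    {
            struct example_pcpu_stats *stats;

            /* The update must complete on one CPU without a softirq
             * running the same code in between. */
            preempt_disable();
            stats = this_cpu_ptr(pcpu);
            u64_stats_update_begin(&stats->syncp);
            stats->rx_packets++;
            stats->rx_bytes += len;
            u64_stats_update_end(&stats->syncp);
            preempt_enable();
    }
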
index db690a372260786b395186dffb38ea24e58d45ee..71af122edf2d639c0a87099d83998a0706bfd433 100644 (file)
@@ -1074,8 +1074,9 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
        u32 rxhash;
 
        if (!(tun->flags & TUN_NO_PI)) {
-               if ((len -= sizeof(pi)) > total_len)
+               if (len < sizeof(pi))
                        return -EINVAL;
+               len -= sizeof(pi);
 
                if (memcpy_fromiovecend((void *)&pi, iv, 0, sizeof(pi)))
                        return -EFAULT;
@@ -1083,8 +1084,9 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
        }
 
        if (tun->flags & TUN_VNET_HDR) {
-               if ((len -= tun->vnet_hdr_sz) > total_len)
+               if (len < tun->vnet_hdr_sz)
                        return -EINVAL;
+               len -= tun->vnet_hdr_sz;
 
                if (memcpy_fromiovecend((void *)&gso, iv, offset, sizeof(gso)))
                        return -EFAULT;
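
Note: both tun hunks replace "subtract, then hope the comparison catches it" with an explicit bound check before the subtraction. Since len is unsigned here, the old form only worked through wrap-around, and would break outright if the type ever became signed. The fixed shape generalizes to any header-consuming parser (plain C sketch; consume_header() is illustrative):

    #include <errno.h>
    #include <stddef.h>

    /* Consume hdr_len bytes from an unsigned running length.  Checking
     * before subtracting avoids relying on wrap-around behaviour. */
    static int consume_header(size_t *len, size_t hdr_len)
    {
            if (*len < hdr_len)
                    return -EINVAL;
            *len -= hdr_len;        /* cannot underflow now */
            return 0;
    }
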
index f4c6db419ddb3b56a9ecede0b62988c33e1d7c29..767f7af3bd40385ae18823ab579fff1709f79d7b 100644 (file)
@@ -1386,7 +1386,7 @@ static int vxlan_open(struct net_device *dev)
                return -ENOTCONN;
 
        if (IN_MULTICAST(ntohl(vxlan->default_dst.remote_ip)) &&
-           vxlan_group_used(vn, vxlan->default_dst.remote_ip)) {
+           vxlan_group_used(vn, vxlan->default_dst.remote_ip)) {
                vxlan_sock_hold(vs);
                dev_hold(dev);
                queue_work(vxlan_wq, &vxlan->igmp_join);
@@ -1793,8 +1793,6 @@ static void vxlan_dellink(struct net_device *dev, struct list_head *head)
        struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
        struct vxlan_dev *vxlan = netdev_priv(dev);
 
-       flush_workqueue(vxlan_wq);
-
        spin_lock(&vn->sock_lock);
        hlist_del_rcu(&vxlan->hlist);
        spin_unlock(&vn->sock_lock);
index 7365674366f4426fb79b74cadb4be87bbd07b7a1..010b252be584237c7f6b4e71f5d375332176f586 100644 (file)
@@ -1406,11 +1406,8 @@ static void cw1200_do_unjoin(struct cw1200_common *priv)
        if (!priv->join_status)
                goto done;
 
-       if (priv->join_status > CW1200_JOIN_STATUS_IBSS) {
-               wiphy_err(priv->hw->wiphy, "Unexpected: join status: %d\n",
-                         priv->join_status);
-               BUG_ON(1);
-       }
+       if (priv->join_status == CW1200_JOIN_STATUS_AP)
+               goto done;
 
        cancel_work_sync(&priv->update_filtering_work);
        cancel_work_sync(&priv->set_beacon_wakeup_period_work);
index b9b2bb51e60590ab7dfb91d4e284a6a0c6c259a7..f2ed62e373408d3882a3e463af81b88f3a83b7b1 100644 (file)
@@ -4460,12 +4460,12 @@ il4965_irq_tasklet(struct il_priv *il)
                 * is killed. Hence update the killswitch state here. The
                 * rfkill handler will care about restarting if needed.
                 */
-               if (!test_bit(S_ALIVE, &il->status)) {
-                       if (hw_rf_kill)
-                               set_bit(S_RFKILL, &il->status);
-                       else
-                               clear_bit(S_RFKILL, &il->status);
+               if (hw_rf_kill) {
+                       set_bit(S_RFKILL, &il->status);
+               } else {
+                       clear_bit(S_RFKILL, &il->status);
                        wiphy_rfkill_set_hw_state(il->hw->wiphy, hw_rf_kill);
+                       il_force_reset(il, true);
                }
 
                handled |= CSR_INT_BIT_RF_KILL;
@@ -5334,6 +5334,9 @@ il4965_alive_start(struct il_priv *il)
 
        il->active_rate = RATES_MASK;
 
+       il_power_update_mode(il, true);
+       D_INFO("Updated power mode\n");
+
        if (il_is_associated(il)) {
                struct il_rxon_cmd *active_rxon =
                    (struct il_rxon_cmd *)&il->active;
@@ -5364,9 +5367,6 @@ il4965_alive_start(struct il_priv *il)
        D_INFO("ALIVE processing complete.\n");
        wake_up(&il->wait_command_queue);
 
-       il_power_update_mode(il, true);
-       D_INFO("Updated power mode\n");
-
        return;
 
 restart:
index 3195aad440ddf6317b1b9b34d2e2f57e773755c3..b03e22ef5462d929ea20ffad72a4a01ff067c4a4 100644 (file)
@@ -4660,6 +4660,7 @@ il_force_reset(struct il_priv *il, bool external)
 
        return 0;
 }
+EXPORT_SYMBOL(il_force_reset);
 
 int
 il_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
index 767fee2ab340efb2d5ce647637053028c38b4aec..26019531db15a2f481dc87da6e6c76e085e1a06c 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/init.h>
 #include <linux/platform_device.h>
 #include <linux/interrupt.h>
+#include <linux/delay.h>
 #include <linux/rtc.h>
 #include <linux/slab.h>
 #include <linux/of_device.h>
@@ -119,24 +120,39 @@ static void stmp3xxx_wdt_register(struct platform_device *rtc_pdev)
 }
 #endif /* CONFIG_STMP3XXX_RTC_WATCHDOG */
 
-static void stmp3xxx_wait_time(struct stmp3xxx_rtc_data *rtc_data)
+static int stmp3xxx_wait_time(struct stmp3xxx_rtc_data *rtc_data)
 {
+       int timeout = 5000; /* 3ms according to i.MX28 Ref Manual */
        /*
-        * The datasheet doesn't say which way round the
-        * NEW_REGS/STALE_REGS bitfields go. In fact it's 0x1=P0,
-        * 0x2=P1, .., 0x20=P5, 0x40=ALARM, 0x80=SECONDS
+        * The i.MX28 Applications Processor Reference Manual, Rev. 1, 2010
+        * states:
+        * | The order in which registers are updated is
+        * | Persistent 0, 1, 2, 3, 4, 5, Alarm, Seconds.
+        * | (This list is in bitfield order, from LSB to MSB, as they would
+        * | appear in the STALE_REGS and NEW_REGS bitfields of the HW_RTC_STAT
+        * | register. For example, the Seconds register corresponds to
+        * | STALE_REGS or NEW_REGS containing 0x80.)
         */
-       while (readl(rtc_data->io + STMP3XXX_RTC_STAT) &
-                       (0x80 << STMP3XXX_RTC_STAT_STALE_SHIFT))
-               cpu_relax();
+       do {
+               if (!(readl(rtc_data->io + STMP3XXX_RTC_STAT) &
+                               (0x80 << STMP3XXX_RTC_STAT_STALE_SHIFT)))
+                       return 0;
+               udelay(1);
+       } while (--timeout > 0);
+       return (readl(rtc_data->io + STMP3XXX_RTC_STAT) &
+               (0x80 << STMP3XXX_RTC_STAT_STALE_SHIFT)) ? -ETIME : 0;
 }
 
 /* Time read/write */
 static int stmp3xxx_rtc_gettime(struct device *dev, struct rtc_time *rtc_tm)
 {
+       int ret;
        struct stmp3xxx_rtc_data *rtc_data = dev_get_drvdata(dev);
 
-       stmp3xxx_wait_time(rtc_data);
+       ret = stmp3xxx_wait_time(rtc_data);
+       if (ret)
+               return ret;
+
        rtc_time_to_tm(readl(rtc_data->io + STMP3XXX_RTC_SECONDS), rtc_tm);
        return 0;
 }
@@ -146,8 +162,7 @@ static int stmp3xxx_rtc_set_mmss(struct device *dev, unsigned long t)
        struct stmp3xxx_rtc_data *rtc_data = dev_get_drvdata(dev);
 
        writel(t, rtc_data->io + STMP3XXX_RTC_SECONDS);
-       stmp3xxx_wait_time(rtc_data);
-       return 0;
+       return stmp3xxx_wait_time(rtc_data);
 }
 
 /* interrupt(s) handler */
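
Note: the unbounded cpu_relax() spin becomes a bounded poll that gives up with -ETIME, re-checking the status once more after the budget expires so a last-moment update still counts as success. Later kernels wrap this pattern in readl_poll_timeout(); on this vintage it is open-coded. A generic sketch:

    /* Poll until (readl(reg) & mask) clears, for at most 'budget'
     * microseconds, mirroring the hunk's final re-check before
     * declaring failure. */
    static int example_wait_clear(void __iomem *reg, u32 mask, int budget)
    {
            while (budget-- > 0) {
                    if (!(readl(reg) & mask))
                            return 0;
                    udelay(1);
            }
            return (readl(reg) & mask) ? -ETIME : 0;
    }
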
index 45e57cc38200448da67b1ce2c896efff555f0e73..fc6f4f3a1a9d6c4c2f8b0c7afa1eb20d8029febf 100644 (file)
@@ -43,17 +43,18 @@ cifs_crypto_shash_md5_allocate(struct TCP_Server_Info *server)
        server->secmech.md5 = crypto_alloc_shash("md5", 0, 0);
        if (IS_ERR(server->secmech.md5)) {
                cifs_dbg(VFS, "could not allocate crypto md5\n");
-               return PTR_ERR(server->secmech.md5);
+               rc = PTR_ERR(server->secmech.md5);
+               server->secmech.md5 = NULL;
+               return rc;
        }
 
        size = sizeof(struct shash_desc) +
                        crypto_shash_descsize(server->secmech.md5);
        server->secmech.sdescmd5 = kmalloc(size, GFP_KERNEL);
        if (!server->secmech.sdescmd5) {
-               rc = -ENOMEM;
                crypto_free_shash(server->secmech.md5);
                server->secmech.md5 = NULL;
-               return rc;
+               return -ENOMEM;
        }
        server->secmech.sdescmd5->shash.tfm = server->secmech.md5;
        server->secmech.sdescmd5->shash.flags = 0x0;
@@ -421,7 +422,7 @@ find_domain_name(struct cifs_ses *ses, const struct nls_table *nls_cp)
                if (blobptr + attrsize > blobend)
                        break;
                if (type == NTLMSSP_AV_NB_DOMAIN_NAME) {
-                       if (!attrsize)
+                       if (!attrsize || attrsize >= CIFS_MAX_DOMAINNAME_LEN)
                                break;
                        if (!ses->domainName) {
                                ses->domainName =
@@ -591,6 +592,7 @@ CalcNTLMv2_response(const struct cifs_ses *ses, char *ntlmv2_hash)
 
 static int crypto_hmacmd5_alloc(struct TCP_Server_Info *server)
 {
+       int rc;
        unsigned int size;
 
        /* check if already allocated */
@@ -600,7 +602,9 @@ static int crypto_hmacmd5_alloc(struct TCP_Server_Info *server)
        server->secmech.hmacmd5 = crypto_alloc_shash("hmac(md5)", 0, 0);
        if (IS_ERR(server->secmech.hmacmd5)) {
                cifs_dbg(VFS, "could not allocate crypto hmacmd5\n");
-               return PTR_ERR(server->secmech.hmacmd5);
+               rc = PTR_ERR(server->secmech.hmacmd5);
+               server->secmech.hmacmd5 = NULL;
+               return rc;
        }
 
        size = sizeof(struct shash_desc) +
index 4bdd547dbf6fb3d10a2b2b962b710b722d0b9685..85ea98d139fc5643b0606b67959bb1f320037d80 100644 (file)
@@ -147,18 +147,17 @@ cifs_read_super(struct super_block *sb)
                goto out_no_root;
        }
 
+       if (cifs_sb_master_tcon(cifs_sb)->nocase)
+               sb->s_d_op = &cifs_ci_dentry_ops;
+       else
+               sb->s_d_op = &cifs_dentry_ops;
+
        sb->s_root = d_make_root(inode);
        if (!sb->s_root) {
                rc = -ENOMEM;
                goto out_no_root;
        }
 
-       /* do that *after* d_make_root() - we want NULL ->d_op for root here */
-       if (cifs_sb_master_tcon(cifs_sb)->nocase)
-               sb->s_d_op = &cifs_ci_dentry_ops;
-       else
-               sb->s_d_op = &cifs_dentry_ops;
-
 #ifdef CONFIG_CIFS_NFSD_EXPORT
        if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
                cifs_dbg(FYI, "export ops supported\n");
index 1fdc370410576e1a3f5a36a133245468160ae4b4..52ca861ed35e4fe3fbf78ec387ff401fb0e8fb94 100644 (file)
@@ -44,6 +44,7 @@
 #define MAX_TREE_SIZE (2 + MAX_SERVER_SIZE + 1 + MAX_SHARE_SIZE + 1)
 #define MAX_SERVER_SIZE 15
 #define MAX_SHARE_SIZE 80
+#define CIFS_MAX_DOMAINNAME_LEN 256 /* max domain name length */
 #define MAX_USERNAME_SIZE 256  /* reasonable maximum for current servers */
 #define MAX_PASSWORD_SIZE 512  /* max for windows seems to be 256 wide chars */
 
@@ -369,6 +370,9 @@ struct smb_version_operations {
        void (*generate_signingkey)(struct TCP_Server_Info *server);
        int (*calc_signature)(struct smb_rqst *rqst,
                                   struct TCP_Server_Info *server);
+       int (*query_mf_symlink)(const unsigned char *path, char *pbuf,
+                       unsigned int *pbytes_read, struct cifs_sb_info *cifs_sb,
+                       unsigned int xid);
 };
 
 struct smb_version_values {
index f7e584d047e202e0185f4936a71ef189e1a2781a..b29a012bed33a24b6ba45b17f95c70303e3e3014 100644 (file)
@@ -497,5 +497,7 @@ void cifs_writev_complete(struct work_struct *work);
 struct cifs_writedata *cifs_writedata_alloc(unsigned int nr_pages,
                                                work_func_t complete);
 void cifs_writedata_release(struct kref *refcount);
-
+int open_query_close_cifs_symlink(const unsigned char *path, char *pbuf,
+                       unsigned int *pbytes_read, struct cifs_sb_info *cifs_sb,
+                       unsigned int xid);
 #endif                 /* _CIFSPROTO_H */
index fa68813396b5acb7e7a2e5a65e1202d740688cbb..d67c550c49806254da76ca6f7dd32d29144c3c16 100644 (file)
@@ -1675,7 +1675,8 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
                        if (string == NULL)
                                goto out_nomem;
 
-                       if (strnlen(string, 256) == 256) {
+                       if (strnlen(string, CIFS_MAX_DOMAINNAME_LEN)
+                                       == CIFS_MAX_DOMAINNAME_LEN) {
                                printk(KERN_WARNING "CIFS: domain name too"
                                                    " long\n");
                                goto cifs_parse_mount_err;
@@ -2276,8 +2277,8 @@ cifs_put_smb_ses(struct cifs_ses *ses)
 
 #ifdef CONFIG_KEYS
 
-/* strlen("cifs:a:") + INET6_ADDRSTRLEN + 1 */
-#define CIFSCREDS_DESC_SIZE (7 + INET6_ADDRSTRLEN + 1)
+/* strlen("cifs:a:") + CIFS_MAX_DOMAINNAME_LEN + 1 */
+#define CIFSCREDS_DESC_SIZE (7 + CIFS_MAX_DOMAINNAME_LEN + 1)
 
 /* Populate username and pw fields from keyring if possible */
 static int
index 1e57f36ea1b2f84ac43c486cf6aa9cff4b2280a0..7e36ae34e9479b2614d61c919d393291593ef28f 100644 (file)
@@ -647,6 +647,7 @@ cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
                                     oflags, &oplock, &cfile->fid.netfid, xid);
                if (rc == 0) {
                        cifs_dbg(FYI, "posix reopen succeeded\n");
+                       oparms.reconnect = true;
                        goto reopen_success;
                }
                /*
index b83c3f5646bde6e21e80806541a00987198a5ce1..562044f700e56bf27997bf7ef7490bddf6bc1c91 100644 (file)
@@ -305,67 +305,89 @@ CIFSCouldBeMFSymlink(const struct cifs_fattr *fattr)
 }
 
 int
-CIFSCheckMFSymlink(struct cifs_fattr *fattr,
-                  const unsigned char *path,
-                  struct cifs_sb_info *cifs_sb, unsigned int xid)
+open_query_close_cifs_symlink(const unsigned char *path, char *pbuf,
+                       unsigned int *pbytes_read, struct cifs_sb_info *cifs_sb,
+                       unsigned int xid)
 {
        int rc;
        int oplock = 0;
        __u16 netfid = 0;
        struct tcon_link *tlink;
-       struct cifs_tcon *pTcon;
+       struct cifs_tcon *ptcon;
        struct cifs_io_parms io_parms;
-       u8 *buf;
-       char *pbuf;
-       unsigned int bytes_read = 0;
        int buf_type = CIFS_NO_BUFFER;
-       unsigned int link_len = 0;
        FILE_ALL_INFO file_info;
 
-       if (!CIFSCouldBeMFSymlink(fattr))
-               /* it's not a symlink */
-               return 0;
-
        tlink = cifs_sb_tlink(cifs_sb);
        if (IS_ERR(tlink))
                return PTR_ERR(tlink);
-       pTcon = tlink_tcon(tlink);
+       ptcon = tlink_tcon(tlink);
 
-       rc = CIFSSMBOpen(xid, pTcon, path, FILE_OPEN, GENERIC_READ,
+       rc = CIFSSMBOpen(xid, ptcon, path, FILE_OPEN, GENERIC_READ,
                         CREATE_NOT_DIR, &netfid, &oplock, &file_info,
                         cifs_sb->local_nls,
                         cifs_sb->mnt_cifs_flags &
                                CIFS_MOUNT_MAP_SPECIAL_CHR);
-       if (rc != 0)
-               goto out;
+       if (rc != 0) {
+               cifs_put_tlink(tlink);
+               return rc;
+       }
 
        if (file_info.EndOfFile != cpu_to_le64(CIFS_MF_SYMLINK_FILE_SIZE)) {
-               CIFSSMBClose(xid, pTcon, netfid);
+               CIFSSMBClose(xid, ptcon, netfid);
+               cifs_put_tlink(tlink);
                /* it's not a symlink */
-               goto out;
+               return rc;
        }
 
-       buf = kmalloc(CIFS_MF_SYMLINK_FILE_SIZE, GFP_KERNEL);
-       if (!buf) {
-               rc = -ENOMEM;
-               goto out;
-       }
-       pbuf = buf;
        io_parms.netfid = netfid;
        io_parms.pid = current->tgid;
-       io_parms.tcon = pTcon;
+       io_parms.tcon = ptcon;
        io_parms.offset = 0;
        io_parms.length = CIFS_MF_SYMLINK_FILE_SIZE;
 
-       rc = CIFSSMBRead(xid, &io_parms, &bytes_read, &pbuf, &buf_type);
-       CIFSSMBClose(xid, pTcon, netfid);
-       if (rc != 0) {
-               kfree(buf);
+       rc = CIFSSMBRead(xid, &io_parms, pbytes_read, &pbuf, &buf_type);
+       CIFSSMBClose(xid, ptcon, netfid);
+       cifs_put_tlink(tlink);
+       return rc;
+}
+
+
+int
+CIFSCheckMFSymlink(struct cifs_fattr *fattr,
+                  const unsigned char *path,
+                  struct cifs_sb_info *cifs_sb, unsigned int xid)
+{
+       int rc = 0;
+       u8 *buf = NULL;
+       unsigned int link_len = 0;
+       unsigned int bytes_read = 0;
+       struct cifs_tcon *ptcon;
+
+       if (!CIFSCouldBeMFSymlink(fattr))
+               /* it's not a symlink */
+               return 0;
+
+       buf = kmalloc(CIFS_MF_SYMLINK_FILE_SIZE, GFP_KERNEL);
+       if (!buf) {
+               rc = -ENOMEM;
                goto out;
        }
 
+       ptcon = tlink_tcon(cifs_sb_tlink(cifs_sb));
+       if ((ptcon->ses) && (ptcon->ses->server->ops->query_mf_symlink))
+               rc = ptcon->ses->server->ops->query_mf_symlink(path, buf,
+                                                &bytes_read, cifs_sb, xid);
+       else
+               goto out;
+
+       if (rc != 0)
+               goto out;
+
+       if (bytes_read == 0) /* not a symlink */
+               goto out;
+
        rc = CIFSParseMFSymlink(buf, bytes_read, &link_len, NULL);
-       kfree(buf);
        if (rc == -EINVAL) {
                /* it's not a symlink */
                rc = 0;
@@ -381,7 +403,7 @@ CIFSCheckMFSymlink(struct cifs_fattr *fattr,
        fattr->cf_mode |= S_IFLNK | S_IRWXU | S_IRWXG | S_IRWXO;
        fattr->cf_dtype = DT_LNK;
 out:
-       cifs_put_tlink(tlink);
+       kfree(buf);
        return rc;
 }
 
index ab877846939499c18cb454c83ed26c43dc60040c..69d2c826a23badc552bb686b518beaea297b473c 100644 (file)
@@ -111,6 +111,14 @@ cifs_prime_dcache(struct dentry *parent, struct qstr *name,
                        return;
        }
 
+       /*
+        * If we know that the inode will need to be revalidated immediately,
+        * then don't create a new dentry for it. We'll end up doing an
+        * on-the-wire call either way, and this spares us an invalidation.
+        */
+       if (fattr->cf_flags & CIFS_FATTR_NEED_REVAL)
+               return;
+
        dentry = d_alloc(parent, name);
        if (!dentry)
                return;
index 79358e341fd2ea63cff42e6d608c58f65b5f9804..08dd37bb23aac8ea04fe979743f8c963990a80f7 100644 (file)
@@ -197,7 +197,7 @@ static void unicode_domain_string(char **pbcc_area, struct cifs_ses *ses,
                bytes_ret = 0;
        } else
                bytes_ret = cifs_strtoUTF16((__le16 *) bcc_ptr, ses->domainName,
-                                           256, nls_cp);
+                                           CIFS_MAX_DOMAINNAME_LEN, nls_cp);
        bcc_ptr += 2 * bytes_ret;
        bcc_ptr += 2;  /* account for null terminator */
 
@@ -255,8 +255,8 @@ static void ascii_ssetup_strings(char **pbcc_area, struct cifs_ses *ses,
 
        /* copy domain */
        if (ses->domainName != NULL) {
-               strncpy(bcc_ptr, ses->domainName, 256);
-               bcc_ptr += strnlen(ses->domainName, 256);
+               strncpy(bcc_ptr, ses->domainName, CIFS_MAX_DOMAINNAME_LEN);
+               bcc_ptr += strnlen(ses->domainName, CIFS_MAX_DOMAINNAME_LEN);
        } /* else we will send a null domain name
             so the server will default to its own domain */
        *bcc_ptr = 0;
index 6457690731a220b58fa2be3f55bd6bf430f27b8b..60943978aec35bb06360adb7e060802d5775bee1 100644 (file)
@@ -944,6 +944,7 @@ struct smb_version_operations smb1_operations = {
        .mand_lock = cifs_mand_lock,
        .mand_unlock_range = cifs_unlock_range,
        .push_mand_locks = cifs_push_mandatory_locks,
+       .query_mf_symlink = open_query_close_cifs_symlink,
 };
 
 struct smb_version_values smb1_values = {
index 301b191270b9bcf09edbea5ad47100267f32e859..4f2300d020c7e517a6fa89be5d955fa4b12753da 100644 (file)
@@ -42,6 +42,7 @@
 static int
 smb2_crypto_shash_allocate(struct TCP_Server_Info *server)
 {
+       int rc;
        unsigned int size;
 
        if (server->secmech.sdeschmacsha256 != NULL)
@@ -50,7 +51,9 @@ smb2_crypto_shash_allocate(struct TCP_Server_Info *server)
        server->secmech.hmacsha256 = crypto_alloc_shash("hmac(sha256)", 0, 0);
        if (IS_ERR(server->secmech.hmacsha256)) {
                cifs_dbg(VFS, "could not allocate crypto hmacsha256\n");
-               return PTR_ERR(server->secmech.hmacsha256);
+               rc = PTR_ERR(server->secmech.hmacsha256);
+               server->secmech.hmacsha256 = NULL;
+               return rc;
        }
 
        size = sizeof(struct shash_desc) +
@@ -87,7 +90,9 @@ smb3_crypto_shash_allocate(struct TCP_Server_Info *server)
                server->secmech.sdeschmacsha256 = NULL;
                crypto_free_shash(server->secmech.hmacsha256);
                server->secmech.hmacsha256 = NULL;
-               return PTR_ERR(server->secmech.cmacaes);
+               rc = PTR_ERR(server->secmech.cmacaes);
+               server->secmech.cmacaes = NULL;
+               return rc;
        }
 
        size = sizeof(struct shash_desc) +
index 9c73def87642fc8f36624b7fa525504541a59a04..fd774c7cb4831be8817799ed6cab355a368fa19f 100644 (file)
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -608,7 +608,7 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
                return -ENOMEM;
 
        lru_add_drain();
-       tlb_gather_mmu(&tlb, mm, 0);
+       tlb_gather_mmu(&tlb, mm, old_start, old_end);
        if (new_end > old_start) {
                /*
                 * when the old and new regions overlap clear from new_end.
@@ -625,7 +625,7 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
                free_pgd_range(&tlb, old_start, old_end, new_end,
                        vma->vm_next ? vma->vm_next->vm_start : USER_PGTABLES_CEILING);
        }
-       tlb_finish_mmu(&tlb, new_end, old_end);
+       tlb_finish_mmu(&tlb, old_start, old_end);
 
        /*
         * Shrink the vma to just the new range.  Always succeeds.
index 9491ac0590f746b9abe56e484e7646022a0bd1c3..c0427e2f66481a13f3018b4630901688fdd6646e 100644 (file)
@@ -77,8 +77,10 @@ static void swap_inode_data(struct inode *inode1, struct inode *inode2)
        memswap(ei1->i_data, ei2->i_data, sizeof(ei1->i_data));
        memswap(&ei1->i_flags, &ei2->i_flags, sizeof(ei1->i_flags));
        memswap(&ei1->i_disksize, &ei2->i_disksize, sizeof(ei1->i_disksize));
-       memswap(&ei1->i_es_tree, &ei2->i_es_tree, sizeof(ei1->i_es_tree));
-       memswap(&ei1->i_es_lru_nr, &ei2->i_es_lru_nr, sizeof(ei1->i_es_lru_nr));
+       ext4_es_remove_extent(inode1, 0, EXT_MAX_BLOCKS);
+       ext4_es_remove_extent(inode2, 0, EXT_MAX_BLOCKS);
+       ext4_es_lru_del(inode1);
+       ext4_es_lru_del(inode2);
 
        isize = i_size_read(inode1);
        i_size_write(inode1, i_size_read(inode2));
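
Note: the raw memswap of the extent-status members had to go because those structures are not position-independent. At minimum the LRU list_head is linked into a global list by its neighbours, so byte-swapping two instances leaves other nodes pointing at the wrong inode; dropping the cached state and letting it rebuild, as above, sidesteps the problem. The list_head half of the hazard in miniature (memswap being the file-local helper visible in this hunk):

    /* a and b are each linked into lists: their neighbours hold the
     * addresses &a and &b.  Byte-swapping the nodes does not update
     * those neighbours, so both lists end up corrupted.  Correct
     * movement needs list_del() + list_add(), or invalidation. */
    struct list_head a, b;

    memswap(&a, &b, sizeof(a));     /* wrong for linked structures */
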
index 36b141e420b7a3dbb2c2b9330e76cfb4b778ec4a..b59373b625e9bcacaf3a63e409b21b1ef6212dfa 100644 (file)
@@ -1359,7 +1359,7 @@ static const struct mount_opts {
        {Opt_delalloc, EXT4_MOUNT_DELALLOC,
         MOPT_EXT4_ONLY | MOPT_SET | MOPT_EXPLICIT},
        {Opt_nodelalloc, EXT4_MOUNT_DELALLOC,
-        MOPT_EXT4_ONLY | MOPT_CLEAR | MOPT_EXPLICIT},
+        MOPT_EXT4_ONLY | MOPT_CLEAR},
        {Opt_journal_checksum, EXT4_MOUNT_JOURNAL_CHECKSUM,
         MOPT_EXT4_ONLY | MOPT_SET},
        {Opt_journal_async_commit, (EXT4_MOUNT_JOURNAL_ASYNC_COMMIT |
@@ -3483,7 +3483,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
                }
                if (test_opt(sb, DIOREAD_NOLOCK)) {
                        ext4_msg(sb, KERN_ERR, "can't mount with "
-                                "both data=journal and delalloc");
+                                "both data=journal and dioread_nolock");
                        goto failed_mount;
                }
                if (test_opt(sb, DELALLOC))
@@ -4727,6 +4727,21 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
                goto restore_opts;
        }
 
+       if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) {
+               if (test_opt2(sb, EXPLICIT_DELALLOC)) {
+                       ext4_msg(sb, KERN_ERR, "can't mount with "
+                                "both data=journal and delalloc");
+                       err = -EINVAL;
+                       goto restore_opts;
+               }
+               if (test_opt(sb, DIOREAD_NOLOCK)) {
+                       ext4_msg(sb, KERN_ERR, "can't mount with "
+                                "both data=journal and dioread_nolock");
+                       err = -EINVAL;
+                       goto restore_opts;
+               }
+       }
+
        if (sbi->s_mount_flags & EXT4_MF_FS_ABORTED)
                ext4_abort(sb, "Abort forced by user");
 
index a3f868ae3fd48f043f91a059ede274b20b431c3b..34423978b17083d73b038d360d72b47dc08c6693 100644 (file)
@@ -463,6 +463,14 @@ static struct inode *hugetlbfs_get_root(struct super_block *sb,
        return inode;
 }
 
+/*
+ * Hugetlbfs is not reclaimable; therefore its i_mmap_mutex will never
+ * be taken from reclaim -- unlike regular filesystems. This needs an
+ * annotation because huge_pmd_share() does an allocation under
+ * i_mmap_mutex.
+ */
+struct lock_class_key hugetlbfs_i_mmap_mutex_key;
+
 static struct inode *hugetlbfs_get_inode(struct super_block *sb,
                                        struct inode *dir,
                                        umode_t mode, dev_t dev)
@@ -474,6 +482,8 @@ static struct inode *hugetlbfs_get_inode(struct super_block *sb,
                struct hugetlbfs_inode_info *info;
                inode->i_ino = get_next_ino();
                inode_init_owner(inode, dir, mode);
+               lockdep_set_class(&inode->i_mapping->i_mmap_mutex,
+                               &hugetlbfs_i_mmap_mutex_key);
                inode->i_mapping->a_ops = &hugetlbfs_aops;
                inode->i_mapping->backing_dev_info =&hugetlbfs_backing_dev_info;
                inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
index 79736a28d84f911dd66acca86c844912284ad9fa..2abf97b2a592b7dafaad4e2cfc0f9a6f7442f475 100644 (file)
@@ -1757,7 +1757,7 @@ try_again:
                goto out;
        } else if (ret == 1) {
                clusters_need = wc->w_clen;
-               ret = ocfs2_refcount_cow(inode, filp, di_bh,
+               ret = ocfs2_refcount_cow(inode, di_bh,
                                         wc->w_cpos, wc->w_clen, UINT_MAX);
                if (ret) {
                        mlog_errno(ret);
index eb760d8acd500c96bd45195ce2a2c79fb0942ed5..30544ce8e9f78cc66cc0eb61387f84b21cef4309 100644 (file)
@@ -2153,11 +2153,9 @@ int ocfs2_empty_dir(struct inode *inode)
 {
        int ret;
        struct ocfs2_empty_dir_priv priv = {
-               .ctx.actor = ocfs2_empty_dir_filldir
+               .ctx.actor = ocfs2_empty_dir_filldir,
        };
 
-       memset(&priv, 0, sizeof(priv));
-
        if (ocfs2_dir_indexed(inode)) {
                ret = ocfs2_empty_dir_dx(inode, &priv);
                if (ret)
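
Note: the removed memset was not just redundant, it was destructive. A designated initializer already zeroes every unnamed member, and the memset ran after it, wiping the .ctx.actor callback that had just been set. The bug class in miniature (plain C, illustrative names):

    #include <string.h>

    struct cb { void (*actor)(void); int flags; };

    static void my_actor(void) { }

    void example(void)
    {
            struct cb good = { .actor = my_actor }; /* .flags == 0 already */
            struct cb bad  = { .actor = my_actor };

            memset(&bad, 0, sizeof(bad));   /* wipes .actor back to NULL */
            (void)good;
    }
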
index 41000f223ca42bb855a57f1cc27f08a28ecccba9..3261d71319eeb27d3a569aeccf4f3f6e27a95496 100644 (file)
@@ -370,7 +370,7 @@ static int ocfs2_cow_file_pos(struct inode *inode,
        if (!(ext_flags & OCFS2_EXT_REFCOUNTED))
                goto out;
 
-       return ocfs2_refcount_cow(inode, NULL, fe_bh, cpos, 1, cpos+1);
+       return ocfs2_refcount_cow(inode, fe_bh, cpos, 1, cpos+1);
 
 out:
        return status;
@@ -899,7 +899,7 @@ static int ocfs2_zero_extend_get_range(struct inode *inode,
                zero_clusters = last_cpos - zero_cpos;
 
        if (needs_cow) {
-               rc = ocfs2_refcount_cow(inode, NULL, di_bh, zero_cpos,
+               rc = ocfs2_refcount_cow(inode, di_bh, zero_cpos,
                                        zero_clusters, UINT_MAX);
                if (rc) {
                        mlog_errno(rc);
@@ -2078,7 +2078,7 @@ static int ocfs2_prepare_inode_for_refcount(struct inode *inode,
 
        *meta_level = 1;
 
-       ret = ocfs2_refcount_cow(inode, file, di_bh, cpos, clusters, UINT_MAX);
+       ret = ocfs2_refcount_cow(inode, di_bh, cpos, clusters, UINT_MAX);
        if (ret)
                mlog_errno(ret);
 out:
index 96f9ac237e86dbc301339c07b882bbff508f9143..0a992737dcaf8e76d0b57b18dcb21b57d5ed6479 100644 (file)
@@ -537,7 +537,7 @@ static inline int ocfs2_calc_extend_credits(struct super_block *sb,
        extent_blocks = 1 + 1 + le16_to_cpu(root_el->l_tree_depth);
 
        return bitmap_blocks + sysfile_bitmap_blocks + extent_blocks +
-              ocfs2_quota_trans_credits(sb) + bits_wanted;
+              ocfs2_quota_trans_credits(sb);
 }
 
 static inline int ocfs2_calc_symlink_credits(struct super_block *sb)
index f1fc172175b6c47454cfbb4f10bfd239102e92a9..452068b45749a7545002543f0b0b450b52612dec 100644 (file)
@@ -69,7 +69,7 @@ static int __ocfs2_move_extent(handle_t *handle,
        u64 ino = ocfs2_metadata_cache_owner(context->et.et_ci);
        u64 old_blkno = ocfs2_clusters_to_blocks(inode->i_sb, p_cpos);
 
-       ret = ocfs2_duplicate_clusters_by_page(handle, context->file, cpos,
+       ret = ocfs2_duplicate_clusters_by_page(handle, inode, cpos,
                                               p_cpos, new_p_cpos, len);
        if (ret) {
                mlog_errno(ret);
index 9f6b96a09615bcc243b81ebc5cf3c9139c2deec3..a70d604593b61c0ffbce761a92b1207148f3b346 100644 (file)
@@ -49,7 +49,6 @@
 
 struct ocfs2_cow_context {
        struct inode *inode;
-       struct file *file;
        u32 cow_start;
        u32 cow_len;
        struct ocfs2_extent_tree data_et;
@@ -66,7 +65,7 @@ struct ocfs2_cow_context {
                            u32 *num_clusters,
                            unsigned int *extent_flags);
        int (*cow_duplicate_clusters)(handle_t *handle,
-                                     struct file *file,
+                                     struct inode *inode,
                                      u32 cpos, u32 old_cluster,
                                      u32 new_cluster, u32 new_len);
 };
@@ -2922,14 +2921,12 @@ static int ocfs2_clear_cow_buffer(handle_t *handle, struct buffer_head *bh)
 }
 
 int ocfs2_duplicate_clusters_by_page(handle_t *handle,
-                                    struct file *file,
+                                    struct inode *inode,
                                     u32 cpos, u32 old_cluster,
                                     u32 new_cluster, u32 new_len)
 {
        int ret = 0, partial;
-       struct inode *inode = file_inode(file);
-       struct ocfs2_caching_info *ci = INODE_CACHE(inode);
-       struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
+       struct super_block *sb = inode->i_sb;
        u64 new_block = ocfs2_clusters_to_blocks(sb, new_cluster);
        struct page *page;
        pgoff_t page_index;
@@ -2978,13 +2975,6 @@ int ocfs2_duplicate_clusters_by_page(handle_t *handle,
                if (PAGE_CACHE_SIZE <= OCFS2_SB(sb)->s_clustersize)
                        BUG_ON(PageDirty(page));
 
-               if (PageReadahead(page)) {
-                       page_cache_async_readahead(mapping,
-                                                  &file->f_ra, file,
-                                                  page, page_index,
-                                                  readahead_pages);
-               }
-
                if (!PageUptodate(page)) {
                        ret = block_read_full_page(page, ocfs2_get_block);
                        if (ret) {
@@ -3004,7 +2994,8 @@ int ocfs2_duplicate_clusters_by_page(handle_t *handle,
                        }
                }
 
-               ocfs2_map_and_dirty_page(inode, handle, from, to,
+               ocfs2_map_and_dirty_page(inode,
+                                        handle, from, to,
                                         page, 0, &new_block);
                mark_page_accessed(page);
 unlock:
@@ -3020,12 +3011,11 @@ unlock:
 }
 
 int ocfs2_duplicate_clusters_by_jbd(handle_t *handle,
-                                   struct file *file,
+                                   struct inode *inode,
                                    u32 cpos, u32 old_cluster,
                                    u32 new_cluster, u32 new_len)
 {
        int ret = 0;
-       struct inode *inode = file_inode(file);
        struct super_block *sb = inode->i_sb;
        struct ocfs2_caching_info *ci = INODE_CACHE(inode);
        int i, blocks = ocfs2_clusters_to_blocks(sb, new_len);
@@ -3150,7 +3140,7 @@ static int ocfs2_replace_clusters(handle_t *handle,
 
        /* If the old clusters are unwritten, no need to duplicate. */
        if (!(ext_flags & OCFS2_EXT_UNWRITTEN)) {
-               ret = context->cow_duplicate_clusters(handle, context->file,
+               ret = context->cow_duplicate_clusters(handle, context->inode,
                                                      cpos, old, new, len);
                if (ret) {
                        mlog_errno(ret);
@@ -3428,35 +3418,12 @@ static int ocfs2_replace_cow(struct ocfs2_cow_context *context)
        return ret;
 }
 
-static void ocfs2_readahead_for_cow(struct inode *inode,
-                                   struct file *file,
-                                   u32 start, u32 len)
-{
-       struct address_space *mapping;
-       pgoff_t index;
-       unsigned long num_pages;
-       int cs_bits = OCFS2_SB(inode->i_sb)->s_clustersize_bits;
-
-       if (!file)
-               return;
-
-       mapping = file->f_mapping;
-       num_pages = (len << cs_bits) >> PAGE_CACHE_SHIFT;
-       if (!num_pages)
-               num_pages = 1;
-
-       index = ((loff_t)start << cs_bits) >> PAGE_CACHE_SHIFT;
-       page_cache_sync_readahead(mapping, &file->f_ra, file,
-                                 index, num_pages);
-}
-
 /*
  * Starting at cpos, try to CoW write_len clusters.  Don't CoW
  * past max_cpos.  This will stop when it runs into a hole or an
  * unrefcounted extent.
  */
 static int ocfs2_refcount_cow_hunk(struct inode *inode,
-                                  struct file *file,
                                   struct buffer_head *di_bh,
                                   u32 cpos, u32 write_len, u32 max_cpos)
 {
@@ -3485,8 +3452,6 @@ static int ocfs2_refcount_cow_hunk(struct inode *inode,
 
        BUG_ON(cow_len == 0);
 
-       ocfs2_readahead_for_cow(inode, file, cow_start, cow_len);
-
        context = kzalloc(sizeof(struct ocfs2_cow_context), GFP_NOFS);
        if (!context) {
                ret = -ENOMEM;
@@ -3508,7 +3473,6 @@ static int ocfs2_refcount_cow_hunk(struct inode *inode,
        context->ref_root_bh = ref_root_bh;
        context->cow_duplicate_clusters = ocfs2_duplicate_clusters_by_page;
        context->get_clusters = ocfs2_di_get_clusters;
-       context->file = file;
 
        ocfs2_init_dinode_extent_tree(&context->data_et,
                                      INODE_CACHE(inode), di_bh);
@@ -3537,7 +3501,6 @@ out:
  * clusters between cpos and cpos+write_len are safe to modify.
  */
 int ocfs2_refcount_cow(struct inode *inode,
-                      struct file *file,
                       struct buffer_head *di_bh,
                       u32 cpos, u32 write_len, u32 max_cpos)
 {
@@ -3557,7 +3520,7 @@ int ocfs2_refcount_cow(struct inode *inode,
                        num_clusters = write_len;
 
                if (ext_flags & OCFS2_EXT_REFCOUNTED) {
-                       ret = ocfs2_refcount_cow_hunk(inode, file, di_bh, cpos,
+                       ret = ocfs2_refcount_cow_hunk(inode, di_bh, cpos,
                                                      num_clusters, max_cpos);
                        if (ret) {
                                mlog_errno(ret);
index 7754608c83a47b1b44425c8f9c5e13a2adc65675..6422bbcdb52506b1631ce495ddf4524a4ae71c92 100644 (file)
@@ -53,7 +53,7 @@ int ocfs2_prepare_refcount_change_for_del(struct inode *inode,
                                          int *credits,
                                          int *ref_blocks);
 int ocfs2_refcount_cow(struct inode *inode,
-                      struct file *filep, struct buffer_head *di_bh,
+                      struct buffer_head *di_bh,
                       u32 cpos, u32 write_len, u32 max_cpos);
 
 typedef int (ocfs2_post_refcount_func)(struct inode *inode,
@@ -85,11 +85,11 @@ int ocfs2_refcount_cow_xattr(struct inode *inode,
                             u32 cpos, u32 write_len,
                             struct ocfs2_post_refcount *post);
 int ocfs2_duplicate_clusters_by_page(handle_t *handle,
-                                    struct file *file,
+                                    struct inode *inode,
                                     u32 cpos, u32 old_cluster,
                                     u32 new_cluster, u32 new_len);
 int ocfs2_duplicate_clusters_by_jbd(handle_t *handle,
-                                   struct file *file,
+                                   struct inode *inode,
                                    u32 cpos, u32 old_cluster,
                                    u32 new_cluster, u32 new_len);
 int ocfs2_cow_sync_writeback(struct super_block *sb,
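
The thread through the ocfs2 hunks above: the struct file pointer was carried through the CoW machinery only to feed readahead, and with ocfs2_readahead_for_cow() and the page_cache_*_readahead() calls removed, both duplication hooks need nothing beyond the inode (cold pages now come in through block_read_full_page()). A condensed sketch of how the hook is bound after this change, taken from the hunks above; the jbd binding for the xattr path is my recollection of the surrounding file, not shown in this diff:

        context->cow_duplicate_clusters = ocfs2_duplicate_clusters_by_page;
        /* while the xattr CoW path elsewhere in refcounttree.c binds: */
        context->cow_duplicate_clusters = ocfs2_duplicate_clusters_by_jbd;
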
index dbf61f6174f0f473d3d9a19311a09170f8dab9f3..107d026f5d6e0cbebc068fce65f201f167c425c5 100644 (file)
@@ -730,8 +730,16 @@ static inline void clear_soft_dirty(struct vm_area_struct *vma,
         * of how soft-dirty works.
         */
        pte_t ptent = *pte;
-       ptent = pte_wrprotect(ptent);
-       ptent = pte_clear_flags(ptent, _PAGE_SOFT_DIRTY);
+
+       if (pte_present(ptent)) {
+               ptent = pte_wrprotect(ptent);
+               ptent = pte_clear_flags(ptent, _PAGE_SOFT_DIRTY);
+       } else if (is_swap_pte(ptent)) {
+               ptent = pte_swp_clear_soft_dirty(ptent);
+       } else if (pte_file(ptent)) {
+               ptent = pte_file_clear_soft_dirty(ptent);
+       }
+
        set_pte_at(vma->vm_mm, addr, pte, ptent);
 #endif
 }
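
For clarity, the dispatch the reworked clear_soft_dirty() performs, using the helpers stubbed out in the asm-generic pgtable hunk further down:

        pte_present(pte) -> pte_wrprotect() + pte_clear_flags(_PAGE_SOFT_DIRTY)
        is_swap_pte(pte) -> pte_swp_clear_soft_dirty(pte)
        pte_file(pte)    -> pte_file_clear_soft_dirty(pte)

This is also why the next hunk moves the !pte_present() early-continue below the CLEAR_REFS_SOFT_DIRTY branch: swap and file ptes must now reach the clearing code too.
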
@@ -752,14 +760,15 @@ static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
        pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
        for (; addr != end; pte++, addr += PAGE_SIZE) {
                ptent = *pte;
-               if (!pte_present(ptent))
-                       continue;
 
                if (cp->type == CLEAR_REFS_SOFT_DIRTY) {
                        clear_soft_dirty(vma, addr, pte);
                        continue;
                }
 
+               if (!pte_present(ptent))
+                       continue;
+
                page = vm_normal_page(vma, addr, ptent);
                if (!page)
                        continue;
@@ -859,7 +868,7 @@ typedef struct {
 } pagemap_entry_t;
 
 struct pagemapread {
-       int pos, len;
+       int pos, len;           /* units: PM_ENTRY_BYTES, not bytes */
        pagemap_entry_t *buffer;
        bool v2;
 };
@@ -867,7 +876,7 @@ struct pagemapread {
 #define PAGEMAP_WALK_SIZE      (PMD_SIZE)
 #define PAGEMAP_WALK_MASK      (PMD_MASK)
 
-#define PM_ENTRY_BYTES      sizeof(u64)
+#define PM_ENTRY_BYTES      sizeof(pagemap_entry_t)
 #define PM_STATUS_BITS      3
 #define PM_STATUS_OFFSET    (64 - PM_STATUS_BITS)
 #define PM_STATUS_MASK      (((1LL << PM_STATUS_BITS) - 1) << PM_STATUS_OFFSET)
@@ -930,8 +939,10 @@ static void pte_to_pagemap_entry(pagemap_entry_t *pme, struct pagemapread *pm,
                flags = PM_PRESENT;
                page = vm_normal_page(vma, addr, pte);
        } else if (is_swap_pte(pte)) {
-               swp_entry_t entry = pte_to_swp_entry(pte);
-
+               swp_entry_t entry;
+               if (pte_swp_soft_dirty(pte))
+                       flags2 |= __PM_SOFT_DIRTY;
+               entry = pte_to_swp_entry(pte);
                frame = swp_type(entry) |
                        (swp_offset(entry) << MAX_SWAPFILES_SHIFT);
                flags = PM_SWAP;
@@ -1116,8 +1127,8 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
                goto out_task;
 
        pm.v2 = soft_dirty_cleared;
-       pm.len = PM_ENTRY_BYTES * (PAGEMAP_WALK_SIZE >> PAGE_SHIFT);
-       pm.buffer = kmalloc(pm.len, GFP_TEMPORARY);
+       pm.len = (PAGEMAP_WALK_SIZE >> PAGE_SHIFT);
+       pm.buffer = kmalloc(pm.len * PM_ENTRY_BYTES, GFP_TEMPORARY);
        ret = -ENOMEM;
        if (!pm.buffer)
                goto out_task;
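
A worked check of the unit fix, assuming common x86-64 values for illustration (PMD_SIZE = 2 MiB, PAGE_SHIFT = 12, sizeof(pagemap_entry_t) = 8):

        pm.len    = PAGEMAP_WALK_SIZE >> PAGE_SHIFT = 512 entries
        pm.buffer = 512 * PM_ENTRY_BYTES            = 4096 bytes

As I read the surrounding code, pm.pos and pm.len are compared as entry counts while the buffer is filled, so the old byte-valued pm.len (4096) let the walker run roughly eight times past the 4 KiB allocation.
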
index 2f47ade1b5678f325faea455656a9578839b8638..0807ddf97b058fb04b1eedfd82c34b76e4253335 100644 (file)
@@ -417,6 +417,36 @@ static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
 {
        return pmd;
 }
+
+static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
+{
+       return pte;
+}
+
+static inline int pte_swp_soft_dirty(pte_t pte)
+{
+       return 0;
+}
+
+static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
+{
+       return pte;
+}
+
+static inline pte_t pte_file_clear_soft_dirty(pte_t pte)
+{
+       return pte;
+}
+
+static inline pte_t pte_file_mksoft_dirty(pte_t pte)
+{
+       return pte;
+}
+
+static inline int pte_file_soft_dirty(pte_t pte)
+{
+       return 0;
+}
 #endif
 
 #ifndef __HAVE_PFNMAP_TRACKING
index 13821c339a4151912b88f2e4de04cbf99653a612..5672d7ea1fa066175b33c8f4c19457fcdc0614ed 100644 (file)
@@ -112,7 +112,7 @@ struct mmu_gather {
 
 #define HAVE_GENERIC_MMU_GATHER
 
-void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm);
+void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end);
 void tlb_flush_mmu(struct mmu_gather *tlb);
 void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start,
                                                        unsigned long end);
index 737685e9e852e91fd93ccf142500c33773f42e71..68029b30c3dc89e2d2620fd41101b2ab05069344 100644 (file)
@@ -309,21 +309,20 @@ struct mlx5_hca_cap {
        __be16  max_desc_sz_rq;
        u8      rsvd21[2];
        __be16  max_desc_sz_sq_dc;
-       u8      rsvd22[4];
-       __be16  max_qp_mcg;
-       u8      rsvd23;
+       __be32  max_qp_mcg;
+       u8      rsvd22[3];
        u8      log_max_mcg;
-       u8      rsvd24;
+       u8      rsvd23;
        u8      log_max_pd;
-       u8      rsvd25;
+       u8      rsvd24;
        u8      log_max_xrcd;
-       u8      rsvd26[42];
+       u8      rsvd25[42];
        __be16  log_uar_page_sz;
-       u8      rsvd27[28];
+       u8      rsvd26[28];
        u8      log_msx_atomic_size_qp;
-       u8      rsvd28[2];
+       u8      rsvd27[2];
        u8      log_msx_atomic_size_dc;
-       u8      rsvd29[76];
+       u8      rsvd28[76];
 };
 
 
@@ -472,9 +471,8 @@ struct mlx5_eqe_cmd {
 struct mlx5_eqe_page_req {
        u8              rsvd0[2];
        __be16          func_id;
-       u8              rsvd1[2];
-       __be16          num_pages;
-       __be32          rsvd2[5];
+       __be32          num_pages;
+       __be32          rsvd1[5];
 };
 
 union ev_data {
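
A quick offset check on the two layout fixes above (my arithmetic): each widened field absorbs exactly the reserved bytes next to it, so all following fields keep their offsets:

        mlx5_hca_cap:      rsvd[4] + __be16 + rsvd[1] = 7 bytes  ->  __be32 + rsvd[3] = 7 bytes
        mlx5_eqe_page_req: rsvd[2] + __be16           = 4 bytes  ->  __be32           = 4 bytes

This matches the s16 -> s32 widening of the npages handler argument in the driver header below.
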
index 2aa258b0ced1449b8281f6728d6f243d0c72d15f..8888381fc150b8f3f852077407ee8187a54cb7aa 100644 (file)
@@ -358,7 +358,7 @@ struct mlx5_caps {
        u32     reserved_lkey;
        u8      local_ca_ack_delay;
        u8      log_max_mcg;
-       u16     max_qp_mcg;
+       u32     max_qp_mcg;
        int     min_page_sz;
 };
 
@@ -691,7 +691,7 @@ void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev);
 int mlx5_pagealloc_start(struct mlx5_core_dev *dev);
 void mlx5_pagealloc_stop(struct mlx5_core_dev *dev);
 void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
-                                s16 npages);
+                                s32 npages);
 int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot);
 int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev);
 void mlx5_register_debugfs(void);
@@ -731,9 +731,6 @@ void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev);
 int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db);
 void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db);
 
-typedef void (*health_handler_t)(struct pci_dev *pdev, struct health_buffer __iomem *buf, int size);
-int mlx5_register_health_report_handler(health_handler_t handler);
-void mlx5_unregister_health_report_handler(void);
 const char *mlx5_command_str(int command);
 int mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev);
 void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev);
index d722490da030c5906d991b7c76f8dc178edadee8..e9995eb5985cd50b28aaae49f528487acf3f018c 100644 (file)
@@ -314,6 +314,7 @@ struct nsproxy;
 struct user_namespace;
 
 #ifdef CONFIG_MMU
+extern unsigned long mmap_legacy_base(void);
 extern void arch_pick_mmap_layout(struct mm_struct *mm);
 extern unsigned long
 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
@@ -1532,6 +1533,8 @@ static inline pid_t task_pgrp_nr(struct task_struct *tsk)
  * Test if a process is not yet dead (at most zombie state)
  * If pid_alive fails, then pointers within the task structure
  * can be stale and must not be dereferenced.
+ *
+ * Return: 1 if the process is alive. 0 otherwise.
  */
 static inline int pid_alive(struct task_struct *p)
 {
@@ -1543,6 +1546,8 @@ static inline int pid_alive(struct task_struct *p)
  * @tsk: Task structure to be checked.
  *
  * Check if a task structure is the first user space task the kernel created.
+ *
+ * Return: 1 if the task structure is init. 0 otherwise.
  */
 static inline int is_global_init(struct task_struct *tsk)
 {
@@ -1894,6 +1899,8 @@ extern struct task_struct *idle_task(int cpu);
 /**
  * is_idle_task - is the specified task an idle task?
  * @p: the task in question.
+ *
+ * Return: 1 if @p is an idle task. 0 otherwise.
  */
 static inline bool is_idle_task(const struct task_struct *p)
 {
index 7d537ced949aa83b33b4c20e32c991d2123ca5a5..75f34949d9ab998adf5941c0321ea45005cc7d62 100644 (file)
@@ -117,9 +117,17 @@ do {                                                               \
 #endif /*arch_spin_is_contended*/
 #endif
 
-/* The lock does not imply full memory barrier. */
-#ifndef ARCH_HAS_SMP_MB_AFTER_LOCK
-static inline void smp_mb__after_lock(void) { smp_mb(); }
+/*
+ * Despite its name it doesn't necessarily have to be a full barrier.
+ * It should only guarantee that a STORE before the critical section
+ * cannot be reordered with a LOAD inside this section.
+ * spin_lock() is a one-way barrier: this LOAD cannot escape out
+ * of the region. So the default implementation simply ensures that
+ * a STORE cannot move into the critical section; smp_wmb() should
+ * serialize it with another STORE done by spin_lock().
+ */
+#ifndef smp_mb__before_spinlock
+#define smp_mb__before_spinlock()      smp_wmb()
 #endif
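
A condensed sketch of the pairing this comment describes, lifted from the try_to_wake_up() hunk later in this diff (names abbreviated, not compilable as-is):

        CONDITION = 1;                  /* STORE before the critical section   */
        smp_mb__before_spinlock();      /* keep it ahead of the LOAD below     */
        raw_spin_lock(&p->pi_lock);
        if (!(p->state & state))        /* LOAD inside the section; pairs with */
                goto out;               /* the mb() in set_current_state()     */
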
 
 /**
index c5fd30d2a415a48964eb1fb2aaaac0db255656e1..8d4fa82bfb913d0021d93c49e71dab940804fdf7 100644 (file)
@@ -67,6 +67,8 @@ static inline swp_entry_t pte_to_swp_entry(pte_t pte)
        swp_entry_t arch_entry;
 
        BUG_ON(pte_file(pte));
+       if (pte_swp_soft_dirty(pte))
+               pte = pte_swp_clear_soft_dirty(pte);
        arch_entry = __pte_to_swp_entry(pte);
        return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
 }
index 4147d700a293623b63615b987c797f14d380a7a1..84662ecc7b51468233175959ddd5c04bd0efac0d 100644 (file)
@@ -802,9 +802,14 @@ asmlinkage long sys_vfork(void);
 asmlinkage long sys_clone(unsigned long, unsigned long, int __user *, int,
               int __user *);
 #else
+#ifdef CONFIG_CLONE_BACKWARDS3
+asmlinkage long sys_clone(unsigned long, unsigned long, int, int __user *,
+                         int __user *, int);
+#else
 asmlinkage long sys_clone(unsigned long, unsigned long, int __user *,
               int __user *, int);
 #endif
+#endif
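
For reference, the argument orders of the two branches touched here, read off the declarations above and the fork.c hunk below:

        CLONE_BACKWARDS3: clone(flags, newsp, stack_size, parent_tidptr, child_tidptr, tls_val)
        default:          clone(flags, newsp, parent_tidptr, child_tidptr, tls_val)
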
 
 asmlinkage long sys_execve(const char __user *filename,
                const char __user *const __user *argv,
index f18b91966d3de718528ae3d44887e937514718fc..8a358a2c97e6a82cb98dd6b9586133828306f961 100644 (file)
@@ -122,7 +122,7 @@ static inline bool sk_busy_loop(struct sock *sk, int nonblock)
                if (rc > 0)
                        /* local bh are disabled so it is ok to use _BH */
                        NET_ADD_STATS_BH(sock_net(sk),
-                                        LINUX_MIB_LOWLATENCYRXPACKETS, rc);
+                                        LINUX_MIB_BUSYPOLLRXPACKETS, rc);
 
        } while (!nonblock && skb_queue_empty(&sk->sk_receive_queue) &&
                 !need_resched() && !busy_loop_timeout(end_time));
@@ -162,11 +162,6 @@ static inline bool sk_can_busy_loop(struct sock *sk)
        return false;
 }
 
-static inline bool sk_busy_poll(struct sock *sk, int nonblock)
-{
-       return false;
-}
-
 static inline void skb_mark_napi_id(struct sk_buff *skb,
                                    struct napi_struct *napi)
 {
index 781b3cf86a2f534ff71e03037b7984a0e84a8c8e..a354db5b7662f1bce43af7edc97471cdef3421bb 100644 (file)
@@ -145,20 +145,6 @@ static inline u8 ip_tunnel_ecn_encap(u8 tos, const struct iphdr *iph,
        return INET_ECN_encapsulate(tos, inner);
 }
 
-static inline void tunnel_ip_select_ident(struct sk_buff *skb,
-                                         const struct iphdr  *old_iph,
-                                         struct dst_entry *dst)
-{
-       struct iphdr *iph = ip_hdr(skb);
-
-       /* Use inner packet iph-id if possible. */
-       if (skb->protocol == htons(ETH_P_IP) && old_iph->id)
-               iph->id = old_iph->id;
-       else
-               __ip_select_ident(iph, dst,
-                                 (skb_shinfo(skb)->gso_segs ?: 1) - 1);
-}
-
 int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto);
 int iptunnel_xmit(struct net *net, struct rtable *rt,
                  struct sk_buff *skb,
index 6eab63363e59fe82dc3fc6119633e5583b663dd1..e5ae0c50fa9c5c481f991de92efd5e862d5bca67 100644 (file)
@@ -683,13 +683,19 @@ struct psched_ratecfg {
        u64     rate_bytes_ps; /* bytes per second */
        u32     mult;
        u16     overhead;
+       u8      linklayer;
        u8      shift;
 };
 
 static inline u64 psched_l2t_ns(const struct psched_ratecfg *r,
                                unsigned int len)
 {
-       return ((u64)(len + r->overhead) * r->mult) >> r->shift;
+       len += r->overhead;
+
+       if (unlikely(r->linklayer == TC_LINKLAYER_ATM))
+               return ((u64)(DIV_ROUND_UP(len,48)*53) * r->mult) >> r->shift;
+
+       return ((u64)len * r->mult) >> r->shift;
 }
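
A worked example of the new ATM branch (my arithmetic): ATM carries 48 payload bytes per 53-byte cell, so for len + overhead = 100 bytes:

        cells   = DIV_ROUND_UP(100, 48) = 3
        charged = 3 * 53                = 159 bytes at the configured rate
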
 
 extern void psched_ratecfg_precompute(struct psched_ratecfg *r, const struct tc_ratespec *conf);
@@ -700,6 +706,7 @@ static inline void psched_ratecfg_getrate(struct tc_ratespec *res,
        memset(res, 0, sizeof(*res));
        res->rate = r->rate_bytes_ps;
        res->overhead = r->overhead;
+       res->linklayer = (r->linklayer & TC_LINKLAYER_MASK);
 }
 
 #endif
index dbd71b0c7d8c93edbc80ae50ad7223db3665f00a..09d62b9228ffa061de1b5cb8ee80e20ef470db5c 100644 (file)
@@ -73,9 +73,17 @@ struct tc_estimator {
 #define TC_H_ROOT      (0xFFFFFFFFU)
 #define TC_H_INGRESS    (0xFFFFFFF1U)
 
+/* Needs to correspond to iproute2 tc/tc_core.h "enum link_layer" */
+enum tc_link_layer {
+       TC_LINKLAYER_UNAWARE, /* indicates an unaware (old) iproute2 utility */
+       TC_LINKLAYER_ETHERNET,
+       TC_LINKLAYER_ATM,
+};
+#define TC_LINKLAYER_MASK 0x0F /* limit use to lower 4 bits */
+
 struct tc_ratespec {
        unsigned char   cell_log;
-       unsigned char   __reserved;
+       __u8            linklayer; /* lower 4 bits */
        unsigned short  overhead;
        short           cell_align;
        unsigned short  mpu;
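
Reusing the reserved byte is deliberate: old iproute2 binaries zero it, which decodes to TC_LINKLAYER_UNAWARE and lets the kernel distinguish old from linklayer-aware userspace. A one-line decode, with rs standing in for a received tc_ratespec:

        enum tc_link_layer ll = rs.linklayer & TC_LINKLAYER_MASK;
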
index af0a674cc677f570bf5d5e04683957605d5e9ea1..a1356d3b54df4bd7f68fa26e3436fd7ff2dbcb71 100644 (file)
@@ -253,7 +253,7 @@ enum
        LINUX_MIB_TCPFASTOPENLISTENOVERFLOW,    /* TCPFastOpenListenOverflow */
        LINUX_MIB_TCPFASTOPENCOOKIEREQD,        /* TCPFastOpenCookieReqd */
        LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES, /* TCPSpuriousRtxHostQueues */
-       LINUX_MIB_LOWLATENCYRXPACKETS,          /* LowLatencyRxPackets */
+       LINUX_MIB_BUSYPOLLRXPACKETS,            /* BusyPollRxPackets */
        __LINUX_MIB_MAX
 };
 
index 403d2bb8a96865ec2b62da44e48f77e3a7ae3e0c..e23bb19e2a3e23c9d3ad3c68e0f49acb214d3e51 100644 (file)
@@ -1679,6 +1679,12 @@ SYSCALL_DEFINE5(clone, unsigned long, newsp, unsigned long, clone_flags,
                 int __user *, parent_tidptr,
                 int __user *, child_tidptr,
                 int, tls_val)
+#elif defined(CONFIG_CLONE_BACKWARDS3)
+SYSCALL_DEFINE6(clone, unsigned long, clone_flags, unsigned long, newsp,
+               int, stack_size,
+               int __user *, parent_tidptr,
+               int __user *, child_tidptr,
+               int, tls_val)
 #else
 SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp,
                 int __user *, parent_tidptr,
index ff05f4bd86eb6acf10ff49307e448ddb8b00916a..a52ee7bb830d3fa9db3cf898a34c8212664ac8b7 100644 (file)
@@ -686,7 +686,7 @@ __ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
        might_sleep();
        ret =  __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE,
                                   0, &ctx->dep_map, _RET_IP_, ctx);
-       if (!ret && ctx->acquired > 0)
+       if (!ret && ctx->acquired > 1)
                return ww_mutex_deadlock_injection(lock, ctx);
 
        return ret;
@@ -702,7 +702,7 @@ __ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
        ret = __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE,
                                  0, &ctx->dep_map, _RET_IP_, ctx);
 
-       if (!ret && ctx->acquired > 0)
+       if (!ret && ctx->acquired > 1)
                return ww_mutex_deadlock_injection(lock, ctx);
 
        return ret;
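
My reading of the off-by-one being fixed: ctx->acquired has already been incremented for the lock just taken, so the old "> 0" test injected -EDEADLK even on a context's very first acquisition, where no deadlock is possible:

        ww_mutex_lock(&a, &ctx);   /* acquired == 1: single lock, never inject */
        ww_mutex_lock(&b, &ctx);   /* acquired == 2: injection may now fire    */
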
index b7c32cb7bfebbd5912f06a421decb87b15c34bf9..05c39f030314698730349de8eb9346c1437d9aba 100644 (file)
@@ -933,6 +933,8 @@ static int effective_prio(struct task_struct *p)
 /**
  * task_curr - is this task currently executing on a CPU?
  * @p: the task in question.
+ *
+ * Return: 1 if the task is currently executing. 0 otherwise.
  */
 inline int task_curr(const struct task_struct *p)
 {
@@ -1482,7 +1484,7 @@ static void ttwu_queue(struct task_struct *p, int cpu)
  * the simpler "current->state = TASK_RUNNING" to mark yourself
  * runnable without the overhead of this.
  *
- * Returns %true if @p was woken up, %false if it was already running
+ * Return: %true if @p was woken up, %false if it was already running.
  * or @state didn't match @p's state.
  */
 static int
@@ -1491,7 +1493,13 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
        unsigned long flags;
        int cpu, success = 0;
 
-       smp_wmb();
+       /*
+        * If we are going to wake up a thread waiting for CONDITION we
+        * need to ensure that CONDITION=1 done by the caller can not be
+        * reordered with p->state check below. This pairs with mb() in
+        * set_current_state() the waiting thread does.
+        */
+       smp_mb__before_spinlock();
        raw_spin_lock_irqsave(&p->pi_lock, flags);
        if (!(p->state & state))
                goto out;
@@ -1577,8 +1585,9 @@ out:
  * @p: The process to be woken up.
  *
  * Attempt to wake up the nominated process and move it to the set of runnable
- * processes.  Returns 1 if the process was woken up, 0 if it was already
- * running.
+ * processes.
+ *
+ * Return: 1 if the process was woken up, 0 if it was already running.
  *
  * It may be assumed that this function implies a write memory barrier before
  * changing the task state if and only if any tasks are woken up.
@@ -2191,6 +2200,8 @@ void scheduler_tick(void)
  * This makes sure that uptime, CFS vruntime, load
  * balancing, etc... continue to move forward, even
  * with a very low granularity.
+ *
+ * Return: Maximum deferment in nanoseconds.
  */
 u64 scheduler_tick_max_deferment(void)
 {
@@ -2394,6 +2405,12 @@ need_resched:
        if (sched_feat(HRTICK))
                hrtick_clear(rq);
 
+       /*
+        * Make sure that signal_pending_state()->signal_pending() below
+        * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE)
+        * done by the caller to avoid the race with signal_wake_up().
+        */
+       smp_mb__before_spinlock();
        raw_spin_lock_irq(&rq->lock);
 
        switch_count = &prev->nivcsw;
@@ -2796,8 +2813,8 @@ EXPORT_SYMBOL(wait_for_completion);
  * specified timeout to expire. The timeout is in jiffies. It is not
  * interruptible.
  *
- * The return value is 0 if timed out, and positive (at least 1, or number of
- * jiffies left till timeout) if completed.
+ * Return: 0 if timed out, and positive (at least 1, or number of jiffies left
+ * till timeout) if completed.
  */
 unsigned long __sched
 wait_for_completion_timeout(struct completion *x, unsigned long timeout)
@@ -2829,8 +2846,8 @@ EXPORT_SYMBOL(wait_for_completion_io);
  * specified timeout to expire. The timeout is in jiffies. It is not
  * interruptible. The caller is accounted as waiting for IO.
  *
- * The return value is 0 if timed out, and positive (at least 1, or number of
- * jiffies left till timeout) if completed.
+ * Return: 0 if timed out, and positive (at least 1, or number of jiffies left
+ * till timeout) if completed.
  */
 unsigned long __sched
 wait_for_completion_io_timeout(struct completion *x, unsigned long timeout)
@@ -2846,7 +2863,7 @@ EXPORT_SYMBOL(wait_for_completion_io_timeout);
  * This waits for completion of a specific task to be signaled. It is
  * interruptible.
  *
- * The return value is -ERESTARTSYS if interrupted, 0 if completed.
+ * Return: -ERESTARTSYS if interrupted, 0 if completed.
  */
 int __sched wait_for_completion_interruptible(struct completion *x)
 {
@@ -2865,8 +2882,8 @@ EXPORT_SYMBOL(wait_for_completion_interruptible);
  * This waits for either a completion of a specific task to be signaled or for a
  * specified timeout to expire. It is interruptible. The timeout is in jiffies.
  *
- * The return value is -ERESTARTSYS if interrupted, 0 if timed out,
- * positive (at least 1, or number of jiffies left till timeout) if completed.
+ * Return: -ERESTARTSYS if interrupted, 0 if timed out, positive (at least 1,
+ * or number of jiffies left till timeout) if completed.
  */
 long __sched
 wait_for_completion_interruptible_timeout(struct completion *x,
@@ -2883,7 +2900,7 @@ EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
  * This waits to be signaled for completion of a specific task. It can be
  * interrupted by a kill signal.
  *
- * The return value is -ERESTARTSYS if interrupted, 0 if completed.
+ * Return: -ERESTARTSYS if interrupted, 0 if completed.
  */
 int __sched wait_for_completion_killable(struct completion *x)
 {
@@ -2903,8 +2920,8 @@ EXPORT_SYMBOL(wait_for_completion_killable);
  * signaled or for a specified timeout to expire. It can be
  * interrupted by a kill signal. The timeout is in jiffies.
  *
- * The return value is -ERESTARTSYS if interrupted, 0 if timed out,
- * positive (at least 1, or number of jiffies left till timeout) if completed.
+ * Return: -ERESTARTSYS if interrupted, 0 if timed out, positive (at least 1,
+ * or number of jiffies left till timeout) if completed.
  */
 long __sched
 wait_for_completion_killable_timeout(struct completion *x,
@@ -2918,7 +2935,7 @@ EXPORT_SYMBOL(wait_for_completion_killable_timeout);
  *     try_wait_for_completion - try to decrement a completion without blocking
  *     @x:     completion structure
  *
- *     Returns: 0 if a decrement cannot be done without blocking
+ *     Return: 0 if a decrement cannot be done without blocking
  *              1 if a decrement succeeded.
  *
  *     If a completion is being used as a counting completion,
@@ -2945,7 +2962,7 @@ EXPORT_SYMBOL(try_wait_for_completion);
  *     completion_done - Test to see if a completion has any waiters
  *     @x:     completion structure
  *
- *     Returns: 0 if there are waiters (wait_for_completion() in progress)
+ *     Return: 0 if there are waiters (wait_for_completion() in progress)
  *              1 if there are no waiters.
  *
  */
@@ -3182,7 +3199,7 @@ SYSCALL_DEFINE1(nice, int, increment)
  * task_prio - return the priority value of a given task.
  * @p: the task in question.
  *
- * This is the priority value as seen by users in /proc.
+ * Return: The priority value as seen by users in /proc.
  * RT tasks are offset by -200. Normal tasks are centered
  * around 0, value goes from -16 to +15.
  */
@@ -3194,6 +3211,8 @@ int task_prio(const struct task_struct *p)
 /**
  * task_nice - return the nice value of a given task.
  * @p: the task in question.
+ *
+ * Return: The nice value [ -20 ... 0 ... 19 ].
  */
 int task_nice(const struct task_struct *p)
 {
@@ -3204,6 +3223,8 @@ EXPORT_SYMBOL(task_nice);
 /**
  * idle_cpu - is a given cpu idle currently?
  * @cpu: the processor in question.
+ *
+ * Return: 1 if the CPU is currently idle. 0 otherwise.
  */
 int idle_cpu(int cpu)
 {
@@ -3226,6 +3247,8 @@ int idle_cpu(int cpu)
 /**
  * idle_task - return the idle task for a given cpu.
  * @cpu: the processor in question.
+ *
+ * Return: The idle task for the cpu @cpu.
  */
 struct task_struct *idle_task(int cpu)
 {
@@ -3235,6 +3258,8 @@ struct task_struct *idle_task(int cpu)
 /**
  * find_process_by_pid - find a process with a matching PID value.
  * @pid: the pid in question.
+ *
+ * Return: The task of @pid, if found. %NULL otherwise.
  */
 static struct task_struct *find_process_by_pid(pid_t pid)
 {
@@ -3432,6 +3457,8 @@ recheck:
  * @policy: new policy.
  * @param: structure containing the new RT priority.
  *
+ * Return: 0 on success. An error code otherwise.
+ *
  * NOTE that the task may be already dead.
  */
 int sched_setscheduler(struct task_struct *p, int policy,
@@ -3451,6 +3478,8 @@ EXPORT_SYMBOL_GPL(sched_setscheduler);
  * current context has permission.  For example, this is needed in
  * stop_machine(): we create temporary high priority worker threads,
  * but our caller might not have that capability.
+ *
+ * Return: 0 on success. An error code otherwise.
  */
 int sched_setscheduler_nocheck(struct task_struct *p, int policy,
                               const struct sched_param *param)
@@ -3485,6 +3514,8 @@ do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
  * @pid: the pid in question.
  * @policy: new policy.
  * @param: structure containing the new RT priority.
+ *
+ * Return: 0 on success. An error code otherwise.
  */
 SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy,
                struct sched_param __user *, param)
@@ -3500,6 +3531,8 @@ SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy,
  * sys_sched_setparam - set/change the RT priority of a thread
  * @pid: the pid in question.
  * @param: structure containing the new RT priority.
+ *
+ * Return: 0 on success. An error code otherwise.
  */
 SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
 {
@@ -3509,6 +3542,9 @@ SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
 /**
  * sys_sched_getscheduler - get the policy (scheduling class) of a thread
  * @pid: the pid in question.
+ *
+ * Return: On success, the policy of the thread. Otherwise, a negative error
+ * code.
  */
 SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
 {
@@ -3535,6 +3571,9 @@ SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
  * sys_sched_getparam - get the RT priority of a thread
  * @pid: the pid in question.
  * @param: structure containing the RT priority.
+ *
+ * Return: On success, 0 and the RT priority is in @param. Otherwise, an error
+ * code.
  */
 SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
 {
@@ -3659,6 +3698,8 @@ static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
  * @pid: pid of the process
  * @len: length in bytes of the bitmask pointed to by user_mask_ptr
  * @user_mask_ptr: user-space pointer to the new cpu mask
+ *
+ * Return: 0 on success. An error code otherwise.
  */
 SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
                unsigned long __user *, user_mask_ptr)
@@ -3710,6 +3751,8 @@ out_unlock:
  * @pid: pid of the process
  * @len: length in bytes of the bitmask pointed to by user_mask_ptr
  * @user_mask_ptr: user-space pointer to hold the current cpu mask
+ *
+ * Return: 0 on success. An error code otherwise.
  */
 SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
                unsigned long __user *, user_mask_ptr)
@@ -3744,6 +3787,8 @@ SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
  *
  * This function yields the current CPU to other tasks. If there are no
  * other threads running on this CPU then this function will return.
+ *
+ * Return: 0.
  */
 SYSCALL_DEFINE0(sched_yield)
 {
@@ -3869,7 +3914,7 @@ EXPORT_SYMBOL(yield);
  * It's the caller's job to ensure that the target task struct
  * can't go away on us before we can do any checks.
  *
- * Returns:
+ * Return:
  *     true (>0) if we indeed boosted the target task.
  *     false (0) if we failed to boost the target.
  *     -ESRCH if there's no task to yield to.
@@ -3972,8 +4017,9 @@ long __sched io_schedule_timeout(long timeout)
  * sys_sched_get_priority_max - return maximum RT priority.
  * @policy: scheduling class.
  *
- * this syscall returns the maximum rt_priority that can be used
- * by a given scheduling class.
+ * Return: On success, this syscall returns the maximum
+ * rt_priority that can be used by a given scheduling class.
+ * On failure, a negative error code is returned.
  */
 SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
 {
@@ -3997,8 +4043,9 @@ SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
  * sys_sched_get_priority_min - return minimum RT priority.
  * @policy: scheduling class.
  *
- * this syscall returns the minimum rt_priority that can be used
- * by a given scheduling class.
+ * Return: On success, this syscall returns the minimum
+ * rt_priority that can be used by a given scheduling class.
+ * On failure, a negative error code is returned.
  */
 SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
 {
@@ -4024,6 +4071,9 @@ SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
  *
  * this syscall writes the default timeslice value of a given process
  * into the user-space timespec buffer. A value of '0' means infinity.
+ *
+ * Return: On success, 0 and the timeslice is in @interval. Otherwise,
+ * an error code.
  */
 SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
                struct timespec __user *, interval)
@@ -6632,6 +6682,8 @@ void normalize_rt_tasks(void)
  * @cpu: the processor in question.
  *
  * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
+ *
+ * Return: The current task for @cpu.
  */
 struct task_struct *curr_task(int cpu)
 {
index 1095e878a46fdbe4a7224eb07a68cf1fc90c2530..8b836b376d9129760066326eabf5040f72b2e4f3 100644 (file)
@@ -62,7 +62,7 @@ static int convert_prio(int prio)
  * any discrepancies created by racing against the uncertainty of the current
  * priority configuration.
  *
- * Returns: (int)bool - CPUs were found
+ * Return: (int)bool - CPUs were found
  */
 int cpupri_find(struct cpupri *cp, struct task_struct *p,
                struct cpumask *lowest_mask)
@@ -203,7 +203,7 @@ void cpupri_set(struct cpupri *cp, int cpu, int newpri)
  * cpupri_init - initialize the cpupri structure
  * @cp: The cpupri context
  *
- * Returns: -ENOMEM if memory fails.
+ * Return: -ENOMEM on memory allocation failure.
  */
 int cpupri_init(struct cpupri *cp)
 {
index 9565645e320218f394f36eb0edbaa95236bbfcd1..68f1609ca149e259fab8db2fdb6bc3d18587a049 100644 (file)
@@ -2032,6 +2032,7 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
         */
        update_entity_load_avg(curr, 1);
        update_cfs_rq_blocked_load(cfs_rq, 1);
+       update_cfs_shares(cfs_rq);
 
 #ifdef CONFIG_SCHED_HRTICK
        /*
@@ -4280,6 +4281,8 @@ struct sg_lb_stats {
  * get_sd_load_idx - Obtain the load index for a given sched domain.
  * @sd: The sched_domain whose load_idx is to be obtained.
 * @idle: The idle status of the CPU whose sd load_idx is to be obtained.
+ *
+ * Return: The load index.
  */
 static inline int get_sd_load_idx(struct sched_domain *sd,
                                        enum cpu_idle_type idle)
@@ -4574,6 +4577,9 @@ static inline void update_sg_lb_stats(struct lb_env *env,
  *
  * Determine if @sg is a busier group than the previously selected
  * busiest group.
+ *
+ * Return: %true if @sg is a busier group than the previously selected
+ * busiest group. %false otherwise.
  */
 static bool update_sd_pick_busiest(struct lb_env *env,
                                   struct sd_lb_stats *sds,
@@ -4691,7 +4697,7 @@ static inline void update_sd_lb_stats(struct lb_env *env,
  * assuming lower CPU number will be equivalent to lower a SMT thread
  * number.
  *
- * Returns 1 when packing is required and a task should be moved to
+ * Return: 1 when packing is required and a task should be moved to
  * this CPU.  The amount of the imbalance is returned in *imbalance.
  *
  * @env: The load balancing environment.
@@ -4869,7 +4875,7 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
  * @balance: Pointer to a variable indicating if this_cpu
  *     is the appropriate cpu to perform load balancing at this_level.
  *
- * Returns:    - the busiest group if imbalance exists.
+ * Return:     - The busiest group if imbalance exists.
  *             - If no imbalance and user has opted for power-savings balance,
  *                return the least loaded group whose CPUs can be
  *                put to idle by rebalancing its tasks onto our group.
index 87da3590c61e20287b4854e8212c178f96cb73c6..5bff0814776870e2974e6e436ad99f99f264e05a 100644 (file)
@@ -57,17 +57,22 @@ static int install_file_pte(struct mm_struct *mm, struct vm_area_struct *vma,
                unsigned long addr, unsigned long pgoff, pgprot_t prot)
 {
        int err = -ENOMEM;
-       pte_t *pte;
+       pte_t *pte, ptfile;
        spinlock_t *ptl;
 
        pte = get_locked_pte(mm, addr, &ptl);
        if (!pte)
                goto out;
 
-       if (!pte_none(*pte))
+       ptfile = pgoff_to_pte(pgoff);
+
+       if (!pte_none(*pte)) {
+               if (pte_present(*pte) && pte_soft_dirty(*pte))
+                       ptfile = pte_file_mksoft_dirty(ptfile);
                zap_pte(mm, vma, addr, pte);
+       }
 
-       set_pte_at(mm, addr, pte, pgoff_to_pte(pgoff));
+       set_pte_at(mm, addr, pte, ptfile);
        /*
         * We don't need to run update_mmu_cache() here because the "file pte"
         * being installed by install_file_pte() is not a real pte - it's a
index 83aff0a4d0938e308619a703dfca17265419e628..b60f33080a28f0a078f8186e65ab8d878fced928 100644 (file)
@@ -2490,7 +2490,7 @@ void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
 
        mm = vma->vm_mm;
 
-       tlb_gather_mmu(&tlb, mm, 0);
+       tlb_gather_mmu(&tlb, mm, start, end);
        __unmap_hugepage_range(&tlb, vma, start, end, ref_page);
        tlb_finish_mmu(&tlb, start, end);
 }
index c290a1cf38624adf8372d67ccf60d9ec01ff1357..c5792a5d87cede8cf2c5474156c1596f51f5ba61 100644 (file)
@@ -3195,11 +3195,11 @@ int memcg_register_cache(struct mem_cgroup *memcg, struct kmem_cache *s,
        if (!s->memcg_params)
                return -ENOMEM;
 
-       INIT_WORK(&s->memcg_params->destroy,
-                       kmem_cache_destroy_work_func);
        if (memcg) {
                s->memcg_params->memcg = memcg;
                s->memcg_params->root_cache = root_cache;
+               INIT_WORK(&s->memcg_params->destroy,
+                               kmem_cache_destroy_work_func);
        } else
                s->memcg_params->is_root_cache = true;
 
index 1ce2e2a734fc2b0812845279a69cb87f48588d3e..af84bc0ec17c213054b023815ae197fefde7ffb6 100644 (file)
@@ -209,14 +209,15 @@ static int tlb_next_batch(struct mmu_gather *tlb)
 *     tear-down from @mm. A zero @start with an @end of ~0UL means @mm is without
  *     users and we're going to destroy the full address space (exit/execve).
  */
-void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm)
+void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
 {
        tlb->mm = mm;
 
-       tlb->fullmm     = fullmm;
+       /* Is it from 0 to ~0? */
+       tlb->fullmm     = !(start | (end+1));
        tlb->need_flush_all = 0;
-       tlb->start      = -1UL;
-       tlb->end        = 0;
+       tlb->start      = start;
+       tlb->end        = end;
        tlb->need_flush = 0;
        tlb->local.next = NULL;
        tlb->local.nr   = 0;
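
A quick check of the fullmm detection (the only full-range caller in this diff is exit_mmap(), which now passes 0, -1):

        start = 0, end = ~0UL:  start | (end + 1) == 0  ->  fullmm = 1
        any narrower range:     start | (end + 1) != 0  ->  fullmm = 0
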
@@ -256,8 +257,6 @@ void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long e
 {
        struct mmu_gather_batch *batch, *next;
 
-       tlb->start = start;
-       tlb->end   = end;
        tlb_flush_mmu(tlb);
 
        /* keep the page table cache within bounds */
@@ -1099,7 +1098,6 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
        spinlock_t *ptl;
        pte_t *start_pte;
        pte_t *pte;
-       unsigned long range_start = addr;
 
 again:
        init_rss_vec(rss);
@@ -1141,9 +1139,12 @@ again:
                                continue;
                        if (unlikely(details) && details->nonlinear_vma
                            && linear_page_index(details->nonlinear_vma,
-                                               addr) != page->index)
-                               set_pte_at(mm, addr, pte,
-                                          pgoff_to_pte(page->index));
+                                               addr) != page->index) {
+                               pte_t ptfile = pgoff_to_pte(page->index);
+                               if (pte_soft_dirty(ptent))
+                                       ptfile = pte_file_mksoft_dirty(ptfile);
+                               set_pte_at(mm, addr, pte, ptfile);
+                       }
                        if (PageAnon(page))
                                rss[MM_ANONPAGES]--;
                        else {
@@ -1202,17 +1203,25 @@ again:
         * and page-free while holding it.
         */
        if (force_flush) {
+               unsigned long old_end;
+
                force_flush = 0;
 
-#ifdef HAVE_GENERIC_MMU_GATHER
-               tlb->start = range_start;
+               /*
+                * Flush the TLB just for the previous segment,
+                * then update the range to be the remaining
+                * TLB range.
+                */
+               old_end = tlb->end;
                tlb->end = addr;
-#endif
+
                tlb_flush_mmu(tlb);
-               if (addr != end) {
-                       range_start = addr;
+
+               tlb->start = addr;
+               tlb->end = old_end;
+
+               if (addr != end)
                        goto again;
-               }
        }
 
        return addr;
@@ -1397,7 +1406,7 @@ void zap_page_range(struct vm_area_struct *vma, unsigned long start,
        unsigned long end = start + size;
 
        lru_add_drain();
-       tlb_gather_mmu(&tlb, mm, 0);
+       tlb_gather_mmu(&tlb, mm, start, end);
        update_hiwater_rss(mm);
        mmu_notifier_invalidate_range_start(mm, start, end);
        for ( ; vma && vma->vm_start < end; vma = vma->vm_next)
@@ -1423,7 +1432,7 @@ static void zap_page_range_single(struct vm_area_struct *vma, unsigned long addr
        unsigned long end = address + size;
 
        lru_add_drain();
-       tlb_gather_mmu(&tlb, mm, 0);
+       tlb_gather_mmu(&tlb, mm, address, end);
        update_hiwater_rss(mm);
        mmu_notifier_invalidate_range_start(mm, address, end);
        unmap_single_vma(&tlb, vma, address, end, details);
@@ -3115,6 +3124,8 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
                exclusive = 1;
        }
        flush_icache_page(vma, page);
+       if (pte_swp_soft_dirty(orig_pte))
+               pte = pte_mksoft_dirty(pte);
        set_pte_at(mm, address, page_table, pte);
        if (page == swapcache)
                do_page_add_anon_rmap(page, vma, address, exclusive);
@@ -3408,6 +3419,8 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                entry = mk_pte(page, vma->vm_page_prot);
                if (flags & FAULT_FLAG_WRITE)
                        entry = maybe_mkwrite(pte_mkdirty(entry), vma);
+               else if (pte_file(orig_pte) && pte_file_soft_dirty(orig_pte))
+                       entry = pte_mksoft_dirty(entry);
                if (anon) {
                        inc_mm_counter_fast(mm, MM_ANONPAGES);
                        page_add_new_anon_rmap(page, vma, address);
index 1edbaa3136c3c464cf648e569f2f98499f00642c..f9c97d10b873ae4a896c3a22266707653d941c90 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2336,7 +2336,7 @@ static void unmap_region(struct mm_struct *mm,
        struct mmu_gather tlb;
 
        lru_add_drain();
-       tlb_gather_mmu(&tlb, mm, 0);
+       tlb_gather_mmu(&tlb, mm, start, end);
        update_hiwater_rss(mm);
        unmap_vmas(&tlb, vma, start, end);
        free_pgtables(&tlb, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
@@ -2709,7 +2709,7 @@ void exit_mmap(struct mm_struct *mm)
 
        lru_add_drain();
        flush_cache_mm(mm);
-       tlb_gather_mmu(&tlb, mm, 1);
+       tlb_gather_mmu(&tlb, mm, 0, -1);
        /* update_hiwater_rss(mm) here? but nobody should be looking */
        /* Use -1 here to ensure all VMAs in the mm are unmapped */
        unmap_vmas(&tlb, vma, 0, -1);
index cd356df4f71ab5aa0514e7685b4cce50eafceb6e..b2e29acd7e3d6267a626f7bde5656c4f7fd49640 100644 (file)
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1236,6 +1236,7 @@ int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                           swp_entry_to_pte(make_hwpoison_entry(page)));
        } else if (PageAnon(page)) {
                swp_entry_t entry = { .val = page_private(page) };
+               pte_t swp_pte;
 
                if (PageSwapCache(page)) {
                        /*
@@ -1264,7 +1265,10 @@ int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                        BUG_ON(TTU_ACTION(flags) != TTU_MIGRATION);
                        entry = make_migration_entry(page, pte_write(pteval));
                }
-               set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
+               swp_pte = swp_entry_to_pte(entry);
+               if (pte_soft_dirty(pteval))
+                       swp_pte = pte_swp_mksoft_dirty(swp_pte);
+               set_pte_at(mm, address, pte, swp_pte);
                BUG_ON(pte_file(*pte));
        } else if (IS_ENABLED(CONFIG_MIGRATION) &&
                   (TTU_ACTION(flags) == TTU_MIGRATION)) {
@@ -1401,8 +1405,12 @@ static int try_to_unmap_cluster(unsigned long cursor, unsigned int *mapcount,
                pteval = ptep_clear_flush(vma, address, pte);
 
                /* If nonlinear, store the file page offset in the pte. */
-               if (page->index != linear_page_index(vma, address))
-                       set_pte_at(mm, address, pte, pgoff_to_pte(page->index));
+               if (page->index != linear_page_index(vma, address)) {
+                       pte_t ptfile = pgoff_to_pte(page->index);
+                       if (pte_soft_dirty(pteval))
+                               ptfile = pte_file_mksoft_dirty(ptfile);
+                       set_pte_at(mm, address, pte, ptfile);
+               }
 
                /* Move the dirty bit to the physical page now the pte is gone. */
                if (pte_dirty(pteval))
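
The same preserve-the-bit pattern now appears in fremap.c, memory.c and here in rmap.c; condensed, with the assignment that pte_t's by-value semantics require:

        pte_t ptfile = pgoff_to_pte(page->index);
        if (pte_soft_dirty(pteval))
                ptfile = pte_file_mksoft_dirty(ptfile);  /* carry the mark over */
        set_pte_at(mm, address, pte, ptfile);
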
index 36af6eeaa67eaa0532442706e668e9a268596c90..6cf2e60983b7c36c5dea0b745e6aadd907c01532 100644 (file)
@@ -866,6 +866,21 @@ unsigned int count_swap_pages(int type, int free)
 }
 #endif /* CONFIG_HIBERNATION */
 
+static inline int maybe_same_pte(pte_t pte, pte_t swp_pte)
+{
+#ifdef CONFIG_MEM_SOFT_DIRTY
+       /*
+        * When a pte keeps the soft dirty bit, the pte generated
+        * from the swap entry does not have it; it is still the
+        * same pte from a logical point of view.
+        */
+       pte_t swp_pte_dirty = pte_swp_mksoft_dirty(swp_pte);
+       return pte_same(pte, swp_pte) || pte_same(pte, swp_pte_dirty);
+#else
+       return pte_same(pte, swp_pte);
+#endif
+}
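
An illustration of why both comparisons are needed (with CONFIG_MEM_SOFT_DIRTY enabled; helper names as in this diff): the swap pte stored at unmap time may carry the soft-dirty bit, while the pte regenerated from the bare swap entry never does:

        pte_t clean = swp_entry_to_pte(entry);          /* no soft-dirty bit */
        pte_t as_stored = pte_swp_mksoft_dirty(clean);  /* what unmap wrote  */
        /* pte_same(as_stored, clean) == 0, maybe_same_pte(as_stored, clean) == 1 */
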
+
 /*
  * No need to decide whether this PTE shares the swap entry with others,
  * just let do_wp_page work it out if a write is requested later - to
@@ -892,7 +907,7 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
        }
 
        pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
-       if (unlikely(!pte_same(*pte, swp_entry_to_pte(entry)))) {
+       if (unlikely(!maybe_same_pte(*pte, swp_entry_to_pte(entry)))) {
                mem_cgroup_cancel_charge_swapin(memcg);
                ret = 0;
                goto out;
@@ -947,7 +962,7 @@ static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
                 * swapoff spends a _lot_ of time in this loop!
                 * Test inline before going to call unuse_pte.
                 */
-               if (unlikely(pte_same(*pte, swp_pte))) {
+               if (unlikely(maybe_same_pte(*pte, swp_pte))) {
                        pte_unmap(pte);
                        ret = unuse_pte(vma, pmd, addr, entry, page);
                        if (ret)
index 4a78c4de9f200831c75e5c87bad683bbf669bbfb..6ee48aac776fbe84db68a503d9aca10ca45b0081 100644 (file)
@@ -91,7 +91,12 @@ EXPORT_SYMBOL(__vlan_find_dev_deep);
 
 struct net_device *vlan_dev_real_dev(const struct net_device *dev)
 {
-       return vlan_dev_priv(dev)->real_dev;
+       struct net_device *ret = vlan_dev_priv(dev)->real_dev;
+
+       while (is_vlan_dev(ret))
+               ret = vlan_dev_priv(ret)->real_dev;
+
+       return ret;
 }
 EXPORT_SYMBOL(vlan_dev_real_dev);
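
The effect on stacked VLANs, with an illustrative QinQ topology of vlan200 on vlan100 on eth0:

        vlan_dev_real_dev(vlan200)  ->  eth0    /* previously returned vlan100 */
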
 
index e14531f1ce1c5258ccc816f9a739ed464b6c364f..264de88db3208290b940cc27b09e44f79e1616c9 100644 (file)
@@ -1529,6 +1529,8 @@ out:
  * in these cases, the skb is further handled by this function and
  * returns 1, otherwise it returns 0 and the caller shall further
  * process the skb.
+ *
+ * This call might reallocate skb data.
  */
 int batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb,
                  unsigned short vid)
index f105219f4a4b120810973e0ae8cb31a8c4e2aca1..7614af31daff3678eb2bb75447810c0e27850f88 100644 (file)
@@ -508,6 +508,7 @@ out:
        return 0;
 }
 
+/* this call might reallocate skb data */
 static bool batadv_is_type_dhcprequest(struct sk_buff *skb, int header_len)
 {
        int ret = false;
@@ -568,6 +569,7 @@ out:
        return ret;
 }
 
+/* this call might reallocate skb data */
 bool batadv_gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len)
 {
        struct ethhdr *ethhdr;
@@ -619,6 +621,12 @@ bool batadv_gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len)
 
        if (!pskb_may_pull(skb, *header_len + sizeof(*udphdr)))
                return false;
+
+       /* skb->data might have been reallocated by pskb_may_pull() */
+       ethhdr = (struct ethhdr *)skb->data;
+       if (ntohs(ethhdr->h_proto) == ETH_P_8021Q)
+               ethhdr = (struct ethhdr *)(skb->data + VLAN_HLEN);
+
        udphdr = (struct udphdr *)(skb->data + *header_len);
        *header_len += sizeof(*udphdr);
 
@@ -634,12 +642,14 @@ bool batadv_gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len)
        return true;
 }
 
+/* this call might reallocate skb data */
 bool batadv_gw_out_of_range(struct batadv_priv *bat_priv,
-                           struct sk_buff *skb, struct ethhdr *ethhdr)
+                           struct sk_buff *skb)
 {
        struct batadv_neigh_node *neigh_curr = NULL, *neigh_old = NULL;
        struct batadv_orig_node *orig_dst_node = NULL;
        struct batadv_gw_node *curr_gw = NULL;
+       struct ethhdr *ethhdr;
        bool ret, out_of_range = false;
        unsigned int header_len = 0;
        uint8_t curr_tq_avg;
@@ -648,6 +658,7 @@ bool batadv_gw_out_of_range(struct batadv_priv *bat_priv,
        if (!ret)
                goto out;
 
+       ethhdr = (struct ethhdr *)skb->data;
        orig_dst_node = batadv_transtable_search(bat_priv, ethhdr->h_source,
                                                 ethhdr->h_dest);
        if (!orig_dst_node)
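
The recurring batman-adv pattern in these hunks: any helper that may call pskb_may_pull() can reallocate skb->data, so previously cached header pointers must be re-derived afterwards. Condensed:

        if (!pskb_may_pull(skb, hlen))          /* may reallocate skb->data  */
                return false;
        ethhdr = (struct ethhdr *)skb->data;    /* refresh the stale pointer */
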
index 039902dca4a691074856d7846c59ad89f0bd8b8e..1037d75da51f35c1867cee72cc64bafd2859e3a8 100644 (file)
@@ -34,7 +34,6 @@ void batadv_gw_node_delete(struct batadv_priv *bat_priv,
 void batadv_gw_node_purge(struct batadv_priv *bat_priv);
 int batadv_gw_client_seq_print_text(struct seq_file *seq, void *offset);
 bool batadv_gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len);
-bool batadv_gw_out_of_range(struct batadv_priv *bat_priv,
-                           struct sk_buff *skb, struct ethhdr *ethhdr);
+bool batadv_gw_out_of_range(struct batadv_priv *bat_priv, struct sk_buff *skb);
 
 #endif /* _NET_BATMAN_ADV_GATEWAY_CLIENT_H_ */
index 700d0b49742da54d0a1280b84c87416977d7d721..0f04e1c302b4a7bf12aa4da04fc84a4d0c2a0f20 100644 (file)
@@ -180,6 +180,9 @@ static int batadv_interface_tx(struct sk_buff *skb,
        if (batadv_bla_tx(bat_priv, skb, vid))
                goto dropped;
 
+       /* skb->data might have been reallocated by batadv_bla_tx() */
+       ethhdr = (struct ethhdr *)skb->data;
+
        /* Register the client MAC in the transtable */
        if (!is_multicast_ether_addr(ethhdr->h_source))
                batadv_tt_local_add(soft_iface, ethhdr->h_source, skb->skb_iif);
@@ -220,6 +223,10 @@ static int batadv_interface_tx(struct sk_buff *skb,
                default:
                        break;
                }
+
+               /* reminder: ethhdr might have become unusable from here on
+                * (batadv_gw_is_dhcp_target() might have reallocated skb data)
+                */
        }
 
        /* ethernet packet should be broadcasted */
@@ -266,7 +273,7 @@ static int batadv_interface_tx(struct sk_buff *skb,
        /* unicast packet */
        } else {
                if (atomic_read(&bat_priv->gw_mode) != BATADV_GW_MODE_OFF) {
-                       ret = batadv_gw_out_of_range(bat_priv, skb, ethhdr);
+                       ret = batadv_gw_out_of_range(bat_priv, skb);
                        if (ret)
                                goto dropped;
                }
index dc8b5d4dd636d3d5ed982a5d93c2783f203ac4f5..688a0419756bfc6ce2a9f632f6e84a6ed42debcf 100644 (file)
@@ -326,7 +326,9 @@ static bool batadv_unicast_push_and_fill_skb(struct sk_buff *skb, int hdr_size,
  * @skb: the skb containing the payload to encapsulate
  * @orig_node: the destination node
  *
- * Returns false if the payload could not be encapsulated or true otherwise
+ * Returns false if the payload could not be encapsulated or true otherwise.
+ *
+ * This call might reallocate skb data.
  */
 static bool batadv_unicast_prepare_skb(struct sk_buff *skb,
                                       struct batadv_orig_node *orig_node)
@@ -343,7 +345,9 @@ static bool batadv_unicast_prepare_skb(struct sk_buff *skb,
  * @orig_node: the destination node
  * @packet_subtype: the batman 4addr packet subtype to use
  *
- * Returns false if the payload could not be encapsulated or true otherwise
+ * Returns false if the payload could not be encapsulated or true otherwise.
+ *
+ * This call might reallocate skb data.
  */
 bool batadv_unicast_4addr_prepare_skb(struct batadv_priv *bat_priv,
                                      struct sk_buff *skb,
@@ -401,7 +405,7 @@ int batadv_unicast_generic_send_skb(struct batadv_priv *bat_priv,
        struct batadv_neigh_node *neigh_node;
        int data_len = skb->len;
        int ret = NET_RX_DROP;
-       unsigned int dev_mtu;
+       unsigned int dev_mtu, header_len;
 
        /* get routing information */
        if (is_multicast_ether_addr(ethhdr->h_dest)) {
@@ -429,10 +433,12 @@ find_router:
        switch (packet_type) {
        case BATADV_UNICAST:
                batadv_unicast_prepare_skb(skb, orig_node);
+               header_len = sizeof(struct batadv_unicast_packet);
                break;
        case BATADV_UNICAST_4ADDR:
                batadv_unicast_4addr_prepare_skb(bat_priv, skb, orig_node,
                                                 packet_subtype);
+               header_len = sizeof(struct batadv_unicast_4addr_packet);
                break;
        default:
                /* this function supports UNICAST and UNICAST_4ADDR only. It
@@ -441,6 +447,7 @@ find_router:
                goto out;
        }
 
+       ethhdr = (struct ethhdr *)(skb->data + header_len);
        unicast_packet = (struct batadv_unicast_packet *)skb->data;
 
        /* inform the destination node that we are still missing a correct route
index 61c5e819380e52d6347cde7186251467e921c1a0..08e576ada0b2699922b85c9e2783773baee404dc 100644 (file)
@@ -1195,7 +1195,7 @@ static int br_ip6_multicast_query(struct net_bridge *br,
                max_delay = msecs_to_jiffies(ntohs(mld->mld_maxdelay));
                if (max_delay)
                        group = &mld->mld_mca;
-       } else if (skb->len >= sizeof(*mld2q)) {
+       } else {
                if (!pskb_may_pull(skb, sizeof(*mld2q))) {
                        err = -EINVAL;
                        goto out;
index 394bb96b608707aa1ab943f66bc52fa173859706..3b9637fb7939a146e8a172a1e009e84c6ca4e841 100644 (file)
@@ -1,5 +1,5 @@
 /*
- *     Sysfs attributes of bridge ports
+ *     Sysfs attributes of bridge
  *     Linux ethernet bridge
  *
  *     Authors:
index 00ee068efc1c2374bc0195d9458eca00392359c6..b84a1b155bc133e39d0f118b61be85fb1d2ed02f 100644 (file)
@@ -65,6 +65,7 @@ ipv6:
                nhoff += sizeof(struct ipv6hdr);
                break;
        }
+       case __constant_htons(ETH_P_8021AD):
        case __constant_htons(ETH_P_8021Q): {
                const struct vlan_hdr *vlan;
                struct vlan_hdr _vlan;
index 9232c68941abb4b5b5ec23f5d6cd56054d8b4c67..60533db8b72db4bafa73d336d697575ba3138c90 100644 (file)
@@ -1441,16 +1441,18 @@ struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
                atomic_set(&p->refcnt, 1);
                p->reachable_time =
                                neigh_rand_reach_time(p->base_reachable_time);
+               dev_hold(dev);
+               p->dev = dev;
+               write_pnet(&p->net, hold_net(net));
+               p->sysctl_table = NULL;
 
                if (ops->ndo_neigh_setup && ops->ndo_neigh_setup(dev, p)) {
+                       release_net(net);
+                       dev_put(dev);
                        kfree(p);
                        return NULL;
                }
 
-               dev_hold(dev);
-               p->dev = dev;
-               write_pnet(&p->net, hold_net(net));
-               p->sysctl_table = NULL;
                write_lock_bh(&tbl->lock);
                p->next         = tbl->parms.next;
                tbl->parms.next = p;
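
Two things change in this hunk: the parms fields are now populated before ops->ndo_neigh_setup() runs, so a driver callback sees an initialized structure, and the failure path releases the references it just took. A sketch of the pattern with assumed stub types (not the kernel API):

#include <stdlib.h>

struct dev;
struct obj { struct dev *dev; };
struct ops { int (*setup)(struct dev *, struct obj *); };

void dev_hold(struct dev *d);   /* assumed refcount helpers */
void dev_put(struct dev *d);

static struct obj *obj_alloc(struct dev *dev, const struct ops *ops)
{
        struct obj *p = calloc(1, sizeof(*p));
        if (!p)
                return NULL;
        dev_hold(dev);          /* take references and fill fields first... */
        p->dev = dev;
        if (ops->setup && ops->setup(dev, p)) {
                dev_put(dev);   /* ...and unwind all of it on failure */
                free(p);
                return NULL;
        }
        return p;
}
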
index 3de740834d1ffcfa35fe31735b41d5a0dcb58c3a..ca198c1d1d3047baededa2c1f2c6284b07010bfb 100644 (file)
@@ -2156,7 +2156,7 @@ int ndo_dflt_fdb_del(struct ndmsg *ndm,
        /* If aging addresses are supported, the device will need to
         * implement its own handler for this.
         */
-       if (ndm->ndm_state & NUD_PERMANENT) {
+       if (!(ndm->ndm_state & NUD_PERMANENT)) {
                pr_info("%s: FDB only supports static addresses\n", dev->name);
                return -EINVAL;
        }
@@ -2384,7 +2384,7 @@ static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb)
        struct nlattr *extfilt;
        u32 filter_mask = 0;
 
-       extfilt = nlmsg_find_attr(cb->nlh, sizeof(struct rtgenmsg),
+       extfilt = nlmsg_find_attr(cb->nlh, sizeof(struct ifinfomsg),
                                  IFLA_EXT_MASK);
        if (extfilt)
                filter_mask = nla_get_u32(extfilt);
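
The one-word change matters because attribute parsing starts after the family-specific header: these RTM_GETLINK dump requests carry a struct ifinfomsg, and searching with sizeof(struct rtgenmsg) makes nlmsg_find_attr() look at the wrong offset, so IFLA_EXT_MASK is never found. A small sketch of the offsets, assuming the Linux uapi headers:

#include <stddef.h>
#include <stdio.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>

int main(void)
{
        /* Attributes begin after the netlink header plus the aligned
         * family header; the two struct sizes give different offsets. */
        printf("offset with rtgenmsg:  %zu\n",
               (size_t)NLMSG_HDRLEN + NLMSG_ALIGN(sizeof(struct rtgenmsg)));
        printf("offset with ifinfomsg: %zu\n",
               (size_t)NLMSG_HDRLEN + NLMSG_ALIGN(sizeof(struct ifinfomsg)));
        return 0;
}
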
index ab3d814bc80af8f377da971af189a49ae4f2f094..109ee89f123e5e51714cae1fa9097e6ec65f0627 100644 (file)
@@ -477,7 +477,7 @@ static u32 esp4_get_mtu(struct xfrm_state *x, int mtu)
        }
 
        return ((mtu - x->props.header_len - crypto_aead_authsize(esp->aead) -
-                net_adj) & ~(align - 1)) + (net_adj - 2);
+                net_adj) & ~(align - 1)) + net_adj - 2;
 }
 
 static void esp4_err(struct sk_buff *skb, u32 info)
index 108a1e9c9eac388515530bb790d0e56e4dba0bfd..3df6d3edb2a15a98cb0e90a4e5ed935f42f15f1c 100644 (file)
@@ -71,7 +71,6 @@
 #include <linux/init.h>
 #include <linux/list.h>
 #include <linux/slab.h>
-#include <linux/prefetch.h>
 #include <linux/export.h>
 #include <net/net_namespace.h>
 #include <net/ip.h>
@@ -1761,10 +1760,8 @@ static struct leaf *leaf_walk_rcu(struct tnode *p, struct rt_trie_node *c)
                        if (!c)
                                continue;
 
-                       if (IS_LEAF(c)) {
-                               prefetch(rcu_dereference_rtnl(p->child[idx]));
+                       if (IS_LEAF(c))
                                return (struct leaf *) c;
-                       }
 
                        /* Rescan: start scanning in the new node */
                        p = (struct tnode *) c;
index 1f6eab66f7cee5f94e1a5cfd6b6ea919184b5d35..8d6939eeb49247b1c2ee8659cc190814d64eb4e5 100644 (file)
@@ -383,7 +383,7 @@ static int ipgre_header(struct sk_buff *skb, struct net_device *dev,
        if (daddr)
                memcpy(&iph->daddr, daddr, 4);
        if (iph->daddr)
-               return t->hlen;
+               return t->hlen + sizeof(*iph);
 
        return -(t->hlen + sizeof(*iph));
 }
index 7167b08977df582bd38565cf960568a8553f29d4..850525b34899f3a50d0e19f6bd326821b0c2423a 100644 (file)
@@ -76,9 +76,7 @@ int iptunnel_xmit(struct net *net, struct rtable *rt,
        iph->daddr      =       dst;
        iph->saddr      =       src;
        iph->ttl        =       ttl;
-       tunnel_ip_select_ident(skb,
-                              (const struct iphdr *)skb_inner_network_header(skb),
-                              &rt->dst);
+       __ip_select_ident(iph, &rt->dst, (skb_shinfo(skb)->gso_segs ?: 1) - 1);
 
        err = ip_local_out(skb);
        if (unlikely(net_xmit_eval(err)))
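
The replacement call passes (gso_segs ?: 1) - 1 as the extra-increment argument: a GSO skb later becomes gso_segs on-wire packets, each of which needs its own IP ID, so the generator is advanced past the whole batch at once. A sketch with a plain counter standing in for the kernel's ident generator:

#include <stdint.h>

static uint16_t ip_ident;   /* stand-in for the per-destination counter */

static uint16_t ident_for_skb(unsigned int gso_segs)
{
        unsigned int segs = gso_segs ? gso_segs : 1;  /* non-GSO skb is one packet */
        uint16_t id = ip_ident;

        ip_ident += (uint16_t)segs;   /* reserve one ID per resulting segment */
        return id;                    /* ID of the first segment */
}
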
index 6577a1149a47c17853e8c7e0e3daa267ef16d1cd..463bd1273346d72cad00a42a5852f95cef490c19 100644 (file)
@@ -273,7 +273,7 @@ static const struct snmp_mib snmp4_net_list[] = {
        SNMP_MIB_ITEM("TCPFastOpenListenOverflow", LINUX_MIB_TCPFASTOPENLISTENOVERFLOW),
        SNMP_MIB_ITEM("TCPFastOpenCookieReqd", LINUX_MIB_TCPFASTOPENCOOKIEREQD),
        SNMP_MIB_ITEM("TCPSpuriousRtxHostQueues", LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES),
-       SNMP_MIB_ITEM("LowLatencyRxPackets", LINUX_MIB_LOWLATENCYRXPACKETS),
+       SNMP_MIB_ITEM("BusyPollRxPackets", LINUX_MIB_BUSYPOLLRXPACKETS),
        SNMP_MIB_SENTINEL
 };
 
index a9077f441cb27b693d8ad1d710e4dba67aa93ebb..b6ae92a51f58607f7c32be2d0762a49380205012 100644 (file)
@@ -206,8 +206,8 @@ static u32 cubic_root(u64 a)
  */
 static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
 {
-       u64 offs;
-       u32 delta, t, bic_target, max_cnt;
+       u32 delta, bic_target, max_cnt;
+       u64 offs, t;
 
        ca->ack_cnt++;  /* count the number of ACKs */
 
@@ -250,9 +250,11 @@ static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
         * if the cwnd < 1 million packets !!!
         */
 
+       t = (s32)(tcp_time_stamp - ca->epoch_start);
+       t += msecs_to_jiffies(ca->delay_min >> 3);
        /* change the unit from HZ to bictcp_HZ */
-       t = ((tcp_time_stamp + msecs_to_jiffies(ca->delay_min>>3)
-             - ca->epoch_start) << BICTCP_HZ) / HZ;
+       t <<= BICTCP_HZ;
+       do_div(t, HZ);
 
        if (t < ca->bic_K)              /* t - K */
                offs = ca->bic_K - t;
@@ -414,7 +416,7 @@ static void bictcp_acked(struct sock *sk, u32 cnt, s32 rtt_us)
                return;
 
        /* Discard delay samples right after fast recovery */
-       if ((s32)(tcp_time_stamp - ca->epoch_start) < HZ)
+       if (ca->epoch_start && (s32)(tcp_time_stamp - ca->epoch_start) < HZ)
                return;
 
        delay = (rtt_us << 3) / USEC_PER_MSEC;
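
The cubic hunks widen t to 64 bits before the shift: in the old u32 arithmetic, t << BICTCP_HZ overflows once roughly 2^22 jiffies (about 70 minutes at HZ=1000) have elapsed since epoch_start, corrupting the comparison against bic_K. A userspace sketch of the fixed computation, with t / HZ standing in for do_div(t, HZ):

#include <stdint.h>

#define BICTCP_HZ 10   /* cubic's time unit, 2^10 per second */
#define HZ 1000        /* assumed kernel tick rate */

static uint64_t bictcp_time_units(uint32_t now, uint32_t epoch_start,
                                  uint32_t delay_min_jiffies)
{
        uint64_t t = (uint32_t)(now - epoch_start);  /* jiffies since epoch */

        t += delay_min_jiffies;
        t <<= BICTCP_HZ;        /* 64-bit shift: no overflow at realistic times */
        return t / HZ;          /* userspace stand-in for do_div(t, HZ) */
}
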
index 40ffd72243a4f2d1ec9e5da494b3f2650dd027a6..aeac0dc3635d98458a1f70ec84103c89a3f5a648 100644 (file)
@@ -425,7 +425,7 @@ static u32 esp6_get_mtu(struct xfrm_state *x, int mtu)
                net_adj = 0;
 
        return ((mtu - x->props.header_len - crypto_aead_authsize(esp->aead) -
-                net_adj) & ~(align - 1)) + (net_adj - 2);
+                net_adj) & ~(align - 1)) + net_adj - 2;
 }
 
 static void esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
index bff3d821c7ebb7cf7b05342a5e4a9b4ddb14cd4a..c4ff5bbb45c44a1bda8eee4f5d7728338de88a47 100644 (file)
@@ -993,14 +993,22 @@ static struct fib6_node * fib6_lookup_1(struct fib6_node *root,
 
                        if (ipv6_prefix_equal(&key->addr, args->addr, key->plen)) {
 #ifdef CONFIG_IPV6_SUBTREES
-                               if (fn->subtree)
-                                       fn = fib6_lookup_1(fn->subtree, args + 1);
+                               if (fn->subtree) {
+                                       struct fib6_node *sfn;
+                                       sfn = fib6_lookup_1(fn->subtree,
+                                                           args + 1);
+                                       if (!sfn)
+                                               goto backtrack;
+                                       fn = sfn;
+                               }
 #endif
-                               if (!fn || fn->fn_flags & RTN_RTINFO)
+                               if (fn->fn_flags & RTN_RTINFO)
                                        return fn;
                        }
                }
-
+#ifdef CONFIG_IPV6_SUBTREES
+backtrack:
+#endif
                if (fn->fn_flags & RTN_ROOT)
                        break;
 
index ae31968d42d3b855eebe05dcf0e660b4d367ff8f..cc9e02d79b550106ee07b6aca48ffa9e18081a45 100644 (file)
 #include "led.h"
 
 #define IEEE80211_AUTH_TIMEOUT         (HZ / 5)
+#define IEEE80211_AUTH_TIMEOUT_LONG    (HZ / 2)
 #define IEEE80211_AUTH_TIMEOUT_SHORT   (HZ / 10)
 #define IEEE80211_AUTH_MAX_TRIES       3
 #define IEEE80211_AUTH_WAIT_ASSOC      (HZ * 5)
 #define IEEE80211_ASSOC_TIMEOUT                (HZ / 5)
+#define IEEE80211_ASSOC_TIMEOUT_LONG   (HZ / 2)
 #define IEEE80211_ASSOC_TIMEOUT_SHORT  (HZ / 10)
 #define IEEE80211_ASSOC_MAX_TRIES      3
 
@@ -209,8 +211,9 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
                             struct ieee80211_channel *channel,
                             const struct ieee80211_ht_operation *ht_oper,
                             const struct ieee80211_vht_operation *vht_oper,
-                            struct cfg80211_chan_def *chandef, bool verbose)
+                            struct cfg80211_chan_def *chandef, bool tracking)
 {
+       struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
        struct cfg80211_chan_def vht_chandef;
        u32 ht_cfreq, ret;
 
@@ -229,7 +232,7 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
        ht_cfreq = ieee80211_channel_to_frequency(ht_oper->primary_chan,
                                                  channel->band);
        /* check that channel matches the right operating channel */
-       if (channel->center_freq != ht_cfreq) {
+       if (!tracking && channel->center_freq != ht_cfreq) {
                /*
                 * It's possible that some APs are confused here;
                 * Netgear WNDR3700 sometimes reports 4 higher than
@@ -237,11 +240,10 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
                 * since we look at probe response/beacon data here
                 * it should be OK.
                 */
-               if (verbose)
-                       sdata_info(sdata,
-                                  "Wrong control channel: center-freq: %d ht-cfreq: %d ht->primary_chan: %d band: %d - Disabling HT\n",
-                                  channel->center_freq, ht_cfreq,
-                                  ht_oper->primary_chan, channel->band);
+               sdata_info(sdata,
+                          "Wrong control channel: center-freq: %d ht-cfreq: %d ht->primary_chan: %d band: %d - Disabling HT\n",
+                          channel->center_freq, ht_cfreq,
+                          ht_oper->primary_chan, channel->band);
                ret = IEEE80211_STA_DISABLE_HT | IEEE80211_STA_DISABLE_VHT;
                goto out;
        }
@@ -295,7 +297,7 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
                                channel->band);
                break;
        default:
-               if (verbose)
+               if (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT))
                        sdata_info(sdata,
                                   "AP VHT operation IE has invalid channel width (%d), disable VHT\n",
                                   vht_oper->chan_width);
@@ -304,7 +306,7 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
        }
 
        if (!cfg80211_chandef_valid(&vht_chandef)) {
-               if (verbose)
+               if (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT))
                        sdata_info(sdata,
                                   "AP VHT information is invalid, disable VHT\n");
                ret = IEEE80211_STA_DISABLE_VHT;
@@ -317,7 +319,7 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
        }
 
        if (!cfg80211_chandef_compatible(chandef, &vht_chandef)) {
-               if (verbose)
+               if (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT))
                        sdata_info(sdata,
                                   "AP VHT information doesn't match HT, disable VHT\n");
                ret = IEEE80211_STA_DISABLE_VHT;
@@ -333,18 +335,27 @@ out:
        if (ret & IEEE80211_STA_DISABLE_VHT)
                vht_chandef = *chandef;
 
+       /*
+        * Ignore the DISABLED flag when we're already connected and only
+        * tracking the AP's beacon for bandwidth changes - otherwise we
+        * might get disconnected here if we connect to an AP, update our
+        * regulatory information based on the AP's country IE, and the
+        * information we have is wrong/outdated and disables the channel
+        * that we're actually using for the connection to the AP.
+        */
        while (!cfg80211_chandef_usable(sdata->local->hw.wiphy, chandef,
-                                       IEEE80211_CHAN_DISABLED)) {
+                                       tracking ? 0 :
+                                                  IEEE80211_CHAN_DISABLED)) {
                if (WARN_ON(chandef->width == NL80211_CHAN_WIDTH_20_NOHT)) {
                        ret = IEEE80211_STA_DISABLE_HT |
                              IEEE80211_STA_DISABLE_VHT;
-                       goto out;
+                       break;
                }
 
                ret |= chandef_downgrade(chandef);
        }
 
-       if (chandef->width != vht_chandef.width && verbose)
+       if (chandef->width != vht_chandef.width && !tracking)
                sdata_info(sdata,
                           "capabilities/regulatory prevented using AP HT/VHT configuration, downgraded\n");
 
@@ -384,7 +395,7 @@ static int ieee80211_config_bw(struct ieee80211_sub_if_data *sdata,
 
        /* calculate new channel (type) based on HT/VHT operation IEs */
        flags = ieee80211_determine_chantype(sdata, sband, chan, ht_oper,
-                                            vht_oper, &chandef, false);
+                                            vht_oper, &chandef, true);
 
        /*
         * Downgrade the new channel if we associated with restricted
@@ -3394,10 +3405,13 @@ static int ieee80211_probe_auth(struct ieee80211_sub_if_data *sdata)
 
        if (tx_flags == 0) {
                auth_data->timeout = jiffies + IEEE80211_AUTH_TIMEOUT;
-               ifmgd->auth_data->timeout_started = true;
+               auth_data->timeout_started = true;
                run_again(sdata, auth_data->timeout);
        } else {
-               auth_data->timeout_started = false;
+               auth_data->timeout =
+                       round_jiffies_up(jiffies + IEEE80211_AUTH_TIMEOUT_LONG);
+               auth_data->timeout_started = true;
+               run_again(sdata, auth_data->timeout);
        }
 
        return 0;
@@ -3434,7 +3448,11 @@ static int ieee80211_do_assoc(struct ieee80211_sub_if_data *sdata)
                assoc_data->timeout_started = true;
                run_again(sdata, assoc_data->timeout);
        } else {
-               assoc_data->timeout_started = false;
+               assoc_data->timeout =
+                       round_jiffies_up(jiffies +
+                                        IEEE80211_ASSOC_TIMEOUT_LONG);
+               assoc_data->timeout_started = true;
+               run_again(sdata, assoc_data->timeout);
        }
 
        return 0;
@@ -3829,7 +3847,7 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata,
        ifmgd->flags |= ieee80211_determine_chantype(sdata, sband,
                                                     cbss->channel,
                                                     ht_oper, vht_oper,
-                                                    &chandef, true);
+                                                    &chandef, false);
 
        sdata->needed_rx_chains = min(ieee80211_ht_vht_rx_chains(sdata, cbss),
                                      local->rx_chains);
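
The auth/assoc timeout hunks above fix a stuck-state bug: when the frame is still queued for TX-status reporting, the old code left timeout_started false, so a lost status report stalled the state machine forever; now a longer fallback deadline is always armed. An illustrative sketch of the pattern, with invented names and timeout values:

#include <stdbool.h>
#include <time.h>

#define TIMEOUT_SHORT 1   /* seconds; stand-ins for HZ/5 and HZ/2 */
#define TIMEOUT_LONG  3

struct op_state {
        time_t deadline;
        bool   deadline_armed;
};

static void arm_timeout(struct op_state *op, bool awaiting_tx_status)
{
        /* Event-driven completion still gets a (longer) safety net. */
        op->deadline = time(NULL) +
                       (awaiting_tx_status ? TIMEOUT_LONG : TIMEOUT_SHORT);
        op->deadline_armed = true;   /* never left unarmed anymore */
}
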
index 7dcc376eea5f9205d1abf76f62a8d58eb54bc788..2f8010707d015dc62348ca758390c194388d7a8b 100644 (file)
@@ -526,7 +526,7 @@ static bool tcp_in_window(const struct nf_conn *ct,
        const struct nf_conntrack_tuple *tuple = &ct->tuplehash[dir].tuple;
        __u32 seq, ack, sack, end, win, swin;
        s16 receiver_offset;
-       bool res;
+       bool res, in_recv_win;
 
        /*
         * Get the required data from the packet.
@@ -649,14 +649,18 @@ static bool tcp_in_window(const struct nf_conn *ct,
                 receiver->td_end, receiver->td_maxend, receiver->td_maxwin,
                 receiver->td_scale);
 
+       /* Is the ending sequence in the receive window (if available)? */
+       in_recv_win = !receiver->td_maxwin ||
+                     after(end, sender->td_end - receiver->td_maxwin - 1);
+
        pr_debug("tcp_in_window: I=%i II=%i III=%i IV=%i\n",
                 before(seq, sender->td_maxend + 1),
-                after(end, sender->td_end - receiver->td_maxwin - 1),
+                (in_recv_win ? 1 : 0),
                 before(sack, receiver->td_end + 1),
                 after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1));
 
        if (before(seq, sender->td_maxend + 1) &&
-           after(end, sender->td_end - receiver->td_maxwin - 1) &&
+           in_recv_win &&
            before(sack, receiver->td_end + 1) &&
            after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1)) {
                /*
@@ -725,7 +729,7 @@ static bool tcp_in_window(const struct nf_conn *ct,
                        nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL,
                        "nf_ct_tcp: %s ",
                        before(seq, sender->td_maxend + 1) ?
-                       after(end, sender->td_end - receiver->td_maxwin - 1) ?
+                       in_recv_win ?
                        before(sack, receiver->td_end + 1) ?
                        after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1) ? "BUG"
                        : "ACK is under the lower bound (possible overly delayed ACK)"
index 962e9792e3179997db98a448a76fc909432d841f..d92cc317bf8b25a0c371b770688c860169c9dea8 100644 (file)
@@ -419,6 +419,7 @@ __build_packet_message(struct nfnl_log_net *log,
        nfmsg->version = NFNETLINK_V0;
        nfmsg->res_id = htons(inst->group_num);
 
+       memset(&pmsg, 0, sizeof(pmsg));
        pmsg.hw_protocol        = skb->protocol;
        pmsg.hook               = hooknum;
 
@@ -498,7 +499,10 @@ __build_packet_message(struct nfnl_log_net *log,
        if (indev && skb->dev &&
            skb->mac_header != skb->network_header) {
                struct nfulnl_msg_packet_hw phw;
-               int len = dev_parse_header(skb, phw.hw_addr);
+               int len;
+
+               memset(&phw, 0, sizeof(phw));
+               len = dev_parse_header(skb, phw.hw_addr);
                if (len > 0) {
                        phw.hw_addrlen = htons(len);
                        if (nla_put(inst->skb, NFULA_HWADDR, sizeof(phw), &phw))
index 971ea145ab3ea33a2d42ad0cf66e7025c1457832..8a703c3dd318b660c5e02b326495dbbc7b9f5b3e 100644 (file)
@@ -463,7 +463,10 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue,
        if (indev && entskb->dev &&
            entskb->mac_header != entskb->network_header) {
                struct nfqnl_msg_packet_hw phw;
-               int len = dev_parse_header(entskb, phw.hw_addr);
+               int len;
+
+               memset(&phw, 0, sizeof(phw));
+               len = dev_parse_header(entskb, phw.hw_addr);
                if (len) {
                        phw.hw_addrlen = htons(len);
                        if (nla_put(skb, NFQA_HWADDR, sizeof(phw), &phw))
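
The memset() additions in both netfilter files close the same kind of infoleak: the packet_hw structs contain padding, and dev_parse_header() may write fewer bytes than the hw_addr array holds, so nla_put() would copy uninitialized stack bytes to userspace. The pattern in isolation, with an assumed field layout for illustration:

#include <stdint.h>
#include <string.h>

struct packet_hw {
        uint16_t hw_addrlen;     /* implicit padding may follow */
        uint8_t  hw_addr[8];
};

static void fill_hw(struct packet_hw *phw, const uint8_t *addr, size_t len)
{
        memset(phw, 0, sizeof(*phw));   /* zero padding and unwritten tail */
        phw->hw_addrlen = (uint16_t)len;
        memcpy(phw->hw_addr, addr, len);
        /* now safe to copy the whole struct to an untrusted reader */
}
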
index 7011c71646f0266eb75c856bc49fea7b5030bd52..6113cc7efffcd782cee9d8c2e07ffc399f6e67c4 100644 (file)
@@ -52,7 +52,8 @@ tcpmss_mangle_packet(struct sk_buff *skb,
 {
        const struct xt_tcpmss_info *info = par->targinfo;
        struct tcphdr *tcph;
-       unsigned int tcplen, i;
+       int len, tcp_hdrlen;
+       unsigned int i;
        __be16 oldval;
        u16 newmss;
        u8 *opt;
@@ -64,11 +65,14 @@ tcpmss_mangle_packet(struct sk_buff *skb,
        if (!skb_make_writable(skb, skb->len))
                return -1;
 
-       tcplen = skb->len - tcphoff;
+       len = skb->len - tcphoff;
+       if (len < (int)sizeof(struct tcphdr))
+               return -1;
+
        tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff);
+       tcp_hdrlen = tcph->doff * 4;
 
-       /* Header cannot be larger than the packet */
-       if (tcplen < tcph->doff*4)
+       if (len < tcp_hdrlen)
                return -1;
 
        if (info->mss == XT_TCPMSS_CLAMP_PMTU) {
@@ -87,9 +91,8 @@ tcpmss_mangle_packet(struct sk_buff *skb,
                newmss = info->mss;
 
        opt = (u_int8_t *)tcph;
-       for (i = sizeof(struct tcphdr); i < tcph->doff*4; i += optlen(opt, i)) {
-               if (opt[i] == TCPOPT_MSS && tcph->doff*4 - i >= TCPOLEN_MSS &&
-                   opt[i+1] == TCPOLEN_MSS) {
+       for (i = sizeof(struct tcphdr); i <= tcp_hdrlen - TCPOLEN_MSS; i += optlen(opt, i)) {
+               if (opt[i] == TCPOPT_MSS && opt[i+1] == TCPOLEN_MSS) {
                        u_int16_t oldmss;
 
                        oldmss = (opt[i+2] << 8) | opt[i+3];
@@ -112,9 +115,10 @@ tcpmss_mangle_packet(struct sk_buff *skb,
        }
 
        /* There is data after the header so the option can't be added
-          without moving it, and doing so may make the SYN packet
-          itself too large. Accept the packet unmodified instead. */
-       if (tcplen > tcph->doff*4)
+        * without moving it, and doing so may make the SYN packet
+        * itself too large. Accept the packet unmodified instead.
+        */
+       if (len > tcp_hdrlen)
                return 0;
 
        /*
@@ -143,10 +147,10 @@ tcpmss_mangle_packet(struct sk_buff *skb,
                newmss = min(newmss, (u16)1220);
 
        opt = (u_int8_t *)tcph + sizeof(struct tcphdr);
-       memmove(opt + TCPOLEN_MSS, opt, tcplen - sizeof(struct tcphdr));
+       memmove(opt + TCPOLEN_MSS, opt, len - sizeof(struct tcphdr));
 
        inet_proto_csum_replace2(&tcph->check, skb,
-                                htons(tcplen), htons(tcplen + TCPOLEN_MSS), 1);
+                                htons(len), htons(len + TCPOLEN_MSS), 1);
        opt[0] = TCPOPT_MSS;
        opt[1] = TCPOLEN_MSS;
        opt[2] = (newmss & 0xff00) >> 8;
index b68fa191710fe02bdb1b09f825ee6330c9bf570b..625fa1d636a01ccacb43c2fcae902d65a466b9db 100644 (file)
@@ -38,7 +38,7 @@ tcpoptstrip_mangle_packet(struct sk_buff *skb,
        struct tcphdr *tcph;
        u_int16_t n, o;
        u_int8_t *opt;
-       int len;
+       int len, tcp_hdrlen;
 
        /* This is a fragment, no TCP header is available */
        if (par->fragoff != 0)
@@ -52,7 +52,9 @@ tcpoptstrip_mangle_packet(struct sk_buff *skb,
                return NF_DROP;
 
        tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff);
-       if (tcph->doff * 4 > len)
+       tcp_hdrlen = tcph->doff * 4;
+
+       if (len < tcp_hdrlen)
                return NF_DROP;
 
        opt  = (u_int8_t *)tcph;
@@ -61,10 +63,10 @@ tcpoptstrip_mangle_packet(struct sk_buff *skb,
         * Walk through all TCP options - if we find some option to remove,
         * set all octets to %TCPOPT_NOP and adjust checksum.
         */
-       for (i = sizeof(struct tcphdr); i < tcp_hdrlen(skb); i += optl) {
+       for (i = sizeof(struct tcphdr); i < tcp_hdrlen - 1; i += optl) {
                optl = optlen(opt, i);
 
-               if (i + optl > tcp_hdrlen(skb))
+               if (i + optl > tcp_hdrlen)
                        break;
 
                if (!tcpoptstrip_test_bit(info->strip_bmap, opt[i]))
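
Both xt_TCPMSS and xt_TCPOPTSTRIP gain the same discipline: check that the packet holds a full fixed TCP header before reading tcph->doff, check the advertised header length against the bytes actually present, and bound the option walk so a complete option always fits. A standalone sketch of that walk, using raw byte offsets instead of struct tcphdr:

#include <stdint.h>

#define TCPOPT_MSS  2
#define TCPOLEN_MSS 4

/* Returns the offset of the MSS option, or -1 if absent/malformed. */
static int find_mss(const uint8_t *tcph, int len)
{
        int tcp_hdrlen, i;

        if (len < 20)                       /* full fixed header present? */
                return -1;
        tcp_hdrlen = (tcph[12] >> 4) * 4;   /* doff, in 32-bit words */
        if (len < tcp_hdrlen)               /* advertised length vs. reality */
                return -1;

        for (i = 20; i <= tcp_hdrlen - TCPOLEN_MSS; ) {
                int step;

                if (tcph[i] == TCPOPT_MSS && tcph[i + 1] == TCPOLEN_MSS)
                        return i;
                step = (tcph[i] <= 1) ? 1 : tcph[i + 1];  /* EOL/NOP are 1 byte */
                if (!step)
                        break;              /* malformed zero option length */
                i += step;
        }
        return -1;
}
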
index 512718adb0d59df5120e047c69c973556d1c6fb6..f85f8a2ad6cf002fa438bef15597496897d00483 100644 (file)
@@ -789,6 +789,10 @@ static int ctrl_dumpfamily(struct sk_buff *skb, struct netlink_callback *cb)
        struct net *net = sock_net(skb->sk);
        int chains_to_skip = cb->args[0];
        int fams_to_skip = cb->args[1];
+       bool need_locking = chains_to_skip || fams_to_skip;
+
+       if (need_locking)
+               genl_lock();
 
        for (i = chains_to_skip; i < GENL_FAM_TAB_SIZE; i++) {
                n = 0;
@@ -810,6 +814,9 @@ errout:
        cb->args[0] = i;
        cb->args[1] = n;
 
+       if (need_locking)
+               genl_unlock();
+
        return skb->len;
 }
 
index 22c5f399f1cf8119f13064559099ad7c30aae93d..ab101f715447921b01829c493d1cdef195b4400f 100644 (file)
@@ -535,6 +535,7 @@ int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb)
 {
        struct sw_flow_actions *acts = rcu_dereference(OVS_CB(skb)->flow->sf_acts);
 
+       OVS_CB(skb)->tun_key = NULL;
        return do_execute_actions(dp, skb, acts->actions,
                                         acts->actions_len, false);
 }
index f7e3a0d84c40488cd6744dce31c90bd0d942bd3b..f2ed7600084e896c34d7f5c137a286fd3a05b986 100644 (file)
@@ -2076,9 +2076,6 @@ static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info)
        ovs_notify(reply, info, &ovs_dp_vport_multicast_group);
        return 0;
 
-       rtnl_unlock();
-       return 0;
-
 exit_free:
        kfree_skb(reply);
 exit_unlock:
index 5c519b121e1be0ba26c40e136ace7b8eba42f62e..1aa84dc58777b589f80f84f04554a981502bed2c 100644 (file)
@@ -240,7 +240,7 @@ static struct flex_array *alloc_buckets(unsigned int n_buckets)
        struct flex_array *buckets;
        int i, err;
 
-       buckets = flex_array_alloc(sizeof(struct hlist_head *),
+       buckets = flex_array_alloc(sizeof(struct hlist_head),
                                   n_buckets, GFP_KERNEL);
        if (!buckets)
                return NULL;
index 281c1bded1f60f94934e406c9cafe076e4e2706d..51b968d3febb477be1e4183c8a94258c97fdcd18 100644 (file)
@@ -285,6 +285,45 @@ static struct Qdisc_ops *qdisc_lookup_ops(struct nlattr *kind)
        return q;
 }
 
+/* The linklayer setting was not transferred from iproute2 in older
+ * versions, and the rate table lookup system has been dropped from
+ * the kernel. To keep backward compatibility with older iproute2 tc
+ * utils, we detect the linklayer setting by checking whether the rate
+ * table was modified.
+ *
+ * For linklayer ATM table entries, the rate table will be aligned to
+ * 48 bytes, thus some table entries will contain the same value.  The
+ * mpu (min packet unit) is also encoded into the old rate table, thus
+ * starting from the mpu, we find low and high table entries for
+ * mapping this cell.  If these entries contain the same value, then
+ * the rate table has been modified for linklayer ATM.
+ *
+ * This is done by rounding mpu to the nearest 48-byte cell/entry,
+ * then rounding up to the next cell, calculating the table entry one
+ * below, and comparing the two.
+ */
+static __u8 __detect_linklayer(struct tc_ratespec *r, __u32 *rtab)
+{
+       int low       = roundup(r->mpu, 48);
+       int high      = roundup(low+1, 48);
+       int cell_low  = low >> r->cell_log;
+       int cell_high = (high >> r->cell_log) - 1;
+
+       /* rtab is too inaccurate at rates > 100Mbit/s */
+       if ((r->rate > (100000000/8)) || (rtab[0] == 0)) {
+               pr_debug("TC linklayer: Giving up ATM detection\n");
+               return TC_LINKLAYER_ETHERNET;
+       }
+
+       if ((cell_high > cell_low) && (cell_high < 256)
+           && (rtab[cell_low] == rtab[cell_high])) {
+               pr_debug("TC linklayer: Detected ATM, low(%d)=high(%d)=%u\n",
+                        cell_low, cell_high, rtab[cell_high]);
+               return TC_LINKLAYER_ATM;
+       }
+       return TC_LINKLAYER_ETHERNET;
+}
+
 static struct qdisc_rate_table *qdisc_rtab_list;
 
 struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r, struct nlattr *tab)
@@ -308,6 +347,8 @@ struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r, struct nlattr *ta
                rtab->rate = *r;
                rtab->refcnt = 1;
                memcpy(rtab->data, nla_data(tab), 1024);
+               if (r->linklayer == TC_LINKLAYER_UNAWARE)
+                       r->linklayer = __detect_linklayer(r, rtab->data);
                rtab->next = qdisc_rtab_list;
                qdisc_rtab_list = rtab;
        }
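
A worked pass through __detect_linklayer() above, with assumed values mpu = 0 and cell_log = 3 (each rtab slot then covers 8 bytes): ATM rounds every frame up to 48-byte cells, so all sizes up through 48 bytes cost exactly one cell and the low and high slots hold equal entries.

#include <stdio.h>

int main(void)
{
        unsigned int mpu = 0, cell_log = 3;
        unsigned int low  = ((mpu + 47) / 48) * 48;      /* roundup(mpu, 48)   -> 0  */
        unsigned int high = ((low + 1 + 47) / 48) * 48;  /* roundup(low+1, 48) -> 48 */
        unsigned int cell_low  = low >> cell_log;        /* slot 0 */
        unsigned int cell_high = (high >> cell_log) - 1; /* slot 5 */

        /* With an ATM-aligned table, rtab[0] == rtab[5]; on plain
         * Ethernet the per-size entries differ and detection fails. */
        printf("compare rtab[%u] with rtab[%u]\n", cell_low, cell_high);
        return 0;
}
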
index 4626cef4b76ea631e32d082d34828ffd2e465459..48be3d5c0d9246117c03b0174da16a637129640c 100644 (file)
@@ -25,6 +25,7 @@
 #include <linux/rcupdate.h>
 #include <linux/list.h>
 #include <linux/slab.h>
+#include <linux/if_vlan.h>
 #include <net/sch_generic.h>
 #include <net/pkt_sched.h>
 #include <net/dst.h>
@@ -207,15 +208,19 @@ void __qdisc_run(struct Qdisc *q)
 
 unsigned long dev_trans_start(struct net_device *dev)
 {
-       unsigned long val, res = dev->trans_start;
+       unsigned long val, res;
        unsigned int i;
 
+       if (is_vlan_dev(dev))
+               dev = vlan_dev_real_dev(dev);
+       res = dev->trans_start;
        for (i = 0; i < dev->num_tx_queues; i++) {
                val = netdev_get_tx_queue(dev, i)->trans_start;
                if (val && time_after(val, res))
                        res = val;
        }
        dev->trans_start = res;
+
        return res;
 }
 EXPORT_SYMBOL(dev_trans_start);
@@ -904,6 +909,7 @@ void psched_ratecfg_precompute(struct psched_ratecfg *r,
        memset(r, 0, sizeof(*r));
        r->overhead = conf->overhead;
        r->rate_bytes_ps = conf->rate;
+       r->linklayer = (conf->linklayer & TC_LINKLAYER_MASK);
        r->mult = 1;
        /*
         * The deal here is to replace a divide by a reciprocal one
index 45e751527dfcc57ed533c4b259bbf5c56e093cfc..c2178b15ca6e0bb0ed60eb885cc94177fd5b8e95 100644 (file)
@@ -1329,6 +1329,7 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
        struct htb_sched *q = qdisc_priv(sch);
        struct htb_class *cl = (struct htb_class *)*arg, *parent;
        struct nlattr *opt = tca[TCA_OPTIONS];
+       struct qdisc_rate_table *rtab = NULL, *ctab = NULL;
        struct nlattr *tb[TCA_HTB_MAX + 1];
        struct tc_htb_opt *hopt;
 
@@ -1350,6 +1351,18 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
        if (!hopt->rate.rate || !hopt->ceil.rate)
                goto failure;
 
+       /* Keep backward compatibility with rate_table-based iproute2 tc */
+       if (hopt->rate.linklayer == TC_LINKLAYER_UNAWARE) {
+               rtab = qdisc_get_rtab(&hopt->rate, tb[TCA_HTB_RTAB]);
+               if (rtab)
+                       qdisc_put_rtab(rtab);
+       }
+       if (hopt->ceil.linklayer == TC_LINKLAYER_UNAWARE) {
+               ctab = qdisc_get_rtab(&hopt->ceil, tb[TCA_HTB_CTAB]);
+               if (ctab)
+                       qdisc_put_rtab(ctab);
+       }
+
        if (!cl) {              /* new class */
                struct Qdisc *new_q;
                int prio;
index bce5b79662a62b8bdc5e7c895a6ea4bcd147ed86..ab67efc64b2490efba497e93db105bf9ed25b117 100644 (file)
@@ -846,12 +846,12 @@ void sctp_assoc_control_transport(struct sctp_association *asoc,
                else
                        spc_state = SCTP_ADDR_AVAILABLE;
                /* Don't inform ULP about transition from PF to
-                * active state and set cwnd to 1, see SCTP
+                * active state and set cwnd to 1 MTU, see SCTP
                 * Quick failover draft section 5.1, point 5
                 */
                if (transport->state == SCTP_PF) {
                        ulp_notify = false;
-                       transport->cwnd = 1;
+                       transport->cwnd = asoc->pathmtu;
                }
                transport->state = SCTP_ACTIVE;
                break;
index bdbbc3fd7c14e9fa2c31d4faf7de9b7b64540292..8fdd16046d668b0c6cf927e5e87754f2ebfaf583 100644 (file)
@@ -181,12 +181,12 @@ static void sctp_transport_destroy(struct sctp_transport *transport)
                return;
        }
 
-       call_rcu(&transport->rcu, sctp_transport_destroy_rcu);
-
        sctp_packet_free(&transport->packet);
 
        if (transport->asoc)
                sctp_association_put(transport->asoc);
+
+       call_rcu(&transport->rcu, sctp_transport_destroy_rcu);
 }
 
 /* Start T3_rtx timer if it is not already running and update the heartbeat
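
Moving call_rcu() last is the whole fix above: the destroying thread holds no RCU read lock here, so once the free callback is scheduled the transport may be released at any point, and sctp_packet_free() must not still be touching it. A userspace model, where defer_free() stands in for call_rcu():

#include <stdlib.h>

struct transport {
        void *packet;
};

/* Assumed helper: frees t at some later point, possibly concurrently
 * with the caller's remaining code - a call_rcu() stand-in. */
void defer_free(struct transport *t);

static void transport_destroy(struct transport *t)
{
        free(t->packet);   /* finish every access to *t first... */
        defer_free(t);     /* ...then schedule the deferred free, last */
}
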
index cb29ef7ba2f0b1d285c079d87ddf8c5a07c118af..609c30c808165191a69899e750439666addc29f8 100644 (file)
@@ -460,6 +460,7 @@ static void bearer_disable(struct tipc_bearer *b_ptr)
 {
        struct tipc_link *l_ptr;
        struct tipc_link *temp_l_ptr;
+       struct tipc_link_req *temp_req;
 
        pr_info("Disabling bearer <%s>\n", b_ptr->name);
        spin_lock_bh(&b_ptr->lock);
@@ -468,9 +469,13 @@ static void bearer_disable(struct tipc_bearer *b_ptr)
        list_for_each_entry_safe(l_ptr, temp_l_ptr, &b_ptr->links, link_list) {
                tipc_link_delete(l_ptr);
        }
-       if (b_ptr->link_req)
-               tipc_disc_delete(b_ptr->link_req);
+       temp_req = b_ptr->link_req;
+       b_ptr->link_req = NULL;
        spin_unlock_bh(&b_ptr->lock);
+
+       if (temp_req)
+               tipc_disc_delete(temp_req);
+
        memset(b_ptr, 0, sizeof(struct tipc_bearer));
 }
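
The tipc hunk detaches link_req under the bearer spinlock but calls tipc_disc_delete() only after dropping it, presumably because the deletion synchronizes with timer callbacks and must not run with the lock held. The general shape, sketched with pthreads and hypothetical names:

#include <pthread.h>
#include <stdlib.h>

struct request;
void request_delete(struct request *r);   /* may block; assumed helper */

struct bearer {
        pthread_mutex_t lock;
        struct request *link_req;
};

static void bearer_disable(struct bearer *b)
{
        struct request *req;

        pthread_mutex_lock(&b->lock);
        req = b->link_req;        /* detach the pointer under the lock... */
        b->link_req = NULL;
        pthread_mutex_unlock(&b->lock);

        if (req)
                request_delete(req);   /* ...but tear it down outside it */
}
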
 
index 593071dabd1cc7477169b24bc31979d8590465b6..4d9334683f8409c3e064c44a25235ea48e65b38b 100644 (file)
@@ -347,7 +347,7 @@ void vsock_for_each_connected_socket(void (*fn)(struct sock *sk))
        for (i = 0; i < ARRAY_SIZE(vsock_connected_table); i++) {
                struct vsock_sock *vsk;
                list_for_each_entry(vsk, &vsock_connected_table[i],
-                                   connected_table);
+                                   connected_table)
                        fn(sk_vsock(vsk));
        }
 
index 4f9f216665e9d4d6753172f3c14d7bbdd3f53154..a8c29fa4f1b3b90539f5370b9084056c5774812a 100644 (file)
@@ -765,6 +765,7 @@ void cfg80211_leave(struct cfg80211_registered_device *rdev,
                cfg80211_leave_mesh(rdev, dev);
                break;
        case NL80211_IFTYPE_AP:
+       case NL80211_IFTYPE_P2P_GO:
                cfg80211_stop_ap(rdev, dev);
                break;
        default:
index 25d217d90807f05f6a56945cb89dcd7b3314a53d..3fcba69817e579244e836b2e2d39a1aab14cc210 100644 (file)
@@ -441,10 +441,12 @@ static int nl80211_prepare_wdev_dump(struct sk_buff *skb,
                        goto out_unlock;
                }
                *rdev = wiphy_to_dev((*wdev)->wiphy);
-               cb->args[0] = (*rdev)->wiphy_idx;
+               /* 0 is the first valid index - store wiphy_idx + 1 so the
+                * attributes are parsed only once */
+               cb->args[0] = (*rdev)->wiphy_idx + 1;
                cb->args[1] = (*wdev)->identifier;
        } else {
-               struct wiphy *wiphy = wiphy_idx_to_wiphy(cb->args[0]);
+               /* subtract the 1 again here */
+               struct wiphy *wiphy = wiphy_idx_to_wiphy(cb->args[0] - 1);
                struct wireless_dev *tmp;
 
                if (!wiphy) {