Merge tag 'staging-3.14-rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh...
author Linus Torvalds <torvalds@linux-foundation.org>
Mon, 10 Mar 2014 01:58:27 +0000 (18:58 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Mon, 10 Mar 2014 01:58:27 +0000 (18:58 -0700)
Pull staging driver tree fix from Greg KH:
 "Here is a single staging driver fix for your tree.

  It resolves an issue with arbitrary writes to memory if a specific
  driver is loaded"

* tag 'staging-3.14-rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging:
  staging/cxt1e1/linux.c: Correct arbitrary memory write in c4_ioctl()
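
The fix closes a well-known ioctl bug class: a size taken from the user-controlled ioctl request is used to copy data into a fixed-size kernel object. Below is a hedged, generic sketch of that class and the missing bounds check; the struct, field, and function names are invented, and this is not the actual cxt1e1 code.

    /*
     * Hypothetical illustration only -- not the real c4_ioctl().  A
     * user-supplied size must be bounded before it is used to fill a
     * fixed-size kernel object, or adjacent memory can be overwritten.
     */
    #include <linux/errno.h>
    #include <linux/uaccess.h>

    struct demo_params {                    /* invented ioctl payload */
            int  mode;
            char name[32];
    };

    static int demo_ioctl_copy(void __user *data, unsigned int size)
    {
            struct demo_params p;

            if (size > sizeof(p))   /* the kind of check such fixes add */
                    return -EINVAL;
            if (copy_from_user(&p, data, size))
                    return -EFAULT;
            /* ... act on p ... */
            return 0;
    }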

274 files changed:
Documentation/device-mapper/cache.txt
Documentation/device-mapper/thin-provisioning.txt
Documentation/devicetree/bindings/clock/renesas,cpg-mstp-clocks.txt
Documentation/devicetree/bindings/net/opencores-ethoc.txt [new file with mode: 0644]
Documentation/networking/can.txt
MAINTAINERS
arch/arc/mm/cache_arc700.c
arch/arm/Kconfig
arch/arm/boot/compressed/.gitignore
arch/arm/boot/dts/keystone-clocks.dtsi
arch/arm/include/asm/memory.h
arch/arm/kernel/head-common.S
arch/arm/kernel/head.S
arch/arm/mach-sa1100/include/mach/collie.h
arch/arm/mm/dump.c
arch/c6x/include/asm/cache.h
arch/powerpc/kernel/process.c
arch/powerpc/kernel/reloc_64.S
arch/sh/include/cpu-sh2/cpu/cache.h
arch/sh/include/cpu-sh2a/cpu/cache.h
arch/sh/include/cpu-sh3/cpu/cache.h
arch/sh/include/cpu-sh4/cpu/cache.h
arch/sh/kernel/cpu/init.c
arch/sh/mm/cache-debugfs.c
arch/sh/mm/cache-sh2.c
arch/sh/mm/cache-sh2a.c
arch/sh/mm/cache-sh4.c
arch/sh/mm/cache-shx3.c
arch/sh/mm/cache.c
arch/x86/include/asm/efi.h
arch/x86/kernel/head_32.S
arch/x86/kernel/head_64.S
arch/x86/kernel/setup.c
arch/x86/mm/fault.c
arch/x86/platform/efi/efi.c
block/blk-exec.c
block/blk-flush.c
block/blk-mq-cpu.c
block/blk-mq.c
block/blk-mq.h
drivers/acpi/ec.c
drivers/acpi/resource.c
drivers/ata/libata-core.c
drivers/block/aoe/aoecmd.c
drivers/block/mtip32xx/mtip32xx.h
drivers/block/zram/zram_drv.c
drivers/clk/at91/clk-master.c
drivers/clk/clk-nomadik.c
drivers/clk/clk.c
drivers/clk/keystone/gate.c
drivers/clk/mvebu/armada-370.c
drivers/clk/mvebu/armada-xp.c
drivers/clk/mvebu/dove.c
drivers/clk/mvebu/kirkwood.c
drivers/clk/shmobile/clk-rcar-gen2.c
drivers/clk/tegra/clk-divider.c
drivers/clk/tegra/clk-id.h
drivers/clk/tegra/clk-tegra-periph.c
drivers/clk/tegra/clk-tegra-super-gen4.c
drivers/clk/tegra/clk-tegra114.c
drivers/clk/tegra/clk-tegra124.c
drivers/clk/tegra/clk-tegra20.c
drivers/cpufreq/cpufreq.c
drivers/firewire/core-device.c
drivers/firewire/net.c
drivers/firewire/ohci.c
drivers/firewire/sbp2.c
drivers/gpu/drm/armada/armada_drv.c
drivers/gpu/drm/bochs/Kconfig
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_gem_stolen.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_hdmi.c
drivers/gpu/drm/i915/intel_panel.c
drivers/gpu/drm/i915/intel_pm.c
drivers/gpu/drm/radeon/atombios_encoders.c
drivers/gpu/drm/radeon/cik.c
drivers/gpu/drm/radeon/evergreen.c
drivers/gpu/drm/radeon/evergreen_smc.h
drivers/gpu/drm/radeon/ni.c
drivers/gpu/drm/radeon/r100.c
drivers/gpu/drm/radeon/r300.c
drivers/gpu/drm/radeon/r420.c
drivers/gpu/drm/radeon/r520.c
drivers/gpu/drm/radeon/r600.c
drivers/gpu/drm/radeon/radeon_device.c
drivers/gpu/drm/radeon/radeon_ttm.c
drivers/gpu/drm/radeon/rs400.c
drivers/gpu/drm/radeon/rs600.c
drivers/gpu/drm/radeon/rs690.c
drivers/gpu/drm/radeon/rv515.c
drivers/gpu/drm/radeon/rv770.c
drivers/gpu/drm/radeon/si.c
drivers/infiniband/hw/mlx4/main.c
drivers/infiniband/hw/mlx5/main.c
drivers/infiniband/ulp/isert/ib_isert.c
drivers/infiniband/ulp/isert/ib_isert.h
drivers/md/Kconfig
drivers/md/dm-cache-policy-mq.c
drivers/md/dm-snap-persistent.c
drivers/md/dm-thin-metadata.c
drivers/md/dm-thin-metadata.h
drivers/md/dm-thin.c
drivers/md/persistent-data/Kconfig
drivers/md/persistent-data/dm-space-map-metadata.c
drivers/net/bonding/bond_3ad.c
drivers/net/bonding/bond_main.c
drivers/net/bonding/bonding.h
drivers/net/can/flexcan.c
drivers/net/ethernet/broadcom/b44.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
drivers/net/ethernet/broadcom/tg3.c
drivers/net/ethernet/broadcom/tg3.h
drivers/net/ethernet/brocade/bna/bnad.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
drivers/net/ethernet/emulex/benet/be.h
drivers/net/ethernet/emulex/benet/be_main.c
drivers/net/ethernet/freescale/fec_main.c
drivers/net/ethernet/mellanox/mlx4/mlx4.h
drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
drivers/net/ethernet/mellanox/mlx5/core/main.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
drivers/net/ethernet/realtek/r8169.c
drivers/net/ethernet/sfc/ptp.c
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ethernet/ti/cpsw.c
drivers/net/macvlan.c
drivers/net/phy/phy_device.c
drivers/net/tun.c
drivers/net/usb/ax88179_178a.c
drivers/net/veth.c
drivers/net/virtio_net.c
drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
drivers/net/wireless/ath/ath9k/hw.c
drivers/net/wireless/ath/ath9k/recv.c
drivers/net/wireless/ath/ath9k/xmit.c
drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
drivers/net/wireless/hostap/hostap_ap.c
drivers/net/wireless/iwlwifi/dvm/sta.c
drivers/net/wireless/iwlwifi/dvm/tx.c
drivers/net/wireless/iwlwifi/mvm/mvm.h
drivers/net/wireless/iwlwifi/mvm/tx.c
drivers/net/wireless/libertas/cfg.c
drivers/net/wireless/mwifiex/pcie.c
drivers/net/wireless/mwifiex/usb.c
drivers/net/wireless/mwifiex/wmm.c
drivers/net/xen-netfront.c
drivers/pinctrl/Kconfig
drivers/pinctrl/pinctrl-sunxi.c
drivers/pinctrl/pinctrl-sunxi.h
drivers/pinctrl/sh-pfc/pfc-r8a7791.c
drivers/pinctrl/sirf/pinctrl-sirf.c
drivers/rapidio/devices/tsi721.h
drivers/rapidio/devices/tsi721_dma.c
drivers/regulator/core.c
drivers/rtc/rtc-s3c.c
drivers/s390/net/qeth_core_main.c
drivers/s390/net/qeth_l2_main.c
drivers/s390/net/qeth_l3_main.c
drivers/spi/spi-ath79.c
drivers/spi/spi-atmel.c
drivers/spi/spi-coldfire-qspi.c
drivers/spi/spi-fsl-dspi.c
drivers/spi/spi-imx.c
drivers/spi/spi-topcliff-pch.c
drivers/target/iscsi/iscsi_target.c
drivers/target/iscsi/iscsi_target_erl2.c
drivers/target/iscsi/iscsi_target_tpg.c
drivers/target/target_core_sbc.c
drivers/thermal/Kconfig
drivers/thermal/thermal_core.c
drivers/thermal/x86_pkg_temp_thermal.c
drivers/vfio/vfio_iommu_type1.c
fs/bio-integrity.c
fs/hfsplus/options.c
fs/ocfs2/quota_global.c
fs/ocfs2/quota_local.c
fs/proc/page.c
include/dt-bindings/clock/tegra124-car.h
include/linux/blk-mq.h
include/linux/firewire.h
include/linux/huge_mm.h
include/linux/mm.h
include/linux/skbuff.h
include/linux/tracepoint.h
include/net/ip_tunnels.h
include/net/tcp.h
include/net/xfrm.h
include/target/iscsi/iscsi_transport.h
kernel/cpuset.c
kernel/irq/irqdomain.c
kernel/irq/manage.c
kernel/sched/cpudeadline.c
kernel/sched/deadline.c
kernel/sched/fair.c
kernel/sched/rt.c
kernel/trace/trace_events.c
kernel/tracepoint.c
lib/dma-debug.c
lib/radix-tree.c
mm/huge_memory.c
mm/ksm.c
mm/memcontrol.c
mm/memory-failure.c
mm/page_alloc.c
mm/swap.c
net/can/raw.c
net/core/neighbour.c
net/core/skbuff.c
net/hsr/hsr_framereg.c
net/ipv4/af_inet.c
net/ipv4/ip_output.c
net/ipv4/ip_tunnel.c
net/ipv4/ip_tunnel_core.c
net/ipv4/netfilter/nf_nat_snmp_basic.c
net/ipv4/tcp.c
net/ipv4/tcp_cong.c
net/ipv4/tcp_input.c
net/ipv4/tcp_output.c
net/ipv6/Kconfig
net/ipv6/exthdrs_core.c
net/ipv6/ip6_offload.c
net/ipv6/ip6_output.c
net/ipv6/ping.c
net/ipv6/sit.c
net/ipv6/udp_offload.c
net/mac80211/ieee80211_i.h
net/mac80211/mlme.c
net/mac80211/rx.c
net/mac80211/sta_info.c
net/mac80211/sta_info.h
net/mac80211/tx.c
net/mac80211/util.c
net/mac80211/wme.c
net/netfilter/nf_conntrack_netlink.c
net/netfilter/nf_nat_core.c
net/netfilter/nft_meta.c
net/netfilter/nft_payload.c
net/netfilter/nft_reject_inet.c
net/netlink/af_netlink.c
net/nfc/nci/core.c
net/sched/sch_tbf.c
net/sctp/associola.c
net/sctp/sm_sideeffect.c
net/sctp/sm_statefuns.c
net/tipc/bearer.c
net/tipc/config.c
net/tipc/core.c
net/tipc/core.h
net/tipc/name_table.c
net/tipc/netlink.c
net/tipc/ref.c
net/tipc/server.c
net/tipc/server.h
net/tipc/socket.c
net/wireless/reg.c
net/xfrm/xfrm_policy.c
net/xfrm/xfrm_state.c
net/xfrm/xfrm_user.c
scripts/gen_initramfs_list.sh
scripts/kallsyms.c
scripts/mod/modpost.c
security/keys/keyring.c
sound/pci/hda/patch_analog.c
sound/pci/hda/patch_realtek.c
sound/usb/mixer.c
tools/lib/lockdep/Makefile
tools/lib/lockdep/preload.c
tools/lib/lockdep/run_tests.sh [changed mode: 0644->0755]
tools/lib/lockdep/uinclude/asm/hash.h [new file with mode: 0644]
tools/lib/lockdep/uinclude/linux/rcu.h

index e6b72d35515123ad0aff423999aeef359183bb3d..68c0f517c60edb1ab7a61e990720c7c8f097bb16 100644 (file)
@@ -124,12 +124,11 @@ the default being 204800 sectors (or 100MB).
 Updating on-disk metadata
 -------------------------
 
-On-disk metadata is committed every time a REQ_SYNC or REQ_FUA bio is
-written.  If no such requests are made then commits will occur every
-second.  This means the cache behaves like a physical disk that has a
-write cache (the same is true of the thin-provisioning target).  If
-power is lost you may lose some recent writes.  The metadata should
-always be consistent in spite of any crash.
+On-disk metadata is committed every time a FLUSH or FUA bio is written.
+If no such requests are made then commits will occur every second.  This
+means the cache behaves like a physical disk that has a volatile write
+cache.  If power is lost you may lose some recent writes.  The metadata
+should always be consistent in spite of any crash.
 
 The 'dirty' state for a cache block changes far too frequently for us
 to keep updating it on the fly.  So we treat it as a hint.  In normal
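
For context on the paragraph above: the usual way a FLUSH reaches a device-mapper target is an application forcing data to stable storage, e.g. with fsync(). A minimal userspace sketch follows; the mount path is an invented assumption, not taken from the document.

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        /* fsync() makes the filesystem issue a FLUSH (and/or FUA) bio;
         * per the text above, that forces the dm-cache target to commit
         * its on-disk metadata immediately instead of waiting for the
         * once-per-second commit. */
        int fd = open("/mnt/cached-vol/file", O_WRONLY | O_CREAT, 0644);

        if (fd < 0 || write(fd, "data", 4) != 4 || fsync(fd) != 0) {
            perror("write+fsync");
            return 1;
        }
        close(fd);
        return 0;
    }
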
index 8a7a3d46e0daa00d6f87315648b7cb292e8c05b4..05a27e9442bd4e5c4d3e618a6a0594e55466b192 100644 (file)
@@ -116,6 +116,35 @@ Resuming a device with a new table itself triggers an event so the
 userspace daemon can use this to detect a situation where a new table
 already exceeds the threshold.
 
+A low water mark for the metadata device is maintained in the kernel and
+will trigger a dm event if free space on the metadata device drops below
+it.
+
+Updating on-disk metadata
+-------------------------
+
+On-disk metadata is committed every time a FLUSH or FUA bio is written.
+If no such requests are made then commits will occur every second.  This
+means the thin-provisioning target behaves like a physical disk that has
+a volatile write cache.  If power is lost you may lose some recent
+writes.  The metadata should always be consistent in spite of any crash.
+
+If data space is exhausted the pool will either error or queue IO
+according to the configuration (see: error_if_no_space).  If metadata
+space is exhausted or a metadata operation fails: the pool will error IO
+until the pool is taken offline and repair is performed to 1) fix any
+potential inconsistencies and 2) clear the flag that imposes repair.
+Once the pool's metadata device is repaired it may be resized, which
+will allow the pool to return to normal operation.  Note that if a pool
+is flagged as needing repair, the pool's data and metadata devices
+cannot be resized until repair is performed.  It should also be noted
+that when the pool's metadata space is exhausted the current metadata
+transaction is aborted.  Given that the pool will cache IO whose
+completion may have already been acknowledged to upper IO layers
+(e.g. filesystem) it is strongly suggested that consistency checks
+(e.g. fsck) be performed on those layers when repair of the pool is
+required.
+
 Thin provisioning
 -----------------
 
@@ -258,10 +287,9 @@ ii) Status
        should register for the event and then check the target's status.
 
     held metadata root:
-       The location, in sectors, of the metadata root that has been
+       The location, in blocks, of the metadata root that has been
        'held' for userspace read access.  '-' indicates there is no
-       held root.  This feature is not yet implemented so '-' is
-       always returned.
+       held root.
 
     discard_passdown|no_discard_passdown
        Whether or not discards are actually being passed down to the
index a6a352c2771e74da0064ec9ce93a87c44ce2fe07..5992dceec7af7d1e9d1ac8f329b831d48e409d87 100644 (file)
@@ -21,9 +21,9 @@ Required Properties:
     must appear in the same order as the output clocks.
   - #clock-cells: Must be 1
   - clock-output-names: The name of the clocks as free-form strings
-  - renesas,indices: Indices of the gate clocks into the group (0 to 31)
+  - renesas,clock-indices: Indices of the gate clocks into the group (0 to 31)
 
-The clocks, clock-output-names and renesas,indices properties contain one
+The clocks, clock-output-names and renesas,clock-indices properties contain one
 entry per gate clock. The MSTP groups are sparsely populated. Unimplemented
 gate clocks must not be declared.
 
diff --git a/Documentation/devicetree/bindings/net/opencores-ethoc.txt b/Documentation/devicetree/bindings/net/opencores-ethoc.txt
new file mode 100644 (file)
index 0000000..2dc127c
--- /dev/null
@@ -0,0 +1,22 @@
+* OpenCores MAC 10/100 Mbps
+
+Required properties:
+- compatible: Should be "opencores,ethoc".
+- reg: two memory regions (address and length),
+  first region is for the device registers and descriptor rings,
+  second is for the device packet memory.
+- interrupts: interrupt for the device.
+
+Optional properties:
+- clocks: phandle to refer to the clk used as per
+  Documentation/devicetree/bindings/clock/clock-bindings.txt
+
+Examples:
+
+       enet0: ethoc@fd030000 {
+               compatible = "opencores,ethoc";
+               reg = <0xfd030000 0x4000 0xfd800000 0x4000>;
+               interrupts = <1>;
+               local-mac-address = [00 50 c2 13 6f 00];
+               clocks = <&osc>;
+        };
index f3089d4235153b6fa03aaf5c8d09c81043bba996..0cbe6ec22d6ff1d6a3c739f05ae61f4dc24781ec 100644 (file)
@@ -554,12 +554,6 @@ solution for a couple of reasons:
   not specified in the struct can_frame and therefore it is only valid in
   CANFD_MTU sized CAN FD frames.
 
-  As long as the payload length is <=8 the received CAN frames from CAN FD
-  capable CAN devices can be received and read by legacy sockets too. When
-  user-generated CAN FD frames have a payload length <=8 these can be send
-  by legacy CAN network interfaces too. Sending CAN FD frames with payload
-  length > 8 to a legacy CAN network interface returns an -EMSGSIZE error.
-
   Implementation hint for new CAN applications:
 
   To build a CAN FD aware application use struct canfd_frame as basic CAN
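
To make that hint concrete, here is a minimal sketch of a CAN FD aware receiver built around struct canfd_frame; the interface name "can0" is an assumption and error handling is trimmed.

    #include <linux/can.h>
    #include <linux/can/raw.h>
    #include <net/if.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <unistd.h>

    int main(void)
    {
        struct sockaddr_can addr = { .can_family = AF_CAN };
        struct ifreq ifr;
        struct canfd_frame frame;
        int on = 1;
        int s = socket(PF_CAN, SOCK_RAW, CAN_RAW);

        /* Opt in to CAN FD; otherwise the socket only delivers classic
         * struct can_frame payloads of at most 8 data bytes. */
        setsockopt(s, SOL_CAN_RAW, CAN_RAW_FD_FRAMES, &on, sizeof(on));

        strcpy(ifr.ifr_name, "can0");           /* assumed interface */
        ioctl(s, SIOCGIFINDEX, &ifr);
        addr.can_ifindex = ifr.ifr_ifindex;
        bind(s, (struct sockaddr *)&addr, sizeof(addr));

        /* read() returns CAN_MTU or CANFD_MTU, identifying the frame type. */
        if (read(s, &frame, sizeof(frame)) >= (ssize_t)CAN_MTU)
            printf("id 0x%x len %d\n", frame.can_id, frame.len);

        close(s);
        return 0;
    }
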
index c6d0e93eff62d700443b93dff01120d36d2ce52a..b7befe758429293e12f6523a5b35fa546f19ac4b 100644 (file)
@@ -73,7 +73,8 @@ Descriptions of section entries:
        L: Mailing list that is relevant to this area
        W: Web-page with status/info
        Q: Patchwork web based patch tracking system site
-       T: SCM tree type and location.  Type is one of: git, hg, quilt, stgit, topgit.
+       T: SCM tree type and location.
+          Type is one of: git, hg, quilt, stgit, topgit
        S: Status, one of the following:
           Supported:   Someone is actually paid to look after this.
           Maintained:  Someone actually looks after it.
@@ -473,7 +474,7 @@ F:  net/rxrpc/af_rxrpc.c
 
 AGPGART DRIVER
 M:     David Airlie <airlied@linux.ie>
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6.git
+T:     git git://people.freedesktop.org/~airlied/linux (part of drm maint)
 S:     Maintained
 F:     drivers/char/agp/
 F:     include/linux/agp*
@@ -1612,11 +1613,11 @@ S:      Maintained
 F:     drivers/net/wireless/atmel*
 
 ATTO EXPRESSSAS SAS/SATA RAID SCSI DRIVER
-M:      Bradley Grove <linuxdrivers@attotech.com>
-L:      linux-scsi@vger.kernel.org
-W:      http://www.attotech.com
-S:      Supported
-F:      drivers/scsi/esas2r
+M:     Bradley Grove <linuxdrivers@attotech.com>
+L:     linux-scsi@vger.kernel.org
+W:     http://www.attotech.com
+S:     Supported
+F:     drivers/scsi/esas2r
 
 AUDIT SUBSYSTEM
 M:     Eric Paris <eparis@redhat.com>
@@ -2159,7 +2160,7 @@ F:        Documentation/zh_CN/
 
 CHIPIDEA USB HIGH SPEED DUAL ROLE CONTROLLER
 M:     Peter Chen <Peter.Chen@freescale.com>
-T:     git://github.com/hzpeterchen/linux-usb.git
+T:     git git://github.com/hzpeterchen/linux-usb.git
 L:     linux-usb@vger.kernel.org
 S:     Maintained
 F:     drivers/usb/chipidea/
@@ -2179,9 +2180,9 @@ S:        Supported
 F:     drivers/net/ethernet/cisco/enic/
 
 CISCO VIC LOW LATENCY NIC DRIVER
-M:      Upinder Malhi <umalhi@cisco.com>
-S:      Supported
-F:      drivers/infiniband/hw/usnic
+M:     Upinder Malhi <umalhi@cisco.com>
+S:     Supported
+F:     drivers/infiniband/hw/usnic
 
 CIRRUS LOGIC EP93XX ETHERNET DRIVER
 M:     Hartley Sweeten <hsweeten@visionengravers.com>
@@ -2378,20 +2379,20 @@ F:      drivers/cpufreq/arm_big_little.c
 F:     drivers/cpufreq/arm_big_little_dt.c
 
 CPUIDLE DRIVER - ARM BIG LITTLE
-M:      Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
-M:      Daniel Lezcano <daniel.lezcano@linaro.org>
-L:      linux-pm@vger.kernel.org
-L:      linux-arm-kernel@lists.infradead.org
-T:      git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm.git
-S:      Maintained
-F:      drivers/cpuidle/cpuidle-big_little.c
+M:     Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+M:     Daniel Lezcano <daniel.lezcano@linaro.org>
+L:     linux-pm@vger.kernel.org
+L:     linux-arm-kernel@lists.infradead.org
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm.git
+S:     Maintained
+F:     drivers/cpuidle/cpuidle-big_little.c
 
 CPUIDLE DRIVERS
 M:     Rafael J. Wysocki <rjw@rjwysocki.net>
 M:     Daniel Lezcano <daniel.lezcano@linaro.org>
 L:     linux-pm@vger.kernel.org
 S:     Maintained
-T:     git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm.git
 F:     drivers/cpuidle/*
 F:     include/linux/cpuidle.h
 
@@ -2458,9 +2459,9 @@ S:        Maintained
 F:     sound/pci/cs5535audio/
 
 CW1200 WLAN driver
-M:     Solomon Peachy <pizza@shaftnet.org>
-S:     Maintained
-F:     drivers/net/wireless/cw1200/
+M:     Solomon Peachy <pizza@shaftnet.org>
+S:     Maintained
+F:     drivers/net/wireless/cw1200/
 
 CX18 VIDEO4LINUX DRIVER
 M:     Andy Walls <awalls@md.metrocast.net>
@@ -3095,6 +3096,8 @@ F:        fs/ecryptfs/
 
 EDAC-CORE
 M:     Doug Thompson <dougthompson@xmission.com>
+M:     Borislav Petkov <bp@alien8.de>
+M:     Mauro Carvalho Chehab <m.chehab@samsung.com>
 L:     linux-edac@vger.kernel.org
 W:     bluesmoke.sourceforge.net
 S:     Supported
@@ -4558,6 +4561,7 @@ F:        Documentation/networking/ixgbevf.txt
 F:     Documentation/networking/i40e.txt
 F:     Documentation/networking/i40evf.txt
 F:     drivers/net/ethernet/intel/
+F:     drivers/net/ethernet/intel/*/
 
 INTEL-MID GPIO DRIVER
 M:     David Cohen <david.a.cohen@linux.intel.com>
@@ -4914,7 +4918,7 @@ F:        drivers/staging/ktap/
 KCONFIG
 M:     "Yann E. MORIN" <yann.morin.1998@free.fr>
 L:     linux-kbuild@vger.kernel.org
-T:     git://gitorious.org/linux-kconfig/linux-kconfig
+T:     git git://gitorious.org/linux-kconfig/linux-kconfig
 S:     Maintained
 F:     Documentation/kbuild/kconfig-language.txt
 F:     scripts/kconfig/
@@ -5471,11 +5475,11 @@ S:      Maintained
 F:     drivers/media/tuners/m88ts2022*
 
 MA901 MASTERKIT USB FM RADIO DRIVER
-M:      Alexey Klimov <klimov.linux@gmail.com>
-L:      linux-media@vger.kernel.org
-T:      git git://linuxtv.org/media_tree.git
-S:      Maintained
-F:      drivers/media/radio/radio-ma901.c
+M:     Alexey Klimov <klimov.linux@gmail.com>
+L:     linux-media@vger.kernel.org
+T:     git git://linuxtv.org/media_tree.git
+S:     Maintained
+F:     drivers/media/radio/radio-ma901.c
 
 MAC80211
 M:     Johannes Berg <johannes@sipsolutions.net>
@@ -5636,7 +5640,7 @@ F:        drivers/scsi/megaraid/
 
 MELLANOX ETHERNET DRIVER (mlx4_en)
 M:     Amir Vadai <amirv@mellanox.com>
-L:     netdev@vger.kernel.org
+L:     netdev@vger.kernel.org
 S:     Supported
 W:     http://www.mellanox.com
 Q:     http://patchwork.ozlabs.org/project/netdev/list/
@@ -5677,7 +5681,7 @@ F:        include/linux/mtd/
 F:     include/uapi/mtd/
 
 MEN A21 WATCHDOG DRIVER
-M:     Johannes Thumshirn <johannes.thumshirn@men.de>
+M:     Johannes Thumshirn <johannes.thumshirn@men.de>
 L:     linux-watchdog@vger.kernel.org
 S:     Supported
 F:     drivers/watchdog/mena21_wdt.c
@@ -5733,20 +5737,20 @@ L:      linux-rdma@vger.kernel.org
 W:     http://www.mellanox.com
 Q:     http://patchwork.ozlabs.org/project/netdev/list/
 Q:     http://patchwork.kernel.org/project/linux-rdma/list/
-T:     git://openfabrics.org/~eli/connect-ib.git
+T:     git git://openfabrics.org/~eli/connect-ib.git
 S:     Supported
 F:     drivers/net/ethernet/mellanox/mlx5/core/
 F:     include/linux/mlx5/
 
 Mellanox MLX5 IB driver
-M:      Eli Cohen <eli@mellanox.com>
-L:      linux-rdma@vger.kernel.org
-W:      http://www.mellanox.com
-Q:      http://patchwork.kernel.org/project/linux-rdma/list/
-T:      git://openfabrics.org/~eli/connect-ib.git
-S:      Supported
-F:      include/linux/mlx5/
-F:      drivers/infiniband/hw/mlx5/
+M:     Eli Cohen <eli@mellanox.com>
+L:     linux-rdma@vger.kernel.org
+W:     http://www.mellanox.com
+Q:     http://patchwork.kernel.org/project/linux-rdma/list/
+T:     git git://openfabrics.org/~eli/connect-ib.git
+S:     Supported
+F:     include/linux/mlx5/
+F:     drivers/infiniband/hw/mlx5/
 
 MODULE SUPPORT
 M:     Rusty Russell <rusty@rustcorp.com.au>
@@ -6171,6 +6175,12 @@ S:       Supported
 F:     drivers/block/nvme*
 F:     include/linux/nvme.h
 
+NXP TDA998X DRM DRIVER
+M:     Russell King <rmk+kernel@arm.linux.org.uk>
+S:     Supported
+F:     drivers/gpu/drm/i2c/tda998x_drv.c
+F:     include/drm/i2c/tda998x.h
+
 OMAP SUPPORT
 M:     Tony Lindgren <tony@atomide.com>
 L:     linux-omap@vger.kernel.org
@@ -8700,17 +8710,17 @@ S:      Maintained
 F:     drivers/media/radio/radio-raremono.c
 
 THERMAL
-M:      Zhang Rui <rui.zhang@intel.com>
-M:      Eduardo Valentin <eduardo.valentin@ti.com>
-L:      linux-pm@vger.kernel.org
-T:      git git://git.kernel.org/pub/scm/linux/kernel/git/rzhang/linux.git
-T:      git git://git.kernel.org/pub/scm/linux/kernel/git/evalenti/linux-soc-thermal.git
-Q:      https://patchwork.kernel.org/project/linux-pm/list/
-S:      Supported
-F:      drivers/thermal/
-F:      include/linux/thermal.h
-F:      include/linux/cpu_cooling.h
-F:      Documentation/devicetree/bindings/thermal/
+M:     Zhang Rui <rui.zhang@intel.com>
+M:     Eduardo Valentin <eduardo.valentin@ti.com>
+L:     linux-pm@vger.kernel.org
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/rzhang/linux.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/evalenti/linux-soc-thermal.git
+Q:     https://patchwork.kernel.org/project/linux-pm/list/
+S:     Supported
+F:     drivers/thermal/
+F:     include/linux/thermal.h
+F:     include/linux/cpu_cooling.h
+F:     Documentation/devicetree/bindings/thermal/
 
 THINGM BLINK(1) USB RGB LED DRIVER
 M:     Vivien Didelot <vivien.didelot@savoirfairelinux.com>
@@ -9812,7 +9822,7 @@ ZR36067 VIDEO FOR LINUX DRIVER
 L:     mjpeg-users@lists.sourceforge.net
 L:     linux-media@vger.kernel.org
 W:     http://mjpeg.sourceforge.net/driver-zoran/
-T:     Mercurial http://linuxtv.org/hg/v4l-dvb
+T:     hg http://linuxtv.org/hg/v4l-dvb
 S:     Odd Fixes
 F:     drivers/media/pci/zoran/
 
index 6b58c1de757744feac1d0ae54ec1fdd0693fc500..400c663b21c2bf075e977a8ccf3016fb7e03b5a6 100644 (file)
@@ -282,7 +282,7 @@ static inline void __cache_line_loop(unsigned long paddr, unsigned long vaddr,
 #else
        /* if V-P const for loop, PTAG can be written once outside loop */
        if (full_page_op)
-               write_aux_reg(ARC_REG_DC_PTAG, paddr);
+               write_aux_reg(aux_tag, paddr);
 #endif
 
        while (num_lines-- > 0) {
@@ -296,7 +296,7 @@ static inline void __cache_line_loop(unsigned long paddr, unsigned long vaddr,
                write_aux_reg(aux_cmd, vaddr);
                vaddr += L1_CACHE_BYTES;
 #else
-               write_aux_reg(aux, paddr);
+               write_aux_reg(aux_cmd, paddr);
                paddr += L1_CACHE_BYTES;
 #endif
        }
index e254198177914ca13050f37519a6ca43e0da596d..15949459611f194376dafcdebaf569f8ef141dc5 100644 (file)
@@ -1578,6 +1578,7 @@ config BL_SWITCHER_DUMMY_IF
 
 choice
        prompt "Memory split"
+       depends on MMU
        default VMSPLIT_3G
        help
          Select the desired split between kernel and user memory.
@@ -1595,6 +1596,7 @@ endchoice
 
 config PAGE_OFFSET
        hex
+       default PHYS_OFFSET if !MMU
        default 0x40000000 if VMSPLIT_1G
        default 0x80000000 if VMSPLIT_2G
        default 0xC0000000
@@ -1903,6 +1905,7 @@ config XEN
        depends on ARM && AEABI && OF
        depends on CPU_V7 && !CPU_V6
        depends on !GENERIC_ATOMIC64
+       depends on MMU
        select ARM_PSCI
        select SWIOTLB_XEN
        select ARCH_DMA_ADDR_T_64BIT
index 47279aa96a6a48fd1945ae9faa7584d49f24a355..0714e0334e33c9790b221d03859574b2f4e774ad 100644 (file)
@@ -1,4 +1,5 @@
 ashldi3.S
+bswapsdi2.S
 font.c
 lib1funcs.S
 hyp-stub.S
index 2363593e1050b7b84b4705878fc10cb653b33cca..ef58d1c24313541a068436c5e590378120c6bb3b 100644 (file)
@@ -612,7 +612,7 @@ clocks {
                compatible = "ti,keystone,psc-clock";
                clocks = <&chipclk13>;
                clock-output-names = "vcp-3";
-               reg = <0x0235000a8 0xb00>, <0x02350060 0x400>;
+               reg = <0x023500a8 0xb00>, <0x02350060 0x400>;
                reg-names = "control", "domain";
                domain-id = <24>;
        };
index 8756e4bcdba0609ff789a4f1efaa551faa39ed98..4afb376d9c7c13d07ea81502e17ba53f0554c122 100644 (file)
  */
 #define UL(x) _AC(x, UL)
 
+/* PAGE_OFFSET - the virtual address of the start of the kernel image */
+#define PAGE_OFFSET            UL(CONFIG_PAGE_OFFSET)
+
 #ifdef CONFIG_MMU
 
 /*
- * PAGE_OFFSET - the virtual address of the start of the kernel image
  * TASK_SIZE - the maximum size of a user space task.
  * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area
  */
-#define PAGE_OFFSET            UL(CONFIG_PAGE_OFFSET)
 #define TASK_SIZE              (UL(CONFIG_PAGE_OFFSET) - UL(SZ_16M))
 #define TASK_UNMAPPED_BASE     ALIGN(TASK_SIZE / 3, SZ_16M)
 
 #define END_MEM                (UL(CONFIG_DRAM_BASE) + CONFIG_DRAM_SIZE)
 #endif
 
-#ifndef PAGE_OFFSET
-#define PAGE_OFFSET            PLAT_PHYS_OFFSET
-#endif
-
 /*
  * The module can be at any place in ram in nommu mode.
  */
index 47cd974e57ea3ca0de7e9feccd9e527c2ba39f1f..c96ecacb2021222936844a4807f39ef474965879 100644 (file)
@@ -177,6 +177,18 @@ __lookup_processor_type_data:
        .long   __proc_info_end
        .size   __lookup_processor_type_data, . - __lookup_processor_type_data
 
+__error_lpae:
+#ifdef CONFIG_DEBUG_LL
+       adr     r0, str_lpae
+       bl      printascii
+       b       __error
+str_lpae: .asciz "\nError: Kernel with LPAE support, but CPU does not support LPAE.\n"
+#else
+       b       __error
+#endif
+       .align
+ENDPROC(__error_lpae)
+
 __error_p:
 #ifdef CONFIG_DEBUG_LL
        adr     r0, str_p1
index 914616e0bdcd0c0108376a9e5834e9cb73219b51..f5f381d915560818dcf0d2200731a90b0c7ae404 100644 (file)
@@ -102,7 +102,7 @@ ENTRY(stext)
        and     r3, r3, #0xf                    @ extract VMSA support
        cmp     r3, #5                          @ long-descriptor translation table format?
  THUMB( it     lo )                            @ force fixup-able long branch encoding
-       blo     __error_p                       @ only classic page table format
+       blo     __error_lpae                    @ only classic page table format
 #endif
 
 #ifndef CONFIG_XIP_KERNEL
index f33679d2d3ee0a218d280972c5f409f0b561d41b..50e1d850ee2e01d6f29ee5004af4339f40c712b0 100644 (file)
@@ -13,6 +13,8 @@
 #ifndef __ASM_ARCH_COLLIE_H
 #define __ASM_ARCH_COLLIE_H
 
+#include "hardware.h" /* Gives GPIO_MAX */
+
 extern void locomolcd_power(int on);
 
 #define COLLIE_SCOOP_GPIO_BASE (GPIO_MAX + 1)
index 2b3a56414271e5e31323566676097b774fca9406..ef69152f9b52e473796829545bf5cf5d1e83926a 100644 (file)
@@ -264,6 +264,9 @@ static void walk_pmd(struct pg_state *st, pud_t *pud, unsigned long start)
                        note_page(st, addr, 3, pmd_val(*pmd));
                else
                        walk_pte(st, pmd, addr);
+
+               if (SECTION_SIZE < PMD_SIZE && pmd_large(pmd[1]))
+                       note_page(st, addr + SECTION_SIZE, 3, pmd_val(pmd[1]));
        }
 }
 
index 09c5a0f5f4d1778156a5ee83715e6a2aec7f0441..86648c083bb4b1297275dc2bc4ceb3ecd949aa5b 100644 (file)
@@ -12,6 +12,7 @@
 #define _ASM_C6X_CACHE_H
 
 #include <linux/irqflags.h>
+#include <linux/init.h>
 
 /*
  * Cache line size
index 8d4c247f17389f283d02bc48dba044ff077e3ffe..af064d28b36524c5ce9c8fc982a58eec65ebbfc7 100644 (file)
@@ -1048,6 +1048,15 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
        flush_altivec_to_thread(src);
        flush_vsx_to_thread(src);
        flush_spe_to_thread(src);
+       /*
+        * Flush TM state out so we can copy it.  __switch_to_tm() does this
+        * flush but it removes the checkpointed state from the current CPU and
+        * transitions the CPU out of TM mode.  Hence we need to call
+        * tm_recheckpoint_new_task() (on the same task) to restore the
+        * checkpointed state back and the TM mode.
+        */
+       __switch_to_tm(src);
+       tm_recheckpoint_new_task(src);
 
        *dst = *src;
 
index 1482327cfeba9c1a846be00ae54e2606a1ac07b9..d88736fbece67c6627e6f33a9c4fd3e31ee2b533 100644 (file)
@@ -81,6 +81,7 @@ _GLOBAL(relocate)
 
 6:     blr
 
+.balign 8
 p_dyn: .llong  __dynamic_start - 0b
 p_rela:        .llong  __rela_dyn_start - 0b
 p_st:  .llong  _stext - 0b
index 673515bc413559a24600f988baeb91e8a6b9b1d4..aa1b2b9088a77b061758f48f4a434185e760d6ca 100644 (file)
@@ -18,7 +18,7 @@
 #define SH_CACHE_ASSOC         8
 
 #if defined(CONFIG_CPU_SUBTYPE_SH7619)
-#define CCR            0xffffffec
+#define SH_CCR         0xffffffec
 
 #define CCR_CACHE_CE   0x01    /* Cache enable */
 #define CCR_CACHE_WT   0x02    /* CCR[bit1=1,bit2=1] */
index defb0baa5a0682b95222bccc60440ee760bd6680..b27ce92cb600bc17c47dacda12d0e3dc9f8d06a1 100644 (file)
@@ -17,8 +17,8 @@
 #define SH_CACHE_COMBINED      4
 #define SH_CACHE_ASSOC         8
 
-#define CCR            0xfffc1000 /* CCR1 */
-#define CCR2           0xfffc1004
+#define SH_CCR         0xfffc1000 /* CCR1 */
+#define SH_CCR2                0xfffc1004
 
 /*
  * Most of the SH-2A CCR1 definitions resemble the SH-4 ones. All others not
index bee2d81c56bfbde6392866f1d4bb05b58d1aa4b8..29700fd88c75d30be16340966c3013f1c880f076 100644 (file)
@@ -17,7 +17,7 @@
 #define SH_CACHE_COMBINED      4
 #define SH_CACHE_ASSOC         8
 
-#define CCR            0xffffffec      /* Address of Cache Control Register */
+#define SH_CCR         0xffffffec      /* Address of Cache Control Register */
 
 #define CCR_CACHE_CE   0x01    /* Cache Enable */
 #define CCR_CACHE_WT   0x02    /* Write-Through (for P0,U0,P3) (else writeback) */
index 7bfb9e8b069c22d789318085e866440502f0c755..92c4cd119b662f99dc4ed9d0f5d1b54dea3001a9 100644 (file)
@@ -17,7 +17,7 @@
 #define SH_CACHE_COMBINED      4
 #define SH_CACHE_ASSOC         8
 
-#define CCR            0xff00001c      /* Address of Cache Control Register */
+#define SH_CCR         0xff00001c      /* Address of Cache Control Register */
 #define CCR_CACHE_OCE  0x0001  /* Operand Cache Enable */
 #define CCR_CACHE_WT   0x0002  /* Write-Through (for P0,U0,P3) (else writeback)*/
 #define CCR_CACHE_CB   0x0004  /* Copy-Back (for P1) (else writethrough) */
index ecf83cd158dc38fefb91f2fa83cdb452cfa5c6bf..0d7360d549c17858c59f93b6dc0d279f4222a640 100644 (file)
@@ -112,7 +112,7 @@ static void cache_init(void)
        unsigned long ccr, flags;
 
        jump_to_uncached();
-       ccr = __raw_readl(CCR);
+       ccr = __raw_readl(SH_CCR);
 
        /*
         * At this point we don't know whether the cache is enabled or not - a
@@ -189,7 +189,7 @@ static void cache_init(void)
 
        l2_cache_init();
 
-       __raw_writel(flags, CCR);
+       __raw_writel(flags, SH_CCR);
        back_to_cached();
 }
 #else
index 115725198038da862a19ef634a0ec38adf23b76f..777e50f33c00f3f96fe4b34f7723132507b13935 100644 (file)
@@ -36,7 +36,7 @@ static int cache_seq_show(struct seq_file *file, void *iter)
         */
        jump_to_uncached();
 
-       ccr = __raw_readl(CCR);
+       ccr = __raw_readl(SH_CCR);
        if ((ccr & CCR_CACHE_ENABLE) == 0) {
                back_to_cached();
 
index defcf719f2e84eb5cae2b6be57fed58227eeb6ec..a74259f2f9815e93d499634b67d9756472662e76 100644 (file)
@@ -63,9 +63,9 @@ static void sh2__flush_invalidate_region(void *start, int size)
        local_irq_save(flags);
        jump_to_uncached();
 
-       ccr = __raw_readl(CCR);
+       ccr = __raw_readl(SH_CCR);
        ccr |= CCR_CACHE_INVALIDATE;
-       __raw_writel(ccr, CCR);
+       __raw_writel(ccr, SH_CCR);
 
        back_to_cached();
        local_irq_restore(flags);
index 949e2d3138a0ca24ffe07e6228a68eeeb72321e9..ee87d081259b86950d527151618a3e52814ade5a 100644 (file)
@@ -134,7 +134,8 @@ static void sh2a__flush_invalidate_region(void *start, int size)
 
        /* If there are too many pages then just blow the cache */
        if (((end - begin) >> PAGE_SHIFT) >= MAX_OCACHE_PAGES) {
-               __raw_writel(__raw_readl(CCR) | CCR_OCACHE_INVALIDATE, CCR);
+               __raw_writel(__raw_readl(SH_CCR) | CCR_OCACHE_INVALIDATE,
+                            SH_CCR);
        } else {
                for (v = begin; v < end; v += L1_CACHE_BYTES)
                        sh2a_invalidate_line(CACHE_OC_ADDRESS_ARRAY, v);
@@ -167,7 +168,8 @@ static void sh2a_flush_icache_range(void *args)
        /* I-Cache invalidate */
        /* If there are too many pages then just blow the cache */
        if (((end - start) >> PAGE_SHIFT) >= MAX_ICACHE_PAGES) {
-               __raw_writel(__raw_readl(CCR) | CCR_ICACHE_INVALIDATE, CCR);
+               __raw_writel(__raw_readl(SH_CCR) | CCR_ICACHE_INVALIDATE,
+                            SH_CCR);
        } else {
                for (v = start; v < end; v += L1_CACHE_BYTES)
                        sh2a_invalidate_line(CACHE_IC_ADDRESS_ARRAY, v);
index 0e529285b28d42c688fb0901349ba70218385d51..51d8f7f31d1d797392ab2813f4fb3d4d33480598 100644 (file)
@@ -133,9 +133,9 @@ static void flush_icache_all(void)
        jump_to_uncached();
 
        /* Flush I-cache */
-       ccr = __raw_readl(CCR);
+       ccr = __raw_readl(SH_CCR);
        ccr |= CCR_CACHE_ICI;
-       __raw_writel(ccr, CCR);
+       __raw_writel(ccr, SH_CCR);
 
        /*
         * back_to_cached() will take care of the barrier for us, don't add
index c0adbee97b5f29a2cdb33f6124c1d62ee821ebec..24c58b7dc02265c795000fa997ac96da59207175 100644 (file)
@@ -19,7 +19,7 @@ void __init shx3_cache_init(void)
 {
        unsigned int ccr;
 
-       ccr = __raw_readl(CCR);
+       ccr = __raw_readl(SH_CCR);
 
        /*
         * If we've got cache aliases, resolve them in hardware.
@@ -40,5 +40,5 @@ void __init shx3_cache_init(void)
        ccr |= CCR_CACHE_IBE;
 #endif
 
-       writel_uncached(ccr, CCR);
+       writel_uncached(ccr, SH_CCR);
 }
index 616966a96cba61d6a680d4bc57cf71150a04fb3e..097c2cdd117f53c543fb919ba6740210b1471a4a 100644 (file)
@@ -285,8 +285,8 @@ void __init cpu_cache_init(void)
 {
        unsigned int cache_disabled = 0;
 
-#ifdef CCR
-       cache_disabled = !(__raw_readl(CCR) & CCR_CACHE_ENABLE);
+#ifdef SH_CCR
+       cache_disabled = !(__raw_readl(SH_CCR) & CCR_CACHE_ENABLE);
 #endif
 
        compute_alias(&boot_cpu_data.icache);
index 3d6b9f81cc683e63fbdac6c3243ab7069087e0cd..acd86c850414e7f3adfffe860ffd22bd17e0d459 100644 (file)
@@ -134,6 +134,7 @@ extern void efi_setup_page_tables(void);
 extern void __init old_map_region(efi_memory_desc_t *md);
 extern void __init runtime_code_page_mkexec(void);
 extern void __init efi_runtime_mkexec(void);
+extern void __init efi_apply_memmap_quirks(void);
 
 struct efi_setup_data {
        u64 fw_vendor;
index 81ba27679f18ec6100bd167c3739608631a7c475..f36bd42d6f0c8b5fc5cd75dcf133b35a1f6bfe3b 100644 (file)
@@ -544,6 +544,10 @@ ENDPROC(early_idt_handlers)
        /* This is global to keep gas from relaxing the jumps */
 ENTRY(early_idt_handler)
        cld
+
+       cmpl $2,(%esp)          # X86_TRAP_NMI
+       je is_nmi               # Ignore NMI
+
        cmpl $2,%ss:early_recursion_flag
        je hlt_loop
        incl %ss:early_recursion_flag
@@ -594,8 +598,9 @@ ex_entry:
        pop %edx
        pop %ecx
        pop %eax
-       addl $8,%esp            /* drop vector number and error code */
        decl %ss:early_recursion_flag
+is_nmi:
+       addl $8,%esp            /* drop vector number and error code */
        iret
 ENDPROC(early_idt_handler)
 
index e1aabdb314c83740dd686eb4c70a6f40c312e258..a468c0a65c42e00df4e10afd9921d81a53dbba3d 100644 (file)
@@ -343,6 +343,9 @@ early_idt_handlers:
 ENTRY(early_idt_handler)
        cld
 
+       cmpl $2,(%rsp)          # X86_TRAP_NMI
+       je is_nmi               # Ignore NMI
+
        cmpl $2,early_recursion_flag(%rip)
        jz  1f
        incl early_recursion_flag(%rip)
@@ -405,8 +408,9 @@ ENTRY(early_idt_handler)
        popq %rdx
        popq %rcx
        popq %rax
-       addq $16,%rsp           # drop vector number and error code
        decl early_recursion_flag(%rip)
+is_nmi:
+       addq $16,%rsp           # drop vector number and error code
        INTERRUPT_RETURN
 ENDPROC(early_idt_handler)
 
index 06853e6703541f8349106e17330f86621fa984db..ce72964b2f469db1b3c626ecadc18c2998ef98a4 100644 (file)
@@ -1239,14 +1239,8 @@ void __init setup_arch(char **cmdline_p)
        register_refined_jiffies(CLOCK_TICK_RATE);
 
 #ifdef CONFIG_EFI
-       /* Once setup is done above, unmap the EFI memory map on
-        * mismatched firmware/kernel archtectures since there is no
-        * support for runtime services.
-        */
-       if (efi_enabled(EFI_BOOT) && !efi_is_native()) {
-               pr_info("efi: Setup done, disabling due to 32/64-bit mismatch\n");
-               efi_unmap_memmap();
-       }
+       if (efi_enabled(EFI_BOOT))
+               efi_apply_memmap_quirks();
 #endif
 }
 
index 6dea040cc3a1d794c60466fb9e78eaa552b8806e..a10c8c79216187d2faa5add449762710d51c759b 100644 (file)
@@ -1020,13 +1020,17 @@ static inline bool smap_violation(int error_code, struct pt_regs *regs)
  * This routine handles page faults.  It determines the address,
  * and the problem, and then passes it off to one of the appropriate
  * routines.
+ *
+ * This function must have noinline because both callers
+ * {,trace_}do_page_fault() have notrace on. Having this an actual function
+ * guarantees there's a function trace entry.
  */
-static void __kprobes
-__do_page_fault(struct pt_regs *regs, unsigned long error_code)
+static void __kprobes noinline
+__do_page_fault(struct pt_regs *regs, unsigned long error_code,
+               unsigned long address)
 {
        struct vm_area_struct *vma;
        struct task_struct *tsk;
-       unsigned long address;
        struct mm_struct *mm;
        int fault;
        unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
@@ -1034,9 +1038,6 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code)
        tsk = current;
        mm = tsk->mm;
 
-       /* Get the faulting address: */
-       address = read_cr2();
-
        /*
         * Detect and handle instructions that would cause a page fault for
         * both a tracked kernel page and a userspace page.
@@ -1248,32 +1249,50 @@ good_area:
        up_read(&mm->mmap_sem);
 }
 
-dotraplinkage void __kprobes
+dotraplinkage void __kprobes notrace
 do_page_fault(struct pt_regs *regs, unsigned long error_code)
 {
+       unsigned long address = read_cr2(); /* Get the faulting address */
        enum ctx_state prev_state;
 
+       /*
+        * We must have this function tagged with __kprobes, notrace and call
+        * read_cr2() before calling anything else. To avoid calling any kind
+        * of tracing machinery before we've observed the CR2 value.
+        *
+        * exception_{enter,exit}() contain all sorts of tracepoints.
+        */
+
        prev_state = exception_enter();
-       __do_page_fault(regs, error_code);
+       __do_page_fault(regs, error_code, address);
        exception_exit(prev_state);
 }
 
-static void trace_page_fault_entries(struct pt_regs *regs,
+#ifdef CONFIG_TRACING
+static void trace_page_fault_entries(unsigned long address, struct pt_regs *regs,
                                     unsigned long error_code)
 {
        if (user_mode(regs))
-               trace_page_fault_user(read_cr2(), regs, error_code);
+               trace_page_fault_user(address, regs, error_code);
        else
-               trace_page_fault_kernel(read_cr2(), regs, error_code);
+               trace_page_fault_kernel(address, regs, error_code);
 }
 
-dotraplinkage void __kprobes
+dotraplinkage void __kprobes notrace
 trace_do_page_fault(struct pt_regs *regs, unsigned long error_code)
 {
+       /*
+        * The exception_enter and tracepoint processing could
+        * trigger another page faults (user space callchain
+        * reading) and destroy the original cr2 value, so read
+        * the faulting address now.
+        */
+       unsigned long address = read_cr2();
        enum ctx_state prev_state;
 
        prev_state = exception_enter();
-       trace_page_fault_entries(regs, error_code);
-       __do_page_fault(regs, error_code);
+       trace_page_fault_entries(address, regs, error_code);
+       __do_page_fault(regs, error_code, address);
        exception_exit(prev_state);
 }
+#endif /* CONFIG_TRACING */
index 1a201ac7cef8a1f7f4ce5cff2a8f8dbc6b540c00..b97acecf3fd95667e2b67bba8f88bb80428480bb 100644 (file)
@@ -52,6 +52,7 @@
 #include <asm/tlbflush.h>
 #include <asm/x86_init.h>
 #include <asm/rtc.h>
+#include <asm/uv/uv.h>
 
 #define EFI_DEBUG
 
@@ -1210,3 +1211,22 @@ static int __init parse_efi_cmdline(char *str)
        return 0;
 }
 early_param("efi", parse_efi_cmdline);
+
+void __init efi_apply_memmap_quirks(void)
+{
+       /*
+        * Once setup is done earlier, unmap the EFI memory map on mismatched
+        * firmware/kernel architectures since there is no support for runtime
+        * services.
+        */
+       if (!efi_is_native()) {
+               pr_info("efi: Setup done, disabling due to 32/64-bit mismatch\n");
+               efi_unmap_memmap();
+       }
+
+       /*
+        * UV doesn't support the new EFI pagetable mapping yet.
+        */
+       if (is_uv_system())
+               set_bit(EFI_OLD_MEMMAP, &x86_efi_facility);
+}
index c68613bb4c79b718b87dd5dc90b3a8cdd307c3fc..dbf4502b1d6779eadd1573f28f75def4e26a031a 100644 (file)
@@ -65,7 +65,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
         * be resued after dying flag is set
         */
        if (q->mq_ops) {
-               blk_mq_insert_request(q, rq, at_head, true);
+               blk_mq_insert_request(rq, at_head, true, false);
                return;
        }
 
index 66e2b697f5db1299b20d8018232fbcd191b279b8..f598f794c3c679e8c7904bb88284c96434123d66 100644 (file)
@@ -137,7 +137,7 @@ static void mq_flush_run(struct work_struct *work)
        rq = container_of(work, struct request, mq_flush_work);
 
        memset(&rq->csd, 0, sizeof(rq->csd));
-       blk_mq_run_request(rq, true, false);
+       blk_mq_insert_request(rq, false, true, false);
 }
 
 static bool blk_flush_queue_rq(struct request *rq)
@@ -411,7 +411,7 @@ void blk_insert_flush(struct request *rq)
        if ((policy & REQ_FSEQ_DATA) &&
            !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) {
                if (q->mq_ops) {
-                       blk_mq_run_request(rq, false, true);
+                       blk_mq_insert_request(rq, false, false, true);
                } else
                        list_add_tail(&rq->queuelist, &q->queue_head);
                return;
index 3146befb56aaac7b925428d0afee89c9e083094a..136ef8643bbade3dd2d5d84e35183c749bc00399 100644 (file)
@@ -11,7 +11,7 @@
 #include "blk-mq.h"
 
 static LIST_HEAD(blk_mq_cpu_notify_list);
-static DEFINE_SPINLOCK(blk_mq_cpu_notify_lock);
+static DEFINE_RAW_SPINLOCK(blk_mq_cpu_notify_lock);
 
 static int blk_mq_main_cpu_notify(struct notifier_block *self,
                                  unsigned long action, void *hcpu)
@@ -19,12 +19,12 @@ static int blk_mq_main_cpu_notify(struct notifier_block *self,
        unsigned int cpu = (unsigned long) hcpu;
        struct blk_mq_cpu_notifier *notify;
 
-       spin_lock(&blk_mq_cpu_notify_lock);
+       raw_spin_lock(&blk_mq_cpu_notify_lock);
 
        list_for_each_entry(notify, &blk_mq_cpu_notify_list, list)
                notify->notify(notify->data, action, cpu);
 
-       spin_unlock(&blk_mq_cpu_notify_lock);
+       raw_spin_unlock(&blk_mq_cpu_notify_lock);
        return NOTIFY_OK;
 }
 
@@ -32,16 +32,16 @@ void blk_mq_register_cpu_notifier(struct blk_mq_cpu_notifier *notifier)
 {
        BUG_ON(!notifier->notify);
 
-       spin_lock(&blk_mq_cpu_notify_lock);
+       raw_spin_lock(&blk_mq_cpu_notify_lock);
        list_add_tail(&notifier->list, &blk_mq_cpu_notify_list);
-       spin_unlock(&blk_mq_cpu_notify_lock);
+       raw_spin_unlock(&blk_mq_cpu_notify_lock);
 }
 
 void blk_mq_unregister_cpu_notifier(struct blk_mq_cpu_notifier *notifier)
 {
-       spin_lock(&blk_mq_cpu_notify_lock);
+       raw_spin_lock(&blk_mq_cpu_notify_lock);
        list_del(&notifier->list);
-       spin_unlock(&blk_mq_cpu_notify_lock);
+       raw_spin_unlock(&blk_mq_cpu_notify_lock);
 }
 
 void blk_mq_init_cpu_notifier(struct blk_mq_cpu_notifier *notifier,
index 1fa9dd153fde22a976483ef3b5da3ec39f0f458e..883f7208901585ab1ee02891a3ad5b67599a4243 100644 (file)
@@ -73,8 +73,8 @@ static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,
                set_bit(ctx->index_hw, hctx->ctx_map);
 }
 
-static struct request *blk_mq_alloc_rq(struct blk_mq_hw_ctx *hctx, gfp_t gfp,
-                                      bool reserved)
+static struct request *__blk_mq_alloc_request(struct blk_mq_hw_ctx *hctx,
+                                             gfp_t gfp, bool reserved)
 {
        struct request *rq;
        unsigned int tag;
@@ -193,12 +193,6 @@ static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
        ctx->rq_dispatched[rw_is_sync(rw_flags)]++;
 }
 
-static struct request *__blk_mq_alloc_request(struct blk_mq_hw_ctx *hctx,
-                                             gfp_t gfp, bool reserved)
-{
-       return blk_mq_alloc_rq(hctx, gfp, reserved);
-}
-
 static struct request *blk_mq_alloc_request_pinned(struct request_queue *q,
                                                   int rw, gfp_t gfp,
                                                   bool reserved)
@@ -289,38 +283,10 @@ void blk_mq_free_request(struct request *rq)
        __blk_mq_free_request(hctx, ctx, rq);
 }
 
-static void blk_mq_bio_endio(struct request *rq, struct bio *bio, int error)
-{
-       if (error)
-               clear_bit(BIO_UPTODATE, &bio->bi_flags);
-       else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
-               error = -EIO;
-
-       if (unlikely(rq->cmd_flags & REQ_QUIET))
-               set_bit(BIO_QUIET, &bio->bi_flags);
-
-       /* don't actually finish bio if it's part of flush sequence */
-       if (!(rq->cmd_flags & REQ_FLUSH_SEQ))
-               bio_endio(bio, error);
-}
-
-void blk_mq_end_io(struct request *rq, int error)
+bool blk_mq_end_io_partial(struct request *rq, int error, unsigned int nr_bytes)
 {
-       struct bio *bio = rq->bio;
-       unsigned int bytes = 0;
-
-       trace_block_rq_complete(rq->q, rq);
-
-       while (bio) {
-               struct bio *next = bio->bi_next;
-
-               bio->bi_next = NULL;
-               bytes += bio->bi_iter.bi_size;
-               blk_mq_bio_endio(rq, bio, error);
-               bio = next;
-       }
-
-       blk_account_io_completion(rq, bytes);
+       if (blk_update_request(rq, error, blk_rq_bytes(rq)))
+               return true;
 
        blk_account_io_done(rq);
 
@@ -328,8 +294,9 @@ void blk_mq_end_io(struct request *rq, int error)
                rq->end_io(rq, error);
        else
                blk_mq_free_request(rq);
+       return false;
 }
-EXPORT_SYMBOL(blk_mq_end_io);
+EXPORT_SYMBOL(blk_mq_end_io_partial);
 
 static void __blk_mq_complete_request_remote(void *data)
 {
@@ -730,60 +697,27 @@ static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx,
        blk_mq_add_timer(rq);
 }
 
-void blk_mq_insert_request(struct request_queue *q, struct request *rq,
-                          bool at_head, bool run_queue)
+void blk_mq_insert_request(struct request *rq, bool at_head, bool run_queue,
+               bool async)
 {
+       struct request_queue *q = rq->q;
        struct blk_mq_hw_ctx *hctx;
-       struct blk_mq_ctx *ctx, *current_ctx;
+       struct blk_mq_ctx *ctx = rq->mq_ctx, *current_ctx;
+
+       current_ctx = blk_mq_get_ctx(q);
+       if (!cpu_online(ctx->cpu))
+               rq->mq_ctx = ctx = current_ctx;
 
-       ctx = rq->mq_ctx;
        hctx = q->mq_ops->map_queue(q, ctx->cpu);
 
-       if (rq->cmd_flags & (REQ_FLUSH | REQ_FUA)) {
+       if (rq->cmd_flags & (REQ_FLUSH | REQ_FUA) &&
+           !(rq->cmd_flags & (REQ_FLUSH_SEQ))) {
                blk_insert_flush(rq);
        } else {
-               current_ctx = blk_mq_get_ctx(q);
-
-               if (!cpu_online(ctx->cpu)) {
-                       ctx = current_ctx;
-                       hctx = q->mq_ops->map_queue(q, ctx->cpu);
-                       rq->mq_ctx = ctx;
-               }
                spin_lock(&ctx->lock);
                __blk_mq_insert_request(hctx, rq, at_head);
                spin_unlock(&ctx->lock);
-
-               blk_mq_put_ctx(current_ctx);
-       }
-
-       if (run_queue)
-               __blk_mq_run_hw_queue(hctx);
-}
-EXPORT_SYMBOL(blk_mq_insert_request);
-
-/*
- * This is a special version of blk_mq_insert_request to bypass FLUSH request
- * check. Should only be used internally.
- */
-void blk_mq_run_request(struct request *rq, bool run_queue, bool async)
-{
-       struct request_queue *q = rq->q;
-       struct blk_mq_hw_ctx *hctx;
-       struct blk_mq_ctx *ctx, *current_ctx;
-
-       current_ctx = blk_mq_get_ctx(q);
-
-       ctx = rq->mq_ctx;
-       if (!cpu_online(ctx->cpu)) {
-               ctx = current_ctx;
-               rq->mq_ctx = ctx;
        }
-       hctx = q->mq_ops->map_queue(q, ctx->cpu);
-
-       /* ctx->cpu might be offline */
-       spin_lock(&ctx->lock);
-       __blk_mq_insert_request(hctx, rq, false);
-       spin_unlock(&ctx->lock);
 
        blk_mq_put_ctx(current_ctx);
 
@@ -926,6 +860,8 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
        ctx = blk_mq_get_ctx(q);
        hctx = q->mq_ops->map_queue(q, ctx->cpu);
 
+       if (is_sync)
+               rw |= REQ_SYNC;
        trace_block_getrq(q, bio, rw);
        rq = __blk_mq_alloc_request(hctx, GFP_ATOMIC, false);
        if (likely(rq))
index ed0035cd458ee8f78691a8f95415a8665707ca11..72beba1f9d55efa827e8667535a7f2956dd2b924 100644 (file)
@@ -23,7 +23,6 @@ struct blk_mq_ctx {
 };
 
 void __blk_mq_complete_request(struct request *rq);
-void blk_mq_run_request(struct request *rq, bool run_queue, bool async);
 void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
 void blk_mq_init_flush(struct request_queue *q);
 void blk_mq_drain_queue(struct request_queue *q);
index 959d41acc108a847308773f754fdb4fff3b1fd26..d7d32c28829b17834507bf8683f2c2a5c77d0a0d 100644 (file)
@@ -67,6 +67,8 @@ enum ec_command {
 #define ACPI_EC_DELAY          500     /* Wait 500ms max. during EC ops */
 #define ACPI_EC_UDELAY_GLK     1000    /* Wait 1ms max. to get global lock */
 #define ACPI_EC_MSI_UDELAY     550     /* Wait 550us for MSI EC */
+#define ACPI_EC_CLEAR_MAX      100     /* Maximum number of events to query
+                                        * when trying to clear the EC */
 
 enum {
        EC_FLAGS_QUERY_PENDING,         /* Query is pending */
@@ -116,6 +118,7 @@ EXPORT_SYMBOL(first_ec);
 static int EC_FLAGS_MSI; /* Out-of-spec MSI controller */
 static int EC_FLAGS_VALIDATE_ECDT; /* ASUStec ECDTs need to be validated */
 static int EC_FLAGS_SKIP_DSDT_SCAN; /* Not all BIOS survive early DSDT scan */
+static int EC_FLAGS_CLEAR_ON_RESUME; /* Needs acpi_ec_clear() on boot/resume */
 
 /* --------------------------------------------------------------------------
                              Transaction Management
@@ -440,6 +443,29 @@ acpi_handle ec_get_handle(void)
 
 EXPORT_SYMBOL(ec_get_handle);
 
+static int acpi_ec_query_unlocked(struct acpi_ec *ec, u8 *data);
+
+/*
+ * Clears stale _Q events that might have accumulated in the EC.
+ * Run with locked ec mutex.
+ */
+static void acpi_ec_clear(struct acpi_ec *ec)
+{
+       int i, status;
+       u8 value = 0;
+
+       for (i = 0; i < ACPI_EC_CLEAR_MAX; i++) {
+               status = acpi_ec_query_unlocked(ec, &value);
+               if (status || !value)
+                       break;
+       }
+
+       if (unlikely(i == ACPI_EC_CLEAR_MAX))
+               pr_warn("Warning: Maximum of %d stale EC events cleared\n", i);
+       else
+               pr_info("%d stale EC events cleared\n", i);
+}
+
 void acpi_ec_block_transactions(void)
 {
        struct acpi_ec *ec = first_ec;
@@ -463,6 +489,10 @@ void acpi_ec_unblock_transactions(void)
        mutex_lock(&ec->mutex);
        /* Allow transactions to be carried out again */
        clear_bit(EC_FLAGS_BLOCKED, &ec->flags);
+
+       if (EC_FLAGS_CLEAR_ON_RESUME)
+               acpi_ec_clear(ec);
+
        mutex_unlock(&ec->mutex);
 }
 
@@ -821,6 +851,13 @@ static int acpi_ec_add(struct acpi_device *device)
 
        /* EC is fully operational, allow queries */
        clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags);
+
+       /* Clear stale _Q events if hardware might require that */
+       if (EC_FLAGS_CLEAR_ON_RESUME) {
+               mutex_lock(&ec->mutex);
+               acpi_ec_clear(ec);
+               mutex_unlock(&ec->mutex);
+       }
        return ret;
 }
 
@@ -922,6 +959,30 @@ static int ec_enlarge_storm_threshold(const struct dmi_system_id *id)
        return 0;
 }
 
+/*
+ * On some hardware it is necessary to clear events accumulated by the EC during
+ * sleep. These ECs stop reporting GPEs until they are manually polled, if too
+ * many events are accumulated. (e.g. Samsung Series 5/9 notebooks)
+ *
+ * https://bugzilla.kernel.org/show_bug.cgi?id=44161
+ *
+ * Ideally, the EC should also be instructed NOT to accumulate events during
+ * sleep (which Windows seems to do somehow), but the interface to control this
+ * behaviour is not known at this time.
+ *
+ * Models known to be affected are Samsung 530Uxx/535Uxx/540Uxx/550Pxx/900Xxx,
+ * however it is very likely that other Samsung models are affected.
+ *
+ * On systems which don't accumulate _Q events during sleep, this extra check
+ * should be harmless.
+ */
+static int ec_clear_on_resume(const struct dmi_system_id *id)
+{
+       pr_debug("Detected system needing EC poll on resume.\n");
+       EC_FLAGS_CLEAR_ON_RESUME = 1;
+       return 0;
+}
+
 static struct dmi_system_id ec_dmi_table[] __initdata = {
        {
        ec_skip_dsdt_scan, "Compal JFL92", {
@@ -965,6 +1026,9 @@ static struct dmi_system_id ec_dmi_table[] __initdata = {
        ec_validate_ecdt, "ASUS hardware", {
        DMI_MATCH(DMI_SYS_VENDOR, "ASUSTek Computer Inc."),
        DMI_MATCH(DMI_PRODUCT_NAME, "L4R"),}, NULL},
+       {
+       ec_clear_on_resume, "Samsung hardware", {
+       DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD.")}, NULL},
        {},
 };
 
index b7201fc6f1e19c06c798889900c1e9c689494c8d..0bdacc5e26a3b9411b82a5fe4772c2c9f9b7d490 100644 (file)
@@ -77,18 +77,24 @@ bool acpi_dev_resource_memory(struct acpi_resource *ares, struct resource *res)
        switch (ares->type) {
        case ACPI_RESOURCE_TYPE_MEMORY24:
                memory24 = &ares->data.memory24;
+               if (!memory24->address_length)
+                       return false;
                acpi_dev_get_memresource(res, memory24->minimum,
                                         memory24->address_length,
                                         memory24->write_protect);
                break;
        case ACPI_RESOURCE_TYPE_MEMORY32:
                memory32 = &ares->data.memory32;
+               if (!memory32->address_length)
+                       return false;
                acpi_dev_get_memresource(res, memory32->minimum,
                                         memory32->address_length,
                                         memory32->write_protect);
                break;
        case ACPI_RESOURCE_TYPE_FIXED_MEMORY32:
                fixed_memory32 = &ares->data.fixed_memory32;
+               if (!fixed_memory32->address_length)
+                       return false;
                acpi_dev_get_memresource(res, fixed_memory32->address,
                                         fixed_memory32->address_length,
                                         fixed_memory32->write_protect);
@@ -144,12 +150,16 @@ bool acpi_dev_resource_io(struct acpi_resource *ares, struct resource *res)
        switch (ares->type) {
        case ACPI_RESOURCE_TYPE_IO:
                io = &ares->data.io;
+               if (!io->address_length)
+                       return false;
                acpi_dev_get_ioresource(res, io->minimum,
                                        io->address_length,
                                        io->io_decode);
                break;
        case ACPI_RESOURCE_TYPE_FIXED_IO:
                fixed_io = &ares->data.fixed_io;
+               if (!fixed_io->address_length)
+                       return false;
                acpi_dev_get_ioresource(res, fixed_io->address,
                                        fixed_io->address_length,
                                        ACPI_DECODE_10);
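The four new checks above all reject zero-length windows before acpi_dev_get_memresource()/acpi_dev_get_ioresource() turn them into resources whose end would underflow below their start. A self-contained sketch of that guard (simplified types, hypothetical names):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct mem_resource {           /* simplified stand-in for the ACPI data */
            uint32_t minimum;
            uint32_t address_length;
    };

    /* Reject zero-length windows: with length 0, the end below would wrap
     * to minimum - 1, i.e. an end that precedes the start. */
    static bool resource_from_mem(const struct mem_resource *m,
                                  uint32_t *start, uint32_t *end)
    {
            if (!m->address_length)
                    return false;
            *start = m->minimum;
            *end = m->minimum + m->address_length - 1;
            return true;
    }

    int main(void)
    {
            struct mem_resource bogus = { .minimum = 0x1000, .address_length = 0 };
            uint32_t s, e;

            if (!resource_from_mem(&bogus, &s, &e))
                    puts("zero-length resource skipped");
            return 0;
    }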
index 1a3dbd1b196ecb121b1ee9d34388ecd00c307b71..65d3f1b5966ce9d567dfdd3ed0806d4624d203b1 100644 (file)
@@ -4175,6 +4175,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
 
        /* Seagate Momentus SpinPoint M8 seems to have FPDMA_AA issues */
        { "ST1000LM024 HN-M101MBB", "2AR10001", ATA_HORKAGE_BROKEN_FPDMA_AA },
+       { "ST1000LM024 HN-M101MBB", "2BA30001", ATA_HORKAGE_BROKEN_FPDMA_AA },
 
        /* Blacklist entries taken from Silicon Image 3124/3132
           Windows driver .inf file - also several Linux problem reports */
@@ -4225,6 +4226,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
        /* devices that don't properly handle queued TRIM commands */
        { "Micron_M500*",               NULL,   ATA_HORKAGE_NO_NCQ_TRIM, },
        { "Crucial_CT???M500SSD1",      NULL,   ATA_HORKAGE_NO_NCQ_TRIM, },
+       { "Crucial_CT???M500SSD3",      NULL,   ATA_HORKAGE_NO_NCQ_TRIM, },
 
        /*
         * Some WD SATA-I drives spin up and down erratically when the link
index 8184451b57c04999bd23c286080e14ca7f069031..422b7d84f686b90147f97565210687d343a3d9eb 100644 (file)
@@ -874,7 +874,7 @@ bio_pageinc(struct bio *bio)
                /* Non-zero page count for non-head members of
                 * compound pages is no longer allowed by the kernel.
                 */
-               page = compound_trans_head(bv.bv_page);
+               page = compound_head(bv.bv_page);
                atomic_inc(&page->_count);
        }
 }
@@ -887,7 +887,7 @@ bio_pagedec(struct bio *bio)
        struct bvec_iter iter;
 
        bio_for_each_segment(bv, bio, iter) {
-               page = compound_trans_head(bv.bv_page);
+               page = compound_head(bv.bv_page);
                atomic_dec(&page->_count);
        }
 }
index b52e9a6d6aad6d602bf3a9d6f73c9b4c277c7e17..54174cb32febe10a45eddd8e837d8267bfe594a1 100644 (file)
@@ -53,7 +53,7 @@
 #define MTIP_FTL_REBUILD_TIMEOUT_MS    2400000
 
 /* unaligned IO handling */
-#define MTIP_MAX_UNALIGNED_SLOTS       8
+#define MTIP_MAX_UNALIGNED_SLOTS       2
 
 /* Macro to extract the tag bit number from a tag value. */
 #define MTIP_TAG_BIT(tag)      (tag & 0x1F)
index 011e55d820b1811a3379a65a4ef754fcc785f3ef..51c557cfd92b37cf6b7aecdfa50d212fdacc05ee 100644 (file)
@@ -612,6 +612,8 @@ static ssize_t disksize_store(struct device *dev,
 
        disksize = PAGE_ALIGN(disksize);
        meta = zram_meta_alloc(disksize);
+       if (!meta)
+               return -ENOMEM;
        down_write(&zram->init_lock);
        if (zram->init_done) {
                up_write(&zram->init_lock);
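The zram fix is a shape worth noting: validate the allocation before taking init_lock, so no error path ever needs to unlock. A compilable analog (a pthread mutex standing in for the rwsem, names invented):

    #include <pthread.h>
    #include <stdlib.h>

    static pthread_mutex_t init_lock = PTHREAD_MUTEX_INITIALIZER;
    static void *meta;

    /* Mirrors the fix: fail fast on allocation *before* the lock is
     * taken, so returning the -ENOMEM analog needs no unlock. */
    static int set_disksize(size_t size)
    {
            void *m = malloc(size);

            if (!m)
                    return -1;

            pthread_mutex_lock(&init_lock);
            meta = m;
            pthread_mutex_unlock(&init_lock);
            return 0;
    }

    int main(void)
    {
            return set_disksize(4096);
    }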
index bd313f7816a8d9461deba6889b99415b87010467..c1af80bcdf2082c8d0d7b32e28eb9122c59ac53f 100644 (file)
@@ -242,7 +242,7 @@ of_at91_clk_master_setup(struct device_node *np, struct at91_pmc *pmc,
 
        irq = irq_of_parse_and_map(np, 0);
        if (!irq)
-               return;
+               goto out_free_characteristics;
 
        clk = at91_clk_register_master(pmc, irq, name, num_parents,
                                       parent_names, layout,
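The at91 fix restores the goto-unwind idiom: once the characteristics structure has been allocated, a bare return on a later failure leaks it, so every failure must funnel through the cleanup label. A small stand-alone illustration (names illustrative, not the driver's):

    #include <stdlib.h>

    /* Sketch of the idiom: after an allocation, later failures jump to a
     * label that frees it instead of returning directly. */
    static char *setup_master(int irq)
    {
            char *characteristics = malloc(64);

            if (!characteristics)
                    return NULL;

            if (!irq)                       /* irq_of_parse_and_map() failed */
                    goto out_free_characteristics;

            return characteristics;         /* ownership passes to the caller */

    out_free_characteristics:
            free(characteristics);
            return NULL;
    }

    int main(void)
    {
            char *m = setup_master(0);      /* simulate the failure path */

            free(m);                        /* free(NULL) is a no-op */
            return 0;
    }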
index 6a934a5296bd4ca2867146dc06272ea1003e238e..05e04ce0f1488f7301ad5153f687860bf872f8bc 100644 (file)
@@ -494,6 +494,9 @@ static const struct file_operations nomadik_src_clk_debugfs_ops = {
 
 static int __init nomadik_src_clk_init_debugfs(void)
 {
+       /* Vital for multiplatform */
+       if (!src_base)
+               return -ENODEV;
        src_pcksr0_boot = readl(src_base + SRC_PCKSR0);
        src_pcksr1_boot = readl(src_base + SRC_PCKSR1);
        debugfs_create_file("nomadik-src-clk", S_IFREG | S_IRUGO,
index 5517944495d893cc3c8dd60569b58e67e9289765..c42e608af6bbe0980717a74e69a143cdc742f1b1 100644 (file)
@@ -2226,24 +2226,25 @@ EXPORT_SYMBOL_GPL(devm_clk_unregister);
  */
 int __clk_get(struct clk *clk)
 {
-       if (clk && !try_module_get(clk->owner))
-               return 0;
+       if (clk) {
+               if (!try_module_get(clk->owner))
+                       return 0;
 
-       kref_get(&clk->ref);
+               kref_get(&clk->ref);
+       }
        return 1;
 }
 
 void __clk_put(struct clk *clk)
 {
-       if (WARN_ON_ONCE(IS_ERR(clk)))
+       if (!clk || WARN_ON_ONCE(IS_ERR(clk)))
                return;
 
        clk_prepare_lock();
        kref_put(&clk->ref, __clk_release);
        clk_prepare_unlock();
 
-       if (clk)
-               module_put(clk->owner);
+       module_put(clk->owner);
 }
 
 /***        clk rate change notifiers        ***/
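The reworked __clk_get()/__clk_put() make both ends NULL-tolerant, so callers holding an optional clock need no special-casing. A toy model of that contract (a plain counter instead of kref, not the real API):

    #include <stddef.h>
    #include <stdio.h>

    struct clk { int refs; };

    /* Like the fixed __clk_get(): NULL is accepted and still succeeds. */
    static int clk_get_ref(struct clk *clk)
    {
            if (clk)
                    clk->refs++;
            return 1;
    }

    /* Like the fixed __clk_put(): NULL is a safe no-op. */
    static void clk_put_ref(struct clk *clk)
    {
            if (!clk)
                    return;
            clk->refs--;
    }

    int main(void)
    {
            struct clk c = { .refs = 1 };

            clk_get_ref(&c);
            clk_put_ref(&c);
            clk_get_ref(NULL);      /* both are safe no-ops on NULL now */
            clk_put_ref(NULL);
            printf("refs = %d\n", c.refs);
            return 0;
    }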
index 17a598398a53df461e110ca5c16d29e7bc92e6f5..86f1e362eafb8e29c02fae393620d39d32278529 100644 (file)
@@ -179,6 +179,7 @@ static struct clk *clk_register_psc(struct device *dev,
 
        init.name = name;
        init.ops = &clk_psc_ops;
+       init.flags = 0;
        init.parent_names = (parent_name ? &parent_name : NULL);
        init.num_parents = (parent_name ? 1 : 0);
 
index 81a202d12a7ad89ab56909fce6782baa7fb94ec2..bef198a83863aaa79e21ca1c4e1401238cae31b7 100644 (file)
@@ -141,13 +141,6 @@ static const struct coreclk_soc_desc a370_coreclks = {
        .num_ratios = ARRAY_SIZE(a370_coreclk_ratios),
 };
 
-static void __init a370_coreclk_init(struct device_node *np)
-{
-       mvebu_coreclk_setup(np, &a370_coreclks);
-}
-CLK_OF_DECLARE(a370_core_clk, "marvell,armada-370-core-clock",
-              a370_coreclk_init);
-
 /*
  * Clock Gating Control
  */
@@ -168,9 +161,15 @@ static const struct clk_gating_soc_desc a370_gating_desc[] __initconst = {
        { }
 };
 
-static void __init a370_clk_gating_init(struct device_node *np)
+static void __init a370_clk_init(struct device_node *np)
 {
-       mvebu_clk_gating_setup(np, a370_gating_desc);
+       struct device_node *cgnp =
+               of_find_compatible_node(NULL, NULL, "marvell,armada-370-gating-clock");
+
+       mvebu_coreclk_setup(np, &a370_coreclks);
+
+       if (cgnp)
+               mvebu_clk_gating_setup(cgnp, a370_gating_desc);
 }
-CLK_OF_DECLARE(a370_clk_gating, "marvell,armada-370-gating-clock",
-              a370_clk_gating_init);
+CLK_OF_DECLARE(a370_clk, "marvell,armada-370-core-clock", a370_clk_init);
+
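This mvebu rework (repeated below for Armada XP, Dove and Kirkwood) collapses two CLK_OF_DECLARE entries into one init function that looks up its companion gating node itself, removing any dependence on the order the nodes are probed. A toy model of the look-up-your-companion pattern (flattened "tree", hypothetical helpers):

    #include <stdio.h>
    #include <string.h>

    struct node { const char *compatible; };

    static struct node tree[] = {
            { "marvell,armada-370-core-clock" },
            { "marvell,armada-370-gating-clock" },
    };

    /* Stand-in for of_find_compatible_node(). */
    static struct node *find_compatible(const char *compat)
    {
            for (size_t i = 0; i < sizeof(tree) / sizeof(tree[0]); i++)
                    if (!strcmp(tree[i].compatible, compat))
                            return &tree[i];
            return NULL;
    }

    static void clk_init(struct node *np)
    {
            struct node *cgnp = find_compatible("marvell,armada-370-gating-clock");

            printf("core clocks from %s\n", np->compatible);
            if (cgnp)                       /* gating block is optional */
                    printf("gates from %s\n", cgnp->compatible);
    }

    int main(void)
    {
            clk_init(&tree[0]);
            return 0;
    }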
index 9922c4475aa8b3f7d01c2881c8a757b94f9027af..b3094315a3c0faa89926dcf7442d5034f39b4f15 100644 (file)
@@ -158,13 +158,6 @@ static const struct coreclk_soc_desc axp_coreclks = {
        .num_ratios = ARRAY_SIZE(axp_coreclk_ratios),
 };
 
-static void __init axp_coreclk_init(struct device_node *np)
-{
-       mvebu_coreclk_setup(np, &axp_coreclks);
-}
-CLK_OF_DECLARE(axp_core_clk, "marvell,armada-xp-core-clock",
-              axp_coreclk_init);
-
 /*
  * Clock Gating Control
  */
@@ -202,9 +195,14 @@ static const struct clk_gating_soc_desc axp_gating_desc[] __initconst = {
        { }
 };
 
-static void __init axp_clk_gating_init(struct device_node *np)
+static void __init axp_clk_init(struct device_node *np)
 {
-       mvebu_clk_gating_setup(np, axp_gating_desc);
+       struct device_node *cgnp =
+               of_find_compatible_node(NULL, NULL, "marvell,armada-xp-gating-clock");
+
+       mvebu_coreclk_setup(np, &axp_coreclks);
+
+       if (cgnp)
+               mvebu_clk_gating_setup(cgnp, axp_gating_desc);
 }
-CLK_OF_DECLARE(axp_clk_gating, "marvell,armada-xp-gating-clock",
-              axp_clk_gating_init);
+CLK_OF_DECLARE(axp_clk, "marvell,armada-xp-core-clock", axp_clk_init);
index 38aee1e3f242b237079aaeb775aa2fd8dae89453..b8c2424ac9261dea5c36553b09f5262bba359299 100644 (file)
@@ -154,12 +154,6 @@ static const struct coreclk_soc_desc dove_coreclks = {
        .num_ratios = ARRAY_SIZE(dove_coreclk_ratios),
 };
 
-static void __init dove_coreclk_init(struct device_node *np)
-{
-       mvebu_coreclk_setup(np, &dove_coreclks);
-}
-CLK_OF_DECLARE(dove_core_clk, "marvell,dove-core-clock", dove_coreclk_init);
-
 /*
  * Clock Gating Control
  */
@@ -186,9 +180,14 @@ static const struct clk_gating_soc_desc dove_gating_desc[] __initconst = {
        { }
 };
 
-static void __init dove_clk_gating_init(struct device_node *np)
+static void __init dove_clk_init(struct device_node *np)
 {
-       mvebu_clk_gating_setup(np, dove_gating_desc);
+       struct device_node *cgnp =
+               of_find_compatible_node(NULL, NULL, "marvell,dove-gating-clock");
+
+       mvebu_coreclk_setup(np, &dove_coreclks);
+
+       if (cgnp)
+               mvebu_clk_gating_setup(cgnp, dove_gating_desc);
 }
-CLK_OF_DECLARE(dove_clk_gating, "marvell,dove-gating-clock",
-              dove_clk_gating_init);
+CLK_OF_DECLARE(dove_clk, "marvell,dove-core-clock", dove_clk_init);
index 2636a55f29f9403df92aec0c11e5a504fc7f9deb..ddb666a86500964201db0ad6d3a3724511302b59 100644 (file)
@@ -193,13 +193,6 @@ static const struct coreclk_soc_desc kirkwood_coreclks = {
        .num_ratios = ARRAY_SIZE(kirkwood_coreclk_ratios),
 };
 
-static void __init kirkwood_coreclk_init(struct device_node *np)
-{
-       mvebu_coreclk_setup(np, &kirkwood_coreclks);
-}
-CLK_OF_DECLARE(kirkwood_core_clk, "marvell,kirkwood-core-clock",
-              kirkwood_coreclk_init);
-
 static const struct coreclk_soc_desc mv88f6180_coreclks = {
        .get_tclk_freq = kirkwood_get_tclk_freq,
        .get_cpu_freq = mv88f6180_get_cpu_freq,
@@ -208,13 +201,6 @@ static const struct coreclk_soc_desc mv88f6180_coreclks = {
        .num_ratios = ARRAY_SIZE(kirkwood_coreclk_ratios),
 };
 
-static void __init mv88f6180_coreclk_init(struct device_node *np)
-{
-       mvebu_coreclk_setup(np, &mv88f6180_coreclks);
-}
-CLK_OF_DECLARE(mv88f6180_core_clk, "marvell,mv88f6180-core-clock",
-              mv88f6180_coreclk_init);
-
 /*
  * Clock Gating Control
  */
@@ -239,9 +225,21 @@ static const struct clk_gating_soc_desc kirkwood_gating_desc[] __initconst = {
        { }
 };
 
-static void __init kirkwood_clk_gating_init(struct device_node *np)
+static void __init kirkwood_clk_init(struct device_node *np)
 {
-       mvebu_clk_gating_setup(np, kirkwood_gating_desc);
+       struct device_node *cgnp =
+               of_find_compatible_node(NULL, NULL, "marvell,kirkwood-gating-clock");
+
+
+       if (of_device_is_compatible(np, "marvell,mv88f6180-core-clock"))
+               mvebu_coreclk_setup(np, &mv88f6180_coreclks);
+       else
+               mvebu_coreclk_setup(np, &kirkwood_coreclks);
+
+       if (cgnp)
+               mvebu_clk_gating_setup(cgnp, kirkwood_gating_desc);
 }
-CLK_OF_DECLARE(kirkwood_clk_gating, "marvell,kirkwood-gating-clock",
-              kirkwood_clk_gating_init);
+CLK_OF_DECLARE(kirkwood_clk, "marvell,kirkwood-core-clock",
+              kirkwood_clk_init);
+CLK_OF_DECLARE(mv88f6180_clk, "marvell,mv88f6180-core-clock",
+              kirkwood_clk_init);
index a59ec217a12437785ea1c689b28744bfcd19ac3d..99c27b1c625b8e3eaf514895a0a13ebf8b94fdef 100644 (file)
@@ -26,6 +26,8 @@ struct rcar_gen2_cpg {
        void __iomem *reg;
 };
 
+#define CPG_FRQCRB                     0x00000004
+#define CPG_FRQCRB_KICK                        BIT(31)
 #define CPG_SDCKCR                     0x00000074
 #define CPG_PLL0CR                     0x000000d8
 #define CPG_FRQCRC                     0x000000e0
@@ -45,6 +47,7 @@ struct rcar_gen2_cpg {
 struct cpg_z_clk {
        struct clk_hw hw;
        void __iomem *reg;
+       void __iomem *kick_reg;
 };
 
 #define to_z_clk(_hw)  container_of(_hw, struct cpg_z_clk, hw)
@@ -83,17 +86,45 @@ static int cpg_z_clk_set_rate(struct clk_hw *hw, unsigned long rate,
 {
        struct cpg_z_clk *zclk = to_z_clk(hw);
        unsigned int mult;
-       u32 val;
+       u32 val, kick;
+       unsigned int i;
 
        mult = div_u64((u64)rate * 32, parent_rate);
        mult = clamp(mult, 1U, 32U);
 
+       if (clk_readl(zclk->kick_reg) & CPG_FRQCRB_KICK)
+               return -EBUSY;
+
        val = clk_readl(zclk->reg);
        val &= ~CPG_FRQCRC_ZFC_MASK;
        val |= (32 - mult) << CPG_FRQCRC_ZFC_SHIFT;
        clk_writel(val, zclk->reg);
 
-       return 0;
+       /*
+        * Set KICK bit in FRQCRB to update hardware setting and wait for
+        * clock change completion.
+        */
+       kick = clk_readl(zclk->kick_reg);
+       kick |= CPG_FRQCRB_KICK;
+       clk_writel(kick, zclk->kick_reg);
+
+       /*
+        * Note: There is no HW information about the worst case latency.
+        *
+        * Based on experimental measurements, it seems that no more than
+        * ~10 iterations are needed, independent of the CPU rate.
+        * Since this value might depend on the external xtal rate, the pll1
+        * rate, or even the other emulation clock rates, use 1000 as a
+        * safely large value.
+        */
+       for (i = 1000; i; i--) {
+               if (!(clk_readl(zclk->kick_reg) & CPG_FRQCRB_KICK))
+                       return 0;
+
+               cpu_relax();
+       }
+
+       return -ETIMEDOUT;
 }
 
 static const struct clk_ops cpg_z_clk_ops = {
@@ -120,6 +151,7 @@ static struct clk * __init cpg_z_clk_register(struct rcar_gen2_cpg *cpg)
        init.num_parents = 1;
 
        zclk->reg = cpg->reg + CPG_FRQCRC;
+       zclk->kick_reg = cpg->reg + CPG_FRQCRB;
        zclk->hw.init = &init;
 
        clk = clk_register(NULL, &zclk->hw);
@@ -186,7 +218,7 @@ rcar_gen2_cpg_register_clock(struct device_node *np, struct rcar_gen2_cpg *cpg,
                             const char *name)
 {
        const struct clk_div_table *table = NULL;
-       const char *parent_name = "main";
+       const char *parent_name;
        unsigned int shift;
        unsigned int mult = 1;
        unsigned int div = 1;
@@ -201,23 +233,31 @@ rcar_gen2_cpg_register_clock(struct device_node *np, struct rcar_gen2_cpg *cpg,
                 * the multiplier value.
                 */
                u32 value = clk_readl(cpg->reg + CPG_PLL0CR);
+               parent_name = "main";
                mult = ((value >> 24) & ((1 << 7) - 1)) + 1;
        } else if (!strcmp(name, "pll1")) {
+               parent_name = "main";
                mult = config->pll1_mult / 2;
        } else if (!strcmp(name, "pll3")) {
+               parent_name = "main";
                mult = config->pll3_mult;
        } else if (!strcmp(name, "lb")) {
+               parent_name = "pll1_div2";
                div = cpg_mode & BIT(18) ? 36 : 24;
        } else if (!strcmp(name, "qspi")) {
+               parent_name = "pll1_div2";
                div = (cpg_mode & (BIT(3) | BIT(2) | BIT(1))) == BIT(2)
-                   ? 16 : 20;
+                   ? 8 : 10;
        } else if (!strcmp(name, "sdh")) {
+               parent_name = "pll1_div2";
                table = cpg_sdh_div_table;
                shift = 8;
        } else if (!strcmp(name, "sd0")) {
+               parent_name = "pll1_div2";
                table = cpg_sd01_div_table;
                shift = 4;
        } else if (!strcmp(name, "sd1")) {
+               parent_name = "pll1_div2";
                table = cpg_sd01_div_table;
                shift = 0;
        } else if (!strcmp(name, "z")) {
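The KICK handling added above is a bounded register poll: set the bit, then spin a fixed number of iterations waiting for the hardware to clear it, returning -EBUSY or -ETIMEDOUT rather than hanging. A compilable sketch of the same control flow (the register read is simulated):

    #include <stdbool.h>
    #include <stdio.h>

    static int busy_reads = 3;              /* pretend HW needs 3 polls */

    static unsigned int read_kick_bit(void)
    {
            return busy_reads-- > 0;        /* 1 while "in progress" */
    }

    static bool wait_kick_clear(int tries)
    {
            while (tries--) {
                    if (!read_kick_bit())
                            return true;
                    /* cpu_relax() would go here in the kernel */
            }
            return false;                   /* maps to -ETIMEDOUT */
    }

    int main(void)
    {
            puts(wait_kick_clear(1000) ? "transition done" : "timeout");
            return 0;
    }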
index 4d75b1f37e3a4b74ee4606ce33d54b8a771beb6a..290f9c1a37498ccf16ae0b09804ca15e722a2da2 100644 (file)
@@ -59,7 +59,7 @@ static int get_div(struct tegra_clk_frac_div *divider, unsigned long rate,
                return 0;
 
        if (divider_ux1 > get_max_div(divider))
-               return -EINVAL;
+               return get_max_div(divider);
 
        return divider_ux1;
 }
index cf0c323f2c36ec453deed1c5374fbbce4df543b2..c39613c519af85aa5faf14cc02857219cde56a3b 100644 (file)
@@ -180,9 +180,13 @@ enum clk_id {
        tegra_clk_sbc6_8,
        tegra_clk_sclk,
        tegra_clk_sdmmc1,
+       tegra_clk_sdmmc1_8,
        tegra_clk_sdmmc2,
+       tegra_clk_sdmmc2_8,
        tegra_clk_sdmmc3,
+       tegra_clk_sdmmc3_8,
        tegra_clk_sdmmc4,
+       tegra_clk_sdmmc4_8,
        tegra_clk_se,
        tegra_clk_soc_therm,
        tegra_clk_sor0,
index 5c35885f4a7cee9a4ecb9309f93c345befe28a2e..1fa5c3f33b2033a5e4a32716706d2221d548adf1 100644 (file)
@@ -371,9 +371,7 @@ static const char *mux_pllp3_pllc_clkm[] = {
 static const char *mux_pllm_pllc_pllp_plla_pllc2_c3_clkm[] = {
        "pll_m", "pll_c", "pll_p", "pll_a", "pll_c2", "pll_c3", "clk_m"
 };
-static u32 mux_pllm_pllc_pllp_plla_pllc2_c3_clkm_idx[] = {
-       [0] = 0, [1] = 1, [2] = 2, [3] = 3, [4] = 4, [5] = 6,
-};
+#define mux_pllm_pllc_pllp_plla_pllc2_c3_clkm_idx NULL
 
 static const char *mux_pllm_pllc2_c_c3_pllp_plla_pllc4[] = {
        "pll_m", "pll_c2", "pll_c", "pll_c3", "pll_p", "pll_a_out0", "pll_c4",
@@ -465,6 +463,10 @@ static struct tegra_periph_init_data periph_clks[] = {
        MUX("adx1", mux_plla_pllc_pllp_clkm, CLK_SOURCE_ADX1, 180, TEGRA_PERIPH_ON_APB, tegra_clk_adx1),
        MUX("amx1", mux_plla_pllc_pllp_clkm, CLK_SOURCE_AMX1, 185, TEGRA_PERIPH_ON_APB, tegra_clk_amx1),
        MUX("vi_sensor2", mux_pllm_pllc2_c_c3_pllp_plla, CLK_SOURCE_VI_SENSOR2, 20, TEGRA_PERIPH_NO_RESET, tegra_clk_vi_sensor2),
+       MUX8("sdmmc1", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SDMMC1, 14, 0, tegra_clk_sdmmc1_8),
+       MUX8("sdmmc2", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SDMMC2, 9, 0, tegra_clk_sdmmc2_8),
+       MUX8("sdmmc3", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SDMMC3, 69, 0, tegra_clk_sdmmc3_8),
+       MUX8("sdmmc4", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SDMMC4, 15, 0, tegra_clk_sdmmc4_8),
        MUX8("sbc1", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SBC1, 41, TEGRA_PERIPH_ON_APB, tegra_clk_sbc1_8),
        MUX8("sbc2", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SBC2, 44, TEGRA_PERIPH_ON_APB, tegra_clk_sbc2_8),
        MUX8("sbc3", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SBC3, 46, TEGRA_PERIPH_ON_APB, tegra_clk_sbc3_8),
@@ -492,7 +494,7 @@ static struct tegra_periph_init_data periph_clks[] = {
        UART("uartb", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_UARTB, 7, tegra_clk_uartb),
        UART("uartc", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_UARTC, 55, tegra_clk_uartc),
        UART("uartd", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_UARTD, 65, tegra_clk_uartd),
-       UART("uarte", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_UARTE, 65, tegra_clk_uarte),
+       UART("uarte", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_UARTE, 66, tegra_clk_uarte),
        XUSB("xusb_host_src", mux_clkm_pllp_pllc_pllre, CLK_SOURCE_XUSB_HOST_SRC, 143, TEGRA_PERIPH_ON_APB | TEGRA_PERIPH_NO_RESET, tegra_clk_xusb_host_src),
        XUSB("xusb_falcon_src", mux_clkm_pllp_pllc_pllre, CLK_SOURCE_XUSB_FALCON_SRC, 143, TEGRA_PERIPH_NO_RESET, tegra_clk_xusb_falcon_src),
        XUSB("xusb_fs_src", mux_clkm_48M_pllp_480M, CLK_SOURCE_XUSB_FS_SRC, 143, TEGRA_PERIPH_NO_RESET, tegra_clk_xusb_fs_src),
index 05dce4aa2c11e2a0d3e73cb84f4dc6231537b08d..feb3201c85ce5df6d8786eaf2e4a0408e5890ec0 100644 (file)
@@ -120,7 +120,7 @@ void __init tegra_super_clk_gen4_init(void __iomem *clk_base,
                                        ARRAY_SIZE(cclk_lp_parents),
                                        CLK_SET_RATE_PARENT,
                                        clk_base + CCLKLP_BURST_POLICY,
-                                       0, 4, 8, 9, NULL);
+                                       TEGRA_DIVIDER_2, 4, 8, 9, NULL);
                *dt_clk = clk;
        }
 
index 90d9d25f2228195308f328a9d7c05c1a5bbe5a40..80431f0fb2688f84557383b20ea39513ed2ffcc4 100644 (file)
@@ -682,12 +682,12 @@ static struct tegra_clk tegra114_clks[tegra_clk_max] __initdata = {
        [tegra_clk_timer] = { .dt_id = TEGRA114_CLK_TIMER, .present = true },
        [tegra_clk_uarta] = { .dt_id = TEGRA114_CLK_UARTA, .present = true },
        [tegra_clk_uartd] = { .dt_id = TEGRA114_CLK_UARTD, .present = true },
-       [tegra_clk_sdmmc2] = { .dt_id = TEGRA114_CLK_SDMMC2, .present = true },
+       [tegra_clk_sdmmc2_8] = { .dt_id = TEGRA114_CLK_SDMMC2, .present = true },
        [tegra_clk_i2s1] = { .dt_id = TEGRA114_CLK_I2S1, .present = true },
        [tegra_clk_i2c1] = { .dt_id = TEGRA114_CLK_I2C1, .present = true },
        [tegra_clk_ndflash] = { .dt_id = TEGRA114_CLK_NDFLASH, .present = true },
-       [tegra_clk_sdmmc1] = { .dt_id = TEGRA114_CLK_SDMMC1, .present = true },
-       [tegra_clk_sdmmc4] = { .dt_id = TEGRA114_CLK_SDMMC4, .present = true },
+       [tegra_clk_sdmmc1_8] = { .dt_id = TEGRA114_CLK_SDMMC1, .present = true },
+       [tegra_clk_sdmmc4_8] = { .dt_id = TEGRA114_CLK_SDMMC4, .present = true },
        [tegra_clk_pwm] = { .dt_id = TEGRA114_CLK_PWM, .present = true },
        [tegra_clk_i2s0] = { .dt_id = TEGRA114_CLK_I2S0, .present = true },
        [tegra_clk_i2s2] = { .dt_id = TEGRA114_CLK_I2S2, .present = true },
@@ -723,7 +723,7 @@ static struct tegra_clk tegra114_clks[tegra_clk_max] __initdata = {
        [tegra_clk_bsev] = { .dt_id = TEGRA114_CLK_BSEV, .present = true },
        [tegra_clk_i2c3] = { .dt_id = TEGRA114_CLK_I2C3, .present = true },
        [tegra_clk_sbc4_8] = { .dt_id = TEGRA114_CLK_SBC4, .present = true },
-       [tegra_clk_sdmmc3] = { .dt_id = TEGRA114_CLK_SDMMC3, .present = true },
+       [tegra_clk_sdmmc3_8] = { .dt_id = TEGRA114_CLK_SDMMC3, .present = true },
        [tegra_clk_owr] = { .dt_id = TEGRA114_CLK_OWR, .present = true },
        [tegra_clk_csite] = { .dt_id = TEGRA114_CLK_CSITE, .present = true },
        [tegra_clk_la] = { .dt_id = TEGRA114_CLK_LA, .present = true },
index aff86b5bc7455c190fbf5ebaf2be08c204262e26..166e02f16c8a25f28f4441584dc6e8babc448f3b 100644 (file)
@@ -516,11 +516,11 @@ static struct div_nmp pllp_nmp = {
 };
 
 static struct tegra_clk_pll_freq_table pll_p_freq_table[] = {
-       {12000000, 216000000, 432, 12, 1, 8},
-       {13000000, 216000000, 432, 13, 1, 8},
-       {16800000, 216000000, 360, 14, 1, 8},
-       {19200000, 216000000, 360, 16, 1, 8},
-       {26000000, 216000000, 432, 26, 1, 8},
+       {12000000, 408000000, 408, 12, 0, 8},
+       {13000000, 408000000, 408, 13, 0, 8},
+       {16800000, 408000000, 340, 14, 0, 8},
+       {19200000, 408000000, 340, 16, 0, 8},
+       {26000000, 408000000, 408, 26, 0, 8},
        {0, 0, 0, 0, 0, 0},
 };
 
@@ -570,6 +570,15 @@ static struct tegra_clk_pll_params pll_a_params = {
        .flags = TEGRA_PLL_HAS_CPCON | TEGRA_PLL_USE_LOCK,
 };
 
+static struct div_nmp plld_nmp = {
+       .divm_shift = 0,
+       .divm_width = 5,
+       .divn_shift = 8,
+       .divn_width = 11,
+       .divp_shift = 20,
+       .divp_width = 3,
+};
+
 static struct tegra_clk_pll_freq_table pll_d_freq_table[] = {
        {12000000, 216000000, 864, 12, 4, 12},
        {13000000, 216000000, 864, 13, 4, 12},
@@ -603,19 +612,18 @@ static struct tegra_clk_pll_params pll_d_params = {
        .lock_mask = PLL_BASE_LOCK,
        .lock_enable_bit_idx = PLLDU_MISC_LOCK_ENABLE,
        .lock_delay = 1000,
-       .div_nmp = &pllp_nmp,
+       .div_nmp = &plld_nmp,
        .freq_table = pll_d_freq_table,
        .flags = TEGRA_PLL_HAS_CPCON | TEGRA_PLL_SET_LFCON |
                 TEGRA_PLL_USE_LOCK,
 };
 
 static struct tegra_clk_pll_freq_table tegra124_pll_d2_freq_table[] = {
-       { 12000000, 148500000,  99, 1, 8},
-       { 12000000, 594000000,  99, 1, 1},
-       { 13000000, 594000000,  91, 1, 1},      /* actual: 591.5 MHz */
-       { 16800000, 594000000,  71, 1, 1},      /* actual: 596.4 MHz */
-       { 19200000, 594000000,  62, 1, 1},      /* actual: 595.2 MHz */
-       { 26000000, 594000000,  91, 2, 1},      /* actual: 591.5 MHz */
+       { 12000000, 594000000,  99, 1, 2},
+       { 13000000, 594000000,  91, 1, 2},      /* actual: 591.5 MHz */
+       { 16800000, 594000000,  71, 1, 2},      /* actual: 596.4 MHz */
+       { 19200000, 594000000,  62, 1, 2},      /* actual: 595.2 MHz */
+       { 26000000, 594000000,  91, 2, 2},      /* actual: 591.5 MHz */
        { 0, 0, 0, 0, 0, 0 },
 };
 
@@ -753,21 +761,19 @@ static struct tegra_clk tegra124_clks[tegra_clk_max] __initdata = {
        [tegra_clk_rtc] = { .dt_id = TEGRA124_CLK_RTC, .present = true },
        [tegra_clk_timer] = { .dt_id = TEGRA124_CLK_TIMER, .present = true },
        [tegra_clk_uarta] = { .dt_id = TEGRA124_CLK_UARTA, .present = true },
-       [tegra_clk_sdmmc2] = { .dt_id = TEGRA124_CLK_SDMMC2, .present = true },
+       [tegra_clk_sdmmc2_8] = { .dt_id = TEGRA124_CLK_SDMMC2, .present = true },
        [tegra_clk_i2s1] = { .dt_id = TEGRA124_CLK_I2S1, .present = true },
        [tegra_clk_i2c1] = { .dt_id = TEGRA124_CLK_I2C1, .present = true },
        [tegra_clk_ndflash] = { .dt_id = TEGRA124_CLK_NDFLASH, .present = true },
-       [tegra_clk_sdmmc1] = { .dt_id = TEGRA124_CLK_SDMMC1, .present = true },
-       [tegra_clk_sdmmc4] = { .dt_id = TEGRA124_CLK_SDMMC4, .present = true },
+       [tegra_clk_sdmmc1_8] = { .dt_id = TEGRA124_CLK_SDMMC1, .present = true },
+       [tegra_clk_sdmmc4_8] = { .dt_id = TEGRA124_CLK_SDMMC4, .present = true },
        [tegra_clk_pwm] = { .dt_id = TEGRA124_CLK_PWM, .present = true },
        [tegra_clk_i2s2] = { .dt_id = TEGRA124_CLK_I2S2, .present = true },
-       [tegra_clk_gr2d] = { .dt_id = TEGRA124_CLK_GR_2D, .present = true },
        [tegra_clk_usbd] = { .dt_id = TEGRA124_CLK_USBD, .present = true },
        [tegra_clk_isp_8] = { .dt_id = TEGRA124_CLK_ISP, .present = true },
-       [tegra_clk_gr3d] = { .dt_id = TEGRA124_CLK_GR_3D, .present = true },
        [tegra_clk_disp2] = { .dt_id = TEGRA124_CLK_DISP2, .present = true },
        [tegra_clk_disp1] = { .dt_id = TEGRA124_CLK_DISP1, .present = true },
-       [tegra_clk_host1x] = { .dt_id = TEGRA124_CLK_HOST1X, .present = true },
+       [tegra_clk_host1x_8] = { .dt_id = TEGRA124_CLK_HOST1X, .present = true },
        [tegra_clk_vcp] = { .dt_id = TEGRA124_CLK_VCP, .present = true },
        [tegra_clk_i2s0] = { .dt_id = TEGRA124_CLK_I2S0, .present = true },
        [tegra_clk_apbdma] = { .dt_id = TEGRA124_CLK_APBDMA, .present = true },
@@ -794,7 +800,7 @@ static struct tegra_clk tegra124_clks[tegra_clk_max] __initdata = {
        [tegra_clk_uartd] = { .dt_id = TEGRA124_CLK_UARTD, .present = true },
        [tegra_clk_i2c3] = { .dt_id = TEGRA124_CLK_I2C3, .present = true },
        [tegra_clk_sbc4] = { .dt_id = TEGRA124_CLK_SBC4, .present = true },
-       [tegra_clk_sdmmc3] = { .dt_id = TEGRA124_CLK_SDMMC3, .present = true },
+       [tegra_clk_sdmmc3_8] = { .dt_id = TEGRA124_CLK_SDMMC3, .present = true },
        [tegra_clk_pcie] = { .dt_id = TEGRA124_CLK_PCIE, .present = true },
        [tegra_clk_owr] = { .dt_id = TEGRA124_CLK_OWR, .present = true },
        [tegra_clk_afi] = { .dt_id = TEGRA124_CLK_AFI, .present = true },
@@ -1286,9 +1292,9 @@ static void __init tegra124_pll_init(void __iomem *clk_base,
        clk_register_clkdev(clk, "pll_d2", NULL);
        clks[TEGRA124_CLK_PLL_D2] = clk;
 
-       /* PLLD2_OUT0 ?? */
+       /* PLLD2_OUT0 */
        clk = clk_register_fixed_factor(NULL, "pll_d2_out0", "pll_d2",
-                                       CLK_SET_RATE_PARENT, 1, 2);
+                                       CLK_SET_RATE_PARENT, 1, 1);
        clk_register_clkdev(clk, "pll_d2_out0", NULL);
        clks[TEGRA124_CLK_PLL_D2_OUT0] = clk;
 
index dbace152b2faa9e4f1699b8369d935683900df89..dace2b1b5ae66dafab4fe4639e6291872491119d 100644 (file)
@@ -574,6 +574,8 @@ static struct tegra_clk tegra20_clks[tegra_clk_max] __initdata = {
        [tegra_clk_tvdac] = { .dt_id = TEGRA20_CLK_TVDAC, .present = true },
        [tegra_clk_vi_sensor] = { .dt_id = TEGRA20_CLK_VI_SENSOR, .present = true },
        [tegra_clk_afi] = { .dt_id = TEGRA20_CLK_AFI, .present = true },
+       [tegra_clk_fuse] = { .dt_id = TEGRA20_CLK_FUSE, .present = true },
+       [tegra_clk_kfuse] = { .dt_id = TEGRA20_CLK_KFUSE, .present = true },
 };
 
 static unsigned long tegra20_clk_measure_input_freq(void)
index cb003a6b72c86f10f31afd0d7b3c156308f95acc..cf485d9289035fe58ed9c84223ccd9f412a4b6cd 100644 (file)
@@ -1109,6 +1109,21 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
                goto err_set_policy_cpu;
        }
 
+       /* related cpus should at least have policy->cpus */
+       cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
+
+       /*
+        * affected cpus must always be ones that are online; we aren't
+        * managing offline cpus here.
+        */
+       cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
+
+       if (!frozen) {
+               policy->user_policy.min = policy->min;
+               policy->user_policy.max = policy->max;
+       }
+
+       down_write(&policy->rwsem);
        write_lock_irqsave(&cpufreq_driver_lock, flags);
        for_each_cpu(j, policy->cpus)
                per_cpu(cpufreq_cpu_data, j) = policy;
@@ -1162,20 +1177,6 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
                }
        }
 
-       /* related cpus should atleast have policy->cpus */
-       cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
-
-       /*
-        * affected cpus must always be the one, which are online. We aren't
-        * managing offline cpus here.
-        */
-       cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
-
-       if (!frozen) {
-               policy->user_policy.min = policy->min;
-               policy->user_policy.max = policy->max;
-       }
-
        blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
                                     CPUFREQ_START, policy);
 
@@ -1206,6 +1207,7 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
                policy->user_policy.policy = policy->policy;
                policy->user_policy.governor = policy->governor;
        }
+       up_write(&policy->rwsem);
 
        kobject_uevent(&policy->kobj, KOBJ_ADD);
        up_read(&cpufreq_rwsem);
@@ -1546,23 +1548,16 @@ static unsigned int __cpufreq_get(unsigned int cpu)
  */
 unsigned int cpufreq_get(unsigned int cpu)
 {
-       struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
+       struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
        unsigned int ret_freq = 0;
 
-       if (cpufreq_disabled() || !cpufreq_driver)
-               return -ENOENT;
-
-       BUG_ON(!policy);
-
-       if (!down_read_trylock(&cpufreq_rwsem))
-               return 0;
-
-       down_read(&policy->rwsem);
-
-       ret_freq = __cpufreq_get(cpu);
+       if (policy) {
+               down_read(&policy->rwsem);
+               ret_freq = __cpufreq_get(cpu);
+               up_read(&policy->rwsem);
 
-       up_read(&policy->rwsem);
-       up_read(&cpufreq_rwsem);
+               cpufreq_cpu_put(policy);
+       }
 
        return ret_freq;
 }
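The cpufreq_get() rewrite replaces a BUG_ON plus hand-rolled locking with the get/put pattern: take a counted reference, return 0 gracefully when the CPU has no policy, and always pair the put with the get. A user-space model of that contract (plain counters, invented names):

    #include <stddef.h>
    #include <stdio.h>

    struct policy { int refs; unsigned int cur; };

    static struct policy *policy_get(struct policy *p)
    {
            if (p)
                    p->refs++;
            return p;
    }

    static void policy_put(struct policy *p)
    {
            if (p)
                    p->refs--;
    }

    static unsigned int get_freq(struct policy *p)
    {
            unsigned int ret = 0;
            struct policy *pol = policy_get(p);

            if (pol) {
                    ret = pol->cur;
                    policy_put(pol);        /* every get is paired */
            }
            return ret;                     /* 0 when the CPU is unmanaged */
    }

    int main(void)
    {
            struct policy p = { .refs = 1, .cur = 1200000 };

            printf("%u\n", get_freq(&p));
            printf("%u\n", get_freq(NULL));
            return 0;
    }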
index de4aa409abe2988d8dc5b421e969ab5256fb4009..2c6d5e118ac129e5ab9c24f09557b27e3248f3ea 100644 (file)
@@ -916,7 +916,7 @@ static int lookup_existing_device(struct device *dev, void *data)
                old->config_rom_retries = 0;
                fw_notice(card, "rediscovered device %s\n", dev_name(dev));
 
-               PREPARE_DELAYED_WORK(&old->work, fw_device_update);
+               old->workfn = fw_device_update;
                fw_schedule_device_work(old, 0);
 
                if (current_node == card->root_node)
@@ -1075,7 +1075,7 @@ static void fw_device_init(struct work_struct *work)
        if (atomic_cmpxchg(&device->state,
                           FW_DEVICE_INITIALIZING,
                           FW_DEVICE_RUNNING) == FW_DEVICE_GONE) {
-               PREPARE_DELAYED_WORK(&device->work, fw_device_shutdown);
+               device->workfn = fw_device_shutdown;
                fw_schedule_device_work(device, SHUTDOWN_DELAY);
        } else {
                fw_notice(card, "created device %s: GUID %08x%08x, S%d00\n",
@@ -1196,13 +1196,20 @@ static void fw_device_refresh(struct work_struct *work)
                  dev_name(&device->device), fw_rcode_string(ret));
  gone:
        atomic_set(&device->state, FW_DEVICE_GONE);
-       PREPARE_DELAYED_WORK(&device->work, fw_device_shutdown);
+       device->workfn = fw_device_shutdown;
        fw_schedule_device_work(device, SHUTDOWN_DELAY);
  out:
        if (node_id == card->root_node->node_id)
                fw_schedule_bm_work(card, 0);
 }
 
+static void fw_device_workfn(struct work_struct *work)
+{
+       struct fw_device *device = container_of(to_delayed_work(work),
+                                               struct fw_device, work);
+       device->workfn(work);
+}
+
 void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
 {
        struct fw_device *device;
@@ -1252,7 +1259,8 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
                 * power-up after getting plugged in.  We schedule the
                 * first config rom scan half a second after bus reset.
                 */
-               INIT_DELAYED_WORK(&device->work, fw_device_init);
+               device->workfn = fw_device_init;
+               INIT_DELAYED_WORK(&device->work, fw_device_workfn);
                fw_schedule_device_work(device, INITIAL_DELAY);
                break;
 
@@ -1268,7 +1276,7 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
                if (atomic_cmpxchg(&device->state,
                            FW_DEVICE_RUNNING,
                            FW_DEVICE_INITIALIZING) == FW_DEVICE_RUNNING) {
-                       PREPARE_DELAYED_WORK(&device->work, fw_device_refresh);
+                       device->workfn = fw_device_refresh;
                        fw_schedule_device_work(device,
                                device->is_local ? 0 : INITIAL_DELAY);
                }
@@ -1283,7 +1291,7 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
                smp_wmb();  /* update node_id before generation */
                device->generation = card->generation;
                if (atomic_read(&device->state) == FW_DEVICE_RUNNING) {
-                       PREPARE_DELAYED_WORK(&device->work, fw_device_update);
+                       device->workfn = fw_device_update;
                        fw_schedule_device_work(device, 0);
                }
                break;
@@ -1308,7 +1316,7 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
                device = node->data;
                if (atomic_xchg(&device->state,
                                FW_DEVICE_GONE) == FW_DEVICE_RUNNING) {
-                       PREPARE_DELAYED_WORK(&device->work, fw_device_shutdown);
+                       device->workfn = fw_device_shutdown;
                        fw_schedule_device_work(device,
                                list_empty(&card->link) ? 0 : SHUTDOWN_DELAY);
                }
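fw_device_workfn() is the trampoline that replaces PREPARE_DELAYED_WORK throughout this file (and in sbp2.c below): the work item always runs one fixed function, which dispatches through a pointer stored beside it, so retargeting becomes an ordinary store instead of re-initializing the work item. A minimal model (no workqueue, names invented):

    #include <stdio.h>

    struct device;
    typedef void (*work_func_t)(struct device *);

    struct device {
            work_func_t workfn;     /* retargetable, like fw_device->workfn */
    };

    static void device_init(struct device *d)   { (void)d; puts("init"); }
    static void device_update(struct device *d) { (void)d; puts("update"); }

    /* The one function the work queue ever sees; it just dispatches. */
    static void device_workfn(struct device *d)
    {
            d->workfn(d);
    }

    int main(void)
    {
            struct device dev = { .workfn = device_init };

            device_workfn(&dev);            /* queued work runs "init" */
            dev.workfn = device_update;     /* retarget with a plain store */
            device_workfn(&dev);
            return 0;
    }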
index 6b895986dc225115573add201958e60897934853..4af0a7bad7f21561cc4e7a29031c238a34f0135d 100644 (file)
@@ -929,8 +929,6 @@ static void fwnet_write_complete(struct fw_card *card, int rcode,
        if (rcode == RCODE_COMPLETE) {
                fwnet_transmit_packet_done(ptask);
        } else {
-               fwnet_transmit_packet_failed(ptask);
-
                if (printk_timed_ratelimit(&j,  1000) || rcode != last_rcode) {
                        dev_err(&ptask->dev->netdev->dev,
                                "fwnet_write_complete failed: %x (skipped %d)\n",
@@ -938,8 +936,10 @@ static void fwnet_write_complete(struct fw_card *card, int rcode,
 
                        errors_skipped = 0;
                        last_rcode = rcode;
-               } else
+               } else {
                        errors_skipped++;
+               }
+               fwnet_transmit_packet_failed(ptask);
        }
 }
 
index 6f74d8d3f70015a089f02948294454863cb6bcd7..8db66321956068701cde997e0bc25e167e781eb3 100644 (file)
@@ -290,7 +290,6 @@ static char ohci_driver_name[] = KBUILD_MODNAME;
 #define QUIRK_NO_MSI                   0x10
 #define QUIRK_TI_SLLZ059               0x20
 #define QUIRK_IR_WAKE                  0x40
-#define QUIRK_PHY_LCTRL_TIMEOUT                0x80
 
 /* In case of multiple matches in ohci_quirks[], only the first one is used. */
 static const struct {
@@ -303,10 +302,7 @@ static const struct {
                QUIRK_BE_HEADERS},
 
        {PCI_VENDOR_ID_ATT, PCI_DEVICE_ID_AGERE_FW643, 6,
-               QUIRK_PHY_LCTRL_TIMEOUT | QUIRK_NO_MSI},
-
-       {PCI_VENDOR_ID_ATT, PCI_ANY_ID, PCI_ANY_ID,
-               QUIRK_PHY_LCTRL_TIMEOUT},
+               QUIRK_NO_MSI},
 
        {PCI_VENDOR_ID_CREATIVE, PCI_DEVICE_ID_CREATIVE_SB1394, PCI_ANY_ID,
                QUIRK_RESET_PACKET},
@@ -353,7 +349,6 @@ MODULE_PARM_DESC(quirks, "Chip quirks (default = 0"
        ", disable MSI = "              __stringify(QUIRK_NO_MSI)
        ", TI SLLZ059 erratum = "       __stringify(QUIRK_TI_SLLZ059)
        ", IR wake unreliable = "       __stringify(QUIRK_IR_WAKE)
-       ", phy LCtrl timeout = "        __stringify(QUIRK_PHY_LCTRL_TIMEOUT)
        ")");
 
 #define OHCI_PARAM_DEBUG_AT_AR         1
@@ -2299,9 +2294,6 @@ static int ohci_enable(struct fw_card *card,
         * TI TSB82AA2 + TSB81BA3(A) cards signal LPS enabled early but
         * cannot actually use the phy at that time.  These need tens of
         * milliseconds pause between LPS write and first phy access too.
-        *
-        * But do not wait for 50msec on Agere/LSI cards.  Their phy
-        * arbitration state machine may time out during such a long wait.
         */
 
        reg_write(ohci, OHCI1394_HCControlSet,
@@ -2309,11 +2301,8 @@ static int ohci_enable(struct fw_card *card,
                  OHCI1394_HCControl_postedWriteEnable);
        flush_writes(ohci);
 
-       if (!(ohci->quirks & QUIRK_PHY_LCTRL_TIMEOUT))
+       for (lps = 0, i = 0; !lps && i < 3; i++) {
                msleep(50);
-
-       for (lps = 0, i = 0; !lps && i < 150; i++) {
-               msleep(1);
                lps = reg_read(ohci, OHCI1394_HCControlSet) &
                      OHCI1394_HCControl_LPS;
        }
index 281029daf98c7ea291886d46ecf05378bfa98718..7aef911fdc716d4874b7152b3c0d6e27bc8c1642 100644 (file)
@@ -146,6 +146,7 @@ struct sbp2_logical_unit {
         */
        int generation;
        int retries;
+       work_func_t workfn;
        struct delayed_work work;
        bool has_sdev;
        bool blocked;
@@ -864,7 +865,7 @@ static void sbp2_login(struct work_struct *work)
        /* set appropriate retry limit(s) in BUSY_TIMEOUT register */
        sbp2_set_busy_timeout(lu);
 
-       PREPARE_DELAYED_WORK(&lu->work, sbp2_reconnect);
+       lu->workfn = sbp2_reconnect;
        sbp2_agent_reset(lu);
 
        /* This was a re-login. */
@@ -918,7 +919,7 @@ static void sbp2_login(struct work_struct *work)
         * If a bus reset happened, sbp2_update will have requeued
         * lu->work already.  Reset the work from reconnect to login.
         */
-       PREPARE_DELAYED_WORK(&lu->work, sbp2_login);
+       lu->workfn = sbp2_login;
 }
 
 static void sbp2_reconnect(struct work_struct *work)
@@ -952,7 +953,7 @@ static void sbp2_reconnect(struct work_struct *work)
                    lu->retries++ >= 5) {
                        dev_err(tgt_dev(tgt), "failed to reconnect\n");
                        lu->retries = 0;
-                       PREPARE_DELAYED_WORK(&lu->work, sbp2_login);
+                       lu->workfn = sbp2_login;
                }
                sbp2_queue_work(lu, DIV_ROUND_UP(HZ, 5));
 
@@ -972,6 +973,13 @@ static void sbp2_reconnect(struct work_struct *work)
        sbp2_conditionally_unblock(lu);
 }
 
+static void sbp2_lu_workfn(struct work_struct *work)
+{
+       struct sbp2_logical_unit *lu = container_of(to_delayed_work(work),
+                                               struct sbp2_logical_unit, work);
+       lu->workfn(work);
+}
+
 static int sbp2_add_logical_unit(struct sbp2_target *tgt, int lun_entry)
 {
        struct sbp2_logical_unit *lu;
@@ -998,7 +1006,8 @@ static int sbp2_add_logical_unit(struct sbp2_target *tgt, int lun_entry)
        lu->blocked  = false;
        ++tgt->dont_block;
        INIT_LIST_HEAD(&lu->orb_list);
-       INIT_DELAYED_WORK(&lu->work, sbp2_login);
+       lu->workfn = sbp2_login;
+       INIT_DELAYED_WORK(&lu->work, sbp2_lu_workfn);
 
        list_add_tail(&lu->link, &tgt->lu_list);
        return 0;
index acf3a36c9ebc453737b1a849aa6715f41b241d68..32982da82694be753d1072c90136d842a8a866ec 100644 (file)
@@ -68,15 +68,7 @@ void __armada_drm_queue_unref_work(struct drm_device *dev,
 {
        struct armada_private *priv = dev->dev_private;
 
-       /*
-        * Yes, we really must jump through these hoops just to store a
-        * _pointer_ to something into the kfifo.  This is utterly insane
-        * and idiotic, because it kfifo requires the _data_ pointed to by
-        * the pointer const, not the pointer itself.  Not only that, but
-        * you have to pass a pointer _to_ the pointer you want stored.
-        */
-       const struct drm_framebuffer *silly_api_alert = fb;
-       WARN_ON(!kfifo_put(&priv->fb_unref, &silly_api_alert));
+       WARN_ON(!kfifo_put(&priv->fb_unref, fb));
        schedule_work(&priv->fb_unref_work);
 }
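The deleted rant reflects the old kfifo_put() calling convention, which took a pointer to the element; the macro now takes the element by value, so a fifo declared over pointer elements stores the pointers directly. A toy ring of pointers showing the value-style put (fixed size, invented names):

    #include <stdio.h>

    #define RING_SIZE 4

    struct fb { int id; };

    static const struct fb *ring[RING_SIZE];
    static unsigned int head, tail;

    /* Value-style put: the pointer itself is the element. */
    static int ring_put(const struct fb *fb)
    {
            if (head - tail == RING_SIZE)
                    return 0;               /* full, like kfifo_put() */
            ring[head++ % RING_SIZE] = fb;
            return 1;
    }

    int main(void)
    {
            struct fb fb = { .id = 1 };

            if (ring_put(&fb))
                    printf("queued fb %d\n", ring[tail % RING_SIZE]->id);
            return 0;
    }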
 
index c8fcf12019f09c1eaffff1d72ea72c74714325e2..5f8b0c2b9a4461d2e07b1d7a3abb25f69837fc90 100644 (file)
@@ -2,6 +2,7 @@ config DRM_BOCHS
        tristate "DRM Support for bochs dispi vga interface (qemu stdvga)"
        depends on DRM && PCI
        select DRM_KMS_HELPER
+       select DRM_KMS_FB_HELPER
        select FB_SYS_FILLRECT
        select FB_SYS_COPYAREA
        select FB_SYS_IMAGEBLIT
index 04f1f02c4019f57873caeb0c70fea14e0816ddeb..ec7bb0fc71bcfc8c19c2231c9bb0e3165f89e047 100644 (file)
@@ -403,7 +403,7 @@ MODULE_DEVICE_TABLE(pci, pciidlist);
 void intel_detect_pch(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct pci_dev *pch;
+       struct pci_dev *pch = NULL;
 
        /* In all current cases, num_pipes is equivalent to the PCH_NOP setting
         * (which really amounts to a PCH but no South Display).
@@ -424,12 +424,9 @@ void intel_detect_pch(struct drm_device *dev)
         * all the ISA bridge devices and check for the first match, instead
         * of only checking the first one.
         */
-       pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
-       while (pch) {
-               struct pci_dev *curr = pch;
+       while ((pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pch))) {
                if (pch->vendor == PCI_VENDOR_ID_INTEL) {
-                       unsigned short id;
-                       id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
+                       unsigned short id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
                        dev_priv->pch_id = id;
 
                        if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
@@ -461,18 +458,16 @@ void intel_detect_pch(struct drm_device *dev)
                                DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
                                WARN_ON(!IS_HASWELL(dev));
                                WARN_ON(!IS_ULT(dev));
-                       } else {
-                               goto check_next;
-                       }
-                       pci_dev_put(pch);
+                       } else
+                               continue;
+
                        break;
                }
-check_next:
-               pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, curr);
-               pci_dev_put(curr);
        }
        if (!pch)
-               DRM_DEBUG_KMS("No PCH found?\n");
+               DRM_DEBUG_KMS("No PCH found.\n");
+
+       pci_dev_put(pch);
 }
 
 bool i915_semaphore_is_enabled(struct drm_device *dev)
index 1a24e84f231578ae772d40ad75adda4c356b8d68..d58b4e287e3289a278741522a09baeaca584be0b 100644 (file)
@@ -82,9 +82,22 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
        r = devm_request_mem_region(dev->dev, base, dev_priv->gtt.stolen_size,
                                    "Graphics Stolen Memory");
        if (r == NULL) {
-               DRM_ERROR("conflict detected with stolen region: [0x%08x - 0x%08x]\n",
-                         base, base + (uint32_t)dev_priv->gtt.stolen_size);
-               base = 0;
+               /*
+                * One more attempt, but this time requesting the region
+                * from base + 1 instead, as we have seen that this resolves
+                * the region conflict with the PCI bus.
+                * This is a BIOS w/a: some BIOSes wrap the stolen region in
+                * the root PCI bus, but with an off-by-one error. Hence retry
+                * the reservation starting from 1 instead of 0.
+                */
+               r = devm_request_mem_region(dev->dev, base + 1,
+                                           dev_priv->gtt.stolen_size - 1,
+                                           "Graphics Stolen Memory");
+               if (r == NULL) {
+                       DRM_ERROR("conflict detected with stolen region: [0x%08x - 0x%08x]\n",
+                                 base, base + (uint32_t)dev_priv->gtt.stolen_size);
+                       base = 0;
+               }
        }
 
        return base;
index 4c1672809493c795714bc3eddaa1238c258451ed..9b8a7c7ea7fc1925610b4dcb11ca9f768057fd41 100644 (file)
@@ -1092,12 +1092,12 @@ static void assert_cursor(struct drm_i915_private *dev_priv,
        struct drm_device *dev = dev_priv->dev;
        bool cur_state;
 
-       if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
-               cur_state = I915_READ(CURCNTR_IVB(pipe)) & CURSOR_MODE;
-       else if (IS_845G(dev) || IS_I865G(dev))
+       if (IS_845G(dev) || IS_I865G(dev))
                cur_state = I915_READ(_CURACNTR) & CURSOR_ENABLE;
-       else
+       else if (INTEL_INFO(dev)->gen <= 6 || IS_VALLEYVIEW(dev))
                cur_state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
+       else
+               cur_state = I915_READ(CURCNTR_IVB(pipe)) & CURSOR_MODE;
 
        WARN(cur_state != state,
             "cursor on pipe %c assertion failure (expected %s, current %s)\n",
index 6db0d9d17f47e7b5ce5c43a7aa7795de71ea6346..ee3181ebcc9236078835ee18a48fe86c97525ff9 100644 (file)
@@ -845,7 +845,7 @@ static int hdmi_portclock_limit(struct intel_hdmi *hdmi)
 {
        struct drm_device *dev = intel_hdmi_to_dev(hdmi);
 
-       if (IS_G4X(dev))
+       if (!hdmi->has_hdmi_sink || IS_G4X(dev))
                return 165000;
        else if (IS_HASWELL(dev) || INTEL_INFO(dev)->gen >= 8)
                return 300000;
@@ -899,8 +899,8 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
         * outputs. We also need to check that the higher clock still fits
         * within limits.
         */
-       if (pipe_config->pipe_bpp > 8*3 && clock_12bpc <= portclock_limit
-           && HAS_PCH_SPLIT(dev)) {
+       if (pipe_config->pipe_bpp > 8*3 && intel_hdmi->has_hdmi_sink &&
+           clock_12bpc <= portclock_limit && HAS_PCH_SPLIT(dev)) {
                DRM_DEBUG_KMS("picking bpc to 12 for HDMI output\n");
                desired_bpp = 12*3;
 
index 350de359123af9cbd42354c182424f356281aa16..079ea38f14d9b4b54092a1ebeca4cd77dac27cec 100644 (file)
@@ -698,7 +698,7 @@ static void i9xx_enable_backlight(struct intel_connector *connector)
                freq /= 0xff;
 
        ctl = freq << 17;
-       if (IS_GEN2(dev) && panel->backlight.combination_mode)
+       if (panel->backlight.combination_mode)
                ctl |= BLM_LEGACY_MODE;
        if (IS_PINEVIEW(dev) && panel->backlight.active_low_pwm)
                ctl |= BLM_POLARITY_PNV;
@@ -979,7 +979,7 @@ static int i9xx_setup_backlight(struct intel_connector *connector)
 
        ctl = I915_READ(BLC_PWM_CTL);
 
-       if (IS_GEN2(dev))
+       if (IS_GEN2(dev) || IS_I915GM(dev) || IS_I945GM(dev))
                panel->backlight.combination_mode = ctl & BLM_LEGACY_MODE;
 
        if (IS_PINEVIEW(dev))
index d77cc81900f92100ba2f61d7130bc9b0b60454b5..e1fc35a726564cc9f3526231780672d09d151838 100644 (file)
@@ -3493,6 +3493,8 @@ static void valleyview_setup_pctx(struct drm_device *dev)
        u32 pcbr;
        int pctx_size = 24*1024;
 
+       WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
        pcbr = I915_READ(VLV_PCBR);
        if (pcbr) {
                /* BIOS set it up already, grab the pre-alloc'd space */
@@ -3542,8 +3544,6 @@ static void valleyview_enable_rps(struct drm_device *dev)
                I915_WRITE(GTFIFODBG, gtfifodbg);
        }
 
-       valleyview_setup_pctx(dev);
-
        /* If VLV, Forcewake all wells, else re-direct to regular path */
        gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
 
@@ -4395,6 +4395,8 @@ void intel_enable_gt_powersave(struct drm_device *dev)
                ironlake_enable_rc6(dev);
                intel_init_emon(dev);
        } else if (IS_GEN6(dev) || IS_GEN7(dev)) {
+               if (IS_VALLEYVIEW(dev))
+                       valleyview_setup_pctx(dev);
                /*
                 * PCU communication is slow and this doesn't need to be
                 * done at any specific time, so do this out of our fast path
index 2cec2ab02f80c193ce929393cd3d0c86bdd3d7c9..607dc14d195e3540f103bf798d2e6f0e10e5634f 100644 (file)
@@ -1314,7 +1314,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
                        }
                        if (is_dp)
                                args.v5.ucLaneNum = dp_lane_count;
-                       else if (radeon_encoder->pixel_clock > 165000)
+                       else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
                                args.v5.ucLaneNum = 8;
                        else
                                args.v5.ucLaneNum = 4;
index e6419ca7cd375d6958ebcd339fb68a004c56cd1a..e22be8458d92783f426a699d2e3feef271b7a3f0 100644 (file)
@@ -3046,7 +3046,7 @@ static u32 cik_create_bitmask(u32 bit_width)
 }
 
 /**
- * cik_select_se_sh - select which SE, SH to address
+ * cik_get_rb_disabled - computes the mask of disabled RBs
  *
  * @rdev: radeon_device pointer
  * @max_rb_num: max RBs (render backends) for the asic
@@ -7902,7 +7902,8 @@ int cik_resume(struct radeon_device *rdev)
        /* init golden registers */
        cik_init_golden_registers(rdev);
 
-       radeon_pm_resume(rdev);
+       if (rdev->pm.pm_method == PM_METHOD_DPM)
+               radeon_pm_resume(rdev);
 
        rdev->accel_working = true;
        r = cik_startup(rdev);
index 8a2c010b7dc53b22f0c6ead8dec361c66538a041..27b0ff16082ebbbe10eb385b8bce24e127b5091d 100644 (file)
@@ -5299,7 +5299,8 @@ int evergreen_resume(struct radeon_device *rdev)
        /* init golden registers */
        evergreen_init_golden_registers(rdev);
 
-       radeon_pm_resume(rdev);
+       if (rdev->pm.pm_method == PM_METHOD_DPM)
+               radeon_pm_resume(rdev);
 
        rdev->accel_working = true;
        r = evergreen_startup(rdev);
index 76ada8cfe902a4d9eaa9890404c390371f856621..3a03ba37d04346fb5200d67ee002716ae549a600 100644 (file)
@@ -57,7 +57,7 @@ typedef struct SMC_Evergreen_MCRegisters SMC_Evergreen_MCRegisters;
 
 #define EVERGREEN_SMC_FIRMWARE_HEADER_LOCATION 0x100
 
-#define EVERGREEN_SMC_FIRMWARE_HEADER_softRegisters   0x0
+#define EVERGREEN_SMC_FIRMWARE_HEADER_softRegisters   0x8
 #define EVERGREEN_SMC_FIRMWARE_HEADER_stateTable      0xC
 #define EVERGREEN_SMC_FIRMWARE_HEADER_mcRegisterTable 0x20
 
index ea932ac66fc6647da43a8cf775b60ade25d478de..bf6300cfd62d0799deaefbd698027cf8d811dbc6 100644 (file)
@@ -2105,7 +2105,8 @@ int cayman_resume(struct radeon_device *rdev)
        /* init golden registers */
        ni_init_golden_registers(rdev);
 
-       radeon_pm_resume(rdev);
+       if (rdev->pm.pm_method == PM_METHOD_DPM)
+               radeon_pm_resume(rdev);
 
        rdev->accel_working = true;
        r = cayman_startup(rdev);
index ef024ce3f7ccfd39b1be47be9840a401b588b1dc..3cc78bb66042fc5509f26825560847374888288e 100644 (file)
@@ -3942,8 +3942,6 @@ int r100_resume(struct radeon_device *rdev)
        /* Initialize surface registers */
        radeon_surface_init(rdev);
 
-       radeon_pm_resume(rdev);
-
        rdev->accel_working = true;
        r = r100_startup(rdev);
        if (r) {
index 7c63ef840e86abaf04f216201b32ee2f40323b9b..0b658b34b33ab1d2874c0c85a668b4fa48da0ecc 100644 (file)
@@ -1430,8 +1430,6 @@ int r300_resume(struct radeon_device *rdev)
        /* Initialize surface registers */
        radeon_surface_init(rdev);
 
-       radeon_pm_resume(rdev);
-
        rdev->accel_working = true;
        r = r300_startup(rdev);
        if (r) {
index 3768aab2710b3943261312c4fcc5184b4f18a6fb..802b19220a21358712dc304a894db892ec3537fb 100644 (file)
@@ -325,8 +325,6 @@ int r420_resume(struct radeon_device *rdev)
        /* Initialize surface registers */
        radeon_surface_init(rdev);
 
-       radeon_pm_resume(rdev);
-
        rdev->accel_working = true;
        r = r420_startup(rdev);
        if (r) {
index e209eb75024f9dc1c441518cbe8f8d70a41844f5..98d6053c36c678aaf598e7a0b63193d00d042f5b 100644 (file)
@@ -240,8 +240,6 @@ int r520_resume(struct radeon_device *rdev)
        /* Initialize surface registers */
        radeon_surface_init(rdev);
 
-       radeon_pm_resume(rdev);
-
        rdev->accel_working = true;
        r = r520_startup(rdev);
        if (r) {
index cdbc4171fe73743bd9bcf9ce2bbb74020ad6ece5..647ef407921764692758c56e03f92b737202e317 100644 (file)
@@ -2968,7 +2968,8 @@ int r600_resume(struct radeon_device *rdev)
        /* post card */
        atom_asic_init(rdev->mode_info.atom_context);
 
-       radeon_pm_resume(rdev);
+       if (rdev->pm.pm_method == PM_METHOD_DPM)
+               radeon_pm_resume(rdev);
 
        rdev->accel_working = true;
        r = r600_startup(rdev);
index b012cbbc3ed5a9b892b433eff0ee5f3134a130de..044bc98fb459e46fac62b929696ad047f835360d 100644 (file)
@@ -1521,13 +1521,16 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
        if (r)
                DRM_ERROR("ib ring test failed (%d).\n", r);
 
-       if (rdev->pm.dpm_enabled) {
+       if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
                /* do dpm late init */
                r = radeon_pm_late_init(rdev);
                if (r) {
                        rdev->pm.dpm_enabled = false;
                        DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n");
                }
+       } else {
+               /* resume old pm late */
+               radeon_pm_resume(rdev);
        }
 
        radeon_restore_bios_scratch_regs(rdev);
index 77f5b0c3edb8d8b1f4835620d626c3981171c7a6..040a2a10ea17e8136a4274decc83de131ad79e3f 100644 (file)
@@ -714,6 +714,9 @@ int radeon_ttm_init(struct radeon_device *rdev)
                DRM_ERROR("Failed initializing VRAM heap.\n");
                return r;
        }
+       /* Change the size here instead of the init above so only lpfn is affected */
+       radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
+
        r = radeon_bo_create(rdev, 256 * 1024, PAGE_SIZE, true,
                             RADEON_GEM_DOMAIN_VRAM,
                             NULL, &rdev->stollen_vga_memory);
@@ -935,7 +938,7 @@ static ssize_t radeon_ttm_gtt_read(struct file *f, char __user *buf,
        while (size) {
                loff_t p = *pos / PAGE_SIZE;
                unsigned off = *pos & ~PAGE_MASK;
-               ssize_t cur_size = min(size, PAGE_SIZE - off);
+               size_t cur_size = min_t(size_t, size, PAGE_SIZE - off);
                struct page *page;
                void *ptr;
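The min_t(size_t, ...) change pins both operands of the comparison to one explicit type; with the earlier mixed ssize_t/size_t operands, the usual arithmetic conversions push the signed side to unsigned. A two-line demonstration of that conversion hazard:

    #include <stddef.h>
    #include <stdio.h>

    int main(void)
    {
            size_t size = 10;
            long other = -1;        /* imagine PAGE_SIZE - off gone negative */

            /* In a mixed signed/unsigned comparison the signed side is
             * converted, so -1 becomes SIZE_MAX and "wins" the max. */
            size_t naive = size < (size_t)other ? size : (size_t)other;

            printf("naive min = %zu\n", naive); /* 10: the -1 is masked */
            return 0;
    }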
 
index b5c2369cda2fe28ca043fe91f4fcfe12fc227fc6..130d5cc50d436fd90c1d3055bd03e177ba2dce6c 100644 (file)
@@ -474,8 +474,6 @@ int rs400_resume(struct radeon_device *rdev)
        /* Initialize surface registers */
        radeon_surface_init(rdev);
 
-       radeon_pm_resume(rdev);
-
        rdev->accel_working = true;
        r = rs400_startup(rdev);
        if (r) {
index fdcde7693032c28cc30008b494fd573fd82e58f3..72d3616de08e054efe6199fcd529f59cef3e18d5 100644
@@ -1048,8 +1048,6 @@ int rs600_resume(struct radeon_device *rdev)
        /* Initialize surface registers */
        radeon_surface_init(rdev);
 
-       radeon_pm_resume(rdev);
-
        rdev->accel_working = true;
        r = rs600_startup(rdev);
        if (r) {
index 35950738bd5e449465e0e5062d096e3e8ae9ff0e..3462b64369bfe6142a4c8acfaec09e0bb7d8826c 100644
@@ -756,8 +756,6 @@ int rs690_resume(struct radeon_device *rdev)
        /* Initialize surface registers */
        radeon_surface_init(rdev);
 
-       radeon_pm_resume(rdev);
-
        rdev->accel_working = true;
        r = rs690_startup(rdev);
        if (r) {
index 98e8138ff77945ecdba447bdcb94310c30dcb1c7..237dd29d9f1c893b1e17600fec59660848b394c3 100644
@@ -586,8 +586,6 @@ int rv515_resume(struct radeon_device *rdev)
        /* Initialize surface registers */
        radeon_surface_init(rdev);
 
-       radeon_pm_resume(rdev);
-
        rdev->accel_working = true;
        r =  rv515_startup(rdev);
        if (r) {
index 4e37a42305d83be2df8b3853bdff0e13b7b0f46f..fef310773aadc71260888372b896df0afcced749 100644
@@ -1811,7 +1811,8 @@ int rv770_resume(struct radeon_device *rdev)
        /* init golden registers */
        rv770_init_golden_registers(rdev);
 
-       radeon_pm_resume(rdev);
+       if (rdev->pm.pm_method == PM_METHOD_DPM)
+               radeon_pm_resume(rdev);
 
        rdev->accel_working = true;
        r = rv770_startup(rdev);
index 83578324e5d1383167f75071cdaf47d85a0e60ea..9a124d0608b36f3ae594709ac92c4d4ad563b6d7 100644
@@ -6618,7 +6618,8 @@ int si_resume(struct radeon_device *rdev)
        /* init golden registers */
        si_init_golden_registers(rdev);
 
-       radeon_pm_resume(rdev);
+       if (rdev->pm.pm_method == PM_METHOD_DPM)
+               radeon_pm_resume(rdev);
 
        rdev->accel_working = true;
        r = si_startup(rdev);
index e81c5547e6479a87683dba621c169529f4209d5a..f9c12e92fdd661a4e639790c9b2b2dd19ba7eb36 100644
@@ -53,8 +53,8 @@
 #include "user.h"
 
 #define DRV_NAME       MLX4_IB_DRV_NAME
-#define DRV_VERSION    "1.0"
-#define DRV_RELDATE    "April 4, 2008"
+#define DRV_VERSION    "2.2-1"
+#define DRV_RELDATE    "Feb 2014"
 
 #define MLX4_IB_FLOW_MAX_PRIO 0xFFF
 #define MLX4_IB_FLOW_QPN_MASK 0xFFFFFF
index aa03e732b6a88d7cfa81a3d9ba4dee61d8f6c70d..bf900579ac08b4cae5ac09320b5189727b292485 100644
@@ -46,8 +46,8 @@
 #include "mlx5_ib.h"
 
 #define DRIVER_NAME "mlx5_ib"
-#define DRIVER_VERSION "1.0"
-#define DRIVER_RELDATE "June 2013"
+#define DRIVER_VERSION "2.2-1"
+#define DRIVER_RELDATE "Feb 2014"
 
 MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
 MODULE_DESCRIPTION("Mellanox Connect-IB HCA IB driver");
index d18d08a076e8e4a15a67f71d8eb8089dd976198d..8ee228e9ab5aa28b85ee2893a92b3ab243c35388 100644
@@ -492,12 +492,11 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
        isert_conn->state = ISER_CONN_INIT;
        INIT_LIST_HEAD(&isert_conn->conn_accept_node);
        init_completion(&isert_conn->conn_login_comp);
-       init_waitqueue_head(&isert_conn->conn_wait);
-       init_waitqueue_head(&isert_conn->conn_wait_comp_err);
+       init_completion(&isert_conn->conn_wait);
+       init_completion(&isert_conn->conn_wait_comp_err);
        kref_init(&isert_conn->conn_kref);
        kref_get(&isert_conn->conn_kref);
        mutex_init(&isert_conn->conn_mutex);
-       mutex_init(&isert_conn->conn_comp_mutex);
        spin_lock_init(&isert_conn->conn_lock);
 
        cma_id->context = isert_conn;
@@ -688,11 +687,11 @@ isert_disconnect_work(struct work_struct *work)
 
        pr_debug("isert_disconnect_work(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
        mutex_lock(&isert_conn->conn_mutex);
-       isert_conn->state = ISER_CONN_DOWN;
+       if (isert_conn->state == ISER_CONN_UP)
+               isert_conn->state = ISER_CONN_TERMINATING;
 
        if (isert_conn->post_recv_buf_count == 0 &&
            atomic_read(&isert_conn->post_send_buf_count) == 0) {
-               pr_debug("Calling wake_up(&isert_conn->conn_wait);\n");
                mutex_unlock(&isert_conn->conn_mutex);
                goto wake_up;
        }
@@ -712,7 +711,7 @@ isert_disconnect_work(struct work_struct *work)
        mutex_unlock(&isert_conn->conn_mutex);
 
 wake_up:
-       wake_up(&isert_conn->conn_wait);
+       complete(&isert_conn->conn_wait);
        isert_put_conn(isert_conn);
 }
 
@@ -888,16 +887,17 @@ isert_init_send_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
         * Coalesce send completion interrupts by only setting IB_SEND_SIGNALED
         * bit for every ISERT_COMP_BATCH_COUNT number of ib_post_send() calls.
         */
-       mutex_lock(&isert_conn->conn_comp_mutex);
-       if (coalesce &&
+       mutex_lock(&isert_conn->conn_mutex);
+       if (coalesce && isert_conn->state == ISER_CONN_UP &&
            ++isert_conn->conn_comp_batch < ISERT_COMP_BATCH_COUNT) {
+               tx_desc->llnode_active = true;
                llist_add(&tx_desc->comp_llnode, &isert_conn->conn_comp_llist);
-               mutex_unlock(&isert_conn->conn_comp_mutex);
+               mutex_unlock(&isert_conn->conn_mutex);
                return;
        }
        isert_conn->conn_comp_batch = 0;
        tx_desc->comp_llnode_batch = llist_del_all(&isert_conn->conn_comp_llist);
-       mutex_unlock(&isert_conn->conn_comp_mutex);
+       mutex_unlock(&isert_conn->conn_mutex);
 
        send_wr->send_flags = IB_SEND_SIGNALED;
 }
@@ -1464,7 +1464,7 @@ isert_put_cmd(struct isert_cmd *isert_cmd)
        case ISCSI_OP_SCSI_CMD:
                spin_lock_bh(&conn->cmd_lock);
                if (!list_empty(&cmd->i_conn_node))
-                       list_del(&cmd->i_conn_node);
+                       list_del_init(&cmd->i_conn_node);
                spin_unlock_bh(&conn->cmd_lock);
 
                if (cmd->data_direction == DMA_TO_DEVICE)
@@ -1476,7 +1476,7 @@ isert_put_cmd(struct isert_cmd *isert_cmd)
        case ISCSI_OP_SCSI_TMFUNC:
                spin_lock_bh(&conn->cmd_lock);
                if (!list_empty(&cmd->i_conn_node))
-                       list_del(&cmd->i_conn_node);
+                       list_del_init(&cmd->i_conn_node);
                spin_unlock_bh(&conn->cmd_lock);
 
                transport_generic_free_cmd(&cmd->se_cmd, 0);
@@ -1486,7 +1486,7 @@ isert_put_cmd(struct isert_cmd *isert_cmd)
        case ISCSI_OP_TEXT:
                spin_lock_bh(&conn->cmd_lock);
                if (!list_empty(&cmd->i_conn_node))
-                       list_del(&cmd->i_conn_node);
+                       list_del_init(&cmd->i_conn_node);
                spin_unlock_bh(&conn->cmd_lock);
 
                /*
@@ -1549,6 +1549,7 @@ isert_completion_rdma_read(struct iser_tx_desc *tx_desc,
        iscsit_stop_dataout_timer(cmd);
        device->unreg_rdma_mem(isert_cmd, isert_conn);
        cmd->write_data_done = wr->cur_rdma_length;
+       wr->send_wr_num = 0;
 
        pr_debug("Cmd: %p RDMA_READ comp calling execute_cmd\n", isert_cmd);
        spin_lock_bh(&cmd->istate_lock);
@@ -1589,7 +1590,7 @@ isert_do_control_comp(struct work_struct *work)
                pr_debug("Calling iscsit_logout_post_handler >>>>>>>>>>>>>>\n");
                /*
                 * Call atomic_dec(&isert_conn->post_send_buf_count)
-                * from isert_free_conn()
+                * from isert_wait_conn()
                 */
                isert_conn->logout_posted = true;
                iscsit_logout_post_handler(cmd, cmd->conn);
@@ -1613,6 +1614,7 @@ isert_response_completion(struct iser_tx_desc *tx_desc,
                          struct ib_device *ib_dev)
 {
        struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
+       struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
 
        if (cmd->i_state == ISTATE_SEND_TASKMGTRSP ||
            cmd->i_state == ISTATE_SEND_LOGOUTRSP ||
@@ -1624,7 +1626,7 @@ isert_response_completion(struct iser_tx_desc *tx_desc,
                queue_work(isert_comp_wq, &isert_cmd->comp_work);
                return;
        }
-       atomic_dec(&isert_conn->post_send_buf_count);
+       atomic_sub(wr->send_wr_num + 1, &isert_conn->post_send_buf_count);
 
        cmd->i_state = ISTATE_SENT_STATUS;
        isert_completion_put(tx_desc, isert_cmd, ib_dev);
@@ -1662,7 +1664,7 @@ __isert_send_completion(struct iser_tx_desc *tx_desc,
        case ISER_IB_RDMA_READ:
                pr_debug("isert_send_completion: Got ISER_IB_RDMA_READ:\n");
 
-               atomic_dec(&isert_conn->post_send_buf_count);
+               atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count);
                isert_completion_rdma_read(tx_desc, isert_cmd);
                break;
        default:
@@ -1691,31 +1693,76 @@ isert_send_completion(struct iser_tx_desc *tx_desc,
 }
 
 static void
-isert_cq_comp_err(struct iser_tx_desc *tx_desc, struct isert_conn *isert_conn)
+isert_cq_drain_comp_llist(struct isert_conn *isert_conn, struct ib_device *ib_dev)
+{
+       struct llist_node *llnode;
+       struct isert_rdma_wr *wr;
+       struct iser_tx_desc *t;
+
+       mutex_lock(&isert_conn->conn_mutex);
+       llnode = llist_del_all(&isert_conn->conn_comp_llist);
+       isert_conn->conn_comp_batch = 0;
+       mutex_unlock(&isert_conn->conn_mutex);
+
+       while (llnode) {
+               t = llist_entry(llnode, struct iser_tx_desc, comp_llnode);
+               llnode = llist_next(llnode);
+               wr = &t->isert_cmd->rdma_wr;
+
+               atomic_sub(wr->send_wr_num + 1, &isert_conn->post_send_buf_count);
+               isert_completion_put(t, t->isert_cmd, ib_dev);
+       }
+}
+
+static void
+isert_cq_tx_comp_err(struct iser_tx_desc *tx_desc, struct isert_conn *isert_conn)
 {
        struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+       struct isert_cmd *isert_cmd = tx_desc->isert_cmd;
+       struct llist_node *llnode = tx_desc->comp_llnode_batch;
+       struct isert_rdma_wr *wr;
+       struct iser_tx_desc *t;
 
-       if (tx_desc) {
-               struct isert_cmd *isert_cmd = tx_desc->isert_cmd;
+       while (llnode) {
+               t = llist_entry(llnode, struct iser_tx_desc, comp_llnode);
+               llnode = llist_next(llnode);
+               wr = &t->isert_cmd->rdma_wr;
 
-               if (!isert_cmd)
-                       isert_unmap_tx_desc(tx_desc, ib_dev);
-               else
-                       isert_completion_put(tx_desc, isert_cmd, ib_dev);
+               atomic_sub(wr->send_wr_num + 1, &isert_conn->post_send_buf_count);
+               isert_completion_put(t, t->isert_cmd, ib_dev);
        }
+       tx_desc->comp_llnode_batch = NULL;
 
-       if (isert_conn->post_recv_buf_count == 0 &&
-           atomic_read(&isert_conn->post_send_buf_count) == 0) {
-               pr_debug("isert_cq_comp_err >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
-               pr_debug("Calling wake_up from isert_cq_comp_err\n");
+       if (!isert_cmd)
+               isert_unmap_tx_desc(tx_desc, ib_dev);
+       else
+               isert_completion_put(tx_desc, isert_cmd, ib_dev);
+}
 
-               mutex_lock(&isert_conn->conn_mutex);
-               if (isert_conn->state != ISER_CONN_DOWN)
-                       isert_conn->state = ISER_CONN_TERMINATING;
-               mutex_unlock(&isert_conn->conn_mutex);
+static void
+isert_cq_rx_comp_err(struct isert_conn *isert_conn)
+{
+       struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+       struct iscsi_conn *conn = isert_conn->conn;
 
-               wake_up(&isert_conn->conn_wait_comp_err);
+       if (isert_conn->post_recv_buf_count)
+               return;
+
+       isert_cq_drain_comp_llist(isert_conn, ib_dev);
+
+       if (conn->sess) {
+               target_sess_cmd_list_set_waiting(conn->sess->se_sess);
+               target_wait_for_sess_cmds(conn->sess->se_sess);
        }
+
+       while (atomic_read(&isert_conn->post_send_buf_count))
+               msleep(3000);
+
+       mutex_lock(&isert_conn->conn_mutex);
+       isert_conn->state = ISER_CONN_DOWN;
+       mutex_unlock(&isert_conn->conn_mutex);
+
+       complete(&isert_conn->conn_wait_comp_err);
 }
 
 static void
@@ -1740,8 +1787,14 @@ isert_cq_tx_work(struct work_struct *work)
                        pr_debug("TX wc.status != IB_WC_SUCCESS >>>>>>>>>>>>>>\n");
                        pr_debug("TX wc.status: 0x%08x\n", wc.status);
                        pr_debug("TX wc.vendor_err: 0x%08x\n", wc.vendor_err);
-                       atomic_dec(&isert_conn->post_send_buf_count);
-                       isert_cq_comp_err(tx_desc, isert_conn);
+
+                       if (wc.wr_id != ISER_FASTREG_LI_WRID) {
+                               if (tx_desc->llnode_active)
+                                       continue;
+
+                               atomic_dec(&isert_conn->post_send_buf_count);
+                               isert_cq_tx_comp_err(tx_desc, isert_conn);
+                       }
                }
        }
 
@@ -1784,7 +1837,7 @@ isert_cq_rx_work(struct work_struct *work)
                                         wc.vendor_err);
                        }
                        isert_conn->post_recv_buf_count--;
-                       isert_cq_comp_err(NULL, isert_conn);
+                       isert_cq_rx_comp_err(isert_conn);
                }
        }
 
@@ -2202,6 +2255,7 @@ isert_fast_reg_mr(struct fast_reg_descriptor *fr_desc,
 
        if (!fr_desc->valid) {
                memset(&inv_wr, 0, sizeof(inv_wr));
+               inv_wr.wr_id = ISER_FASTREG_LI_WRID;
                inv_wr.opcode = IB_WR_LOCAL_INV;
                inv_wr.ex.invalidate_rkey = fr_desc->data_mr->rkey;
                wr = &inv_wr;
@@ -2212,6 +2266,7 @@ isert_fast_reg_mr(struct fast_reg_descriptor *fr_desc,
 
        /* Prepare FASTREG WR */
        memset(&fr_wr, 0, sizeof(fr_wr));
+       fr_wr.wr_id = ISER_FASTREG_LI_WRID;
        fr_wr.opcode = IB_WR_FAST_REG_MR;
        fr_wr.wr.fast_reg.iova_start =
                fr_desc->data_frpl->page_list[0] + page_off;
@@ -2377,12 +2432,12 @@ isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
        isert_init_send_wr(isert_conn, isert_cmd,
                           &isert_cmd->tx_desc.send_wr, true);
 
-       atomic_inc(&isert_conn->post_send_buf_count);
+       atomic_add(wr->send_wr_num + 1, &isert_conn->post_send_buf_count);
 
        rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed);
        if (rc) {
                pr_warn("ib_post_send() failed for IB_WR_RDMA_WRITE\n");
-               atomic_dec(&isert_conn->post_send_buf_count);
+               atomic_sub(wr->send_wr_num + 1, &isert_conn->post_send_buf_count);
        }
        pr_debug("Cmd: %p posted RDMA_WRITE + Response for iSER Data READ\n",
                 isert_cmd);
@@ -2410,12 +2465,12 @@ isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery)
                return rc;
        }
 
-       atomic_inc(&isert_conn->post_send_buf_count);
+       atomic_add(wr->send_wr_num, &isert_conn->post_send_buf_count);
 
        rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed);
        if (rc) {
                pr_warn("ib_post_send() failed for IB_WR_RDMA_READ\n");
-               atomic_dec(&isert_conn->post_send_buf_count);
+               atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count);
        }
        pr_debug("Cmd: %p posted RDMA_READ memory for ISER Data WRITE\n",
                 isert_cmd);
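
The atomic_inc()/atomic_dec() to atomic_add()/atomic_sub() changes in the two post paths above keep post_send_buf_count in step with what is actually posted: a data-in response chains one response WR behind wr->send_wr_num RDMA WRs (hence the + 1), while a data-out recovery posts only the RDMA READ WRs. In outline, for the data-in path:

	/* RDMA WRITE + response: send_wr_num RDMA WRs plus one response WR */
	atomic_add(wr->send_wr_num + 1, &isert_conn->post_send_buf_count);
	rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed);
	if (rc)		/* nothing was posted: back it all out again */
		atomic_sub(wr->send_wr_num + 1, &isert_conn->post_send_buf_count);

On the completion side, isert_response_completion() and the ISER_IB_RDMA_READ case subtract the same quantities, so the counter reaches zero exactly when the queue pair is drained.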
@@ -2702,22 +2757,11 @@ isert_free_np(struct iscsi_np *np)
        kfree(isert_np);
 }
 
-static int isert_check_state(struct isert_conn *isert_conn, int state)
-{
-       int ret;
-
-       mutex_lock(&isert_conn->conn_mutex);
-       ret = (isert_conn->state == state);
-       mutex_unlock(&isert_conn->conn_mutex);
-
-       return ret;
-}
-
-static void isert_free_conn(struct iscsi_conn *conn)
+static void isert_wait_conn(struct iscsi_conn *conn)
 {
        struct isert_conn *isert_conn = conn->context;
 
-       pr_debug("isert_free_conn: Starting \n");
+       pr_debug("isert_wait_conn: Starting \n");
        /*
         * Decrement post_send_buf_count for special case when called
         * from isert_do_control_comp() -> iscsit_logout_post_handler()
@@ -2727,38 +2771,29 @@ static void isert_free_conn(struct iscsi_conn *conn)
                atomic_dec(&isert_conn->post_send_buf_count);
 
        if (isert_conn->conn_cm_id && isert_conn->state != ISER_CONN_DOWN) {
-               pr_debug("Calling rdma_disconnect from isert_free_conn\n");
+               pr_debug("Calling rdma_disconnect from isert_wait_conn\n");
                rdma_disconnect(isert_conn->conn_cm_id);
        }
        /*
         * Only wait for conn_wait_comp_err if the isert_conn made it
         * into full feature phase..
         */
-       if (isert_conn->state == ISER_CONN_UP) {
-               pr_debug("isert_free_conn: Before wait_event comp_err %d\n",
-                        isert_conn->state);
-               mutex_unlock(&isert_conn->conn_mutex);
-
-               wait_event(isert_conn->conn_wait_comp_err,
-                         (isert_check_state(isert_conn, ISER_CONN_TERMINATING)));
-
-               wait_event(isert_conn->conn_wait,
-                         (isert_check_state(isert_conn, ISER_CONN_DOWN)));
-
-               isert_put_conn(isert_conn);
-               return;
-       }
        if (isert_conn->state == ISER_CONN_INIT) {
                mutex_unlock(&isert_conn->conn_mutex);
-               isert_put_conn(isert_conn);
                return;
        }
-       pr_debug("isert_free_conn: wait_event conn_wait %d\n",
-                isert_conn->state);
+       if (isert_conn->state == ISER_CONN_UP)
+               isert_conn->state = ISER_CONN_TERMINATING;
        mutex_unlock(&isert_conn->conn_mutex);
 
-       wait_event(isert_conn->conn_wait,
-                 (isert_check_state(isert_conn, ISER_CONN_DOWN)));
+       wait_for_completion(&isert_conn->conn_wait_comp_err);
+
+       wait_for_completion(&isert_conn->conn_wait);
+}
+
+static void isert_free_conn(struct iscsi_conn *conn)
+{
+       struct isert_conn *isert_conn = conn->context;
 
        isert_put_conn(isert_conn);
 }
@@ -2771,6 +2806,7 @@ static struct iscsit_transport iser_target_transport = {
        .iscsit_setup_np        = isert_setup_np,
        .iscsit_accept_np       = isert_accept_np,
        .iscsit_free_np         = isert_free_np,
+       .iscsit_wait_conn       = isert_wait_conn,
        .iscsit_free_conn       = isert_free_conn,
        .iscsit_get_login_rx    = isert_get_login_rx,
        .iscsit_put_login_tx    = isert_put_login_tx,
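
Taken together, the hunks above replace the wait_event()/wake_up() pairs, and the isert_check_state() helper that polled conn->state under the mutex, with struct completion. A completion latches the event, so a complete() that fires before the waiter reaches wait_for_completion() is not lost, and the teardown split into isert_wait_conn()/isert_free_conn() can rely on that ordering. The skeleton of the pattern, with hypothetical names:

#include <linux/completion.h>

struct conn {
	struct completion wait;		 /* signalled on final disconnect */
	struct completion wait_comp_err; /* signalled once CQs are drained */
};

static void conn_init(struct conn *c)
{
	init_completion(&c->wait);
	init_completion(&c->wait_comp_err);
}

static void conn_teardown(struct conn *c)
{
	/* Safe even if both complete() calls already ran: the
	 * completion remembers that the event happened. */
	wait_for_completion(&c->wait_comp_err);
	wait_for_completion(&c->wait);
}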
index 708a069002f3530e00002c5d1e454c1823119bb4..f6ae7f5dd4082768f3b6f0dfb36c1d45506b5eb6 100644
@@ -6,6 +6,7 @@
 
 #define ISERT_RDMA_LISTEN_BACKLOG      10
 #define ISCSI_ISER_SG_TABLESIZE                256
+#define ISER_FASTREG_LI_WRID           0xffffffffffffffffULL
 
 enum isert_desc_type {
        ISCSI_TX_CONTROL,
@@ -45,6 +46,7 @@ struct iser_tx_desc {
        struct isert_cmd *isert_cmd;
        struct llist_node *comp_llnode_batch;
        struct llist_node comp_llnode;
+       bool            llnode_active;
        struct ib_send_wr send_wr;
 } __packed;
 
@@ -116,8 +118,8 @@ struct isert_conn {
        struct isert_device     *conn_device;
        struct work_struct      conn_logout_work;
        struct mutex            conn_mutex;
-       wait_queue_head_t       conn_wait;
-       wait_queue_head_t       conn_wait_comp_err;
+       struct completion       conn_wait;
+       struct completion       conn_wait_comp_err;
        struct kref             conn_kref;
        struct list_head        conn_fr_pool;
        int                     conn_fr_pool_size;
@@ -126,7 +128,6 @@ struct isert_conn {
 #define ISERT_COMP_BATCH_COUNT 8
        int                     conn_comp_batch;
        struct llist_head       conn_comp_llist;
-       struct mutex            conn_comp_mutex;
 };
 
 #define ISERT_MAX_CQ 64
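
ISER_FASTREG_LI_WRID is an all-ones u64 used as a sentinel wr_id: local-invalidate and fast-registration WRs are posted without touching post_send_buf_count, so the TX error path must be able to recognise and skip them, or the counter would be decremented for work it never counted. llnode_active plays a similar role, marking descriptors already parked on the coalescing llist so they are not completed twice. Schematically, on a failed TX completion:

	if (wc.wr_id == ISER_FASTREG_LI_WRID)
		return;		/* never accounted; nothing to undo */
	if (tx_desc->llnode_active)
		return;		/* will be drained via the comp llist */
	atomic_dec(&isert_conn->post_send_buf_count);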
index 9a06fe8837666036fe04afbde6d5e2adf8de6256..95ad936e60482477c1f87c0aa654fded90d06ba5 100644
@@ -254,16 +254,6 @@ config DM_THIN_PROVISIONING
        ---help---
          Provides thin provisioning and snapshots that share a data store.
 
-config DM_DEBUG_BLOCK_STACK_TRACING
-       boolean "Keep stack trace of persistent data block lock holders"
-       depends on STACKTRACE_SUPPORT && DM_PERSISTENT_DATA
-       select STACKTRACE
-       ---help---
-         Enable this for messages that may help debug problems with the
-         block manager locking used by thin provisioning and caching.
-
-         If unsure, say N.
-
 config DM_CACHE
        tristate "Cache target (EXPERIMENTAL)"
        depends on BLK_DEV_DM
index 1e018e986610a57ef9f82a818aa1f70a8c364e30..0e385e40909e74fcde4da4ee5d785149101ad9cb 100644
@@ -872,7 +872,7 @@ static void mq_destroy(struct dm_cache_policy *p)
 {
        struct mq_policy *mq = to_mq_policy(p);
 
-       kfree(mq->table);
+       vfree(mq->table);
        epool_exit(&mq->cache_pool);
        epool_exit(&mq->pre_cache_pool);
        kfree(mq);
@@ -1245,7 +1245,7 @@ static struct dm_cache_policy *mq_create(dm_cblock_t cache_size,
 
        mq->nr_buckets = next_power(from_cblock(cache_size) / 2, 16);
        mq->hash_bits = ffs(mq->nr_buckets) - 1;
-       mq->table = kzalloc(sizeof(*mq->table) * mq->nr_buckets, GFP_KERNEL);
+       mq->table = vzalloc(sizeof(*mq->table) * mq->nr_buckets);
        if (!mq->table)
                goto bad_alloc_table;
 
index afc3d017de4c6a479f418df9197a62677539752d..d6e88178d22cff0e88fb8b21cb20558f51e8d6a1 100644
@@ -546,6 +546,9 @@ static int read_exceptions(struct pstore *ps,
                r = insert_exceptions(ps, area, callback, callback_context,
                                      &full);
 
+               if (!full)
+                       memcpy(ps->area, area, ps->store->chunk_size << SECTOR_SHIFT);
+
                dm_bufio_release(bp);
 
                dm_bufio_forget(client, chunk);
index baa87ff12816382245c520a9a29ed2171d3d4d6a..fb9efc829182d4cce55c9c8cfac1e850e65abe79 100644
@@ -76,7 +76,7 @@
 
 #define THIN_SUPERBLOCK_MAGIC 27022010
 #define THIN_SUPERBLOCK_LOCATION 0
-#define THIN_VERSION 1
+#define THIN_VERSION 2
 #define THIN_METADATA_CACHE_SIZE 64
 #define SECTOR_TO_BLOCK_SHIFT 3
 
@@ -1755,3 +1755,38 @@ int dm_pool_register_metadata_threshold(struct dm_pool_metadata *pmd,
 
        return r;
 }
+
+int dm_pool_metadata_set_needs_check(struct dm_pool_metadata *pmd)
+{
+       int r;
+       struct dm_block *sblock;
+       struct thin_disk_superblock *disk_super;
+
+       down_write(&pmd->root_lock);
+       pmd->flags |= THIN_METADATA_NEEDS_CHECK_FLAG;
+
+       r = superblock_lock(pmd, &sblock);
+       if (r) {
+               DMERR("couldn't read superblock");
+               goto out;
+       }
+
+       disk_super = dm_block_data(sblock);
+       disk_super->flags = cpu_to_le32(pmd->flags);
+
+       dm_bm_unlock(sblock);
+out:
+       up_write(&pmd->root_lock);
+       return r;
+}
+
+bool dm_pool_metadata_needs_check(struct dm_pool_metadata *pmd)
+{
+       bool needs_check;
+
+       down_read(&pmd->root_lock);
+       needs_check = pmd->flags & THIN_METADATA_NEEDS_CHECK_FLAG;
+       up_read(&pmd->root_lock);
+
+       return needs_check;
+}
index 82ea384d36ff9869b10864bc7e7d7f89d2bf3a4f..e3c857db195a7453d192f5f55cfb766a046a789a 100644
 
 /*----------------------------------------------------------------*/
 
+/*
+ * Thin metadata superblock flags.
+ */
+#define THIN_METADATA_NEEDS_CHECK_FLAG (1 << 0)
+
 struct dm_pool_metadata;
 struct dm_thin_device;
 
@@ -202,6 +207,12 @@ int dm_pool_register_metadata_threshold(struct dm_pool_metadata *pmd,
                                        dm_sm_threshold_fn fn,
                                        void *context);
 
+/*
+ * Updates the superblock immediately.
+ */
+int dm_pool_metadata_set_needs_check(struct dm_pool_metadata *pmd);
+bool dm_pool_metadata_needs_check(struct dm_pool_metadata *pmd);
+
 /*----------------------------------------------------------------*/
 
 #endif
index 7e84baccf0ad6c90f7a3f62ec084007003319b21..be70d38745f7a7f5da51bd4ba83a29afe851b238 100644
@@ -130,10 +130,11 @@ static void build_virtual_key(struct dm_thin_device *td, dm_block_t b,
 struct dm_thin_new_mapping;
 
 /*
- * The pool runs in 3 modes.  Ordered in degraded order for comparisons.
+ * The pool runs in 4 modes.  Ordered in degraded order for comparisons.
  */
 enum pool_mode {
        PM_WRITE,               /* metadata may be changed */
+       PM_OUT_OF_DATA_SPACE,   /* metadata may be changed, though data may not be allocated */
        PM_READ_ONLY,           /* metadata may not be changed */
        PM_FAIL,                /* all I/O fails */
 };
@@ -198,7 +199,6 @@ struct pool {
 };
 
 static enum pool_mode get_pool_mode(struct pool *pool);
-static void out_of_data_space(struct pool *pool);
 static void metadata_operation_failed(struct pool *pool, const char *op, int r);
 
 /*
@@ -226,6 +226,7 @@ struct thin_c {
 
        struct pool *pool;
        struct dm_thin_device *td;
+       bool requeue_mode:1;
 };
 
 /*----------------------------------------------------------------*/
@@ -369,14 +370,18 @@ struct dm_thin_endio_hook {
        struct dm_thin_new_mapping *overwrite_mapping;
 };
 
-static void __requeue_bio_list(struct thin_c *tc, struct bio_list *master)
+static void requeue_bio_list(struct thin_c *tc, struct bio_list *master)
 {
        struct bio *bio;
        struct bio_list bios;
+       unsigned long flags;
 
        bio_list_init(&bios);
+
+       spin_lock_irqsave(&tc->pool->lock, flags);
        bio_list_merge(&bios, master);
        bio_list_init(master);
+       spin_unlock_irqrestore(&tc->pool->lock, flags);
 
        while ((bio = bio_list_pop(&bios))) {
                struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
@@ -391,12 +396,26 @@ static void __requeue_bio_list(struct thin_c *tc, struct bio_list *master)
 static void requeue_io(struct thin_c *tc)
 {
        struct pool *pool = tc->pool;
+
+       requeue_bio_list(tc, &pool->deferred_bios);
+       requeue_bio_list(tc, &pool->retry_on_resume_list);
+}
+
+static void error_retry_list(struct pool *pool)
+{
+       struct bio *bio;
        unsigned long flags;
+       struct bio_list bios;
+
+       bio_list_init(&bios);
 
        spin_lock_irqsave(&pool->lock, flags);
-       __requeue_bio_list(tc, &pool->deferred_bios);
-       __requeue_bio_list(tc, &pool->retry_on_resume_list);
+       bio_list_merge(&bios, &pool->retry_on_resume_list);
+       bio_list_init(&pool->retry_on_resume_list);
        spin_unlock_irqrestore(&pool->lock, flags);
+
+       while ((bio = bio_list_pop(&bios)))
+               bio_io_error(bio);
 }
 
 /*
@@ -925,13 +944,15 @@ static void check_low_water_mark(struct pool *pool, dm_block_t free_blocks)
        }
 }
 
+static void set_pool_mode(struct pool *pool, enum pool_mode new_mode);
+
 static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
 {
        int r;
        dm_block_t free_blocks;
        struct pool *pool = tc->pool;
 
-       if (get_pool_mode(pool) != PM_WRITE)
+       if (WARN_ON(get_pool_mode(pool) != PM_WRITE))
                return -EINVAL;
 
        r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
@@ -958,7 +979,7 @@ static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
                }
 
                if (!free_blocks) {
-                       out_of_data_space(pool);
+                       set_pool_mode(pool, PM_OUT_OF_DATA_SPACE);
                        return -ENOSPC;
                }
        }
@@ -988,15 +1009,32 @@ static void retry_on_resume(struct bio *bio)
        spin_unlock_irqrestore(&pool->lock, flags);
 }
 
-static void handle_unserviceable_bio(struct pool *pool, struct bio *bio)
+static bool should_error_unserviceable_bio(struct pool *pool)
 {
-       /*
-        * When pool is read-only, no cell locking is needed because
-        * nothing is changing.
-        */
-       WARN_ON_ONCE(get_pool_mode(pool) != PM_READ_ONLY);
+       enum pool_mode m = get_pool_mode(pool);
+
+       switch (m) {
+       case PM_WRITE:
+               /* Shouldn't get here */
+               DMERR_LIMIT("bio unserviceable, yet pool is in PM_WRITE mode");
+               return true;
+
+       case PM_OUT_OF_DATA_SPACE:
+               return pool->pf.error_if_no_space;
 
-       if (pool->pf.error_if_no_space)
+       case PM_READ_ONLY:
+       case PM_FAIL:
+               return true;
+       default:
+               /* Shouldn't get here */
+               DMERR_LIMIT("bio unserviceable, yet pool has an unknown mode");
+               return true;
+       }
+}
+
+static void handle_unserviceable_bio(struct pool *pool, struct bio *bio)
+{
+       if (should_error_unserviceable_bio(pool))
                bio_io_error(bio);
        else
                retry_on_resume(bio);
@@ -1007,11 +1045,20 @@ static void retry_bios_on_resume(struct pool *pool, struct dm_bio_prison_cell *c
        struct bio *bio;
        struct bio_list bios;
 
+       if (should_error_unserviceable_bio(pool)) {
+               cell_error(pool, cell);
+               return;
+       }
+
        bio_list_init(&bios);
        cell_release(pool, cell, &bios);
 
-       while ((bio = bio_list_pop(&bios)))
-               handle_unserviceable_bio(pool, bio);
+       if (should_error_unserviceable_bio(pool))
+               while ((bio = bio_list_pop(&bios)))
+                       bio_io_error(bio);
+       else
+               while ((bio = bio_list_pop(&bios)))
+                       retry_on_resume(bio);
 }
 
 static void process_discard(struct thin_c *tc, struct bio *bio)
@@ -1296,6 +1343,11 @@ static void process_bio_read_only(struct thin_c *tc, struct bio *bio)
        }
 }
 
+static void process_bio_success(struct thin_c *tc, struct bio *bio)
+{
+       bio_endio(bio, 0);
+}
+
 static void process_bio_fail(struct thin_c *tc, struct bio *bio)
 {
        bio_io_error(bio);
@@ -1328,6 +1380,11 @@ static void process_deferred_bios(struct pool *pool)
                struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
                struct thin_c *tc = h->tc;
 
+               if (tc->requeue_mode) {
+                       bio_endio(bio, DM_ENDIO_REQUEUE);
+                       continue;
+               }
+
                /*
                 * If we've got no free new_mapping structs, and processing
                 * this bio might require one, we pause until there are some
@@ -1394,51 +1451,134 @@ static void do_waker(struct work_struct *ws)
 
 /*----------------------------------------------------------------*/
 
+struct noflush_work {
+       struct work_struct worker;
+       struct thin_c *tc;
+
+       atomic_t complete;
+       wait_queue_head_t wait;
+};
+
+static void complete_noflush_work(struct noflush_work *w)
+{
+       atomic_set(&w->complete, 1);
+       wake_up(&w->wait);
+}
+
+static void do_noflush_start(struct work_struct *ws)
+{
+       struct noflush_work *w = container_of(ws, struct noflush_work, worker);
+       w->tc->requeue_mode = true;
+       requeue_io(w->tc);
+       complete_noflush_work(w);
+}
+
+static void do_noflush_stop(struct work_struct *ws)
+{
+       struct noflush_work *w = container_of(ws, struct noflush_work, worker);
+       w->tc->requeue_mode = false;
+       complete_noflush_work(w);
+}
+
+static void noflush_work(struct thin_c *tc, void (*fn)(struct work_struct *))
+{
+       struct noflush_work w;
+
+       INIT_WORK(&w.worker, fn);
+       w.tc = tc;
+       atomic_set(&w.complete, 0);
+       init_waitqueue_head(&w.wait);
+
+       queue_work(tc->pool->wq, &w.worker);
+
+       wait_event(w.wait, atomic_read(&w.complete));
+}
+
+/*----------------------------------------------------------------*/
+
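
noflush_work() above is a synchronous bounce into the pool's worker thread: the work item lives on the caller's stack, and the caller sleeps on a waitqueue until the worker sets the flag. Running do_noflush_start()/do_noflush_stop() in the worker means requeue_mode is only ever written from the workqueue, so the bios processed there see a consistent value without extra locking. The idiom in isolation (names hypothetical):

struct sync_work {
	struct work_struct worker;
	atomic_t complete;
	wait_queue_head_t wait;
};

/* fn must end with: atomic_set(&w->complete, 1); wake_up(&w->wait); */
static void run_in_worker(struct workqueue_struct *wq,
			  void (*fn)(struct work_struct *))
{
	struct sync_work w;

	INIT_WORK(&w.worker, fn);	/* fn finds w via container_of() */
	atomic_set(&w.complete, 0);
	init_waitqueue_head(&w.wait);

	queue_work(wq, &w.worker);
	/* w is on our stack, so we must not return before the
	 * worker has signalled completion */
	wait_event(w.wait, atomic_read(&w.complete));
}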
 static enum pool_mode get_pool_mode(struct pool *pool)
 {
        return pool->pf.mode;
 }
 
+static void notify_of_pool_mode_change(struct pool *pool, const char *new_mode)
+{
+       dm_table_event(pool->ti->table);
+       DMINFO("%s: switching pool to %s mode",
+              dm_device_name(pool->pool_md), new_mode);
+}
+
 static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
 {
-       int r;
-       enum pool_mode old_mode = pool->pf.mode;
+       struct pool_c *pt = pool->ti->private;
+       bool needs_check = dm_pool_metadata_needs_check(pool->pmd);
+       enum pool_mode old_mode = get_pool_mode(pool);
+
+       /*
+        * Never allow the pool to transition to PM_WRITE mode if user
+        * intervention is required to verify metadata and data consistency.
+        */
+       if (new_mode == PM_WRITE && needs_check) {
+               DMERR("%s: unable to switch pool to write mode until repaired.",
+                     dm_device_name(pool->pool_md));
+               if (old_mode != new_mode)
+                       new_mode = old_mode;
+               else
+                       new_mode = PM_READ_ONLY;
+       }
+       /*
+        * If we were in PM_FAIL mode, rollback of metadata failed.  We're
+        * not going to recover without a thin_repair.  So we never let the
+        * pool move out of the old mode.
+        */
+       if (old_mode == PM_FAIL)
+               new_mode = old_mode;
 
        switch (new_mode) {
        case PM_FAIL:
                if (old_mode != new_mode)
-                       DMERR("%s: switching pool to failure mode",
-                             dm_device_name(pool->pool_md));
+                       notify_of_pool_mode_change(pool, "failure");
                dm_pool_metadata_read_only(pool->pmd);
                pool->process_bio = process_bio_fail;
                pool->process_discard = process_bio_fail;
                pool->process_prepared_mapping = process_prepared_mapping_fail;
                pool->process_prepared_discard = process_prepared_discard_fail;
+
+               error_retry_list(pool);
                break;
 
        case PM_READ_ONLY:
                if (old_mode != new_mode)
-                       DMERR("%s: switching pool to read-only mode",
-                             dm_device_name(pool->pool_md));
-               r = dm_pool_abort_metadata(pool->pmd);
-               if (r) {
-                       DMERR("%s: aborting transaction failed",
-                             dm_device_name(pool->pool_md));
-                       new_mode = PM_FAIL;
-                       set_pool_mode(pool, new_mode);
-               } else {
-                       dm_pool_metadata_read_only(pool->pmd);
-                       pool->process_bio = process_bio_read_only;
-                       pool->process_discard = process_discard;
-                       pool->process_prepared_mapping = process_prepared_mapping_fail;
-                       pool->process_prepared_discard = process_prepared_discard_passdown;
-               }
+                       notify_of_pool_mode_change(pool, "read-only");
+               dm_pool_metadata_read_only(pool->pmd);
+               pool->process_bio = process_bio_read_only;
+               pool->process_discard = process_bio_success;
+               pool->process_prepared_mapping = process_prepared_mapping_fail;
+               pool->process_prepared_discard = process_prepared_discard_passdown;
+
+               error_retry_list(pool);
+               break;
+
+       case PM_OUT_OF_DATA_SPACE:
+               /*
+                * Ideally we'd never hit this state; the low water mark
+                * would trigger userland to extend the pool before we
+                * completely run out of data space.  However, many small
+                * IOs to unprovisioned space can consume data space at an
+                * alarming rate.  Adjust your low water mark if you're
+                * frequently seeing this mode.
+                */
+               if (old_mode != new_mode)
+                       notify_of_pool_mode_change(pool, "out-of-data-space");
+               pool->process_bio = process_bio_read_only;
+               pool->process_discard = process_discard;
+               pool->process_prepared_mapping = process_prepared_mapping;
+               pool->process_prepared_discard = process_prepared_discard_passdown;
                break;
 
        case PM_WRITE:
                if (old_mode != new_mode)
-                       DMINFO("%s: switching pool to write mode",
-                              dm_device_name(pool->pool_md));
+                       notify_of_pool_mode_change(pool, "write");
                dm_pool_metadata_read_write(pool->pmd);
                pool->process_bio = process_bio;
                pool->process_discard = process_discard;
@@ -1448,32 +1588,35 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
        }
 
        pool->pf.mode = new_mode;
+       /*
+        * The pool mode may have changed, sync it so bind_control_target()
+        * doesn't cause an unexpected mode transition on resume.
+        */
+       pt->adjusted_pf.mode = new_mode;
 }
 
-/*
- * Rather than calling set_pool_mode directly, use these which describe the
- * reason for mode degradation.
- */
-static void out_of_data_space(struct pool *pool)
+static void abort_transaction(struct pool *pool)
 {
-       DMERR_LIMIT("%s: no free data space available.",
-                   dm_device_name(pool->pool_md));
-       set_pool_mode(pool, PM_READ_ONLY);
+       const char *dev_name = dm_device_name(pool->pool_md);
+
+       DMERR_LIMIT("%s: aborting current metadata transaction", dev_name);
+       if (dm_pool_abort_metadata(pool->pmd)) {
+               DMERR("%s: failed to abort metadata transaction", dev_name);
+               set_pool_mode(pool, PM_FAIL);
+       }
+
+       if (dm_pool_metadata_set_needs_check(pool->pmd)) {
+               DMERR("%s: failed to set 'needs_check' flag in metadata", dev_name);
+               set_pool_mode(pool, PM_FAIL);
+       }
 }
 
 static void metadata_operation_failed(struct pool *pool, const char *op, int r)
 {
-       dm_block_t free_blocks;
-
        DMERR_LIMIT("%s: metadata operation '%s' failed: error = %d",
                    dm_device_name(pool->pool_md), op, r);
 
-       if (r == -ENOSPC &&
-           !dm_pool_get_free_metadata_block_count(pool->pmd, &free_blocks) &&
-           !free_blocks)
-               DMERR_LIMIT("%s: no free metadata space available.",
-                           dm_device_name(pool->pool_md));
-
+       abort_transaction(pool);
        set_pool_mode(pool, PM_READ_ONLY);
 }
 
@@ -1524,6 +1667,11 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio)
 
        thin_hook_bio(tc, bio);
 
+       if (tc->requeue_mode) {
+               bio_endio(bio, DM_ENDIO_REQUEUE);
+               return DM_MAPIO_SUBMITTED;
+       }
+
        if (get_pool_mode(tc->pool) == PM_FAIL) {
                bio_io_error(bio);
                return DM_MAPIO_SUBMITTED;
@@ -1687,7 +1835,7 @@ static int bind_control_target(struct pool *pool, struct dm_target *ti)
        /*
         * We want to make sure that a pool in PM_FAIL mode is never upgraded.
         */
-       enum pool_mode old_mode = pool->pf.mode;
+       enum pool_mode old_mode = get_pool_mode(pool);
        enum pool_mode new_mode = pt->adjusted_pf.mode;
 
        /*
@@ -1701,16 +1849,6 @@ static int bind_control_target(struct pool *pool, struct dm_target *ti)
        pool->pf = pt->adjusted_pf;
        pool->low_water_blocks = pt->low_water_blocks;
 
-       /*
-        * If we were in PM_FAIL mode, rollback of metadata failed.  We're
-        * not going to recover without a thin_repair.  So we never let the
-        * pool move out of the old mode.  On the other hand a PM_READ_ONLY
-        * may have been due to a lack of metadata or data space, and may
-        * now work (ie. if the underlying devices have been resized).
-        */
-       if (old_mode == PM_FAIL)
-               new_mode = old_mode;
-
        set_pool_mode(pool, new_mode);
 
        return 0;
@@ -2253,6 +2391,12 @@ static int maybe_resize_data_dev(struct dm_target *ti, bool *need_commit)
                return -EINVAL;
 
        } else if (data_size > sb_data_size) {
+               if (dm_pool_metadata_needs_check(pool->pmd)) {
+                       DMERR("%s: unable to grow the data device until repaired.",
+                             dm_device_name(pool->pool_md));
+                       return 0;
+               }
+
                if (sb_data_size)
                        DMINFO("%s: growing the data device from %llu to %llu blocks",
                               dm_device_name(pool->pool_md),
@@ -2294,6 +2438,12 @@ static int maybe_resize_metadata_dev(struct dm_target *ti, bool *need_commit)
                return -EINVAL;
 
        } else if (metadata_dev_size > sb_metadata_dev_size) {
+               if (dm_pool_metadata_needs_check(pool->pmd)) {
+                       DMERR("%s: unable to grow the metadata device until repaired.",
+                             dm_device_name(pool->pool_md));
+                       return 0;
+               }
+
                warn_if_metadata_device_too_big(pool->md_dev);
                DMINFO("%s: growing the metadata device from %llu to %llu blocks",
                       dm_device_name(pool->pool_md),
@@ -2681,7 +2831,9 @@ static void pool_status(struct dm_target *ti, status_type_t type,
                else
                        DMEMIT("- ");
 
-               if (pool->pf.mode == PM_READ_ONLY)
+               if (pool->pf.mode == PM_OUT_OF_DATA_SPACE)
+                       DMEMIT("out_of_data_space ");
+               else if (pool->pf.mode == PM_READ_ONLY)
                        DMEMIT("ro ");
                else
                        DMEMIT("rw ");
@@ -2795,7 +2947,7 @@ static struct target_type pool_target = {
        .name = "thin-pool",
        .features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
                    DM_TARGET_IMMUTABLE,
-       .version = {1, 10, 0},
+       .version = {1, 11, 0},
        .module = THIS_MODULE,
        .ctr = pool_ctr,
        .dtr = pool_dtr,
@@ -2997,10 +3149,23 @@ static int thin_endio(struct dm_target *ti, struct bio *bio, int err)
        return 0;
 }
 
-static void thin_postsuspend(struct dm_target *ti)
+static void thin_presuspend(struct dm_target *ti)
 {
+       struct thin_c *tc = ti->private;
+
        if (dm_noflush_suspending(ti))
-               requeue_io((struct thin_c *)ti->private);
+               noflush_work(tc, do_noflush_start);
+}
+
+static void thin_postsuspend(struct dm_target *ti)
+{
+       struct thin_c *tc = ti->private;
+
+       /*
+        * The dm_noflush_suspending flag has been cleared by now, so
+        * unfortunately we must always run this.
+        */
+       noflush_work(tc, do_noflush_stop);
 }
 
 /*
@@ -3085,12 +3250,13 @@ static int thin_iterate_devices(struct dm_target *ti,
 
 static struct target_type thin_target = {
        .name = "thin",
-       .version = {1, 10, 0},
+       .version = {1, 11, 0},
        .module = THIS_MODULE,
        .ctr = thin_ctr,
        .dtr = thin_dtr,
        .map = thin_map,
        .end_io = thin_endio,
+       .presuspend = thin_presuspend,
        .postsuspend = thin_postsuspend,
        .status = thin_status,
        .iterate_devices = thin_iterate_devices,
index 19b268795415b4a1eaf519001511275774a7f96e..0c2dec7aec20fd798d45b24d92156b3f85f4aae0 100644
@@ -6,3 +6,13 @@ config DM_PERSISTENT_DATA
        ---help---
         Library providing immutable on-disk data structure support for
         device-mapper targets such as the thin provisioning target.
+
+config DM_DEBUG_BLOCK_STACK_TRACING
+       boolean "Keep stack trace of persistent data block lock holders"
+       depends on STACKTRACE_SUPPORT && DM_PERSISTENT_DATA
+       select STACKTRACE
+       ---help---
+        Enable this for messages that may help debug problems with the
+        block manager locking used by thin provisioning and caching.
+
+        If unsure, say N.
index e9bdd462f4f51a7cdcf917c6abbac85a80f58eaf..786b689bdfc7fe0c42d9a651a2369ffadcdc2382 100644
@@ -91,6 +91,69 @@ struct block_op {
        dm_block_t block;
 };
 
+struct bop_ring_buffer {
+       unsigned begin;
+       unsigned end;
+       struct block_op bops[MAX_RECURSIVE_ALLOCATIONS + 1];
+};
+
+static void brb_init(struct bop_ring_buffer *brb)
+{
+       brb->begin = 0;
+       brb->end = 0;
+}
+
+static bool brb_empty(struct bop_ring_buffer *brb)
+{
+       return brb->begin == brb->end;
+}
+
+static unsigned brb_next(struct bop_ring_buffer *brb, unsigned old)
+{
+       unsigned r = old + 1;
+       return (r >= (sizeof(brb->bops) / sizeof(*brb->bops))) ? 0 : r;
+}
+
+static int brb_push(struct bop_ring_buffer *brb,
+                   enum block_op_type type, dm_block_t b)
+{
+       struct block_op *bop;
+       unsigned next = brb_next(brb, brb->end);
+
+       /*
+        * We don't allow the last bop to be filled, this way we can
+        * differentiate between full and empty.
+        */
+       if (next == brb->begin)
+               return -ENOMEM;
+
+       bop = brb->bops + brb->end;
+       bop->type = type;
+       bop->block = b;
+
+       brb->end = next;
+
+       return 0;
+}
+
+static int brb_pop(struct bop_ring_buffer *brb, struct block_op *result)
+{
+       struct block_op *bop;
+
+       if (brb_empty(brb))
+               return -ENODATA;
+
+       bop = brb->bops + brb->begin;
+       result->type = bop->type;
+       result->block = bop->block;
+
+       brb->begin = brb_next(brb, brb->begin);
+
+       return 0;
+}
+
+/*----------------------------------------------------------------*/
+
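
brb_push()/brb_pop() above are the classic one-slot-wasted circular buffer: begin == end means empty, and a push that would advance end onto begin is refused, so a full buffer is never mistaken for an empty one. With MAX_RECURSIVE_ALLOCATIONS + 1 slots the ring therefore holds up to MAX_RECURSIVE_ALLOCATIONS block ops. The invariant in a standalone, compilable miniature:

#include <stdio.h>

#define SLOTS 4				/* holds at most SLOTS - 1 items */

static unsigned buf[SLOTS], begin, end;

static unsigned next(unsigned i) { return (i + 1) % SLOTS; }

static int push(unsigned v)
{
	if (next(end) == begin)
		return -1;		/* full: the last slot stays empty */
	buf[end] = v;
	end = next(end);
	return 0;
}

static int pop(unsigned *v)
{
	if (begin == end)
		return -1;		/* empty */
	*v = buf[begin];
	begin = next(begin);
	return 0;
}

int main(void)
{
	unsigned v, i;

	for (i = 0; i < SLOTS; i++)	/* the fourth push must fail */
		printf("push %u -> %d\n", i, push(i));
	while (pop(&v) == 0)
		printf("pop -> %u\n", v);
	return 0;
}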
 struct sm_metadata {
        struct dm_space_map sm;
 
@@ -101,25 +164,20 @@ struct sm_metadata {
 
        unsigned recursion_count;
        unsigned allocated_this_transaction;
-       unsigned nr_uncommitted;
-       struct block_op uncommitted[MAX_RECURSIVE_ALLOCATIONS];
+       struct bop_ring_buffer uncommitted;
 
        struct threshold threshold;
 };
 
 static int add_bop(struct sm_metadata *smm, enum block_op_type type, dm_block_t b)
 {
-       struct block_op *op;
+       int r = brb_push(&smm->uncommitted, type, b);
 
-       if (smm->nr_uncommitted == MAX_RECURSIVE_ALLOCATIONS) {
+       if (r) {
                DMERR("too many recursive allocations");
                return -ENOMEM;
        }
 
-       op = smm->uncommitted + smm->nr_uncommitted++;
-       op->type = type;
-       op->block = b;
-
        return 0;
 }
 
@@ -158,11 +216,17 @@ static int out(struct sm_metadata *smm)
                return -ENOMEM;
        }
 
-       if (smm->recursion_count == 1 && smm->nr_uncommitted) {
-               while (smm->nr_uncommitted && !r) {
-                       smm->nr_uncommitted--;
-                       r = commit_bop(smm, smm->uncommitted +
-                                      smm->nr_uncommitted);
+       if (smm->recursion_count == 1) {
+               while (!brb_empty(&smm->uncommitted)) {
+                       struct block_op bop;
+
+                       r = brb_pop(&smm->uncommitted, &bop);
+                       if (r) {
+                               DMERR("bug in bop ring buffer");
+                               break;
+                       }
+
+                       r = commit_bop(smm, &bop);
                        if (r)
                                break;
                }
@@ -217,7 +281,8 @@ static int sm_metadata_get_nr_free(struct dm_space_map *sm, dm_block_t *count)
 static int sm_metadata_get_count(struct dm_space_map *sm, dm_block_t b,
                                 uint32_t *result)
 {
-       int r, i;
+       int r;
+       unsigned i;
        struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
        unsigned adjustment = 0;
 
@@ -225,8 +290,10 @@ static int sm_metadata_get_count(struct dm_space_map *sm, dm_block_t b,
         * We may have some uncommitted adjustments to add.  This list
         * should always be really short.
         */
-       for (i = 0; i < smm->nr_uncommitted; i++) {
-               struct block_op *op = smm->uncommitted + i;
+       for (i = smm->uncommitted.begin;
+            i != smm->uncommitted.end;
+            i = brb_next(&smm->uncommitted, i)) {
+               struct block_op *op = smm->uncommitted.bops + i;
 
                if (op->block != b)
                        continue;
@@ -254,7 +321,8 @@ static int sm_metadata_get_count(struct dm_space_map *sm, dm_block_t b,
 static int sm_metadata_count_is_more_than_one(struct dm_space_map *sm,
                                              dm_block_t b, int *result)
 {
-       int r, i, adjustment = 0;
+       int r, adjustment = 0;
+       unsigned i;
        struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
        uint32_t rc;
 
@@ -262,8 +330,11 @@ static int sm_metadata_count_is_more_than_one(struct dm_space_map *sm,
         * We may have some uncommitted adjustments to add.  This list
         * should always be really short.
         */
-       for (i = 0; i < smm->nr_uncommitted; i++) {
-               struct block_op *op = smm->uncommitted + i;
+       for (i = smm->uncommitted.begin;
+            i != smm->uncommitted.end;
+            i = brb_next(&smm->uncommitted, i)) {
+
+               struct block_op *op = smm->uncommitted.bops + i;
 
                if (op->block != b)
                        continue;
@@ -671,7 +742,7 @@ int dm_sm_metadata_create(struct dm_space_map *sm,
        smm->begin = superblock + 1;
        smm->recursion_count = 0;
        smm->allocated_this_transaction = 0;
-       smm->nr_uncommitted = 0;
+       brb_init(&smm->uncommitted);
        threshold_init(&smm->threshold);
 
        memcpy(&smm->sm, &bootstrap_ops, sizeof(smm->sm));
@@ -715,7 +786,7 @@ int dm_sm_metadata_open(struct dm_space_map *sm,
        smm->begin = 0;
        smm->recursion_count = 0;
        smm->allocated_this_transaction = 0;
-       smm->nr_uncommitted = 0;
+       brb_init(&smm->uncommitted);
        threshold_init(&smm->threshold);
 
        memcpy(&smm->old_ll, &smm->ll, sizeof(smm->old_ll));
index 6d20fbde8d43502ca6a211c2cd0b83615ea051a0..dcde56057fe14f7bb64a8221db03b2d72a22d842 100644
@@ -181,7 +181,7 @@ static inline int __agg_has_partner(struct aggregator *agg)
  */
 static inline void __disable_port(struct port *port)
 {
-       bond_set_slave_inactive_flags(port->slave);
+       bond_set_slave_inactive_flags(port->slave, BOND_SLAVE_NOTIFY_LATER);
 }
 
 /**
@@ -193,7 +193,7 @@ static inline void __enable_port(struct port *port)
        struct slave *slave = port->slave;
 
        if ((slave->link == BOND_LINK_UP) && IS_UP(slave->dev))
-               bond_set_slave_active_flags(slave);
+               bond_set_slave_active_flags(slave, BOND_SLAVE_NOTIFY_LATER);
 }
 
 /**
@@ -2062,6 +2062,7 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
        struct list_head *iter;
        struct slave *slave;
        struct port *port;
+       bool should_notify_rtnl = BOND_SLAVE_NOTIFY_LATER;
 
        read_lock(&bond->lock);
        rcu_read_lock();
@@ -2119,8 +2120,19 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
        }
 
 re_arm:
+       bond_for_each_slave_rcu(bond, slave, iter) {
+               if (slave->should_notify) {
+                       should_notify_rtnl = BOND_SLAVE_NOTIFY_NOW;
+                       break;
+               }
+       }
        rcu_read_unlock();
        read_unlock(&bond->lock);
+
+       if (should_notify_rtnl && rtnl_trylock()) {
+               bond_slave_state_notify(bond);
+               rtnl_unlock();
+       }
        queue_delayed_work(bond->wq, &bond->ad_work, ad_delta_in_ticks);
 }
 
index 1c6104d3501d4df22e95beab7b60be63548dcc53..e5628fc725c3fc3885b7deac79074caa41edbae4 100644
@@ -829,21 +829,25 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
        if (bond_is_lb(bond)) {
                bond_alb_handle_active_change(bond, new_active);
                if (old_active)
-                       bond_set_slave_inactive_flags(old_active);
+                       bond_set_slave_inactive_flags(old_active,
+                                                     BOND_SLAVE_NOTIFY_NOW);
                if (new_active)
-                       bond_set_slave_active_flags(new_active);
+                       bond_set_slave_active_flags(new_active,
+                                                   BOND_SLAVE_NOTIFY_NOW);
        } else {
                rcu_assign_pointer(bond->curr_active_slave, new_active);
        }
 
        if (bond->params.mode == BOND_MODE_ACTIVEBACKUP) {
                if (old_active)
-                       bond_set_slave_inactive_flags(old_active);
+                       bond_set_slave_inactive_flags(old_active,
+                                                     BOND_SLAVE_NOTIFY_NOW);
 
                if (new_active) {
                        bool should_notify_peers = false;
 
-                       bond_set_slave_active_flags(new_active);
+                       bond_set_slave_active_flags(new_active,
+                                                   BOND_SLAVE_NOTIFY_NOW);
 
                        if (bond->params.fail_over_mac)
                                bond_do_fail_over_mac(bond, new_active,
@@ -1193,6 +1197,11 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
                return -EBUSY;
        }
 
+       if (bond_dev == slave_dev) {
+               pr_err("%s: cannot enslave bond to itself.\n", bond_dev->name);
+               return -EPERM;
+       }
+
        /* vlan challenged mutual exclusion */
        /* no need to lock since we're protected by rtnl_lock */
        if (slave_dev->features & NETIF_F_VLAN_CHALLENGED) {
@@ -1463,14 +1472,15 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
 
        switch (bond->params.mode) {
        case BOND_MODE_ACTIVEBACKUP:
-               bond_set_slave_inactive_flags(new_slave);
+               bond_set_slave_inactive_flags(new_slave,
+                                             BOND_SLAVE_NOTIFY_NOW);
                break;
        case BOND_MODE_8023AD:
                /* in 802.3ad mode, the internal mechanism
                 * will activate the slaves in the selected
                 * aggregator
                 */
-               bond_set_slave_inactive_flags(new_slave);
+               bond_set_slave_inactive_flags(new_slave, BOND_SLAVE_NOTIFY_NOW);
                /* if this is the first slave */
                if (!prev_slave) {
                        SLAVE_AD_INFO(new_slave).id = 1;
@@ -1488,7 +1498,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
        case BOND_MODE_TLB:
        case BOND_MODE_ALB:
                bond_set_active_slave(new_slave);
-               bond_set_slave_inactive_flags(new_slave);
+               bond_set_slave_inactive_flags(new_slave, BOND_SLAVE_NOTIFY_NOW);
                break;
        default:
                pr_debug("This slave is always active in trunk mode\n");
@@ -1654,9 +1664,6 @@ static int __bond_release_one(struct net_device *bond_dev,
                return -EINVAL;
        }
 
-       /* release the slave from its bond */
-       bond->slave_cnt--;
-
        bond_sysfs_slave_del(slave);
 
        bond_upper_dev_unlink(bond_dev, slave_dev);
@@ -1738,6 +1745,7 @@ static int __bond_release_one(struct net_device *bond_dev,
 
        unblock_netpoll_tx();
        synchronize_rcu();
+       bond->slave_cnt--;
 
        if (!bond_has_slaves(bond)) {
                call_netdevice_notifiers(NETDEV_CHANGEADDR, bond->dev);
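
Deferring bond->slave_cnt-- until after synchronize_rcu() keeps the counter consistent with what concurrent RCU readers can still observe: until the grace period ends, a reader may be mid-walk over a list that still contains the unlinked slave. The release path, reduced to the ordering that matters:

	/* 1. unlink the slave from the RCU-protected lists */
	bond_upper_dev_unlink(bond_dev, slave_dev);

	/* 2. wait for every reader that might still see the slave;
	 *    readers starting after the unlink cannot find it */
	synchronize_rcu();

	/* 3. only now may the bookkeeping report one slave fewer */
	bond->slave_cnt--;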
@@ -2015,7 +2023,8 @@ static void bond_miimon_commit(struct bonding *bond)
 
                        if (bond->params.mode == BOND_MODE_ACTIVEBACKUP ||
                            bond->params.mode == BOND_MODE_8023AD)
-                               bond_set_slave_inactive_flags(slave);
+                               bond_set_slave_inactive_flags(slave,
+                                                             BOND_SLAVE_NOTIFY_NOW);
 
                        pr_info("%s: link status definitely down for interface %s, disabling it\n",
                                bond->dev->name, slave->dev->name);
@@ -2562,7 +2571,8 @@ static void bond_ab_arp_commit(struct bonding *bond)
                                slave->link = BOND_LINK_UP;
                                if (bond->current_arp_slave) {
                                        bond_set_slave_inactive_flags(
-                                               bond->current_arp_slave);
+                                               bond->current_arp_slave,
+                                               BOND_SLAVE_NOTIFY_NOW);
                                        bond->current_arp_slave = NULL;
                                }
 
@@ -2582,7 +2592,8 @@ static void bond_ab_arp_commit(struct bonding *bond)
                                slave->link_failure_count++;
 
                        slave->link = BOND_LINK_DOWN;
-                       bond_set_slave_inactive_flags(slave);
+                       bond_set_slave_inactive_flags(slave,
+                                                     BOND_SLAVE_NOTIFY_NOW);
 
                        pr_info("%s: link status definitely down for interface %s, disabling it\n",
                                bond->dev->name, slave->dev->name);
@@ -2615,17 +2626,17 @@ do_failover:
 
 /*
  * Send ARP probes for active-backup mode ARP monitor.
+ *
+ * Called with rcu_read_lock held.
  */
 static bool bond_ab_arp_probe(struct bonding *bond)
 {
        struct slave *slave, *before = NULL, *new_slave = NULL,
-                    *curr_arp_slave, *curr_active_slave;
+                    *curr_arp_slave = rcu_dereference(bond->current_arp_slave),
+                    *curr_active_slave = rcu_dereference(bond->curr_active_slave);
        struct list_head *iter;
        bool found = false;
-
-       rcu_read_lock();
-       curr_arp_slave = rcu_dereference(bond->current_arp_slave);
-       curr_active_slave = rcu_dereference(bond->curr_active_slave);
+       bool should_notify_rtnl = BOND_SLAVE_NOTIFY_LATER;
 
        if (curr_arp_slave && curr_active_slave)
                pr_info("PROBE: c_arp %s && cas %s BAD\n",
@@ -2634,32 +2645,23 @@ static bool bond_ab_arp_probe(struct bonding *bond)
 
        if (curr_active_slave) {
                bond_arp_send_all(bond, curr_active_slave);
-               rcu_read_unlock();
-               return true;
+               return should_notify_rtnl;
        }
-       rcu_read_unlock();
 
        /* if we don't have a curr_active_slave, search for the next available
         * backup slave from the current_arp_slave and make it the candidate
         * for becoming the curr_active_slave
         */
 
-       if (!rtnl_trylock())
-               return false;
-       /* curr_arp_slave might have gone away */
-       curr_arp_slave = ACCESS_ONCE(bond->current_arp_slave);
-
        if (!curr_arp_slave) {
-               curr_arp_slave = bond_first_slave(bond);
-               if (!curr_arp_slave) {
-                       rtnl_unlock();
-                       return true;
-               }
+               curr_arp_slave = bond_first_slave_rcu(bond);
+               if (!curr_arp_slave)
+                       return should_notify_rtnl;
        }
 
-       bond_set_slave_inactive_flags(curr_arp_slave);
+       bond_set_slave_inactive_flags(curr_arp_slave, BOND_SLAVE_NOTIFY_LATER);
 
-       bond_for_each_slave(bond, slave, iter) {
+       bond_for_each_slave_rcu(bond, slave, iter) {
                if (!found && !before && IS_UP(slave->dev))
                        before = slave;
 
@@ -2677,7 +2679,8 @@ static bool bond_ab_arp_probe(struct bonding *bond)
                        if (slave->link_failure_count < UINT_MAX)
                                slave->link_failure_count++;
 
-                       bond_set_slave_inactive_flags(slave);
+                       bond_set_slave_inactive_flags(slave,
+                                                     BOND_SLAVE_NOTIFY_LATER);
 
                        pr_info("%s: backup interface %s is now down.\n",
                                bond->dev->name, slave->dev->name);
@@ -2689,26 +2692,31 @@ static bool bond_ab_arp_probe(struct bonding *bond)
        if (!new_slave && before)
                new_slave = before;
 
-       if (!new_slave) {
-               rtnl_unlock();
-               return true;
-       }
+       if (!new_slave)
+               goto check_state;
 
        new_slave->link = BOND_LINK_BACK;
-       bond_set_slave_active_flags(new_slave);
+       bond_set_slave_active_flags(new_slave, BOND_SLAVE_NOTIFY_LATER);
        bond_arp_send_all(bond, new_slave);
        new_slave->jiffies = jiffies;
        rcu_assign_pointer(bond->current_arp_slave, new_slave);
-       rtnl_unlock();
 
-       return true;
+check_state:
+       bond_for_each_slave_rcu(bond, slave, iter) {
+               if (slave->should_notify) {
+                       should_notify_rtnl = BOND_SLAVE_NOTIFY_NOW;
+                       break;
+               }
+       }
+       return should_notify_rtnl;
 }
 
 static void bond_activebackup_arp_mon(struct work_struct *work)
 {
        struct bonding *bond = container_of(work, struct bonding,
                                            arp_work.work);
-       bool should_notify_peers = false, should_commit = false;
+       bool should_notify_peers = false;
+       bool should_notify_rtnl = false;
        int delta_in_ticks;
 
        delta_in_ticks = msecs_to_jiffies(bond->params.arp_interval);
@@ -2717,11 +2725,12 @@ static void bond_activebackup_arp_mon(struct work_struct *work)
                goto re_arm;
 
        rcu_read_lock();
+
        should_notify_peers = bond_should_notify_peers(bond);
-       should_commit = bond_ab_arp_inspect(bond);
-       rcu_read_unlock();
 
-       if (should_commit) {
+       if (bond_ab_arp_inspect(bond)) {
+               rcu_read_unlock();
+
                /* Race avoidance with bond_close flush of workqueue */
                if (!rtnl_trylock()) {
                        delta_in_ticks = 1;
@@ -2730,23 +2739,28 @@ static void bond_activebackup_arp_mon(struct work_struct *work)
                }
 
                bond_ab_arp_commit(bond);
+
                rtnl_unlock();
+               rcu_read_lock();
        }
 
-       if (!bond_ab_arp_probe(bond)) {
-               /* rtnl locking failed, re-arm */
-               delta_in_ticks = 1;
-               should_notify_peers = false;
-       }
+       should_notify_rtnl = bond_ab_arp_probe(bond);
+       rcu_read_unlock();
 
 re_arm:
        if (bond->params.arp_interval)
                queue_delayed_work(bond->wq, &bond->arp_work, delta_in_ticks);
 
-       if (should_notify_peers) {
+       if (should_notify_peers || should_notify_rtnl) {
                if (!rtnl_trylock())
                        return;
-               call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, bond->dev);
+
+               if (should_notify_peers)
+                       call_netdevice_notifiers(NETDEV_NOTIFY_PEERS,
+                                                bond->dev);
+               if (should_notify_rtnl)
+                       bond_slave_state_notify(bond);
+
                rtnl_unlock();
        }
 }
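
The monitor above now keeps the whole inspect/probe cycle under rcu_read_lock() and only takes RTNL when there is real work to do: committing link-state changes, or flushing deferred netlink notifications. Condensed to its locking skeleton, the flow is roughly this (a sketch of the control flow already shown above, not additional driver code):

        rcu_read_lock();
        should_notify_rtnl = bond_ab_arp_probe(bond); /* defers RTM_NEWLINK via slave->should_notify */
        rcu_read_unlock();

        if (should_notify_rtnl && rtnl_trylock()) {
                bond_slave_state_notify(bond); /* flush the deferred messages */
                rtnl_unlock();
        }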
@@ -3046,9 +3060,11 @@ static int bond_open(struct net_device *bond_dev)
                bond_for_each_slave(bond, slave, iter) {
                        if ((bond->params.mode == BOND_MODE_ACTIVEBACKUP)
                                && (slave != bond->curr_active_slave)) {
-                               bond_set_slave_inactive_flags(slave);
+                               bond_set_slave_inactive_flags(slave,
+                                                             BOND_SLAVE_NOTIFY_NOW);
                        } else {
-                               bond_set_slave_active_flags(slave);
+                               bond_set_slave_active_flags(slave,
+                                                           BOND_SLAVE_NOTIFY_NOW);
                        }
                }
                read_unlock(&bond->curr_slave_lock);
index 86ccfb9f71cc4dd8c843f40f5eee38b7c0c033ab..2b0fdec695f78fbf77d9cb7c9cd43cb5b7adb30b 100644
@@ -195,7 +195,8 @@ struct slave {
        s8     new_link;
        u8     backup:1,   /* indicates backup slave. Value corresponds with
                              BOND_STATE_ACTIVE and BOND_STATE_BACKUP */
-              inactive:1; /* indicates inactive slave */
+              inactive:1, /* indicates inactive slave */
+              should_notify:1; /* indicates whether the state changed */
        u8     duplex;
        u32    original_mtu;
        u32    link_failure_count;
@@ -303,6 +304,24 @@ static inline void bond_set_backup_slave(struct slave *slave)
        }
 }
 
+static inline void bond_set_slave_state(struct slave *slave,
+                                       int slave_state, bool notify)
+{
+       if (slave->backup == slave_state)
+               return;
+
+       slave->backup = slave_state;
+       if (notify) {
+               rtmsg_ifinfo(RTM_NEWLINK, slave->dev, 0, GFP_KERNEL);
+               slave->should_notify = 0;
+       } else {
+               if (slave->should_notify)
+                       slave->should_notify = 0;
+               else
+                       slave->should_notify = 1;
+       }
+}
+
 static inline void bond_slave_state_change(struct bonding *bond)
 {
        struct list_head *iter;
@@ -316,6 +335,19 @@ static inline void bond_slave_state_change(struct bonding *bond)
        }
 }
 
+static inline void bond_slave_state_notify(struct bonding *bond)
+{
+       struct list_head *iter;
+       struct slave *tmp;
+
+       bond_for_each_slave(bond, tmp, iter) {
+               if (tmp->should_notify) {
+                       rtmsg_ifinfo(RTM_NEWLINK, tmp->dev, 0, GFP_KERNEL);
+                       tmp->should_notify = 0;
+               }
+       }
+}
+
 static inline int bond_slave_state(struct slave *slave)
 {
        return slave->backup;
@@ -343,6 +375,9 @@ static inline bool bond_is_active_slave(struct slave *slave)
 #define BOND_ARP_VALIDATE_ALL          (BOND_ARP_VALIDATE_ACTIVE | \
                                         BOND_ARP_VALIDATE_BACKUP)
 
+#define BOND_SLAVE_NOTIFY_NOW          true
+#define BOND_SLAVE_NOTIFY_LATER                false
+
 static inline int slave_do_arp_validate(struct bonding *bond,
                                        struct slave *slave)
 {
@@ -394,17 +429,19 @@ static inline void bond_netpoll_send_skb(const struct slave *slave,
 }
 #endif
 
-static inline void bond_set_slave_inactive_flags(struct slave *slave)
+static inline void bond_set_slave_inactive_flags(struct slave *slave,
+                                                bool notify)
 {
        if (!bond_is_lb(slave->bond))
-               bond_set_backup_slave(slave);
+               bond_set_slave_state(slave, BOND_STATE_BACKUP, notify);
        if (!slave->bond->params.all_slaves_active)
                slave->inactive = 1;
 }
 
-static inline void bond_set_slave_active_flags(struct slave *slave)
+static inline void bond_set_slave_active_flags(struct slave *slave,
+                                              bool notify)
 {
-       bond_set_active_slave(slave);
+       bond_set_slave_state(slave, BOND_STATE_ACTIVE, notify);
        slave->inactive = 0;
 }
 
index 320bef2dba427f266511330bc11b1a4097368520..61376abdab395cd941f9a118c46e9912e17f4452 100644
 
 #define FLEXCAN_MB_CODE_MASK           (0xf0ffffff)
 
+#define FLEXCAN_TIMEOUT_US             (50)
+
 /*
  * FLEXCAN hardware feature flags
  *
@@ -262,6 +264,22 @@ static inline void flexcan_write(u32 val, void __iomem *addr)
 }
 #endif
 
+static inline int flexcan_transceiver_enable(const struct flexcan_priv *priv)
+{
+       if (!priv->reg_xceiver)
+               return 0;
+
+       return regulator_enable(priv->reg_xceiver);
+}
+
+static inline int flexcan_transceiver_disable(const struct flexcan_priv *priv)
+{
+       if (!priv->reg_xceiver)
+               return 0;
+
+       return regulator_disable(priv->reg_xceiver);
+}
+
 static inline int flexcan_has_and_handle_berr(const struct flexcan_priv *priv,
                                              u32 reg_esr)
 {
@@ -269,26 +287,95 @@ static inline int flexcan_has_and_handle_berr(const struct flexcan_priv *priv,
                (reg_esr & FLEXCAN_ESR_ERR_BUS);
 }
 
-static inline void flexcan_chip_enable(struct flexcan_priv *priv)
+static int flexcan_chip_enable(struct flexcan_priv *priv)
 {
        struct flexcan_regs __iomem *regs = priv->base;
+       unsigned int timeout = FLEXCAN_TIMEOUT_US / 10;
        u32 reg;
 
        reg = flexcan_read(&regs->mcr);
        reg &= ~FLEXCAN_MCR_MDIS;
        flexcan_write(reg, &regs->mcr);
 
-       udelay(10);
+       while (timeout-- && (flexcan_read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK))
+               usleep_range(10, 20);
+
+       if (flexcan_read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK)
+               return -ETIMEDOUT;
+
+       return 0;
 }
 
-static inline void flexcan_chip_disable(struct flexcan_priv *priv)
+static int flexcan_chip_disable(struct flexcan_priv *priv)
 {
        struct flexcan_regs __iomem *regs = priv->base;
+       unsigned int timeout = FLEXCAN_TIMEOUT_US / 10;
        u32 reg;
 
        reg = flexcan_read(&regs->mcr);
        reg |= FLEXCAN_MCR_MDIS;
        flexcan_write(reg, &regs->mcr);
+
+       while (timeout-- && !(flexcan_read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK))
+               usleep_range(10, 20);
+
+       if (!(flexcan_read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK))
+               return -ETIMEDOUT;
+
+       return 0;
+}
+
+static int flexcan_chip_freeze(struct flexcan_priv *priv)
+{
+       struct flexcan_regs __iomem *regs = priv->base;
+       unsigned int timeout = 1000 * 1000 * 10 / priv->can.bittiming.bitrate;
+       u32 reg;
+
+       reg = flexcan_read(&regs->mcr);
+       reg |= FLEXCAN_MCR_HALT;
+       flexcan_write(reg, &regs->mcr);
+
+       while (timeout-- && !(flexcan_read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK))
+               usleep_range(100, 200);
+
+       if (!(flexcan_read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK))
+               return -ETIMEDOUT;
+
+       return 0;
+}
+
+static int flexcan_chip_unfreeze(struct flexcan_priv *priv)
+{
+       struct flexcan_regs __iomem *regs = priv->base;
+       unsigned int timeout = FLEXCAN_TIMEOUT_US / 10;
+       u32 reg;
+
+       reg = flexcan_read(&regs->mcr);
+       reg &= ~FLEXCAN_MCR_HALT;
+       flexcan_write(reg, &regs->mcr);
+
+       while (timeout-- && (flexcan_read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK))
+               usleep_range(10, 20);
+
+       if (flexcan_read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK)
+               return -ETIMEDOUT;
+
+       return 0;
+}
+
+static int flexcan_chip_softreset(struct flexcan_priv *priv)
+{
+       struct flexcan_regs __iomem *regs = priv->base;
+       unsigned int timeout = FLEXCAN_TIMEOUT_US / 10;
+
+       flexcan_write(FLEXCAN_MCR_SOFTRST, &regs->mcr);
+       while (timeout-- && (flexcan_read(&regs->mcr) & FLEXCAN_MCR_SOFTRST))
+               usleep_range(10, 20);
+
+       if (flexcan_read(&regs->mcr) & FLEXCAN_MCR_SOFTRST)
+               return -ETIMEDOUT;
+
+       return 0;
 }
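
The five helpers above share one idiom: flip a mode bit in MCR, then poll the corresponding acknowledge flag with usleep_range() between reads, re-checking once more after the loop so an acknowledge that lands during the final sleep is not misreported as a timeout. Factored out, the idiom would look roughly like this (a hypothetical helper for illustration; the driver keeps the loops open-coded):

        static int flexcan_poll_mcr_ack(struct flexcan_regs __iomem *regs,
                                        u32 ack_bit, bool want_set,
                                        unsigned int timeout)
        {
                while (timeout--) {
                        if (!!(flexcan_read(&regs->mcr) & ack_bit) == want_set)
                                return 0;
                        usleep_range(10, 20);
                }

                /* final check, in case the ack arrived during the last sleep */
                if (!!(flexcan_read(&regs->mcr) & ack_bit) == want_set)
                        return 0;
                return -ETIMEDOUT;
        }

flexcan_chip_freeze() is the one outlier on budgeting: it scales its timeout with the configured bit rate instead of using the fixed FLEXCAN_TIMEOUT_US, and sleeps in 100-200 us steps, since how quickly the core can halt depends on the bus speed.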
 
 static int flexcan_get_berr_counter(const struct net_device *dev,
@@ -709,19 +796,14 @@ static int flexcan_chip_start(struct net_device *dev)
        u32 reg_mcr, reg_ctrl;
 
        /* enable module */
-       flexcan_chip_enable(priv);
+       err = flexcan_chip_enable(priv);
+       if (err)
+               return err;
 
        /* soft reset */
-       flexcan_write(FLEXCAN_MCR_SOFTRST, &regs->mcr);
-       udelay(10);
-
-       reg_mcr = flexcan_read(&regs->mcr);
-       if (reg_mcr & FLEXCAN_MCR_SOFTRST) {
-               netdev_err(dev, "Failed to softreset can module (mcr=0x%08x)\n",
-                          reg_mcr);
-               err = -ENODEV;
-               goto out;
-       }
+       err = flexcan_chip_softreset(priv);
+       if (err)
+               goto out_chip_disable;
 
        flexcan_set_bittiming(dev);
 
@@ -788,16 +870,14 @@ static int flexcan_chip_start(struct net_device *dev)
        if (priv->devtype_data->features & FLEXCAN_HAS_V10_FEATURES)
                flexcan_write(0x0, &regs->rxfgmask);
 
-       if (priv->reg_xceiver)  {
-               err = regulator_enable(priv->reg_xceiver);
-               if (err)
-                       goto out;
-       }
+       err = flexcan_transceiver_enable(priv);
+       if (err)
+               goto out_chip_disable;
 
        /* synchronize with the can bus */
-       reg_mcr = flexcan_read(&regs->mcr);
-       reg_mcr &= ~FLEXCAN_MCR_HALT;
-       flexcan_write(reg_mcr, &regs->mcr);
+       err = flexcan_chip_unfreeze(priv);
+       if (err)
+               goto out_transceiver_disable;
 
        priv->can.state = CAN_STATE_ERROR_ACTIVE;
 
@@ -810,7 +890,9 @@ static int flexcan_chip_start(struct net_device *dev)
 
        return 0;
 
- out:
+ out_transceiver_disable:
+       flexcan_transceiver_disable(priv);
+ out_chip_disable:
        flexcan_chip_disable(priv);
        return err;
 }
@@ -825,18 +907,17 @@ static void flexcan_chip_stop(struct net_device *dev)
 {
        struct flexcan_priv *priv = netdev_priv(dev);
        struct flexcan_regs __iomem *regs = priv->base;
-       u32 reg;
+
+       /* freeze + disable module */
+       flexcan_chip_freeze(priv);
+       flexcan_chip_disable(priv);
 
        /* Disable all interrupts */
        flexcan_write(0, &regs->imask1);
+       flexcan_write(priv->reg_ctrl_default & ~FLEXCAN_CTRL_ERR_ALL,
+                     &regs->ctrl);
 
-       /* Disable + halt module */
-       reg = flexcan_read(&regs->mcr);
-       reg |= FLEXCAN_MCR_MDIS | FLEXCAN_MCR_HALT;
-       flexcan_write(reg, &regs->mcr);
-
-       if (priv->reg_xceiver)
-               regulator_disable(priv->reg_xceiver);
+       flexcan_transceiver_disable(priv);
        priv->can.state = CAN_STATE_STOPPED;
 
        return;
@@ -866,7 +947,7 @@ static int flexcan_open(struct net_device *dev)
        /* start chip and queuing */
        err = flexcan_chip_start(dev);
        if (err)
-               goto out_close;
+               goto out_free_irq;
 
        can_led_event(dev, CAN_LED_EVENT_OPEN);
 
@@ -875,6 +956,8 @@ static int flexcan_open(struct net_device *dev)
 
        return 0;
 
+ out_free_irq:
+       free_irq(dev->irq, dev);
  out_close:
        close_candev(dev);
  out_disable_per:
@@ -945,12 +1028,16 @@ static int register_flexcandev(struct net_device *dev)
                goto out_disable_ipg;
 
        /* select "bus clock", chip must be disabled */
-       flexcan_chip_disable(priv);
+       err = flexcan_chip_disable(priv);
+       if (err)
+               goto out_disable_per;
        reg = flexcan_read(&regs->ctrl);
        reg |= FLEXCAN_CTRL_CLK_SRC;
        flexcan_write(reg, &regs->ctrl);
 
-       flexcan_chip_enable(priv);
+       err = flexcan_chip_enable(priv);
+       if (err)
+               goto out_chip_disable;
 
        /* set freeze, halt and activate FIFO, restrict register access */
        reg = flexcan_read(&regs->mcr);
@@ -967,14 +1054,15 @@ static int register_flexcandev(struct net_device *dev)
        if (!(reg & FLEXCAN_MCR_FEN)) {
                netdev_err(dev, "Could not enable RX FIFO, unsupported core\n");
                err = -ENODEV;
-               goto out_disable_per;
+               goto out_chip_disable;
        }
 
        err = register_candev(dev);
 
- out_disable_per:
        /* disable core and turn off clocks */
+ out_chip_disable:
        flexcan_chip_disable(priv);
+ out_disable_per:
        clk_disable_unprepare(priv->clk_per);
  out_disable_ipg:
        clk_disable_unprepare(priv->clk_ipg);
@@ -1104,9 +1192,10 @@ static int flexcan_probe(struct platform_device *pdev)
 static int flexcan_remove(struct platform_device *pdev)
 {
        struct net_device *dev = platform_get_drvdata(pdev);
+       struct flexcan_priv *priv = netdev_priv(dev);
 
        unregister_flexcandev(dev);
-
+       netif_napi_del(&priv->napi);
        free_candev(dev);
 
        return 0;
@@ -1117,8 +1206,11 @@ static int flexcan_suspend(struct device *device)
 {
        struct net_device *dev = dev_get_drvdata(device);
        struct flexcan_priv *priv = netdev_priv(dev);
+       int err;
 
-       flexcan_chip_disable(priv);
+       err = flexcan_chip_disable(priv);
+       if (err)
+               return err;
 
        if (netif_running(dev)) {
                netif_stop_queue(dev);
@@ -1139,9 +1231,7 @@ static int flexcan_resume(struct device *device)
                netif_device_attach(dev);
                netif_start_queue(dev);
        }
-       flexcan_chip_enable(priv);
-
-       return 0;
+       return flexcan_chip_enable(priv);
 }
 #endif /* CONFIG_PM_SLEEP */
 
index 1f7b5aa114fae3ee3adf589af58e5def31fc84d7..8a7bf7dad89823fadaa7b98f74ae08fe6a56838b 100644
@@ -1484,6 +1484,10 @@ static int b44_open(struct net_device *dev)
        add_timer(&bp->timer);
 
        b44_enable_ints(bp);
+
+       if (bp->flags & B44_FLAG_EXTERNAL_PHY)
+               phy_start(bp->phydev);
+
        netif_start_queue(dev);
 out:
        return err;
@@ -1646,6 +1650,9 @@ static int b44_close(struct net_device *dev)
 
        netif_stop_queue(dev);
 
+       if (bp->flags & B44_FLAG_EXTERNAL_PHY)
+               phy_stop(bp->phydev);
+
        napi_disable(&bp->napi);
 
        del_timer_sync(&bp->timer);
@@ -2222,7 +2229,12 @@ static void b44_adjust_link(struct net_device *dev)
        }
 
        if (status_changed) {
-               b44_check_phy(bp);
+               u32 val = br32(bp, B44_TX_CTRL);
+               if (bp->flags & B44_FLAG_FULL_DUPLEX)
+                       val |= TX_CTRL_DUPLEX;
+               else
+                       val &= ~TX_CTRL_DUPLEX;
+               bw32(bp, B44_TX_CTRL, val);
                phy_print_status(phydev);
        }
 }
index 66c0df78c3ff9685c495626c6301fc85c6aac345..dbcff509dc3f6d62cf48c729563196de8c4c7904 100644
@@ -3875,7 +3875,9 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
                                                     xmit_type);
                }
 
-               /* Add the macs to the parsing BD this is a vf */
+               /* Add the MACs to the parsing BD if this is a VF or if
+                * Tx Switching is enabled.
+                */
                if (IS_VF(bp)) {
                        /* override GRE parameters in BD */
                        bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
@@ -3883,6 +3885,11 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
                                              &pbd_e2->data.mac_addr.src_lo,
                                              eth->h_source);
 
+                       bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi,
+                                             &pbd_e2->data.mac_addr.dst_mid,
+                                             &pbd_e2->data.mac_addr.dst_lo,
+                                             eth->h_dest);
+               } else if (bp->flags & TX_SWITCHING) {
                        bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi,
                                              &pbd_e2->data.mac_addr.dst_mid,
                                              &pbd_e2->data.mac_addr.dst_lo,
index 3167ed6593b0410bc0382294a015e3ac06e1ffed..3b6d0ba86c714d34204bd359d61a54ece62cd139 100644
@@ -6843,8 +6843,7 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
 
                work_mask |= opaque_key;
 
-               if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
-                   (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
+               if (desc->err_vlan & RXD_ERR_MASK) {
                drop_it:
                        tg3_recycle_rx(tnapi, tpr, opaque_key,
                                       desc_idx, *post_ptr);
index ef472385bce47dcea87f43e48f202ffabbddf16d..04321e5a356e45a0f7fc642f3817035d983dbd90 100644
@@ -2608,7 +2608,11 @@ struct tg3_rx_buffer_desc {
 #define RXD_ERR_TOO_SMALL              0x00400000
 #define RXD_ERR_NO_RESOURCES           0x00800000
 #define RXD_ERR_HUGE_FRAME             0x01000000
-#define RXD_ERR_MASK                   0xffff0000
+
+#define RXD_ERR_MASK   (RXD_ERR_BAD_CRC | RXD_ERR_COLLISION |          \
+                        RXD_ERR_LINK_LOST | RXD_ERR_PHY_DECODE |       \
+                        RXD_ERR_MAC_ABRT | RXD_ERR_TOO_SMALL |         \
+                        RXD_ERR_NO_RESOURCES | RXD_ERR_HUGE_FRAME)
 
        u32                             reserved;
        u32                             opaque;
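
The old blanket mask 0xffff0000 treated every upper-half status bit as fatal, including the benign odd-nibble MII indication, which is why tg3_rx() previously needed the explicit RXD_ERR_ODD_NIBBLE_RCVD_MII carve-out removed above. With only the genuinely fatal conditions enumerated in RXD_ERR_MASK, the drop decision collapses to a single test. Schematically (illustration only, not driver code):

        /* before: blanket mask plus a carve-out for one benign error */
        drop = (err_vlan & 0xffff0000) && err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII;

        /* after: only the enumerated fatal errors drop the frame */
        drop = !!(err_vlan & RXD_ERR_MASK);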
index cf64f3d0b60d91a1de68836523c6304086dc9ece..4ad1187e82fb463e9642fd3e73b2850cf1137a01 100644
@@ -707,7 +707,8 @@ bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
                else
                        skb_checksum_none_assert(skb);
 
-               if (flags & BNA_CQ_EF_VLAN)
+               if ((flags & BNA_CQ_EF_VLAN) &&
+                   (bnad->netdev->features & NETIF_F_HW_VLAN_CTAG_RX))
                        __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(cmpl->vlan_tag));
 
                if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
@@ -2094,7 +2095,9 @@ bnad_init_rx_config(struct bnad *bnad, struct bna_rx_config *rx_config)
                rx_config->q1_buf_size = BFI_SMALL_RXBUF_SIZE;
        }
 
-       rx_config->vlan_strip_status = BNA_STATUS_T_ENABLED;
+       rx_config->vlan_strip_status =
+               (bnad->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) ?
+               BNA_STATUS_T_ENABLED : BNA_STATUS_T_DISABLED;
 }
 
 static void
@@ -3245,11 +3248,6 @@ bnad_set_rx_mode(struct net_device *netdev)
                        BNA_RXMODE_ALLMULTI;
        bna_rx_mode_set(bnad->rx_info[0].rx, new_mode, mode_mask, NULL);
 
-       if (bnad->cfg_flags & BNAD_CF_PROMISC)
-               bna_rx_vlan_strip_disable(bnad->rx_info[0].rx);
-       else
-               bna_rx_vlan_strip_enable(bnad->rx_info[0].rx);
-
        spin_unlock_irqrestore(&bnad->bna_lock, flags);
 }
 
@@ -3374,6 +3372,27 @@ bnad_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
        return 0;
 }
 
+static int bnad_set_features(struct net_device *dev, netdev_features_t features)
+{
+       struct bnad *bnad = netdev_priv(dev);
+       netdev_features_t changed = features ^ dev->features;
+
+       if ((changed & NETIF_F_HW_VLAN_CTAG_RX) && netif_running(dev)) {
+               unsigned long flags;
+
+               spin_lock_irqsave(&bnad->bna_lock, flags);
+
+               if (features & NETIF_F_HW_VLAN_CTAG_RX)
+                       bna_rx_vlan_strip_enable(bnad->rx_info[0].rx);
+               else
+                       bna_rx_vlan_strip_disable(bnad->rx_info[0].rx);
+
+               spin_unlock_irqrestore(&bnad->bna_lock, flags);
+       }
+
+       return 0;
+}
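
Together with the hw_features change below, this hook turns VLAN stripping from an always-on setting, one that was also silently disabled behind the user's back whenever promiscuous mode was entered (see the removal above), into an ordinary toggleable offload; flipping it on a running interface, e.g. with ethtool -K <iface> rxvlan off, now takes effect immediately under the bna lock.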
+
 #ifdef CONFIG_NET_POLL_CONTROLLER
 static void
 bnad_netpoll(struct net_device *netdev)
@@ -3421,6 +3440,7 @@ static const struct net_device_ops bnad_netdev_ops = {
        .ndo_change_mtu         = bnad_change_mtu,
        .ndo_vlan_rx_add_vid    = bnad_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid   = bnad_vlan_rx_kill_vid,
+       .ndo_set_features       = bnad_set_features,
 #ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = bnad_netpoll
 #endif
@@ -3433,14 +3453,14 @@ bnad_netdev_init(struct bnad *bnad, bool using_dac)
 
        netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
                NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
-               NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_VLAN_CTAG_TX;
+               NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_VLAN_CTAG_TX |
+               NETIF_F_HW_VLAN_CTAG_RX;
 
        netdev->vlan_features = NETIF_F_SG | NETIF_F_HIGHDMA |
                NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
                NETIF_F_TSO | NETIF_F_TSO6;
 
-       netdev->features |= netdev->hw_features |
-               NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
+       netdev->features |= netdev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
 
        if (using_dac)
                netdev->features |= NETIF_F_HIGHDMA;
index 43ab35fea48d2be9fe6dfcd4d1e9dc10651724be..34e2488767d94d0c6eeaf677ffbfca493d871e54 100644
@@ -6179,6 +6179,7 @@ static struct pci_driver cxgb4_driver = {
        .id_table = cxgb4_pci_tbl,
        .probe    = init_one,
        .remove   = remove_one,
+       .shutdown = remove_one,
        .err_handler = &cxgb4_eeh,
 };
 
index 8d09615da585671a4d0fa9d00dc9cc6ba80f6517..05529e273050e3718c0ca1e105ce05174fdfe618 100644
@@ -350,11 +350,13 @@ struct be_drv_stats {
        u32 roce_drops_crc;
 };
 
+/* A vlan-id of 0xFFFF must be used to clear transparent vlan-tagging */
+#define BE_RESET_VLAN_TAG_ID   0xFFFF
+
 struct be_vf_cfg {
        unsigned char mac_addr[ETH_ALEN];
        int if_handle;
        int pmac_id;
-       u16 def_vid;
        u16 vlan_tag;
        u32 tx_rate;
 };
index 04ac9c6a0d3972d4e18ee91a8ce3a8bf2e141cd7..36c80612e21a3ebe6ee52447e9075dc4022f005e 100644
@@ -913,24 +913,14 @@ static int be_ipv6_tx_stall_chk(struct be_adapter *adapter,
        return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
 }
 
-static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
-                                          struct sk_buff *skb,
-                                          bool *skip_hw_vlan)
+static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
+                                                 struct sk_buff *skb,
+                                                 bool *skip_hw_vlan)
 {
        struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
        unsigned int eth_hdr_len;
        struct iphdr *ip;
 
-       /* Lancer, SH-R ASICs have a bug wherein Packets that are 32 bytes or less
-        * may cause a transmit stall on that port. So the work-around is to
-        * pad short packets (<= 32 bytes) to a 36-byte length.
-        */
-       if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
-               if (skb_padto(skb, 36))
-                       goto tx_drop;
-               skb->len = 36;
-       }
-
        /* For padded packets, BE HW modifies tot_len field in IP header
         * incorrectly when VLAN tag is inserted by HW.
         * For padded packets, Lancer computes incorrect checksum.
@@ -959,7 +949,7 @@ static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
            vlan_tx_tag_present(skb)) {
                skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
                if (unlikely(!skb))
-                       goto tx_drop;
+                       goto err;
        }
 
        /* HW may lockup when VLAN HW tagging is requested on
@@ -981,15 +971,39 @@ static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
            be_vlan_tag_tx_chk(adapter, skb)) {
                skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
                if (unlikely(!skb))
-                       goto tx_drop;
+                       goto err;
        }
 
        return skb;
 tx_drop:
        dev_kfree_skb_any(skb);
+err:
        return NULL;
 }
 
+static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
+                                          struct sk_buff *skb,
+                                          bool *skip_hw_vlan)
+{
+       /* Lancer, SH-R ASICs have a bug wherein packets that are 32 bytes or
+        * less may cause a transmit stall on that port. So the work-around is
+        * to pad short packets (<= 32 bytes) to a 36-byte length.
+        */
+       if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
+               if (skb_padto(skb, 36))
+                       return NULL;
+               skb->len = 36;
+       }
+
+       if (BEx_chip(adapter) || lancer_chip(adapter)) {
+               skb = be_lancer_xmit_workarounds(adapter, skb, skip_hw_vlan);
+               if (!skb)
+                       return NULL;
+       }
+
+       return skb;
+}
+
 static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
 {
        struct be_adapter *adapter = netdev_priv(netdev);
@@ -1157,6 +1171,14 @@ ret:
        return status;
 }
 
+static void be_clear_promisc(struct be_adapter *adapter)
+{
+       adapter->promiscuous = false;
+       adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;
+
+       be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
+}
+
 static void be_set_rx_mode(struct net_device *netdev)
 {
        struct be_adapter *adapter = netdev_priv(netdev);
@@ -1170,9 +1192,7 @@ static void be_set_rx_mode(struct net_device *netdev)
 
        /* BE was previously in promiscuous mode; disable it */
        if (adapter->promiscuous) {
-               adapter->promiscuous = false;
-               be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
-
+               be_clear_promisc(adapter);
                if (adapter->vlans_added)
                        be_vid_config(adapter);
        }
@@ -1287,24 +1307,20 @@ static int be_set_vf_vlan(struct net_device *netdev,
 
        if (vlan || qos) {
                vlan |= qos << VLAN_PRIO_SHIFT;
-               if (vf_cfg->vlan_tag != vlan) {
-                       /* If this is new value, program it. Else skip. */
-                       vf_cfg->vlan_tag = vlan;
+               if (vf_cfg->vlan_tag != vlan)
                        status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
                                                       vf_cfg->if_handle, 0);
-               }
        } else {
                /* Reset Transparent Vlan Tagging. */
-               vf_cfg->vlan_tag = 0;
-               vlan = vf_cfg->def_vid;
-               status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
-                                              vf_cfg->if_handle, 0);
+               status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID,
+                                              vf + 1, vf_cfg->if_handle, 0);
        }
 
-
-       if (status)
+       if (!status)
+               vf_cfg->vlan_tag = vlan;
+       else
                dev_info(&adapter->pdev->dev,
-                               "VLAN %d config on VF %d failed\n", vlan, vf);
+                        "VLAN %d config on VF %d failed\n", vlan, vf);
        return status;
 }
 
@@ -3013,11 +3029,11 @@ static int be_vf_setup_init(struct be_adapter *adapter)
 
 static int be_vf_setup(struct be_adapter *adapter)
 {
+       struct device *dev = &adapter->pdev->dev;
        struct be_vf_cfg *vf_cfg;
-       u16 def_vlan, lnk_speed;
        int status, old_vfs, vf;
-       struct device *dev = &adapter->pdev->dev;
        u32 privileges;
+       u16 lnk_speed;
 
        old_vfs = pci_num_vf(adapter->pdev);
        if (old_vfs) {
@@ -3084,12 +3100,6 @@ static int be_vf_setup(struct be_adapter *adapter)
                if (!status)
                        vf_cfg->tx_rate = lnk_speed;
 
-               status = be_cmd_get_hsw_config(adapter, &def_vlan,
-                                              vf + 1, vf_cfg->if_handle, NULL);
-               if (status)
-                       goto err;
-               vf_cfg->def_vid = def_vlan;
-
                if (!old_vfs)
                        be_cmd_enable_vf(adapter, vf + 1);
        }
index 903362a7b58435878d790b06d9bb2dadbcdede35..479a7cba45c0632e68df427778eab534e73d7580 100644
@@ -389,12 +389,6 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
                        netdev_err(ndev, "Tx DMA memory map failed\n");
                return NETDEV_TX_OK;
        }
-       /* Send it on its way.  Tell FEC it's ready, interrupt when done,
-        * it's the last BD of the frame, and to put the CRC on the end.
-        */
-       status |= (BD_ENET_TX_READY | BD_ENET_TX_INTR
-                       | BD_ENET_TX_LAST | BD_ENET_TX_TC);
-       bdp->cbd_sc = status;
 
        if (fep->bufdesc_ex) {
 
@@ -416,6 +410,13 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
                }
        }
 
+       /* Send it on its way.  Tell FEC it's ready, interrupt when done,
+        * it's the last BD of the frame, and to put the CRC on the end.
+        */
+       status |= (BD_ENET_TX_READY | BD_ENET_TX_INTR
+                       | BD_ENET_TX_LAST | BD_ENET_TX_TC);
+       bdp->cbd_sc = status;
+
        bdp_pre = fec_enet_get_prevdesc(bdp, fep);
        if ((id_entry->driver_data & FEC_QUIRK_ERR006358) &&
            !(bdp_pre->cbd_sc & BD_ENET_TX_READY)) {
index 6b65f77952153f5dd1ceab2d074e456b2a9567c4..7aec6c833973c3c84c13bc3d02ca34c5a6dfef4e 100644
@@ -51,8 +51,8 @@
 
 #define DRV_NAME       "mlx4_core"
 #define PFX            DRV_NAME ": "
-#define DRV_VERSION    "1.1"
-#define DRV_RELDATE    "Dec, 2011"
+#define DRV_VERSION    "2.2-1"
+#define DRV_RELDATE    "Feb, 2014"
 
 #define MLX4_FS_UDP_UC_EN              (1 << 1)
 #define MLX4_FS_TCP_UC_EN              (1 << 2)
index 9ca223bc90fc4fe7445a447828028b8a046f6fcb..b57e8c87a34ea8723ae9316747a69a32386429ea 100644
@@ -57,8 +57,8 @@
 #include "en_port.h"
 
 #define DRV_NAME       "mlx4_en"
-#define DRV_VERSION    "2.0"
-#define DRV_RELDATE    "Dec 2011"
+#define DRV_VERSION    "2.2-1"
+#define DRV_RELDATE    "Feb 2014"
 
 #define MLX4_EN_MSG_LEVEL      (NETIF_MSG_LINK | NETIF_MSG_IFDOWN)
 
index a064f06e0cb8a244d183c3c48acb1754349115dd..23b7e2d35a93bb0598ac76b71d75cfc3ea7afff8 100644
@@ -46,8 +46,8 @@
 #include "mlx5_core.h"
 
 #define DRIVER_NAME "mlx5_core"
-#define DRIVER_VERSION "1.0"
-#define DRIVER_RELDATE "June 2013"
+#define DRIVER_VERSION "2.2-1"
+#define DRIVER_RELDATE "Feb 2014"
 
 MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
 MODULE_DESCRIPTION("Mellanox ConnectX-IB HCA core library");
index 4146664d4d6a43978789081713b858b3f982de0e..27c4f131863bc30618ae5996411122f91685144b 100644
@@ -340,6 +340,7 @@ int qlcnic_83xx_setup_intr(struct qlcnic_adapter *adapter)
                        if (qlcnic_sriov_vf_check(adapter))
                                return -EINVAL;
                        num_msix = 1;
+                       adapter->drv_sds_rings = QLCNIC_SINGLE_RING;
                        adapter->drv_tx_rings = QLCNIC_SINGLE_RING;
                }
        }
index 77f1bce432d2998b3d14b5b6f56c694a877a1ae6..7d4f54912bad526077b145109dc003e532b88d70 100644
@@ -807,7 +807,7 @@ qlcnic_dcb_get_pg_tc_cfg_tx(struct net_device *netdev, int tc, u8 *prio,
            !type->tc_param_valid)
                return;
 
-       if (tc < 0 || (tc > QLC_DCB_MAX_TC))
+       if (tc < 0 || (tc >= QLC_DCB_MAX_TC))
                return;
 
        tc_cfg = &type->tc_cfg[tc];
@@ -843,7 +843,7 @@ static void qlcnic_dcb_get_pg_bwg_cfg_tx(struct net_device *netdev, int pgid,
            !type->tc_param_valid)
                return;
 
-       if (pgid < 0 || pgid > QLC_DCB_MAX_PG)
+       if (pgid < 0 || pgid >= QLC_DCB_MAX_PG)
                return;
 
        pgcfg = &type->pg_cfg[pgid];
index ba78c7481fa3432f32fd7d5a5e7c56b66423b801..1222865cfb7319b4ec085c035c00c862cff6dcd2 100644
@@ -816,9 +816,10 @@ static int qlcnic_82xx_setup_intr(struct qlcnic_adapter *adapter)
 
                if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) {
                        qlcnic_disable_multi_tx(adapter);
+                       adapter->drv_sds_rings = QLCNIC_SINGLE_RING;
 
                        err = qlcnic_enable_msi_legacy(adapter);
-                       if (!err)
+                       if (err)
                                return err;
                }
        }
@@ -3863,7 +3864,7 @@ int qlcnic_validate_rings(struct qlcnic_adapter *adapter, __u32 ring_cnt,
                strcpy(buf, "Tx");
        }
 
-       if (!qlcnic_use_msi_x && !qlcnic_use_msi) {
+       if (!QLCNIC_IS_MSI_FAMILY(adapter)) {
                netdev_err(netdev, "No RSS/TSS support in INT-x mode\n");
                return -EINVAL;
        }
index 09acf15c3a564d1482a6e40960c6ef8dafa6f1cc..e5277a632671a1ca23877fb5ca46ab1397ca3cc2 100644
@@ -13,8 +13,6 @@
 #define QLC_VF_MIN_TX_RATE     100
 #define QLC_VF_MAX_TX_RATE     9999
 #define QLC_MAC_OPCODE_MASK    0x7
-#define QLC_MAC_STAR_ADD       6
-#define QLC_MAC_STAR_DEL       7
 #define QLC_VF_FLOOD_BIT       BIT_16
 #define QLC_FLOOD_MODE         0x5
 
@@ -1206,13 +1204,6 @@ static int qlcnic_sriov_validate_cfg_macvlan(struct qlcnic_adapter *adapter,
        struct qlcnic_vport *vp = vf->vp;
        u8 op, new_op;
 
-       if (((cmd->req.arg[1] & QLC_MAC_OPCODE_MASK) == QLC_MAC_STAR_ADD) ||
-           ((cmd->req.arg[1] & QLC_MAC_OPCODE_MASK) == QLC_MAC_STAR_DEL)) {
-               netdev_err(adapter->netdev, "MAC + any VLAN filter not allowed from VF %d\n",
-                          vf->pci_func);
-               return -EINVAL;
-       }
-
        if (!(cmd->req.arg[1] & BIT_8))
                return -EINVAL;
 
index 91a67ae8f17b9bffc5bf86405a5788055b583ba0..e9779653cd4ce6cee899f92694b071ed5e7cd7a8 100644
@@ -7118,6 +7118,8 @@ rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        }
 
        mutex_init(&tp->wk.mutex);
+       u64_stats_init(&tp->rx_stats.syncp);
+       u64_stats_init(&tp->tx_stats.syncp);
 
        /* Get MAC address */
        for (i = 0; i < ETH_ALEN; i++)
index eb75fbd11a0115c73195f7d820ed051e3f720d71..d7a36829649a7eea1797057e0bb0e5773bc83c0e 100644
@@ -1668,6 +1668,13 @@ void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev)
        struct efx_ptp_data *ptp = efx->ptp_data;
        int code = EFX_QWORD_FIELD(*ev, MCDI_EVENT_CODE);
 
+       if (!ptp) {
+               if (net_ratelimit())
+                       netif_warn(efx, drv, efx->net_dev,
+                                  "Received PTP event but PTP not set up\n");
+               return;
+       }
+
        if (!ptp->enabled)
                return;
 
index a2e7d2c96e3678c309377d3e4dbb416feb5fbc38..078ad0ec859307270e432bd0c20658440eebc0fd 100644
@@ -1705,7 +1705,7 @@ static int stmmac_open(struct net_device *dev)
        priv->dma_rx_size = STMMAC_ALIGN(dma_rxsize);
        priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
 
-       alloc_dma_desc_resources(priv);
+       ret = alloc_dma_desc_resources(priv);
        if (ret < 0) {
                pr_err("%s: DMA descriptors allocation failed\n", __func__);
                goto dma_desc_error;
index 651087b5c8da57c9f9226925ef33f61372986531..ffd4d12acf6dc59e162a7894b88da1eecf933e69 100644
@@ -1164,11 +1164,17 @@ static void cpsw_init_host_port(struct cpsw_priv *priv)
 
 static void cpsw_slave_stop(struct cpsw_slave *slave, struct cpsw_priv *priv)
 {
+       u32 slave_port;
+
+       slave_port = cpsw_get_slave_port(priv, slave->slave_num);
+
        if (!slave->phy)
                return;
        phy_stop(slave->phy);
        phy_disconnect(slave->phy);
        slave->phy = NULL;
+       cpsw_ale_control_set(priv->ale, slave_port,
+                            ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
 }
 
 static int cpsw_ndo_open(struct net_device *ndev)
index a5d21893670d2692cf73da15113a2f116c3f0352..1831fb7cd0174d9afd6696bf029d37a657b07222 100644
@@ -506,6 +506,9 @@ static int macvlan_change_mtu(struct net_device *dev, int new_mtu)
 static struct lock_class_key macvlan_netdev_xmit_lock_key;
 static struct lock_class_key macvlan_netdev_addr_lock_key;
 
+#define ALWAYS_ON_FEATURES \
+       (NETIF_F_SG | NETIF_F_GEN_CSUM | NETIF_F_GSO_SOFTWARE | NETIF_F_LLTX)
+
 #define MACVLAN_FEATURES \
        (NETIF_F_SG | NETIF_F_ALL_CSUM | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \
         NETIF_F_GSO | NETIF_F_TSO | NETIF_F_UFO | NETIF_F_GSO_ROBUST | \
@@ -539,7 +542,7 @@ static int macvlan_init(struct net_device *dev)
        dev->state              = (dev->state & ~MACVLAN_STATE_MASK) |
                                  (lowerdev->state & MACVLAN_STATE_MASK);
        dev->features           = lowerdev->features & MACVLAN_FEATURES;
-       dev->features           |= NETIF_F_LLTX;
+       dev->features           |= ALWAYS_ON_FEATURES;
        dev->gso_max_size       = lowerdev->gso_max_size;
        dev->iflink             = lowerdev->ifindex;
        dev->hard_header_len    = lowerdev->hard_header_len;
@@ -699,7 +702,7 @@ static netdev_features_t macvlan_fix_features(struct net_device *dev,
        features = netdev_increment_features(vlan->lowerdev->features,
                                             features,
                                             mask);
-       features |= NETIF_F_LLTX;
+       features |= ALWAYS_ON_FEATURES;
 
        return features;
 }
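
ALWAYS_ON_FEATURES gathers the purely software-provided features (scatter-gather, checksumming, software GSO, lockless TX) that macvlan can offer regardless of what the lower device advertises; OR-ing the set back in both at init time and in the fix_features hook keeps netdev_increment_features() from masking them away when the lower device lacks them. Previously only NETIF_F_LLTX got this treatment.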
index 82514e72b3d8b47538cc8c75a8e52f361d1361c8..4b970f7624c0f00df17b41e3b4ca13f0846df9c3 100644
@@ -916,6 +916,8 @@ int genphy_read_status(struct phy_device *phydev)
        int err;
        int lpa;
        int lpagb = 0;
+       int common_adv;
+       int common_adv_gb = 0;
 
        /* Update the link, but return if there was an error */
        err = genphy_update_link(phydev);
@@ -937,7 +939,7 @@ int genphy_read_status(struct phy_device *phydev)
 
                        phydev->lp_advertising =
                                mii_stat1000_to_ethtool_lpa_t(lpagb);
-                       lpagb &= adv << 2;
+                       common_adv_gb = lpagb & adv << 2;
                }
 
                lpa = phy_read(phydev, MII_LPA);
@@ -950,25 +952,25 @@ int genphy_read_status(struct phy_device *phydev)
                if (adv < 0)
                        return adv;
 
-               lpa &= adv;
+               common_adv = lpa & adv;
 
                phydev->speed = SPEED_10;
                phydev->duplex = DUPLEX_HALF;
                phydev->pause = 0;
                phydev->asym_pause = 0;
 
-               if (lpagb & (LPA_1000FULL | LPA_1000HALF)) {
+               if (common_adv_gb & (LPA_1000FULL | LPA_1000HALF)) {
                        phydev->speed = SPEED_1000;
 
-                       if (lpagb & LPA_1000FULL)
+                       if (common_adv_gb & LPA_1000FULL)
                                phydev->duplex = DUPLEX_FULL;
-               } else if (lpa & (LPA_100FULL | LPA_100HALF)) {
+               } else if (common_adv & (LPA_100FULL | LPA_100HALF)) {
                        phydev->speed = SPEED_100;
 
-                       if (lpa & LPA_100FULL)
+                       if (common_adv & LPA_100FULL)
                                phydev->duplex = DUPLEX_FULL;
                } else
-                       if (lpa & LPA_10FULL)
+                       if (common_adv & LPA_10FULL)
                                phydev->duplex = DUPLEX_FULL;
 
                if (phydev->duplex == DUPLEX_FULL) {
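
The new locals keep the raw LPA words intact, so phydev->lp_advertising continues to report exactly what the partner offered, while speed/duplex resolution uses the intersection with the local advertisement. The shift by two when building common_adv_gb aligns the register layouts; as background (standard constants from linux/mii.h, stated here as context rather than as part of this patch):

        /* MII_CTRL1000 advertisement bits sit two bits below the matching
         * MII_STAT1000 link-partner bits:
         *   ADVERTISE_1000FULL = 0x0200, LPA_1000FULL = 0x0800 = 0x0200 << 2
         * so the gigabit intersection is taken as: */
        common_adv_gb = lpagb & adv << 2;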
index 8fe9cb7d0f72e9fb7d34307f9a12797772844db1..26f8635b027d3fb44ab12223000badc214098df0 100644
@@ -1686,7 +1686,9 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
                                   TUN_USER_FEATURES | NETIF_F_HW_VLAN_CTAG_TX |
                                   NETIF_F_HW_VLAN_STAG_TX;
                dev->features = dev->hw_features;
-               dev->vlan_features = dev->features;
+               dev->vlan_features = dev->features &
+                                    ~(NETIF_F_HW_VLAN_CTAG_TX |
+                                      NETIF_F_HW_VLAN_STAG_TX);
 
                INIT_LIST_HEAD(&tun->disabled);
                err = tun_attach(tun, file, false);
index 955df81a43589bc30857b56db94f94886ce9cc30..d2e6fdb25e283214bfe40c0ae9544fe3b91a9cd2 100644
@@ -1395,6 +1395,19 @@ static const struct driver_info ax88178a_info = {
        .tx_fixup = ax88179_tx_fixup,
 };
 
+static const struct driver_info dlink_dub1312_info = {
+       .description = "D-Link DUB-1312 USB 3.0 to Gigabit Ethernet Adapter",
+       .bind = ax88179_bind,
+       .unbind = ax88179_unbind,
+       .status = ax88179_status,
+       .link_reset = ax88179_link_reset,
+       .reset = ax88179_reset,
+       .stop = ax88179_stop,
+       .flags = FLAG_ETHER | FLAG_FRAMING_AX,
+       .rx_fixup = ax88179_rx_fixup,
+       .tx_fixup = ax88179_tx_fixup,
+};
+
 static const struct driver_info sitecom_info = {
        .description = "Sitecom USB 3.0 to Gigabit Adapter",
        .bind = ax88179_bind,
@@ -1421,6 +1434,19 @@ static const struct driver_info samsung_info = {
        .tx_fixup = ax88179_tx_fixup,
 };
 
+static const struct driver_info lenovo_info = {
+       .description = "Lenovo OneLinkDock Gigabit LAN",
+       .bind = ax88179_bind,
+       .unbind = ax88179_unbind,
+       .status = ax88179_status,
+       .link_reset = ax88179_link_reset,
+       .reset = ax88179_reset,
+       .stop = ax88179_stop,
+       .flags = FLAG_ETHER | FLAG_FRAMING_AX,
+       .rx_fixup = ax88179_rx_fixup,
+       .tx_fixup = ax88179_tx_fixup,
+};
+
 static const struct usb_device_id products[] = {
 {
        /* ASIX AX88179 10/100/1000 */
@@ -1430,6 +1456,10 @@ static const struct usb_device_id products[] = {
        /* ASIX AX88178A 10/100/1000 */
        USB_DEVICE(0x0b95, 0x178a),
        .driver_info = (unsigned long)&ax88178a_info,
+}, {
+       /* D-Link DUB-1312 USB 3.0 to Gigabit Ethernet Adapter */
+       USB_DEVICE(0x2001, 0x4a00),
+       .driver_info = (unsigned long)&dlink_dub1312_info,
 }, {
        /* Sitecom USB 3.0 to Gigabit Adapter */
        USB_DEVICE(0x0df6, 0x0072),
@@ -1438,6 +1468,10 @@ static const struct usb_device_id products[] = {
        /* Samsung USB Ethernet Adapter */
        USB_DEVICE(0x04e8, 0xa100),
        .driver_info = (unsigned long)&samsung_info,
+}, {
+       /* Lenovo OneLinkDock Gigabit LAN */
+       USB_DEVICE(0x17ef, 0x304b),
+       .driver_info = (unsigned long)&lenovo_info,
 },
        { },
 };
index 2ec2041b62d4eb215bf23f74ad82ba44332b8d3d..5b374370f71c702d1eec4153c21038cc882795cd 100644
@@ -285,7 +285,8 @@ static void veth_setup(struct net_device *dev)
        dev->ethtool_ops = &veth_ethtool_ops;
        dev->features |= NETIF_F_LLTX;
        dev->features |= VETH_FEATURES;
-       dev->vlan_features = dev->features;
+       dev->vlan_features = dev->features &
+                            ~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX);
        dev->destructor = veth_dev_free;
 
        dev->hw_features = VETH_FEATURES;
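
This mirrors the tun change above: vlan_features is the feature set a stacked VLAN device may inherit, and leaving the CTAG/STAG tag-insertion bits in it would, presumably, let an 802.1Q device on top assume the purely software lower device can offload insertion of a second, stacked tag.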
index d75f8edf4fb370300d13bd22adcfe66e754fb5a2..5632a99cbbd24fdd059c182e5b28d1d19b3ea4e9 100644
@@ -1711,7 +1711,8 @@ static int virtnet_probe(struct virtio_device *vdev)
        /* If we can receive ANY GSO packets, we must allocate large ones. */
        if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
            virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) ||
-           virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN))
+           virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN) ||
+           virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_UFO))
                vi->big_packets = true;
 
        if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
index 1cc13569b17b8ed7126a9aae49a2d519c360337b..1b6b4d0cfa97a5df8c2a7e0d631c417377ba17b2 100644
@@ -57,7 +57,7 @@ static const u32 ar9462_2p0_baseband_postamble[][5] = {
        {0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3236605e, 0x32365a5e},
        {0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
        {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
-       {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
+       {0x00009e20, 0x000003a5, 0x000003a5, 0x000003a5, 0x000003a5},
        {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
        {0x00009e3c, 0xcf946220, 0xcf946220, 0xcfd5c782, 0xcfd5c282},
        {0x00009e44, 0x62321e27, 0x62321e27, 0xfe291e27, 0xfe291e27},
@@ -96,7 +96,7 @@ static const u32 ar9462_2p0_baseband_postamble[][5] = {
        {0x0000ae04, 0x001c0000, 0x001c0000, 0x001c0000, 0x00100000},
        {0x0000ae18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
        {0x0000ae1c, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
-       {0x0000ae20, 0x000001b5, 0x000001b5, 0x000001ce, 0x000001ce},
+       {0x0000ae20, 0x000001a6, 0x000001a6, 0x000001aa, 0x000001aa},
        {0x0000b284, 0x00000000, 0x00000000, 0x00000550, 0x00000550},
 };
 
index 11eab9f01fd89ac9bb8f9b665646051a6386ac0a..303ce27964c14c3469d245632c6f5739289520b0 100644
@@ -1534,7 +1534,7 @@ EXPORT_SYMBOL(ath9k_hw_check_nav);
 bool ath9k_hw_check_alive(struct ath_hw *ah)
 {
        int count = 50;
-       u32 reg;
+       u32 reg, last_val;
 
        if (AR_SREV_9300(ah))
                return !ath9k_hw_detect_mac_hang(ah);
@@ -1542,9 +1542,13 @@ bool ath9k_hw_check_alive(struct ath_hw *ah)
        if (AR_SREV_9285_12_OR_LATER(ah))
                return true;
 
+       last_val = REG_READ(ah, AR_OBS_BUS_1);
        do {
                reg = REG_READ(ah, AR_OBS_BUS_1);
+               if (reg != last_val)
+                       return true;
 
+               last_val = reg;
                if ((reg & 0x7E7FFFEF) == 0x00702400)
                        continue;
 
@@ -1556,6 +1560,8 @@ bool ath9k_hw_check_alive(struct ath_hw *ah)
                default:
                        return true;
                }
+
+               udelay(1);
        } while (count-- > 0);
 
        return false;
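
The liveness check gains two refinements: any change in AR_OBS_BUS_1 between consecutive reads is now taken as proof of life, on the reasoning that a truly hung MAC keeps returning the same value, and the udelay(1) spaces the up-to-50 samples apart so the loop does not just re-read the same instant.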
index a0ebdd000fc20f83dac90401ba017dd1ebf3b570..82e340d3ec60a81cc83d2427bd9280e55ca3af3a 100644
@@ -732,11 +732,18 @@ static struct ath_rxbuf *ath_get_next_rx_buf(struct ath_softc *sc,
                        return NULL;
 
                /*
-                * mark descriptor as zero-length and set the 'more'
-                * flag to ensure that both buffers get discarded
+                * Re-check previous descriptor, in case it has been filled
+                * in the meantime.
                 */
-               rs->rs_datalen = 0;
-               rs->rs_more = true;
+               ret = ath9k_hw_rxprocdesc(ah, ds, rs);
+               if (ret == -EINPROGRESS) {
+                       /*
+                        * mark descriptor as zero-length and set the 'more'
+                        * flag to ensure that both buffers get discarded
+                        */
+                       rs->rs_datalen = 0;
+                       rs->rs_more = true;
+               }
        }
 
        list_del(&bf->list);
@@ -985,32 +992,32 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc,
        struct ath_common *common = ath9k_hw_common(ah);
        struct ieee80211_hdr *hdr;
        bool discard_current = sc->rx.discard_next;
-       int ret = 0;
 
        /*
         * Discard corrupt descriptors which are marked in
         * ath_get_next_rx_buf().
         */
-       sc->rx.discard_next = rx_stats->rs_more;
        if (discard_current)
-               return -EINVAL;
+               goto corrupt;
+
+       sc->rx.discard_next = false;
 
        /*
         * Discard zero-length packets.
         */
        if (!rx_stats->rs_datalen) {
                RX_STAT_INC(rx_len_err);
-               return -EINVAL;
+               goto corrupt;
        }
 
-        /*
-         * rs_status follows rs_datalen so if rs_datalen is too large
-         * we can take a hint that hardware corrupted it, so ignore
-         * those frames.
-         */
+       /*
+        * rs_status follows rs_datalen so if rs_datalen is too large
+        * we can take a hint that hardware corrupted it, so ignore
+        * those frames.
+        */
        if (rx_stats->rs_datalen > (common->rx_bufsize - ah->caps.rx_status_len)) {
                RX_STAT_INC(rx_len_err);
-               return -EINVAL;
+               goto corrupt;
        }
 
        /* Only use status info from the last fragment */
@@ -1024,10 +1031,8 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc,
         * This is different from the other corrupt descriptor
         * condition handled above.
         */
-       if (rx_stats->rs_status & ATH9K_RXERR_CORRUPT_DESC) {
-               ret = -EINVAL;
-               goto exit;
-       }
+       if (rx_stats->rs_status & ATH9K_RXERR_CORRUPT_DESC)
+               goto corrupt;
 
        hdr = (struct ieee80211_hdr *) (skb->data + ah->caps.rx_status_len);
 
@@ -1043,18 +1048,15 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc,
                if (ath_process_fft(sc, hdr, rx_stats, rx_status->mactime))
                        RX_STAT_INC(rx_spectral);
 
-               ret = -EINVAL;
-               goto exit;
+               return -EINVAL;
        }
 
        /*
         * everything but the rate is checked here, the rate check is done
         * separately to avoid doing two lookups for a rate for each frame.
         */
-       if (!ath9k_rx_accept(common, hdr, rx_status, rx_stats, decrypt_error)) {
-               ret = -EINVAL;
-               goto exit;
-       }
+       if (!ath9k_rx_accept(common, hdr, rx_status, rx_stats, decrypt_error))
+               return -EINVAL;
 
        if (ath_is_mybeacon(common, hdr)) {
                RX_STAT_INC(rx_beacons);
@@ -1064,15 +1066,11 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc,
        /*
         * This shouldn't happen, but have a safety check anyway.
         */
-       if (WARN_ON(!ah->curchan)) {
-               ret = -EINVAL;
-               goto exit;
-       }
+       if (WARN_ON(!ah->curchan))
+               return -EINVAL;
 
-       if (ath9k_process_rate(common, hw, rx_stats, rx_status)) {
-               ret =-EINVAL;
-               goto exit;
-       }
+       if (ath9k_process_rate(common, hw, rx_stats, rx_status))
+               return -EINVAL;
 
        ath9k_process_rssi(common, hw, rx_stats, rx_status);
 
@@ -1087,9 +1085,11 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc,
                sc->rx.num_pkts++;
 #endif
 
-exit:
-       sc->rx.discard_next = false;
-       return ret;
+       return 0;
+
+corrupt:
+       sc->rx.discard_next = rx_stats->rs_more;
+       return -EINVAL;
 }
 
 static void ath9k_rx_skb_postprocess(struct ath_common *common,
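
The hunks above replace a single exit label, which unconditionally cleared sc->rx.discard_next, with direct returns for ordinary errors and a corrupt label that propagates rs_more only when the descriptor itself is corrupt. A minimal standalone sketch of that control-flow shape; the names (frame, process_frame, discard_next) are hypothetical, not the driver's:

#include <stdbool.h>
#include <stdio.h>

struct frame { int len; bool more; bool corrupt_desc; };

static bool discard_next;

static int process_frame(const struct frame *f)
{
	if (f->len == 0)
		goto corrupt;		/* zero-length: corrupt descriptor */
	if (f->corrupt_desc)
		goto corrupt;		/* hardware flagged the descriptor */
	if (f->len > 4096)
		return -1;		/* ordinary error: no extra state */
	discard_next = false;
	return 0;

corrupt:
	/* only corrupt frames taint the next buffer of the same burst */
	discard_next = f->more;
	return -1;
}

int main(void)
{
	struct frame bad = { .len = 0, .more = true };
	struct frame good = { .len = 64 };

	process_frame(&bad);
	printf("discard_next after corrupt frame: %d\n", discard_next);
	process_frame(&good);
	printf("discard_next after good frame: %d\n", discard_next);
	return 0;
}
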
index 0a75e2f68c9dc30043e32653d78f67799ab8b771..f042a18c8495cb46c04bebe4e7fac4ace4944b66 100644 (file)
@@ -1444,14 +1444,16 @@ void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc,
        for (tidno = 0, tid = &an->tid[tidno];
             tidno < IEEE80211_NUM_TIDS; tidno++, tid++) {
 
-               if (!tid->sched)
-                       continue;
-
                ac = tid->ac;
                txq = ac->txq;
 
                ath_txq_lock(sc, txq);
 
+               if (!tid->sched) {
+                       ath_txq_unlock(sc, txq);
+                       continue;
+               }
+
                buffered = ath_tid_has_buffered(tid);
 
                tid->sched = false;
@@ -2184,14 +2186,15 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
                txq->stopped = true;
        }
 
+       if (txctl->an)
+               tid = ath_get_skb_tid(sc, txctl->an, skb);
+
        if (info->flags & IEEE80211_TX_CTL_PS_RESPONSE) {
                ath_txq_unlock(sc, txq);
                txq = sc->tx.uapsdq;
                ath_txq_lock(sc, txq);
        } else if (txctl->an &&
                   ieee80211_is_data_present(hdr->frame_control)) {
-               tid = ath_get_skb_tid(sc, txctl->an, skb);
-
                WARN_ON(tid->ac->txq != txctl->txq);
 
                if (info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT)
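
Two races are closed above: tid->sched is now tested only while the queue lock is held, and the TID lookup happens before the function may switch txq for PS responses. A userspace pthread analogue of the check-under-lock part, with hypothetical names:

#include <pthread.h>
#include <stdbool.h>

struct tid_state {
	pthread_mutex_t txq_lock;
	bool sched;
};

static void tid_sleep(struct tid_state *t)
{
	pthread_mutex_lock(&t->txq_lock);
	if (!t->sched) {		/* checked under the lock */
		pthread_mutex_unlock(&t->txq_lock);
		return;
	}
	t->sched = false;		/* safe: still holding the lock */
	pthread_mutex_unlock(&t->txq_lock);
}

int main(void)
{
	struct tid_state t = { PTHREAD_MUTEX_INITIALIZER, true };

	tid_sleep(&t);
	return 0;
}
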
index 3e991897d7ca4dfe856d06f4027fc0dec5c726f1..119ee6eaf1c3df944b6482957be5350220433753 100644 (file)
@@ -457,7 +457,6 @@ struct brcmf_sdio {
 
        u8 tx_hdrlen;           /* sdio bus header length for tx packet */
        bool txglom;            /* host tx glomming enable flag */
-       struct sk_buff *txglom_sgpad;   /* scatter-gather padding buffer */
        u16 head_align;         /* buffer pointer alignment */
        u16 sgentry_align;      /* scatter-gather buffer alignment */
 };
@@ -1944,9 +1943,8 @@ static int brcmf_sdio_txpkt_prep_sg(struct brcmf_sdio *bus,
        if (lastfrm && chain_pad)
                tail_pad += blksize - chain_pad;
        if (skb_tailroom(pkt) < tail_pad && pkt->len > blksize) {
-               pkt_pad = bus->txglom_sgpad;
-               if (pkt_pad == NULL)
-                         brcmu_pkt_buf_get_skb(tail_pad + tail_chop);
+               pkt_pad = brcmu_pkt_buf_get_skb(tail_pad + tail_chop +
+                                               bus->head_align);
                if (pkt_pad == NULL)
                        return -ENOMEM;
                ret = brcmf_sdio_txpkt_hdalign(bus, pkt_pad);
@@ -1957,6 +1955,7 @@ static int brcmf_sdio_txpkt_prep_sg(struct brcmf_sdio *bus,
                       tail_chop);
                *(u32 *)(pkt_pad->cb) = ALIGN_SKB_FLAG + tail_chop;
                skb_trim(pkt, pkt->len - tail_chop);
+               skb_trim(pkt_pad, tail_pad + tail_chop);
                __skb_queue_after(pktq, pkt, pkt_pad);
        } else {
                ntail = pkt->data_len + tail_pad -
@@ -2011,7 +2010,7 @@ brcmf_sdio_txpkt_prep(struct brcmf_sdio *bus, struct sk_buff_head *pktq,
                        return ret;
                head_pad = (u16)ret;
                if (head_pad)
-                       memset(pkt_next->data, 0, head_pad + bus->tx_hdrlen);
+                       memset(pkt_next->data + bus->tx_hdrlen, 0, head_pad);
 
                total_len += pkt_next->len;
 
@@ -3486,10 +3485,6 @@ static int brcmf_sdio_bus_preinit(struct device *dev)
                bus->txglom = false;
                value = 1;
                pad_size = bus->sdiodev->func[2]->cur_blksize << 1;
-               bus->txglom_sgpad = brcmu_pkt_buf_get_skb(pad_size);
-               if (!bus->txglom_sgpad)
-                       brcmf_err("allocating txglom padding skb failed, reduced performance\n");
-
                err = brcmf_iovar_data_set(bus->sdiodev->dev, "bus:rxglom",
                                           &value, sizeof(u32));
                if (err < 0) {
@@ -4053,7 +4048,6 @@ void brcmf_sdio_remove(struct brcmf_sdio *bus)
                        brcmf_sdio_chip_detach(&bus->ci);
                }
 
-               brcmu_pkt_buf_free_skb(bus->txglom_sgpad);
                kfree(bus->rxbuf);
                kfree(bus->hdrbuf);
                kfree(bus);
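
The brcmfmac changes drop the shared preallocated padding skb in favour of a per-call allocation, trim the pad skb to its real length, and fix a memset that zeroed the tx header instead of the head padding behind it. A standalone sketch of the memset fix, with illustrative sizes:

#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned char data[32];
	size_t hdrlen = 8, head_pad = 4;

	memset(data, 0xaa, sizeof(data));	/* pretend header is written */
	/* wrong: memset(data, 0, head_pad + hdrlen) wipes the header too */
	memset(data + hdrlen, 0, head_pad);	/* right: zero the pad only */
	printf("header byte 0: %#x, pad byte: %#x\n",
	       (unsigned)data[0], (unsigned)data[hdrlen]);
	return 0;
}
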
index d36e252d2ccbc0041a5b0c1669fe396c2c18db73..596525528f50504afe3005d98ac0ab47d3e26453 100644 (file)
@@ -147,7 +147,7 @@ static void ap_free_sta(struct ap_data *ap, struct sta_info *sta)
 
        if (!sta->ap && sta->u.sta.challenge)
                kfree(sta->u.sta.challenge);
-       del_timer(&sta->timer);
+       del_timer_sync(&sta->timer);
 #endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
 
        kfree(sta);
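
del_timer() only removes a pending timer; it does not wait for a handler already running on another CPU, so the kfree() that follows could free memory the handler still uses. del_timer_sync() waits for the handler to finish first. A userspace pthread analogue of the same rule, with hypothetical names:

#include <pthread.h>
#include <stdlib.h>
#include <unistd.h>

struct sta { pthread_t worker; int data; };

static void *callback(void *arg)
{
	struct sta *s = arg;

	usleep(1000);
	s->data++;		/* touches *s: must not outlive free() */
	return NULL;
}

int main(void)
{
	struct sta *s = malloc(sizeof(*s));

	pthread_create(&s->worker, NULL, callback, s);
	pthread_join(s->worker, NULL);	/* the del_timer_sync() step */
	free(s);			/* safe only after the join */
	return 0;
}
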
index c0d070c5df5ed73e14a64e141d6a7d2f2a3133b6..9cdd91cdf661825604e9ae8c89649213667b0b0e 100644 (file)
@@ -590,6 +590,7 @@ void iwl_deactivate_station(struct iwl_priv *priv, const u8 sta_id,
                        sizeof(priv->tid_data[sta_id][tid]));
 
        priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE;
+       priv->stations[sta_id].used &= ~IWL_STA_UCODE_INPROGRESS;
 
        priv->num_stations--;
 
index a6839dfcb82dd75029c1e377bbebb078464a8792..398dd096674cf17bd8112e8e7d06ad4ce57427f0 100644 (file)
@@ -1291,8 +1291,6 @@ int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
        struct iwl_compressed_ba_resp *ba_resp = (void *)pkt->data;
        struct iwl_ht_agg *agg;
        struct sk_buff_head reclaimed_skbs;
-       struct ieee80211_tx_info *info;
-       struct ieee80211_hdr *hdr;
        struct sk_buff *skb;
        int sta_id;
        int tid;
@@ -1379,22 +1377,28 @@ int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
        freed = 0;
 
        skb_queue_walk(&reclaimed_skbs, skb) {
-               hdr = (struct ieee80211_hdr *)skb->data;
+               struct ieee80211_hdr *hdr = (void *)skb->data;
+               struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 
                if (ieee80211_is_data_qos(hdr->frame_control))
                        freed++;
                else
                        WARN_ON_ONCE(1);
 
-               info = IEEE80211_SKB_CB(skb);
                iwl_trans_free_tx_cmd(priv->trans, info->driver_data[1]);
 
+               memset(&info->status, 0, sizeof(info->status));
+               /* The packet was transmitted successfully; failures come as
+                * single frames because, before failing a frame, the firmware
+                * transmits it at least once without aggregation.
+                */
+               info->flags |= IEEE80211_TX_STAT_ACK;
+
                if (freed == 1) {
                        /* this is the first skb we deliver in this batch */
                        /* put the rate scaling data there */
                        info = IEEE80211_SKB_CB(skb);
                        memset(&info->status, 0, sizeof(info->status));
-                       info->flags |= IEEE80211_TX_STAT_ACK;
                        info->flags |= IEEE80211_TX_STAT_AMPDU;
                        info->status.ampdu_ack_len = ba_resp->txed_2_done;
                        info->status.ampdu_len = ba_resp->txed;
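
Both iwlwifi copies of this loop (dvm here, mvm below) are changed so that every reclaimed frame gets a cleared status and IEEE80211_TX_STAT_ACK, while the A-MPDU rate-scaling fields are still filled in only for the first data frame. A standalone sketch of that shape, with stand-in flags and types:

#include <stdio.h>
#include <string.h>

struct tx_status { unsigned flags; int ampdu_len; };
#define TX_STAT_ACK   0x1
#define TX_STAT_AMPDU 0x2

int main(void)
{
	struct tx_status frames[3];
	int freed = 0;

	for (int i = 0; i < 3; i++) {
		struct tx_status *info = &frames[i];

		freed++;
		memset(info, 0, sizeof(*info));
		info->flags |= TX_STAT_ACK;	/* every frame */
		if (freed == 1) {		/* first frame only */
			info->flags |= TX_STAT_AMPDU;
			info->ampdu_len = 3;
		}
	}
	printf("frame 2 flags: %#x (ACK but not AMPDU)\n", frames[1].flags);
	return 0;
}
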
index e4ead86f06d69a7ebb99d2ceede388035dd6c89e..2b0ba1fc3c82fb457a897dbb8fe08d528b12d762 100644 (file)
@@ -152,7 +152,7 @@ enum iwl_power_scheme {
        IWL_POWER_SCHEME_LP
 };
 
-#define IWL_CONN_MAX_LISTEN_INTERVAL   70
+#define IWL_CONN_MAX_LISTEN_INTERVAL   10
 #define IWL_UAPSD_AC_INFO              (IEEE80211_WMM_IE_STA_QOSINFO_AC_VO |\
                                         IEEE80211_WMM_IE_STA_QOSINFO_AC_VI |\
                                         IEEE80211_WMM_IE_STA_QOSINFO_AC_BK |\
index 4df12fa9d33685b285b489a67897479f8f8dc54a..76ee486039d7a082b9081f835e7b132bf08ecbe5 100644 (file)
@@ -822,16 +822,12 @@ int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
        struct iwl_mvm_ba_notif *ba_notif = (void *)pkt->data;
        struct sk_buff_head reclaimed_skbs;
        struct iwl_mvm_tid_data *tid_data;
-       struct ieee80211_tx_info *info;
        struct ieee80211_sta *sta;
        struct iwl_mvm_sta *mvmsta;
-       struct ieee80211_hdr *hdr;
        struct sk_buff *skb;
        int sta_id, tid, freed;
-
        /* "flow" corresponds to Tx queue */
        u16 scd_flow = le16_to_cpu(ba_notif->scd_flow);
-
        /* "ssn" is start of block-ack Tx window, corresponds to index
         * (in Tx queue's circular buffer) of first TFD/frame in window */
        u16 ba_resp_scd_ssn = le16_to_cpu(ba_notif->scd_ssn);
@@ -888,22 +884,26 @@ int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
        freed = 0;
 
        skb_queue_walk(&reclaimed_skbs, skb) {
-               hdr = (struct ieee80211_hdr *)skb->data;
+               struct ieee80211_hdr *hdr = (void *)skb->data;
+               struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 
                if (ieee80211_is_data_qos(hdr->frame_control))
                        freed++;
                else
                        WARN_ON_ONCE(1);
 
-               info = IEEE80211_SKB_CB(skb);
                iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]);
 
+               memset(&info->status, 0, sizeof(info->status));
+               /* The packet was transmitted successfully; failures come as
+                * single frames because, before failing a frame, the firmware
+                * transmits it at least once without aggregation.
+                */
+               info->flags |= IEEE80211_TX_STAT_ACK;
+
                if (freed == 1) {
                        /* this is the first skb we deliver in this batch */
                        /* put the rate scaling data there */
-                       info = IEEE80211_SKB_CB(skb);
-                       memset(&info->status, 0, sizeof(info->status));
-                       info->flags |= IEEE80211_TX_STAT_ACK;
                        info->flags |= IEEE80211_TX_STAT_AMPDU;
                        info->status.ampdu_ack_len = ba_notif->txed_2_done;
                        info->status.ampdu_len = ba_notif->txed;
index 32f75007a825be6c24b6195d35a9f4e31d590961..cb6d189bc3e60fd9bfeb2e3e696caf822ebb9748 100644 (file)
@@ -621,7 +621,7 @@ static int lbs_ret_scan(struct lbs_private *priv, unsigned long dummy,
                        id = *pos++;
                        elen = *pos++;
                        left -= 2;
-                       if (elen > left || elen == 0) {
+                       if (elen > left) {
                                lbs_deb_scan("scan response: invalid IE fmt\n");
                                goto done;
                        }
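
Zero-length information elements (an empty SSID IE, for instance) are valid, so only elements that would overrun the remaining buffer are rejected now. A standalone sketch of such a TLV walk, with made-up data:

#include <stdio.h>

int main(void)
{
	/* id, len, payload...; the second IE has len 0 and is legal */
	unsigned char buf[] = { 0x01, 2, 0xaa, 0xbb, 0x00, 0, 0x03, 1, 0xcc };
	int left = sizeof(buf);
	const unsigned char *pos = buf;

	while (left >= 2) {
		unsigned id = *pos++;
		unsigned elen = *pos++;

		left -= 2;
		if (elen > left) {	/* but elen == 0 is fine */
			printf("invalid IE\n");
			return 1;
		}
		printf("IE %u, len %u\n", id, elen);
		pos += elen;
		left -= elen;
	}
	return 0;
}
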
index 03688aa14e8adb8575163e3a40aeda4a70a32c19..7fe7b53fb17a28d75cb7fa9a6fc315c9f0ddd937 100644 (file)
@@ -1211,6 +1211,12 @@ static int mwifiex_pcie_process_recv_data(struct mwifiex_adapter *adapter)
                rd_index = card->rxbd_rdptr & reg->rx_mask;
                skb_data = card->rx_buf_list[rd_index];
 
+               /* If skb allocation failed earlier for this Rx packet,
+                * rx_buf_list[rd_index] was left holding a NULL pointer.
+                */
+               if (!skb_data)
+                       return -ENOMEM;
+
                MWIFIEX_SKB_PACB(skb_data, &buf_pa);
                pci_unmap_single(card->dev, buf_pa, MWIFIEX_RX_DATA_BUF_SIZE,
                                 PCI_DMA_FROMDEVICE);
@@ -1525,6 +1531,14 @@ static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter)
                if (adapter->ps_state == PS_STATE_SLEEP_CFM) {
                        mwifiex_process_sleep_confirm_resp(adapter, skb->data,
                                                           skb->len);
+                       mwifiex_pcie_enable_host_int(adapter);
+                       if (mwifiex_write_reg(adapter,
+                                             PCIE_CPU_INT_EVENT,
+                                             CPU_INTR_SLEEP_CFM_DONE)) {
+                               dev_warn(adapter->dev,
+                                        "Write register failed\n");
+                               return -1;
+                       }
                        while (reg->sleep_cookie && (count++ < 10) &&
                               mwifiex_pcie_ok_to_access_hw(adapter))
                                usleep_range(50, 60);
@@ -1993,23 +2007,9 @@ static void mwifiex_interrupt_status(struct mwifiex_adapter *adapter)
                adapter->int_status |= pcie_ireg;
                spin_unlock_irqrestore(&adapter->int_lock, flags);
 
-               if (pcie_ireg & HOST_INTR_CMD_DONE) {
-                       if ((adapter->ps_state == PS_STATE_SLEEP_CFM) ||
-                           (adapter->ps_state == PS_STATE_SLEEP)) {
-                               mwifiex_pcie_enable_host_int(adapter);
-                               if (mwifiex_write_reg(adapter,
-                                                     PCIE_CPU_INT_EVENT,
-                                                     CPU_INTR_SLEEP_CFM_DONE)
-                                                     ) {
-                                       dev_warn(adapter->dev,
-                                                "Write register failed\n");
-                                       return;
-
-                               }
-                       }
-               } else if (!adapter->pps_uapsd_mode &&
-                          adapter->ps_state == PS_STATE_SLEEP &&
-                          mwifiex_pcie_ok_to_access_hw(adapter)) {
+               if (!adapter->pps_uapsd_mode &&
+                   adapter->ps_state == PS_STATE_SLEEP &&
+                   mwifiex_pcie_ok_to_access_hw(adapter)) {
                                /* Potentially for PCIe we could get other
                                 * interrupts like shared. Don't change power
                                 * state until cookie is set */
index e8ebbd4bc3cd3348b271a6d930781cb799b97300..208748804a55ee56dd1d8d7cbb445e7600525db5 100644 (file)
@@ -22,8 +22,6 @@
 
 #define USB_VERSION    "1.0"
 
-static const char usbdriver_name[] = "usb8xxx";
-
 static struct mwifiex_if_ops usb_ops;
 static struct semaphore add_remove_card_sem;
 static struct usb_card_rec *usb_card;
@@ -527,13 +525,6 @@ static int mwifiex_usb_resume(struct usb_interface *intf)
                                                   MWIFIEX_BSS_ROLE_ANY),
                                  MWIFIEX_ASYNC_CMD);
 
-#ifdef CONFIG_PM
-       /* Resume handler may be called due to remote wakeup,
-        * force to exit suspend anyway
-        */
-       usb_disable_autosuspend(card->udev);
-#endif /* CONFIG_PM */
-
        return 0;
 }
 
@@ -567,13 +558,12 @@ static void mwifiex_usb_disconnect(struct usb_interface *intf)
 }
 
 static struct usb_driver mwifiex_usb_driver = {
-       .name = usbdriver_name,
+       .name = "mwifiex_usb",
        .probe = mwifiex_usb_probe,
        .disconnect = mwifiex_usb_disconnect,
        .id_table = mwifiex_usb_table,
        .suspend = mwifiex_usb_suspend,
        .resume = mwifiex_usb_resume,
-       .supports_autosuspend = 1,
 };
 
 static int mwifiex_usb_tx_init(struct mwifiex_adapter *adapter)
index 13eaeed03898288d43abf107090346d513132820..981cf6e7c73be5b65f4a90bb05ef0e9023c592f4 100644 (file)
@@ -559,7 +559,8 @@ mwifiex_clean_txrx(struct mwifiex_private *priv)
        mwifiex_wmm_delete_all_ralist(priv);
        memcpy(tos_to_tid, ac_to_tid, sizeof(tos_to_tid));
 
-       if (priv->adapter->if_ops.clean_pcie_ring)
+       if (priv->adapter->if_ops.clean_pcie_ring &&
+           !priv->adapter->surprise_removed)
                priv->adapter->if_ops.clean_pcie_ring(priv->adapter);
        spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
 }
index f9daa9e183f216e7114a1d1b604fe4fc6b6d3861..e30d80033cbc4bb4f93d4e84897ff6fb29417a91 100644 (file)
@@ -907,6 +907,7 @@ static int handle_incoming_queue(struct net_device *dev,
 
                /* Ethernet work: Delayed to here as it peeks the header. */
                skb->protocol = eth_type_trans(skb, dev);
+               skb_reset_network_header(skb);
 
                if (checksum_setup(dev, skb)) {
                        kfree_skb(skb);
index be361b7cd30f240e4b1a78abbb3acd253620b035..1e4e69384baaed11ae859ce6a784eef0d8fa0f9c 100644 (file)
@@ -217,7 +217,7 @@ config PINCTRL_IMX28
        select PINCTRL_MXS
 
 config PINCTRL_MSM
-       tristate
+       bool
        select PINMUX
        select PINCONF
        select GENERIC_PINCONF
index 9ccf681dad2f4993cdf3ef20a9099a17ae62dcb9..f9fabe9bf47d433b9152cd438e259e568b3b9ff5 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/clk.h>
 #include <linux/gpio.h>
 #include <linux/irqdomain.h>
+#include <linux/irqchip/chained_irq.h>
 #include <linux/module.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
@@ -584,7 +585,7 @@ static int sunxi_pinctrl_irq_set_type(struct irq_data *d,
        spin_lock_irqsave(&pctl->lock, flags);
 
        regval = readl(pctl->membase + reg);
-       regval &= ~IRQ_CFG_IRQ_MASK;
+       regval &= ~(IRQ_CFG_IRQ_MASK << index);
        writel(regval | (mode << index), pctl->membase + reg);
 
        spin_unlock_irqrestore(&pctl->lock, flags);
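
The old code cleared IRQ_CFG_IRQ_MASK at bit 0 regardless of which field was being updated; the fix shifts the mask to the field's position before clearing. A standalone sketch of this read-modify-write pattern, with an illustrative field width:

#include <stdio.h>

#define IRQ_CFG_IRQ_MASK 0xf

int main(void)
{
	unsigned regval = 0xffffffff;	/* pretend register contents */
	unsigned index = 8, mode = 0x3;	/* field at bits [11:8] */

	regval &= ~(IRQ_CFG_IRQ_MASK << index);	/* clear the right field */
	regval |= mode << index;
	printf("regval = %#x\n", regval);	/* 0xfffff3ff */
	return 0;
}
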
@@ -665,6 +666,7 @@ static struct irq_chip sunxi_pinctrl_irq_chip = {
 
 static void sunxi_pinctrl_irq_handler(unsigned irq, struct irq_desc *desc)
 {
+       struct irq_chip *chip = irq_get_chip(irq);
        struct sunxi_pinctrl *pctl = irq_get_handler_data(irq);
        const unsigned long reg = readl(pctl->membase + IRQ_STATUS_REG);
 
@@ -674,10 +676,12 @@ static void sunxi_pinctrl_irq_handler(unsigned irq, struct irq_desc *desc)
        if (reg) {
                int irqoffset;
 
+               chained_irq_enter(chip, desc);
                for_each_set_bit(irqoffset, &reg, SUNXI_IRQ_NUMBER) {
                        int pin_irq = irq_find_mapping(pctl->domain, irqoffset);
                        generic_handle_irq(pin_irq);
                }
+               chained_irq_exit(chip, desc);
        }
 }
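
chained_irq_enter()/chained_irq_exit() bracket the demultiplexing loop so the parent interrupt controller is acked and masked correctly around child dispatch. A kernel-context sketch of the convention the hunk adopts (not a standalone program):

static void demux_irq_handler(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_get_chip(irq);

	chained_irq_enter(chip, desc);	/* ack/mask the parent interrupt */
	/* read the status register; generic_handle_irq() each set bit */
	chained_irq_exit(chip, desc);	/* eoi/unmask the parent interrupt */
}
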
 
index 01c494f8a14f0119493d783624b86831549ccdb0..552b0e97077a858b0c1aeda6d9f1a83e4803ae35 100644 (file)
@@ -511,7 +511,7 @@ static inline u32 sunxi_pull_offset(u16 pin)
 
 static inline u32 sunxi_irq_cfg_reg(u16 irq)
 {
-       u8 reg = irq / IRQ_CFG_IRQ_PER_REG;
+       u8 reg = irq / IRQ_CFG_IRQ_PER_REG * 0x04;
        return reg + IRQ_CFG_REG;
 }
 
@@ -523,7 +523,7 @@ static inline u32 sunxi_irq_cfg_offset(u16 irq)
 
 static inline u32 sunxi_irq_ctrl_reg(u16 irq)
 {
-       u8 reg = irq / IRQ_CTRL_IRQ_PER_REG;
+       u8 reg = irq / IRQ_CTRL_IRQ_PER_REG * 0x04;
        return reg + IRQ_CTRL_REG;
 }
 
@@ -535,7 +535,7 @@ static inline u32 sunxi_irq_ctrl_offset(u16 irq)
 
 static inline u32 sunxi_irq_status_reg(u16 irq)
 {
-       u8 reg = irq / IRQ_STATUS_IRQ_PER_REG;
+       u8 reg = irq / IRQ_STATUS_IRQ_PER_REG * 0x04;
        return reg + IRQ_STATUS_REG;
 }
 
index 77d103fe39d90c8ad0bb82485ec1896dd1b5889a..567d6918d50b226b7841c84a98b2343ad552a03e 100644 (file)
@@ -89,7 +89,8 @@ enum {
 
        /* GPSR6 */
        FN_IP13_10, FN_IP13_11, FN_IP13_12, FN_IP13_13, FN_IP13_14,
-       FN_IP13_15, FN_IP13_18_16, FN_IP13_21_19, FN_IP13_22, FN_IP13_24_23,
+       FN_IP13_15, FN_IP13_18_16, FN_IP13_21_19,
+       FN_IP13_22, FN_IP13_24_23, FN_SD1_CLK,
        FN_IP13_25, FN_IP13_26, FN_IP13_27, FN_IP13_30_28, FN_IP14_1_0,
        FN_IP14_2, FN_IP14_3, FN_IP14_4, FN_IP14_5, FN_IP14_6, FN_IP14_7,
        FN_IP14_10_8, FN_IP14_13_11, FN_IP14_16_14, FN_IP14_19_17,
@@ -788,6 +789,7 @@ static const u16 pinmux_data[] = {
        PINMUX_DATA(USB1_PWEN_MARK, FN_USB1_PWEN),
        PINMUX_DATA(USB1_OVC_MARK, FN_USB1_OVC),
        PINMUX_DATA(DU0_DOTCLKIN_MARK, FN_DU0_DOTCLKIN),
+       PINMUX_DATA(SD1_CLK_MARK, FN_SD1_CLK),
 
        /* IPSR0 */
        PINMUX_IPSR_DATA(IP0_0, D0),
@@ -3825,7 +3827,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
                GP_6_11_FN, FN_IP13_25,
                GP_6_10_FN, FN_IP13_24_23,
                GP_6_9_FN, FN_IP13_22,
-               0, 0,
+               GP_6_8_FN, FN_SD1_CLK,
                GP_6_7_FN, FN_IP13_21_19,
                GP_6_6_FN, FN_IP13_18_16,
                GP_6_5_FN, FN_IP13_15,
index a0d6152701cdf3d3aa5a64bbb435f93526d0a324..617a4916b50fc1d8fec129e001f717c00de13880 100644 (file)
@@ -598,7 +598,7 @@ static unsigned int sirfsoc_gpio_irq_startup(struct irq_data *d)
 {
        struct sirfsoc_gpio_bank *bank = irq_data_get_irq_chip_data(d);
 
-       if (gpio_lock_as_irq(&bank->chip.gc, d->hwirq))
+       if (gpio_lock_as_irq(&bank->chip.gc, d->hwirq % SIRFSOC_GPIO_BANK_SIZE))
                dev_err(bank->chip.gc.dev,
                        "unable to lock HW IRQ %lu for IRQ\n",
                        d->hwirq);
@@ -611,7 +611,7 @@ static void sirfsoc_gpio_irq_shutdown(struct irq_data *d)
        struct sirfsoc_gpio_bank *bank = irq_data_get_irq_chip_data(d);
 
        sirfsoc_gpio_irq_mask(d);
-       gpio_unlock_as_irq(&bank->chip.gc, d->hwirq);
+       gpio_unlock_as_irq(&bank->chip.gc, d->hwirq % SIRFSOC_GPIO_BANK_SIZE);
 }
 
 static struct irq_chip sirfsoc_irq_chip = {
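
The controller spans several banks of SIRFSOC_GPIO_BANK_SIZE lines, so the global d->hwirq must be reduced to a bank-relative offset before the GPIO is locked or unlocked as an IRQ. A standalone sketch of the arithmetic, with an illustrative bank size:

#include <stdio.h>

#define BANK_SIZE 32

int main(void)
{
	unsigned long hwirq = 70;	/* third bank, line 6 */

	printf("bank %lu, offset %lu\n",
	       hwirq / BANK_SIZE, hwirq % BANK_SIZE);
	return 0;
}
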
index b4b0d83f9ef6437dfb97f33037aaee1acf7bda59..7061ac0ad4287c0d84edb39058ef6abc295e43ce 100644 (file)
@@ -678,6 +678,7 @@ struct tsi721_bdma_chan {
        struct list_head        free_list;
        dma_cookie_t            completed_cookie;
        struct tasklet_struct   tasklet;
+       bool                    active;
 };
 
 #endif /* CONFIG_RAPIDIO_DMA_ENGINE */
index 502663f5f7c65a847f45881dcac4821081a314cf..91245f5dbe81a7d235f13a2dd2edbe741508d584 100644 (file)
@@ -206,8 +206,8 @@ void tsi721_bdma_handler(struct tsi721_bdma_chan *bdma_chan)
 {
        /* Disable BDMA channel interrupts */
        iowrite32(0, bdma_chan->regs + TSI721_DMAC_INTE);
-
-       tasklet_schedule(&bdma_chan->tasklet);
+       if (bdma_chan->active)
+               tasklet_schedule(&bdma_chan->tasklet);
 }
 
 #ifdef CONFIG_PCI_MSI
@@ -562,7 +562,7 @@ static int tsi721_alloc_chan_resources(struct dma_chan *dchan)
        }
 #endif /* CONFIG_PCI_MSI */
 
-       tasklet_enable(&bdma_chan->tasklet);
+       bdma_chan->active = true;
        tsi721_bdma_interrupt_enable(bdma_chan, 1);
 
        return bdma_chan->bd_num - 1;
@@ -576,9 +576,7 @@ err_out:
 static void tsi721_free_chan_resources(struct dma_chan *dchan)
 {
        struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
-#ifdef CONFIG_PCI_MSI
        struct tsi721_device *priv = to_tsi721(dchan->device);
-#endif
        LIST_HEAD(list);
 
        dev_dbg(dchan->device->dev, "%s: Entry\n", __func__);
@@ -589,14 +587,25 @@ static void tsi721_free_chan_resources(struct dma_chan *dchan)
        BUG_ON(!list_empty(&bdma_chan->active_list));
        BUG_ON(!list_empty(&bdma_chan->queue));
 
-       tasklet_disable(&bdma_chan->tasklet);
+       tsi721_bdma_interrupt_enable(bdma_chan, 0);
+       bdma_chan->active = false;
+
+#ifdef CONFIG_PCI_MSI
+       if (priv->flags & TSI721_USING_MSIX) {
+               synchronize_irq(priv->msix[TSI721_VECT_DMA0_DONE +
+                                          bdma_chan->id].vector);
+               synchronize_irq(priv->msix[TSI721_VECT_DMA0_INT +
+                                          bdma_chan->id].vector);
+       } else
+#endif
+       synchronize_irq(priv->pdev->irq);
+
+       tasklet_kill(&bdma_chan->tasklet);
 
        spin_lock_bh(&bdma_chan->lock);
        list_splice_init(&bdma_chan->free_list, &list);
        spin_unlock_bh(&bdma_chan->lock);
 
-       tsi721_bdma_interrupt_enable(bdma_chan, 0);
-
 #ifdef CONFIG_PCI_MSI
        if (priv->flags & TSI721_USING_MSIX) {
                free_irq(priv->msix[TSI721_VECT_DMA0_DONE +
@@ -790,6 +799,7 @@ int tsi721_register_dma(struct tsi721_device *priv)
                bdma_chan->dchan.cookie = 1;
                bdma_chan->dchan.chan_id = i;
                bdma_chan->id = i;
+               bdma_chan->active = false;
 
                spin_lock_init(&bdma_chan->lock);
 
@@ -799,7 +809,6 @@ int tsi721_register_dma(struct tsi721_device *priv)
 
                tasklet_init(&bdma_chan->tasklet, tsi721_dma_tasklet,
                             (unsigned long)bdma_chan);
-               tasklet_disable(&bdma_chan->tasklet);
                list_add_tail(&bdma_chan->dchan.device_node,
                              &mport->dma.channels);
        }
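
The tsi721 changes replace tasklet_disable()/tasklet_enable(), which merely defer execution, with a teardown order that makes the tasklet unreachable before anything is freed. A kernel-context sketch of that order; disable_channel_interrupts() and free_channel_resources() are hypothetical stand-ins:

disable_channel_interrupts(bdma_chan);	/* 1: no new interrupts */
bdma_chan->active = false;		/* 2: ISR stops scheduling */
synchronize_irq(irq);			/* 3: no ISR still mid-flight */
tasklet_kill(&bdma_chan->tasklet);	/* 4: wait out a queued tasklet */
free_channel_resources(bdma_chan);	/* only now is freeing safe */
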
index d1ac4caaf1b05d4b68dd4bd986355dc59263c6fd..afca1bc24f262251abf3352d2e09d6452959805b 100644 (file)
@@ -953,6 +953,8 @@ static int machine_constraints_current(struct regulator_dev *rdev,
        return 0;
 }
 
+static int _regulator_do_enable(struct regulator_dev *rdev);
+
 /**
  * set_machine_constraints - sets regulator constraints
  * @rdev: regulator source
@@ -1013,10 +1015,9 @@ static int set_machine_constraints(struct regulator_dev *rdev,
        /* If the constraints say the regulator should be on at this point
         * and we have control then make sure it is enabled.
         */
-       if ((rdev->constraints->always_on || rdev->constraints->boot_on) &&
-           ops->enable) {
-               ret = ops->enable(rdev);
-               if (ret < 0) {
+       if (rdev->constraints->always_on || rdev->constraints->boot_on) {
+               ret = _regulator_do_enable(rdev);
+               if (ret < 0 && ret != -EINVAL) {
                        rdev_err(rdev, "failed to enable\n");
                        goto out;
                }
@@ -1907,8 +1908,6 @@ static int _regulator_do_disable(struct regulator_dev *rdev)
 
        trace_regulator_disable_complete(rdev_get_name(rdev));
 
-       _notifier_call_chain(rdev, REGULATOR_EVENT_DISABLE,
-                            NULL);
        return 0;
 }
 
@@ -1932,6 +1931,8 @@ static int _regulator_disable(struct regulator_dev *rdev)
                                rdev_err(rdev, "failed to disable\n");
                                return ret;
                        }
+                       _notifier_call_chain(rdev, REGULATOR_EVENT_DISABLE,
+                                       NULL);
                }
 
                rdev->use_count = 0;
@@ -1984,20 +1985,16 @@ static int _regulator_force_disable(struct regulator_dev *rdev)
 {
        int ret = 0;
 
-       /* force disable */
-       if (rdev->desc->ops->disable) {
-               /* ah well, who wants to live forever... */
-               ret = rdev->desc->ops->disable(rdev);
-               if (ret < 0) {
-                       rdev_err(rdev, "failed to force disable\n");
-                       return ret;
-               }
-               /* notify other consumers that power has been forced off */
-               _notifier_call_chain(rdev, REGULATOR_EVENT_FORCE_DISABLE |
-                       REGULATOR_EVENT_DISABLE, NULL);
+       ret = _regulator_do_disable(rdev);
+       if (ret < 0) {
+               rdev_err(rdev, "failed to force disable\n");
+               return ret;
        }
 
-       return ret;
+       _notifier_call_chain(rdev, REGULATOR_EVENT_FORCE_DISABLE |
+                       REGULATOR_EVENT_DISABLE, NULL);
+
+       return 0;
 }
 
 /**
@@ -3630,23 +3627,18 @@ int regulator_suspend_finish(void)
 
        mutex_lock(&regulator_list_mutex);
        list_for_each_entry(rdev, &regulator_list, list) {
-               struct regulator_ops *ops = rdev->desc->ops;
-
                mutex_lock(&rdev->mutex);
-               if ((rdev->use_count > 0  || rdev->constraints->always_on) &&
-                               ops->enable) {
-                       error = ops->enable(rdev);
+               if (rdev->use_count > 0  || rdev->constraints->always_on) {
+                       error = _regulator_do_enable(rdev);
                        if (error)
                                ret = error;
                } else {
                        if (!have_full_constraints())
                                goto unlock;
-                       if (!ops->disable)
-                               goto unlock;
                        if (!_regulator_is_enabled(rdev))
                                goto unlock;
 
-                       error = ops->disable(rdev);
+                       error = _regulator_do_disable(rdev);
                        if (error)
                                ret = error;
                }
@@ -3820,7 +3812,7 @@ static int __init regulator_init_complete(void)
                ops = rdev->desc->ops;
                c = rdev->constraints;
 
-               if (!ops->disable || (c && c->always_on))
+               if (c && c->always_on)
                        continue;
 
                mutex_lock(&rdev->mutex);
@@ -3841,7 +3833,7 @@ static int __init regulator_init_complete(void)
                        /* We log since this may kill the system if it
                         * goes wrong. */
                        rdev_info(rdev, "disabling\n");
-                       ret = ops->disable(rdev);
+                       ret = _regulator_do_disable(rdev);
                        if (ret != 0)
                                rdev_err(rdev, "couldn't disable: %d\n", ret);
                } else {
index 7afd373b9595ff68b16238fc37688102dbaa04f4..c4cde9c08f1ffa9ed5c7c1d91a10b745ef7fdb3e 100644 (file)
@@ -580,10 +580,12 @@ static int s3c_rtc_suspend(struct device *dev)
 
        clk_enable(rtc_clk);
        /* save TICNT for anyone using periodic interrupts */
-       ticnt_save = readb(s3c_rtc_base + S3C2410_TICNT);
        if (s3c_rtc_cpu_type == TYPE_S3C64XX) {
                ticnt_en_save = readw(s3c_rtc_base + S3C2410_RTCCON);
                ticnt_en_save &= S3C64XX_RTCCON_TICEN;
+               ticnt_save = readl(s3c_rtc_base + S3C2410_TICNT);
+       } else {
+               ticnt_save = readb(s3c_rtc_base + S3C2410_TICNT);
        }
        s3c_rtc_enable(pdev, 0);
 
@@ -605,10 +607,15 @@ static int s3c_rtc_resume(struct device *dev)
 
        clk_enable(rtc_clk);
        s3c_rtc_enable(pdev, 1);
-       writeb(ticnt_save, s3c_rtc_base + S3C2410_TICNT);
-       if (s3c_rtc_cpu_type == TYPE_S3C64XX && ticnt_en_save) {
-               tmp = readw(s3c_rtc_base + S3C2410_RTCCON);
-               writew(tmp | ticnt_en_save, s3c_rtc_base + S3C2410_RTCCON);
+       if (s3c_rtc_cpu_type == TYPE_S3C64XX) {
+               writel(ticnt_save, s3c_rtc_base + S3C2410_TICNT);
+               if (ticnt_en_save) {
+                       tmp = readw(s3c_rtc_base + S3C2410_RTCCON);
+                       writew(tmp | ticnt_en_save,
+                                       s3c_rtc_base + S3C2410_RTCCON);
+               }
+       } else {
+               writeb(ticnt_save, s3c_rtc_base + S3C2410_TICNT);
        }
 
        if (device_may_wakeup(dev) && wake_en) {
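
On S3C64xx the tick counter is a 32-bit register, on older SoCs an 8-bit one, so save and restore must pick the accessor width by CPU type instead of always using the byte accessor. A standalone sketch of the width selection, with stand-in types and accessors:

#include <stdint.h>
#include <stdio.h>

enum soc { TYPE_S3C2410, TYPE_S3C64XX };

static uint32_t save_ticnt(enum soc type, const volatile void *reg)
{
	if (type == TYPE_S3C64XX)
		return *(const volatile uint32_t *)reg;	/* readl() */
	return *(const volatile uint8_t *)reg;		/* readb() */
}

int main(void)
{
	uint32_t ticnt = 0x1234;

	printf("%#x\n", save_ticnt(TYPE_S3C64XX, &ticnt));
	return 0;
}
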
index c3a83df07894e51195d914111c15be2c615f09c1..795ed61a549632adc16b808622f75c911f66634d 100644 (file)
@@ -1660,7 +1660,6 @@ int qeth_qdio_clear_card(struct qeth_card *card, int use_halt)
                                QDIO_FLAG_CLEANUP_USING_CLEAR);
                if (rc)
                        QETH_CARD_TEXT_(card, 3, "1err%d", rc);
-               qdio_free(CARD_DDEV(card));
                atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
                break;
        case QETH_QDIO_CLEANING:
@@ -2605,6 +2604,7 @@ static int qeth_mpc_initialize(struct qeth_card *card)
        return 0;
 out_qdio:
        qeth_qdio_clear_card(card, card->info.type != QETH_CARD_TYPE_IQD);
+       qdio_free(CARD_DDEV(card));
        return rc;
 }
 
@@ -4906,9 +4906,11 @@ retry:
        if (retries < 3)
                QETH_DBF_MESSAGE(2, "%s Retrying to do IDX activates.\n",
                        dev_name(&card->gdev->dev));
+       rc = qeth_qdio_clear_card(card, card->info.type != QETH_CARD_TYPE_IQD);
        ccw_device_set_offline(CARD_DDEV(card));
        ccw_device_set_offline(CARD_WDEV(card));
        ccw_device_set_offline(CARD_RDEV(card));
+       qdio_free(CARD_DDEV(card));
        rc = ccw_device_set_online(CARD_RDEV(card));
        if (rc)
                goto retriable;
@@ -4918,7 +4920,6 @@ retry:
        rc = ccw_device_set_online(CARD_DDEV(card));
        if (rc)
                goto retriable;
-       rc = qeth_qdio_clear_card(card, card->info.type != QETH_CARD_TYPE_IQD);
 retriable:
        if (rc == -ERESTARTSYS) {
                QETH_DBF_TEXT(SETUP, 2, "break1");
index 0710550093ce6ac5fea8ddbf80f8409bbc1186f6..908d82529ee9c3e04a42caf92b9e0787836ef7d0 100644 (file)
@@ -1091,6 +1091,7 @@ out_remove:
        ccw_device_set_offline(CARD_DDEV(card));
        ccw_device_set_offline(CARD_WDEV(card));
        ccw_device_set_offline(CARD_RDEV(card));
+       qdio_free(CARD_DDEV(card));
        if (recover_flag == CARD_STATE_RECOVER)
                card->state = CARD_STATE_RECOVER;
        else
@@ -1132,6 +1133,7 @@ static int __qeth_l2_set_offline(struct ccwgroup_device *cgdev,
                rc = (rc2) ? rc2 : rc3;
        if (rc)
                QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
+       qdio_free(CARD_DDEV(card));
        if (recover_flag == CARD_STATE_UP)
                card->state = CARD_STATE_RECOVER;
        /* let user_space know that device is offline */
@@ -1194,6 +1196,7 @@ static void qeth_l2_shutdown(struct ccwgroup_device *gdev)
                qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
        qeth_qdio_clear_card(card, 0);
        qeth_clear_qdio_buffers(card);
+       qdio_free(CARD_DDEV(card));
 }
 
 static int qeth_l2_pm_suspend(struct ccwgroup_device *gdev)
index 0f430424c3b8b0aec231c3e2ca69d739c0c29701..3524d34ff694c273afefc7d85bfa17b6bce38af9 100644 (file)
@@ -3447,6 +3447,7 @@ out_remove:
        ccw_device_set_offline(CARD_DDEV(card));
        ccw_device_set_offline(CARD_WDEV(card));
        ccw_device_set_offline(CARD_RDEV(card));
+       qdio_free(CARD_DDEV(card));
        if (recover_flag == CARD_STATE_RECOVER)
                card->state = CARD_STATE_RECOVER;
        else
@@ -3493,6 +3494,7 @@ static int __qeth_l3_set_offline(struct ccwgroup_device *cgdev,
                rc = (rc2) ? rc2 : rc3;
        if (rc)
                QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
+       qdio_free(CARD_DDEV(card));
        if (recover_flag == CARD_STATE_UP)
                card->state = CARD_STATE_RECOVER;
        /* let user_space know that device is offline */
@@ -3545,6 +3547,7 @@ static void qeth_l3_shutdown(struct ccwgroup_device *gdev)
                qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
        qeth_qdio_clear_card(card, 0);
        qeth_clear_qdio_buffers(card);
+       qdio_free(CARD_DDEV(card));
 }
 
 static int qeth_l3_pm_suspend(struct ccwgroup_device *gdev)
index 31534b51715aa95e29664cc80d44d71b5131d0ab..c3b2fb9b6713c997b1338d5df13470e0227a5d93 100644 (file)
@@ -132,9 +132,9 @@ static int ath79_spi_setup_cs(struct spi_device *spi)
 
                flags = GPIOF_DIR_OUT;
                if (spi->mode & SPI_CS_HIGH)
-                       flags |= GPIOF_INIT_HIGH;
-               else
                        flags |= GPIOF_INIT_LOW;
+               else
+                       flags |= GPIOF_INIT_HIGH;
 
                status = gpio_request_one(cdata->gpio, flags,
                                          dev_name(&spi->dev));
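
The GPIO chip select must be parked at its inactive level, which is the opposite of the active level, so SPI_CS_HIGH (active-high CS) means initialising the line low. A standalone sketch; the flag values here are illustrative, not the kernel's:

#include <stdio.h>

#define SPI_CS_HIGH	0x04
#define GPIOF_INIT_LOW	0x0
#define GPIOF_INIT_HIGH	0x1

int main(void)
{
	unsigned mode = SPI_CS_HIGH;
	unsigned flags = (mode & SPI_CS_HIGH) ? GPIOF_INIT_LOW
					      : GPIOF_INIT_HIGH;

	printf("init flag: %s\n", flags == GPIOF_INIT_LOW ? "low" : "high");
	return 0;
}
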
index b0842f75101647616ada3a3db4f545a57b519be0..5d7b07f083266e64ab0d91436c10ae89a9ec112c 100644 (file)
@@ -1455,6 +1455,14 @@ static int atmel_spi_suspend(struct device *dev)
 {
        struct spi_master       *master = dev_get_drvdata(dev);
        struct atmel_spi        *as = spi_master_get_devdata(master);
+       int ret;
+
+       /* Stop the queue running */
+       ret = spi_master_suspend(master);
+       if (ret) {
+               dev_warn(dev, "cannot suspend master\n");
+               return ret;
+       }
 
        clk_disable_unprepare(as->clk);
        return 0;
@@ -1464,9 +1472,16 @@ static int atmel_spi_resume(struct device *dev)
 {
        struct spi_master       *master = dev_get_drvdata(dev);
        struct atmel_spi        *as = spi_master_get_devdata(master);
+       int ret;
 
        clk_prepare_enable(as->clk);
-       return 0;
+
+       /* Start the queue running */
+       ret = spi_master_resume(master);
+       if (ret)
+               dev_err(dev, "problem starting queue (%d)\n", ret);
+
+       return ret;
 }
 
 static SIMPLE_DEV_PM_OPS(atmel_spi_pm_ops, atmel_spi_suspend, atmel_spi_resume);
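
The suspend path now quiesces the transfer queue before gating the clock, and resume restarts the queue only after the clock is running again. A kernel-context sketch of the pairing, assuming the master is stored in drvdata:

static int example_spi_suspend(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	int ret;

	ret = spi_master_suspend(master);	/* stop the queue first */
	if (ret)
		return ret;
	/* ... clk_disable_unprepare(...) ... */
	return 0;
}

static int example_spi_resume(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);

	/* ... clk_prepare_enable(...) ... */
	return spi_master_resume(master);	/* restart the queue last */
}
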
index cabed8f9119e1af30d80766420d548b94567c91c..28ae470397a9e0a7c77c88f5370327e73df01a67 100644 (file)
@@ -514,7 +514,8 @@ static int mcfqspi_resume(struct device *dev)
 #ifdef CONFIG_PM_RUNTIME
 static int mcfqspi_runtime_suspend(struct device *dev)
 {
-       struct mcfqspi *mcfqspi = dev_get_drvdata(dev);
+       struct spi_master *master = dev_get_drvdata(dev);
+       struct mcfqspi *mcfqspi = spi_master_get_devdata(master);
 
        clk_disable(mcfqspi->clk);
 
@@ -523,7 +524,8 @@ static int mcfqspi_runtime_suspend(struct device *dev)
 
 static int mcfqspi_runtime_resume(struct device *dev)
 {
-       struct mcfqspi *mcfqspi = dev_get_drvdata(dev);
+       struct spi_master *master = dev_get_drvdata(dev);
+       struct mcfqspi *mcfqspi = spi_master_get_devdata(master);
 
        clk_enable(mcfqspi->clk);
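
dev_get_drvdata() here returns the spi_master that probe stored, not the driver's private state, so the private data must be derived with spi_master_get_devdata(); the fsl-dspi hunks below fix the same mismatch from the other direction by storing the master in drvdata. A kernel-context sketch, with the hypothetical struct my_priv:

static int example_runtime_suspend(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);	/* master */
	struct my_priv *priv = spi_master_get_devdata(master);	/* state */

	clk_disable(priv->clk);
	return 0;
}
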
 
index ec79f726672a14f7644810101afe59e82322976c..a25392065d9b838ef852e93d09e53d0d121ac485 100644 (file)
@@ -420,7 +420,6 @@ static int dspi_suspend(struct device *dev)
 
 static int dspi_resume(struct device *dev)
 {
-
        struct spi_master *master = dev_get_drvdata(dev);
        struct fsl_dspi *dspi = spi_master_get_devdata(master);
 
@@ -504,7 +503,7 @@ static int dspi_probe(struct platform_device *pdev)
        clk_prepare_enable(dspi->clk);
 
        init_waitqueue_head(&dspi->waitq);
-       platform_set_drvdata(pdev, dspi);
+       platform_set_drvdata(pdev, master);
 
        ret = spi_bitbang_start(&dspi->bitbang);
        if (ret != 0) {
@@ -525,7 +524,8 @@ out_master_put:
 
 static int dspi_remove(struct platform_device *pdev)
 {
-       struct fsl_dspi *dspi = platform_get_drvdata(pdev);
+       struct spi_master *master = platform_get_drvdata(pdev);
+       struct fsl_dspi *dspi = spi_master_get_devdata(master);
 
        /* Disconnect from the SPI framework */
        spi_bitbang_stop(&dspi->bitbang);
index a5474ef9d2a0cf4ad7367ee1e0399fae2e982145..47f15d97e7fa41efeede0bce941d4584a0c07be5 100644 (file)
@@ -948,8 +948,8 @@ static int spi_imx_remove(struct platform_device *pdev)
        spi_bitbang_stop(&spi_imx->bitbang);
 
        writel(0, spi_imx->base + MXC_CSPICTRL);
-       clk_disable_unprepare(spi_imx->clk_ipg);
-       clk_disable_unprepare(spi_imx->clk_per);
+       clk_unprepare(spi_imx->clk_ipg);
+       clk_unprepare(spi_imx->clk_per);
        spi_master_put(master);
 
        return 0;
index 2e7f38c7a9610b11f63fd36c816f311b3372d66d..88eb57e858b379ac258abad703bf532b60852669 100644 (file)
@@ -915,7 +915,7 @@ static void pch_spi_request_dma(struct pch_spi_data *data, int bpw)
        /* Set Tx DMA */
        param = &dma->param_tx;
        param->dma_dev = &dma_dev->dev;
-       param->chan_id = data->master->bus_num * 2; /* Tx = 0, 2 */
+       param->chan_id = data->ch * 2; /* Tx = 0, 2 */
        param->tx_reg = data->io_base_addr + PCH_SPDWR;
        param->width = width;
        chan = dma_request_channel(mask, pch_spi_filter, param);
@@ -930,7 +930,7 @@ static void pch_spi_request_dma(struct pch_spi_data *data, int bpw)
        /* Set Rx DMA */
        param = &dma->param_rx;
        param->dma_dev = &dma_dev->dev;
-       param->chan_id = data->master->bus_num * 2 + 1; /* Rx = Tx + 1 */
+       param->chan_id = data->ch * 2 + 1; /* Rx = Tx + 1 */
        param->rx_reg = data->io_base_addr + PCH_SPDRR;
        param->width = width;
        chan = dma_request_channel(mask, pch_spi_filter, param);
@@ -1452,6 +1452,11 @@ static int pch_spi_pd_probe(struct platform_device *plat_dev)
 
        pch_spi_set_master_mode(master);
 
+       if (use_dma) {
+               dev_info(&plat_dev->dev, "Use DMA for data transfers\n");
+               pch_alloc_dma_buf(board_dat, data);
+       }
+
        ret = spi_register_master(master);
        if (ret != 0) {
                dev_err(&plat_dev->dev,
@@ -1459,14 +1464,10 @@ static int pch_spi_pd_probe(struct platform_device *plat_dev)
                goto err_spi_register_master;
        }
 
-       if (use_dma) {
-               dev_info(&plat_dev->dev, "Use DMA for data transfers\n");
-               pch_alloc_dma_buf(board_dat, data);
-       }
-
        return 0;
 
 err_spi_register_master:
+       pch_free_dma_buf(board_dat, data);
        free_irq(board_dat->pdev->irq, data);
 err_request_irq:
        pch_spi_free_resources(board_dat, data);
index 7f1a7ce4b771a791cdf636906fd5fcdcc16c9ba6..b83ec378d04f8f1ed5fbc820429c6f1657b4d7a0 100644 (file)
@@ -785,7 +785,7 @@ static void iscsit_ack_from_expstatsn(struct iscsi_conn *conn, u32 exp_statsn)
        spin_unlock_bh(&conn->cmd_lock);
 
        list_for_each_entry_safe(cmd, cmd_p, &ack_list, i_conn_node) {
-               list_del(&cmd->i_conn_node);
+               list_del_init(&cmd->i_conn_node);
                iscsit_free_cmd(cmd, false);
        }
 }
@@ -3708,7 +3708,7 @@ iscsit_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state
                break;
        case ISTATE_REMOVE:
                spin_lock_bh(&conn->cmd_lock);
-               list_del(&cmd->i_conn_node);
+               list_del_init(&cmd->i_conn_node);
                spin_unlock_bh(&conn->cmd_lock);
 
                iscsit_free_cmd(cmd, false);
@@ -4151,7 +4151,7 @@ static void iscsit_release_commands_from_conn(struct iscsi_conn *conn)
        spin_lock_bh(&conn->cmd_lock);
        list_for_each_entry_safe(cmd, cmd_tmp, &conn->conn_cmd_list, i_conn_node) {
 
-               list_del(&cmd->i_conn_node);
+               list_del_init(&cmd->i_conn_node);
                spin_unlock_bh(&conn->cmd_lock);
 
                iscsit_increment_maxcmdsn(cmd, sess);
@@ -4196,6 +4196,10 @@ int iscsit_close_connection(
        iscsit_stop_timers_for_cmds(conn);
        iscsit_stop_nopin_response_timer(conn);
        iscsit_stop_nopin_timer(conn);
+
+       if (conn->conn_transport->iscsit_wait_conn)
+               conn->conn_transport->iscsit_wait_conn(conn);
+
        iscsit_free_queue_reqs_for_conn(conn);
 
        /*
index 33be1fb1df32f2850b6ceb1d54b3b5340a2b6bb1..4ca8fd2a70db4c05597f6b4bfbac171ce58ea8c7 100644 (file)
@@ -138,7 +138,7 @@ void iscsit_free_connection_recovery_entires(struct iscsi_session *sess)
                list_for_each_entry_safe(cmd, cmd_tmp,
                                &cr->conn_recovery_cmd_list, i_conn_node) {
 
-                       list_del(&cmd->i_conn_node);
+                       list_del_init(&cmd->i_conn_node);
                        cmd->conn = NULL;
                        spin_unlock(&cr->conn_recovery_cmd_lock);
                        iscsit_free_cmd(cmd, true);
@@ -160,7 +160,7 @@ void iscsit_free_connection_recovery_entires(struct iscsi_session *sess)
                list_for_each_entry_safe(cmd, cmd_tmp,
                                &cr->conn_recovery_cmd_list, i_conn_node) {
 
-                       list_del(&cmd->i_conn_node);
+                       list_del_init(&cmd->i_conn_node);
                        cmd->conn = NULL;
                        spin_unlock(&cr->conn_recovery_cmd_lock);
                        iscsit_free_cmd(cmd, true);
@@ -216,7 +216,7 @@ int iscsit_remove_cmd_from_connection_recovery(
        }
        cr = cmd->cr;
 
-       list_del(&cmd->i_conn_node);
+       list_del_init(&cmd->i_conn_node);
        return --cr->cmd_count;
 }
 
@@ -297,7 +297,7 @@ int iscsit_discard_unacknowledged_ooo_cmdsns_for_conn(struct iscsi_conn *conn)
                if (!(cmd->cmd_flags & ICF_OOO_CMDSN))
                        continue;
 
-               list_del(&cmd->i_conn_node);
+               list_del_init(&cmd->i_conn_node);
 
                spin_unlock_bh(&conn->cmd_lock);
                iscsit_free_cmd(cmd, true);
@@ -335,7 +335,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn)
        /*
         * Only perform connection recovery on ISCSI_OP_SCSI_CMD or
         * ISCSI_OP_NOOP_OUT opcodes.  For all other opcodes call
-        * list_del(&cmd->i_conn_node); to release the command to the
+        * list_del_init(&cmd->i_conn_node); to release the command to the
         * session pool and remove it from the connection's list.
         *
         * Also stop the DataOUT timer, which will be restarted after
@@ -351,7 +351,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn)
                                " CID: %hu\n", cmd->iscsi_opcode,
                                cmd->init_task_tag, cmd->cmd_sn, conn->cid);
 
-                       list_del(&cmd->i_conn_node);
+                       list_del_init(&cmd->i_conn_node);
                        spin_unlock_bh(&conn->cmd_lock);
                        iscsit_free_cmd(cmd, true);
                        spin_lock_bh(&conn->cmd_lock);
@@ -371,7 +371,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn)
                 */
                if (!(cmd->cmd_flags & ICF_OOO_CMDSN) && !cmd->immediate_cmd &&
                     iscsi_sna_gte(cmd->cmd_sn, conn->sess->exp_cmd_sn)) {
-                       list_del(&cmd->i_conn_node);
+                       list_del_init(&cmd->i_conn_node);
                        spin_unlock_bh(&conn->cmd_lock);
                        iscsit_free_cmd(cmd, true);
                        spin_lock_bh(&conn->cmd_lock);
@@ -393,7 +393,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn)
 
                cmd->sess = conn->sess;
 
-               list_del(&cmd->i_conn_node);
+               list_del_init(&cmd->i_conn_node);
                spin_unlock_bh(&conn->cmd_lock);
 
                iscsit_free_all_datain_reqs(cmd);
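
list_del() poisons the entry's pointers, so a later list_empty() test or a second removal on the same node is undefined; list_del_init() re-initialises the node to an empty list instead. A kernel-context sketch of why that matters here:

spin_lock_bh(&conn->cmd_lock);
list_del_init(&cmd->i_conn_node);	/* node now points at itself */
spin_unlock_bh(&conn->cmd_lock);

/* later, on another path, this check is now well-defined: */
if (list_empty(&cmd->i_conn_node))
	pr_debug("command already off its connection list\n");
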
index 39761837608d3a2ff059356e1049779580182952..44a5471de00ffe95c5fccdf5462931efa0acc361 100644 (file)
@@ -137,7 +137,7 @@ struct iscsi_portal_group *iscsit_get_tpg_from_np(
        list_for_each_entry(tpg, &tiqn->tiqn_tpg_list, tpg_list) {
 
                spin_lock(&tpg->tpg_state_lock);
-               if (tpg->tpg_state == TPG_STATE_FREE) {
+               if (tpg->tpg_state != TPG_STATE_ACTIVE) {
                        spin_unlock(&tpg->tpg_state_lock);
                        continue;
                }
index 42f18fc1067b63cd8c539959ae43e2548c53c34b..77e6531fb0a1c0a25ed16b71d1936ec37c2ed50a 100644 (file)
@@ -1079,25 +1079,31 @@ sbc_dif_copy_prot(struct se_cmd *cmd, unsigned int sectors, bool read,
        left = sectors * dev->prot_length;
 
        for_each_sg(cmd->t_prot_sg, psg, cmd->t_prot_nents, i) {
-
-               len = min(psg->length, left);
-               if (offset >= sg->length) {
-                       sg = sg_next(sg);
-                       offset = 0;
-               }
+               unsigned int psg_len, copied = 0;
 
                paddr = kmap_atomic(sg_page(psg)) + psg->offset;
-               addr = kmap_atomic(sg_page(sg)) + sg->offset + offset;
-
-               if (read)
-                       memcpy(paddr, addr, len);
-               else
-                       memcpy(addr, paddr, len);
-
-               left -= len;
-               offset += len;
+               psg_len = min(left, psg->length);
+               while (psg_len) {
+                       len = min(psg_len, sg->length - offset);
+                       addr = kmap_atomic(sg_page(sg)) + sg->offset + offset;
+
+                       if (read)
+                               memcpy(paddr + copied, addr, len);
+                       else
+                               memcpy(addr, paddr + copied, len);
+
+                       left -= len;
+                       offset += len;
+                       copied += len;
+                       psg_len -= len;
+
+                       if (offset >= sg->length) {
+                               sg = sg_next(sg);
+                               offset = 0;
+                       }
+                       kunmap_atomic(addr);
+               }
                kunmap_atomic(paddr);
-               kunmap_atomic(addr);
        }
 }
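
The rewritten sbc_dif_copy_prot() walks both scatterlists at once, copying min()-sized chunks and advancing the data-segment cursor whenever it is exhausted, instead of assuming one protection segment maps onto one data segment. A standalone sketch of the cursor logic, with arbitrary segment sizes and a contiguous buffer standing in for mapped pages:

#include <stdio.h>
#include <string.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
	char src[10] = "ABCDEFGHIJ", dst[10] = { 0 };
	size_t dst_seg[] = { 3, 7 };		/* "sg" side */
	size_t src_seg[] = { 4, 2, 4 };		/* "psg" side */
	size_t si = 0, di = 0, soff = 0, doff = 0, copied = 0;

	while (copied < sizeof(src)) {
		size_t len = MIN(src_seg[si] - soff, dst_seg[di] - doff);

		memcpy(dst + copied, src + copied, len);
		copied += len;
		soff += len;
		doff += len;
		if (soff == src_seg[si]) { si++; soff = 0; }
		if (doff == dst_seg[di]) { di++; doff = 0; }
	}
	printf("%.10s\n", dst);
	return 0;
}
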
 
index 35c066489a19ecd1def35f044e45dc52d55688a1..5f88d767671e956fa400b039f2cc21de9bc58621 100644 (file)
@@ -136,6 +136,7 @@ config SPEAR_THERMAL
 config RCAR_THERMAL
        tristate "Renesas R-Car thermal driver"
        depends on ARCH_SHMOBILE || COMPILE_TEST
+       depends on HAS_IOMEM
        help
          Enable this to plug the R-Car thermal sensor driver into the Linux
          thermal framework.
@@ -210,8 +211,16 @@ config ACPI_INT3403_THERMAL
        tristate "ACPI INT3403 thermal driver"
        depends on X86 && ACPI
        help
-         This driver uses ACPI INT3403 device objects. If present, it will
-         register each INT3403 thermal sensor as a thermal zone.
+         Newer laptops and tablets that use ACPI may have thermal sensors
+         outside the core CPU/SOC, for thermal safety reasons. These
+         temperature sensors are also exposed to the OS via the so-called
+         INT3403 ACPI object. On devices that have such sensors, this
+         driver exposes the temperature information from these sensors
+         to userspace via the normal thermal framework, so that a wide
+         range of applications and GUI widgets can show the information
+         to the user or use it for making decisions. For example, the
+         Intel Thermal Daemon can use this information to let the user
+         run the laptop without turning on the fans.
 
 menu "Texas Instruments thermal drivers"
 source "drivers/thermal/ti-soc-thermal/Kconfig"
index 338a88bf6662d1fd0b963f0f8bc7c02dce55e73b..71b0ec0c370d73aad18d685118c197735c5b2833 100644 (file)
@@ -56,10 +56,15 @@ static LIST_HEAD(thermal_governor_list);
 static DEFINE_MUTEX(thermal_list_lock);
 static DEFINE_MUTEX(thermal_governor_lock);
 
+static struct thermal_governor *def_governor;
+
 static struct thermal_governor *__find_governor(const char *name)
 {
        struct thermal_governor *pos;
 
+       if (!name || !name[0])
+               return def_governor;
+
        list_for_each_entry(pos, &thermal_governor_list, governor_list)
                if (!strnicmp(name, pos->name, THERMAL_NAME_LENGTH))
                        return pos;
@@ -82,17 +87,23 @@ int thermal_register_governor(struct thermal_governor *governor)
        if (__find_governor(governor->name) == NULL) {
                err = 0;
                list_add(&governor->governor_list, &thermal_governor_list);
+               if (!def_governor && !strncmp(governor->name,
+                       DEFAULT_THERMAL_GOVERNOR, THERMAL_NAME_LENGTH))
+                       def_governor = governor;
        }
 
        mutex_lock(&thermal_list_lock);
 
        list_for_each_entry(pos, &thermal_tz_list, node) {
+               /*
+                * only thermal zones with a specified tz->tzp->governor_name
+                * may run with tz->governor unset
+                */
                if (pos->governor)
                        continue;
-               if (pos->tzp)
-                       name = pos->tzp->governor_name;
-               else
-                       name = DEFAULT_THERMAL_GOVERNOR;
+
+               name = pos->tzp->governor_name;
+
                if (!strnicmp(name, governor->name, THERMAL_NAME_LENGTH))
                        pos->governor = governor;
        }
@@ -342,8 +353,8 @@ static void monitor_thermal_zone(struct thermal_zone_device *tz)
 static void handle_non_critical_trips(struct thermal_zone_device *tz,
                        int trip, enum thermal_trip_type trip_type)
 {
-       if (tz->governor)
-               tz->governor->throttle(tz, trip);
+       tz->governor ? tz->governor->throttle(tz, trip) :
+                      def_governor->throttle(tz, trip);
 }
 
 static void handle_critical_trips(struct thermal_zone_device *tz,
@@ -1107,7 +1118,7 @@ __thermal_cooling_device_register(struct device_node *np,
        INIT_LIST_HEAD(&cdev->thermal_instances);
        cdev->np = np;
        cdev->ops = ops;
-       cdev->updated = true;
+       cdev->updated = false;
        cdev->device.class = &thermal_class;
        cdev->devdata = devdata;
        dev_set_name(&cdev->device, "cooling_device%d", cdev->id);
@@ -1533,7 +1544,7 @@ struct thermal_zone_device *thermal_zone_device_register(const char *type,
        if (tz->tzp)
                tz->governor = __find_governor(tz->tzp->governor_name);
        else
-               tz->governor = __find_governor(DEFAULT_THERMAL_GOVERNOR);
+               tz->governor = def_governor;
 
        mutex_unlock(&thermal_governor_lock);
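
__find_governor() now resolves an empty or NULL name to the cached default governor, so thermal zones no longer need a populated tz->tzp to get the default. A standalone sketch of the fallback, with stand-in lookup logic:

#include <stdio.h>

static const char *def_governor = "step_wise";

static const char *find_governor(const char *name)
{
	if (!name || !name[0])
		return def_governor;	/* fall back to the default */
	return name;			/* stand-in for the list lookup */
}

int main(void)
{
	printf("%s\n", find_governor(NULL));
	printf("%s\n", find_governor("user_space"));
	return 0;
}
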
 
index 972e1c73722a4c4ec90cd75af32ce99da6438745..081fd7e6a9f070c683deaea799437a2e459b94f4 100644 (file)
@@ -68,6 +68,10 @@ struct phy_dev_entry {
        struct thermal_zone_device *tzone;
 };
 
+static const struct thermal_zone_params pkg_temp_tz_params = {
+       .no_hwmon       = true,
+};
+
 /* List maintaining number of package instances */
 static LIST_HEAD(phy_dev_list);
 static DEFINE_MUTEX(phy_dev_list_mutex);
@@ -394,7 +398,6 @@ static int pkg_temp_thermal_device_add(unsigned int cpu)
        int err;
        u32 tj_max;
        struct phy_dev_entry *phy_dev_entry;
-       char buffer[30];
        int thres_count;
        u32 eax, ebx, ecx, edx;
        u8 *temp;
@@ -440,13 +443,11 @@ static int pkg_temp_thermal_device_add(unsigned int cpu)
        phy_dev_entry->first_cpu = cpu;
        phy_dev_entry->tj_max = tj_max;
        phy_dev_entry->ref_cnt = 1;
-       snprintf(buffer, sizeof(buffer), "pkg-temp-%d\n",
-                                       phy_dev_entry->phys_proc_id);
-       phy_dev_entry->tzone = thermal_zone_device_register(buffer,
+       phy_dev_entry->tzone = thermal_zone_device_register("x86_pkg_temp",
                        thres_count,
                        (thres_count == MAX_NUMBER_OF_TRIPS) ?
                                0x03 : 0x01,
-                       phy_dev_entry, &tzone_ops, NULL, 0, 0);
+                       phy_dev_entry, &tzone_ops, &pkg_temp_tz_params, 0, 0);
        if (IS_ERR(phy_dev_entry->tzone)) {
                err = PTR_ERR(phy_dev_entry->tzone);
                goto err_ret_free;
index 4fb7a8f83c8a99ff8d3412a5328b2407c98409a8..54af4e93369583e5629a4bac6d1ecdbd74f885cf 100644 (file)
@@ -186,12 +186,12 @@ static bool is_invalid_reserved_pfn(unsigned long pfn)
        if (pfn_valid(pfn)) {
                bool reserved;
                struct page *tail = pfn_to_page(pfn);
-               struct page *head = compound_trans_head(tail);
+               struct page *head = compound_head(tail);
                reserved = !!(PageReserved(head));
                if (head != tail) {
                        /*
                         * "head" is not a dangling pointer
-                        * (compound_trans_head takes care of that)
+                        * (compound_head takes care of that)
                         * but the hugepage may have been split
                         * from under us (and we may not hold a
                         * reference count on the head page so it can
index 0129b78a69086b3ba2d53f24ab6d54b23faf6862..4f70f383132cc9dfe143c570fbb9759967fefc6f 100644 (file)
@@ -458,11 +458,10 @@ static int bio_integrity_verify(struct bio *bio)
        struct blk_integrity_exchg bix;
        struct bio_vec *bv;
        sector_t sector = bio->bi_integrity->bip_iter.bi_sector;
-       unsigned int sectors, total, ret;
+       unsigned int sectors, ret = 0;
        void *prot_buf = bio->bi_integrity->bip_buf;
        int i;
 
-       ret = total = 0;
        bix.disk_name = bio->bi_bdev->bd_disk->disk_name;
        bix.sector_size = bi->sector_size;
 
@@ -484,8 +483,6 @@ static int bio_integrity_verify(struct bio *bio)
                sectors = bv->bv_len / bi->sector_size;
                sector += sectors;
                prot_buf += sectors * bi->tuple_size;
-               total += sectors * bi->tuple_size;
-               BUG_ON(total > bio->bi_integrity->bip_iter.bi_size);
 
                kunmap_atomic(kaddr);
        }
index 968eab5bc1f5623f09169c65f178518dbd87ee2b..68537e8b7a0934859048ecd4cfb0e67cdcb6b819 100644 (file)
@@ -75,7 +75,7 @@ int hfsplus_parse_options_remount(char *input, int *force)
        int token;
 
        if (!input)
-               return 0;
+               return 1;
 
        while ((p = strsep(&input, ",")) != NULL) {
                if (!*p)
index aaa50611ec66c27f2b6cd9a67a039a05e0253787..d7b5108789e2e7d8a26049b7dc6a01f8a0ff2d6f 100644 (file)
@@ -717,6 +717,12 @@ static int ocfs2_release_dquot(struct dquot *dquot)
         */
        if (status < 0)
                mlog_errno(status);
+       /*
+        * Clear dq_off so that we search for the structure in the quota file
+        * next time we acquire it. The structure might be deleted and
+        * reallocated elsewhere by another node while our dquot is on the freelist.
+        */
+       dquot->dq_off = 0;
        clear_bit(DQ_ACTIVE_B, &dquot->dq_flags);
 out_trans:
        ocfs2_commit_trans(osb, handle);
@@ -756,16 +762,17 @@ static int ocfs2_acquire_dquot(struct dquot *dquot)
        status = ocfs2_lock_global_qf(info, 1);
        if (status < 0)
                goto out;
-       if (!test_bit(DQ_READ_B, &dquot->dq_flags)) {
-               status = ocfs2_qinfo_lock(info, 0);
-               if (status < 0)
-                       goto out_dq;
-               status = qtree_read_dquot(&info->dqi_gi, dquot);
-               ocfs2_qinfo_unlock(info, 0);
-               if (status < 0)
-                       goto out_dq;
-       }
-       set_bit(DQ_READ_B, &dquot->dq_flags);
+       status = ocfs2_qinfo_lock(info, 0);
+       if (status < 0)
+               goto out_dq;
+       /*
+        * We always want to read the dquot structure from disk because we
+        * don't know what happened to it while it was on the freelist.
+        */
+       status = qtree_read_dquot(&info->dqi_gi, dquot);
+       ocfs2_qinfo_unlock(info, 0);
+       if (status < 0)
+               goto out_dq;
 
        OCFS2_DQUOT(dquot)->dq_use_count++;
        OCFS2_DQUOT(dquot)->dq_origspace = dquot->dq_dqb.dqb_curspace;
index 2e4344be3b962b40a6cd4e5743a87f088b7abe63..2001862bf2b1cfbb20ed78bd9febeacbc96b42a4 100644 (file)
@@ -1303,10 +1303,6 @@ int ocfs2_local_release_dquot(handle_t *handle, struct dquot *dquot)
        ocfs2_journal_dirty(handle, od->dq_chunk->qc_headerbh);
 
 out:
-       /* Clear the read bit so that next time someone uses this
-        * dquot he reads fresh info from disk and allocates local
-        * dquot structure */
-       clear_bit(DQ_READ_B, &dquot->dq_flags);
        return status;
 }
 
index 02174a610315ebb218880a8744e05b2244b6d26f..e647c55275d9ff9f96b92ac859e65c10fc03d318 100644 (file)
@@ -121,9 +121,8 @@ u64 stable_page_flags(struct page *page)
         * just checks PG_head/PG_tail, so we need to check PageLRU/PageAnon
         * to make sure a given page is a thp, not a non-huge compound page.
         */
-       else if (PageTransCompound(page) &&
-                (PageLRU(compound_trans_head(page)) ||
-                 PageAnon(compound_trans_head(page))))
+       else if (PageTransCompound(page) && (PageLRU(compound_head(page)) ||
+                                            PageAnon(compound_head(page))))
                u |= 1 << KPF_THP;
 
        /*
index a1116a3b54ef106b610e74cc54ac0326b76805f6..8c1603b10665d141a2b0cb67e2643c0db7452146 100644 (file)
 #define TEGRA124_CLK_PWM 17
 #define TEGRA124_CLK_I2S2 18
 /* 20 (register bit affects vi and vi_sensor) */
-#define TEGRA124_CLK_GR_2D 21
+/* 21 */
 #define TEGRA124_CLK_USBD 22
 #define TEGRA124_CLK_ISP 23
-#define TEGRA124_CLK_GR_3D 24
+/* 24 */
 /* 25 */
 #define TEGRA124_CLK_DISP2 26
 #define TEGRA124_CLK_DISP1 27
index 18ba8a627f46e0fd77a97aa0257940d845f7f8a4..2ff2e8d982bea9b689050a2802fba5ec29e44b81 100644 (file)
@@ -121,8 +121,7 @@ void blk_mq_init_commands(struct request_queue *, void (*init)(void *data, struc
 
 void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);
 
-void blk_mq_insert_request(struct request_queue *, struct request *,
-               bool, bool);
+void blk_mq_insert_request(struct request *, bool, bool, bool);
 void blk_mq_run_queues(struct request_queue *q, bool async);
 void blk_mq_free_request(struct request *rq);
 bool blk_mq_can_queue(struct blk_mq_hw_ctx *);
@@ -134,7 +133,13 @@ struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_ind
 struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_reg *, unsigned int);
 void blk_mq_free_single_hw_queue(struct blk_mq_hw_ctx *, unsigned int);
 
-void blk_mq_end_io(struct request *rq, int error);
+bool blk_mq_end_io_partial(struct request *rq, int error,
+               unsigned int nr_bytes);
+static inline void blk_mq_end_io(struct request *rq, int error)
+{
+       bool done = !blk_mq_end_io_partial(rq, error, blk_rq_bytes(rq));
+       BUG_ON(!done);
+}
 
 void blk_mq_complete_request(struct request *rq);
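
With this rework, partial completion becomes the primitive: blk_mq_end_io_partial() retires nr_bytes of the request and, judging by the wrapper's BUG_ON, returns false once the request is fully completed and true while bytes remain. blk_mq_end_io() is reduced to the full-length convenience wrapper. A hedged driver-side sketch (the short-transfer bookkeeping is hypothetical):

    /* Sketch: complete only what the hardware actually transferred. */
    static void my_complete(struct request *rq, int error,
    			    unsigned int done_bytes)
    {
    	if (blk_mq_end_io_partial(rq, error, done_bytes)) {
    		/* bytes still pending: resubmit or fail the rest */
    	}
    }
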
 
index 5d7782e42b8f22f81df12652348ff9a21aebc7e4..c3683bdf28fe4e677959f33d524bd724a9a1d59d 100644 (file)
@@ -200,6 +200,7 @@ struct fw_device {
        unsigned irmc:1;
        unsigned bc_implemented:2;
 
+       work_func_t workfn;
        struct delayed_work work;
        struct fw_attribute_group attribute_group;
 };
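
Caching the handler in the new workfn member lets firewire keep one statically initialized work callback and change behavior by plain assignment, rather than re-priming the item with PREPARE_DELAYED_WORK (which the workqueue core was deprecating). The dispatch side is likely a trampoline of this shape (a sketch consistent with the struct change above):

    /* Sketch: stable callback that forwards to the cached handler. */
    static void fw_device_workfn(struct work_struct *work)
    {
    	struct fw_device *device =
    		container_of(to_delayed_work(work), struct fw_device, work);

    	device->workfn(work);
    }
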
index db512014e061b27abae7d689175e82d74348683b..b826239bdce0b26a614ae0802782860348dd6fc6 100644 (file)
@@ -157,46 +157,6 @@ static inline int hpage_nr_pages(struct page *page)
                return HPAGE_PMD_NR;
        return 1;
 }
-/*
- * compound_trans_head() should be used instead of compound_head(),
- * whenever the "page" passed as parameter could be the tail of a
- * transparent hugepage that could be undergoing a
- * __split_huge_page_refcount(). The page structure layout often
- * changes across releases and it makes extensive use of unions. So if
- * the page structure layout will change in a way that
- * page->first_page gets clobbered by __split_huge_page_refcount, the
- * implementation making use of smp_rmb() will be required.
- *
- * Currently we define compound_trans_head as compound_head, because
- * page->private is in the same union with page->first_page, and
- * page->private isn't clobbered. However this also means we're
- * currently leaving dirt into the page->private field of anonymous
- * pages resulting from a THP split, instead of setting page->private
- * to zero like for every other page that has PG_private not set. But
- * anonymous pages don't use page->private so this is not a problem.
- */
-#if 0
-/* This will be needed if page->private will be clobbered in split_huge_page */
-static inline struct page *compound_trans_head(struct page *page)
-{
-       if (PageTail(page)) {
-               struct page *head;
-               head = page->first_page;
-               smp_rmb();
-               /*
-                * head may be a dangling pointer.
-                * __split_huge_page_refcount clears PageTail before
-                * overwriting first_page, so if PageTail is still
-                * there it means the head pointer isn't dangling.
-                */
-               if (PageTail(page))
-                       return head;
-       }
-       return page;
-}
-#else
-#define compound_trans_head(page) compound_head(page)
-#endif
 
 extern int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
                                unsigned long addr, pmd_t pmd, pmd_t *pmdp);
@@ -226,7 +186,6 @@ static inline int split_huge_page(struct page *page)
        do { } while (0)
 #define split_huge_page_pmd_mm(__mm, __address, __pmd) \
        do { } while (0)
-#define compound_trans_head(page) compound_head(page)
 static inline int hugepage_madvise(struct vm_area_struct *vma,
                                   unsigned long *vm_flags, int advice)
 {
index f28f46eade6a642873246fea247f3f5455a18acb..c1b7414c7bef7c0a2128edd30438e48974af40f3 100644 (file)
@@ -175,7 +175,7 @@ extern unsigned int kobjsize(const void *objp);
  * Special vmas that are non-mergeable, non-mlock()able.
  * Note: mm/huge_memory.c VM_NO_THP depends on this definition.
  */
-#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP)
+#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP | VM_MIXEDMAP)
 
 /*
  * mapping from the currently active vm_flags protection bits (the
@@ -399,8 +399,18 @@ static inline void compound_unlock_irqrestore(struct page *page,
 
 static inline struct page *compound_head(struct page *page)
 {
-       if (unlikely(PageTail(page)))
-               return page->first_page;
+       if (unlikely(PageTail(page))) {
+               struct page *head = page->first_page;
+
+               /*
+                * page->first_page may be a dangling pointer to an old
+                * compound page, so recheck that it is still a tail
+                * page before returning.
+                */
+               smp_rmb();
+               if (likely(PageTail(page)))
+                       return head;
+       }
        return page;
 }
 
@@ -757,7 +767,7 @@ static inline bool __cpupid_match_pid(pid_t task_pid, int cpupid)
 #ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
 static inline int page_cpupid_xchg_last(struct page *page, int cpupid)
 {
-       return xchg(&page->_last_cpupid, cpupid);
+       return xchg(&page->_last_cpupid, cpupid & LAST_CPUPID_MASK);
 }
 
 static inline int page_cpupid_last(struct page *page)
@@ -766,7 +776,7 @@ static inline int page_cpupid_last(struct page *page)
 }
 static inline void page_cpupid_reset_last(struct page *page)
 {
-       page->_last_cpupid = -1;
+       page->_last_cpupid = -1 & LAST_CPUPID_MASK;
 }
 #else
 static inline int page_cpupid_last(struct page *page)
index 3ebbbe7b6d05fadbccaf66bac9ad1d8cfe7c5c17..5e1e6f2d98c2ae5b45b8a3ec5565eb793e840a68 100644 (file)
@@ -2725,7 +2725,7 @@ static inline void nf_reset(struct sk_buff *skb)
 
 static inline void nf_reset_trace(struct sk_buff *skb)
 {
-#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
+#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || defined(CONFIG_NF_TABLES)
        skb->nf_trace = 0;
 #endif
 }
@@ -2742,6 +2742,9 @@ static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src)
        dst->nf_bridge  = src->nf_bridge;
        nf_bridge_get(src->nf_bridge);
 #endif
+#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || defined(CONFIG_NF_TABLES)
+       dst->nf_trace = src->nf_trace;
+#endif
 }
 
 static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src)
index accc497f8d729160b0234756e96d99607cf67f33..7159a0a933df2b016db23cdcd87487c19d5829d0 100644 (file)
@@ -60,6 +60,12 @@ struct tp_module {
        unsigned int num_tracepoints;
        struct tracepoint * const *tracepoints_ptrs;
 };
+bool trace_module_has_bad_taint(struct module *mod);
+#else
+static inline bool trace_module_has_bad_taint(struct module *mod)
+{
+       return false;
+}
 #endif /* CONFIG_MODULES */
 
 struct tracepoint_iter {
index 48ed75c21260b8dd17e681021311bb8dae4f2617..e77c10405d515da16071461fe9f7385d8ac08556 100644 (file)
@@ -129,6 +129,7 @@ int ip_tunnel_changelink(struct net_device *dev, struct nlattr *tb[],
 int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[],
                      struct ip_tunnel_parm *p);
 void ip_tunnel_setup(struct net_device *dev, int net_id);
+void ip_tunnel_dst_reset_all(struct ip_tunnel *t);
 
 /* Extract dsfield from inner protocol */
 static inline u8 ip_tunnel_get_dsfield(const struct iphdr *iph,
index 56fc366da6d5183b536648e949769a118ba33677..8c4dd63134d498164dee2c60978fccb3568e5b57 100644 (file)
@@ -1303,7 +1303,8 @@ struct tcp_fastopen_request {
        /* Fast Open cookie. Size 0 means a cookie request */
        struct tcp_fastopen_cookie      cookie;
        struct msghdr                   *data;  /* data in MSG_FASTOPEN */
-       u16                             copied; /* queued in tcp_connect() */
+       size_t                          size;
+       int                             copied; /* queued in tcp_connect() */
 };
 void tcp_free_fastopen_req(struct tcp_sock *tp);
 
index afa5730fb3bd2ff810f63f861f1b52b29e34c965..fb5654a8ca3cee1c65923c105f3760d607eaa1de 100644 (file)
@@ -1648,6 +1648,11 @@ static inline int xfrm_aevent_is_on(struct net *net)
 }
 #endif
 
+static inline int aead_len(struct xfrm_algo_aead *alg)
+{
+       return sizeof(*alg) + ((alg->alg_key_len + 7) / 8);
+}
+
 static inline int xfrm_alg_len(const struct xfrm_algo *alg)
 {
        return sizeof(*alg) + ((alg->alg_key_len + 7) / 8);
@@ -1686,6 +1691,12 @@ static inline int xfrm_replay_clone(struct xfrm_state *x,
        return 0;
 }
 
+static inline struct xfrm_algo_aead *xfrm_algo_aead_clone(struct xfrm_algo_aead *orig)
+{
+       return kmemdup(orig, aead_len(orig), GFP_KERNEL);
+}
+
+
 static inline struct xfrm_algo *xfrm_algo_clone(struct xfrm_algo *orig)
 {
        return kmemdup(orig, xfrm_alg_len(orig), GFP_KERNEL);
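
aead_len() is the AEAD twin of xfrm_alg_len(): alg_key_len counts bits, so (alg_key_len + 7) / 8 rounds up to whole key bytes behind the struct header. For example, a 160-bit key yields sizeof(*alg) + 20 bytes, while a 163-bit key still yields sizeof(*alg) + 21 because of the +7 rounding; xfrm_algo_aead_clone() then kmemdup()s exactly that many bytes so the flexible key array survives the copy.
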
index ae5a17111968c7ca8bc8667a65d16393532ffbde..4483fadfa68d8fdc744743d6bc5bcd52de301608 100644 (file)
@@ -12,6 +12,7 @@ struct iscsit_transport {
        int (*iscsit_setup_np)(struct iscsi_np *, struct __kernel_sockaddr_storage *);
        int (*iscsit_accept_np)(struct iscsi_np *, struct iscsi_conn *);
        void (*iscsit_free_np)(struct iscsi_np *);
+       void (*iscsit_wait_conn)(struct iscsi_conn *);
        void (*iscsit_free_conn)(struct iscsi_conn *);
        int (*iscsit_get_login_rx)(struct iscsi_conn *, struct iscsi_login *);
        int (*iscsit_put_login_tx)(struct iscsi_conn *, struct iscsi_login *, u32);
index 4410ac6a55f1d9ae410976e3a8e7315acf88a61b..e6b1b66afe526acfa2a9ecbfc5bad9da76e8d9b4 100644 (file)
@@ -974,12 +974,6 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
  *    Temporarily set the task's mems_allowed to target nodes of migration,
  *    so that the migration code can allocate pages on these nodes.
  *
- *    Call holding cpuset_mutex, so current's cpuset won't change
- *    during this call, as manage_mutex holds off any cpuset_attach()
- *    calls.  Therefore we don't need to take task_lock around the
- *    call to guarantee_online_mems(), as we know no one is changing
- *    our task's cpuset.
- *
  *    While the mm_struct we are migrating is typically from some
  *    other task, the task_struct mems_allowed that we are hacking
  *    is for our current task, which must allocate new pages for that
@@ -996,8 +990,10 @@ static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
 
        do_migrate_pages(mm, from, to, MPOL_MF_MOVE_ALL);
 
+       rcu_read_lock();
        mems_cs = effective_nodemask_cpuset(task_cs(tsk));
        guarantee_online_mems(mems_cs, &tsk->mems_allowed);
+       rcu_read_unlock();
 }
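
task_cs() follows an RCU-protected pointer, and cpusets can be freed after a grace period, so both the lookup and every dereference of the result must sit inside one read-side critical section; the hunk above adds exactly that bracket around guarantee_online_mems(). The general shape (a condensed sketch):

    /* Sketch: cs only stays alive inside the read-side section. */
    rcu_read_lock();
    cs = task_cs(tsk);
    /* ... read fields of cs ... */
    rcu_read_unlock();
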
 
 /*
@@ -2486,9 +2482,9 @@ int __cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
 
        task_lock(current);
        cs = nearest_hardwall_ancestor(task_cs(current));
+       allowed = node_isset(node, cs->mems_allowed);
        task_unlock(current);
 
-       allowed = node_isset(node, cs->mems_allowed);
        mutex_unlock(&callback_mutex);
        return allowed;
 }
index cf68bb36fe5885dd2ba7b9c9ccef3c5165a320d3..f14033700c25cbc6872ccc2e60d33ee25a3493e9 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/mutex.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
+#include <linux/of_irq.h>
 #include <linux/topology.h>
 #include <linux/seq_file.h>
 #include <linux/slab.h>
index 481a13c43b1708d3501a7f686760573c4eab6a34..d3bf660cb57fb8a26e55c3605dffc78ce82035f4 100644 (file)
@@ -802,8 +802,7 @@ static irqreturn_t irq_thread_fn(struct irq_desc *desc,
 
 static void wake_threads_waitq(struct irq_desc *desc)
 {
-       if (atomic_dec_and_test(&desc->threads_active) &&
-           waitqueue_active(&desc->wait_for_threads))
+       if (atomic_dec_and_test(&desc->threads_active))
                wake_up(&desc->wait_for_threads);
 }
 
index 5b8838b56d1c8dee47354f3a1a697263678d00ea..5b9bb42b2d47e760f3a7cd17af24ba37d2cf07ea 100644 (file)
@@ -70,7 +70,7 @@ static void cpudl_heapify(struct cpudl *cp, int idx)
 
 static void cpudl_change_key(struct cpudl *cp, int idx, u64 new_dl)
 {
-       WARN_ON(!cpu_present(idx) || idx == IDX_INVALID);
+       WARN_ON(idx == IDX_INVALID || !cpu_present(idx));
 
        if (dl_time_before(new_dl, cp->elements[idx].dl)) {
                cp->elements[idx].dl = new_dl;
@@ -117,7 +117,7 @@ int cpudl_find(struct cpudl *cp, struct task_struct *p,
        }
 
 out:
-       WARN_ON(!cpu_present(best_cpu) && best_cpu != -1);
+       WARN_ON(best_cpu != -1 && !cpu_present(best_cpu));
 
        return best_cpu;
 }
index 15cbc17fbf84d57ac52aa9b752d9552a4b4335ee..6e79b3faa4cd5384754232dc3b74300367afec8e 100644 (file)
@@ -135,7 +135,6 @@ static void update_dl_migration(struct dl_rq *dl_rq)
 static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 {
        struct task_struct *p = dl_task_of(dl_se);
-       dl_rq = &rq_of_dl_rq(dl_rq)->dl;
 
        if (p->nr_cpus_allowed > 1)
                dl_rq->dl_nr_migratory++;
@@ -146,7 +145,6 @@ static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 static void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 {
        struct task_struct *p = dl_task_of(dl_se);
-       dl_rq = &rq_of_dl_rq(dl_rq)->dl;
 
        if (p->nr_cpus_allowed > 1)
                dl_rq->dl_nr_migratory--;
@@ -564,6 +562,8 @@ int dl_runtime_exceeded(struct rq *rq, struct sched_dl_entity *dl_se)
        return 1;
 }
 
+extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq);
+
 /*
  * Update the current task's runtime statistics (provided it is still
  * a -deadline task and has not been removed from the dl_rq).
@@ -627,11 +627,13 @@ static void update_curr_dl(struct rq *rq)
                struct rt_rq *rt_rq = &rq->rt;
 
                raw_spin_lock(&rt_rq->rt_runtime_lock);
-               rt_rq->rt_time += delta_exec;
                /*
                 * We'll let actual RT tasks worry about the overflow here, we
-                * have our own CBS to keep us inline -- see above.
+                * have our own CBS to keep us in line; only account when RT
+                * bandwidth is relevant.
                 */
+               if (sched_rt_bandwidth_account(rt_rq))
+                       rt_rq->rt_time += delta_exec;
                raw_spin_unlock(&rt_rq->rt_runtime_lock);
        }
 }
index 78157099b167c64f034c8c3348c02eaeb41c888a..9b4c4f3201301a269bd66718650899274f204bc6 100644 (file)
@@ -7001,15 +7001,15 @@ static void switched_from_fair(struct rq *rq, struct task_struct *p)
        struct cfs_rq *cfs_rq = cfs_rq_of(se);
 
        /*
-        * Ensure the task's vruntime is normalized, so that when its
+        * Ensure the task's vruntime is normalized, so that when it's
         * switched back to the fair class the enqueue_entity(.flags=0) will
         * do the right thing.
         *
-        * If it was on_rq, then the dequeue_entity(.flags=0) will already
-        * have normalized the vruntime, if it was !on_rq, then only when
+        * If it's on_rq, then the dequeue_entity(.flags=0) will already
+        * have normalized the vruntime, if it's !on_rq, then only when
         * the task is sleeping will it still have non-normalized vruntime.
         */
-       if (!se->on_rq && p->state != TASK_RUNNING) {
+       if (!p->on_rq && p->state != TASK_RUNNING) {
                /*
                 * Fix up our vruntime so that the current sleep doesn't
                 * cause 'unlimited' sleep bonus.
index a2740b775b456ed27c56ad088007c83e1873458b..1999021042c7010c6e5e86539b9c0b9a03519dcd 100644 (file)
@@ -538,6 +538,14 @@ static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
 
 #endif /* CONFIG_RT_GROUP_SCHED */
 
+bool sched_rt_bandwidth_account(struct rt_rq *rt_rq)
+{
+       struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
+
+       return (hrtimer_active(&rt_b->rt_period_timer) ||
+               rt_rq->rt_time < rt_b->rt_runtime);
+}
+
 #ifdef CONFIG_SMP
 /*
  * We ran out of runtime, see if we can borrow some from our neighbours.
index e71ffd4eccb5b4d7a7b0234dd72e525b1c647550..f3989ceb5cd5aa7cbfae069015e8d915d6eb4c59 100644 (file)
@@ -1777,6 +1777,16 @@ static void trace_module_add_events(struct module *mod)
 {
        struct ftrace_event_call **call, **start, **end;
 
+       if (!mod->num_trace_events)
+               return;
+
+       /* Don't add infrastructure for mods without tracepoints */
+       if (trace_module_has_bad_taint(mod)) {
+               pr_err("%s: module has bad taint, not creating trace events\n",
+                      mod->name);
+               return;
+       }
+
        start = mod->trace_events;
        end = mod->trace_events + mod->num_trace_events;
 
index 29f26540e9c9550fd1c099512e7b20beb21ce234..031cc5655a514d2bcf89f930388dcddda396986f 100644 (file)
@@ -631,6 +631,11 @@ void tracepoint_iter_reset(struct tracepoint_iter *iter)
 EXPORT_SYMBOL_GPL(tracepoint_iter_reset);
 
 #ifdef CONFIG_MODULES
+bool trace_module_has_bad_taint(struct module *mod)
+{
+       return mod->taints & ~((1 << TAINT_OOT_MODULE) | (1 << TAINT_CRAP));
+}
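
trace_module_has_bad_taint() centralizes the policy that tracepoint_module_coming() open-coded below: any taint bit other than TAINT_OOT_MODULE (out-of-tree) or TAINT_CRAP (staging) disqualifies the module, since e.g. a forced load may carry mismatched tracepoint headers. Illustrative cases, not from this patch:

    /* Illustrative: which taints the predicate tolerates. */
    mod->taints = 1 << TAINT_OOT_MODULE;	/* false: tolerated */
    mod->taints = 1 << TAINT_CRAP;		/* false: tolerated */
    mod->taints = 1 << TAINT_FORCED_MODULE;	/* true: reject     */
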
+
 static int tracepoint_module_coming(struct module *mod)
 {
        struct tp_module *tp_mod, *iter;
@@ -641,7 +646,7 @@ static int tracepoint_module_coming(struct module *mod)
         * module headers (for forced load), to make sure we don't cause a crash.
         * Staging and out-of-tree GPL modules are fine.
         */
-       if (mod->taints & ~((1 << TAINT_OOT_MODULE) | (1 << TAINT_CRAP)))
+       if (trace_module_has_bad_taint(mod))
                return 0;
        mutex_lock(&tracepoints_mutex);
        tp_mod = kmalloc(sizeof(struct tp_module), GFP_KERNEL);
index 2defd1308b045c46389a6232391999914573deb6..98f2d7e91a9114e5e124e42bef00d7935708e521 100644 (file)
@@ -424,111 +424,134 @@ void debug_dma_dump_mappings(struct device *dev)
 EXPORT_SYMBOL(debug_dma_dump_mappings);
 
 /*
- * For each page mapped (initial page in the case of
- * dma_alloc_coherent/dma_map_{single|page}, or each page in a
- * scatterlist) insert into this tree using the pfn as the key. At
+ * For each mapping (initial cacheline in the case of
+ * dma_alloc_coherent/dma_map_page, initial cacheline in each page of a
+ * scatterlist, or the cacheline specified in dma_map_single) insert
+ * into this tree using the cacheline as the key. At
  * dma_unmap_{single|sg|page} or dma_free_coherent delete the entry.  If
- * the pfn already exists at insertion time add a tag as a reference
+ * the entry already exists at insertion time add a tag as a reference
  * count for the overlapping mappings.  For now, the overlap tracking
- * just ensures that 'unmaps' balance 'maps' before marking the pfn
- * idle, but we should also be flagging overlaps as an API violation.
+ * just ensures that 'unmaps' balance 'maps' before marking the
+ * cacheline idle, but we should also be flagging overlaps as an API
+ * violation.
  *
  * Memory usage is mostly constrained by the maximum number of available
  * dma-debug entries in that we need a free dma_debug_entry before
- * inserting into the tree.  In the case of dma_map_{single|page} and
- * dma_alloc_coherent there is only one dma_debug_entry and one pfn to
- * track per event.  dma_map_sg(), on the other hand,
- * consumes a single dma_debug_entry, but inserts 'nents' entries into
- * the tree.
+ * inserting into the tree.  In the case of dma_map_page and
+ * dma_alloc_coherent there is only one dma_debug_entry and one
+ * dma_active_cacheline entry to track per event.  dma_map_sg(), on the
+ * other hand, consumes a single dma_debug_entry, but inserts 'nents'
+ * entries into the tree.
  *
  * At any time debug_dma_assert_idle() can be called to trigger a
- * warning if the given page is in the active set.
+ * warning if any cachelines in the given page are in the active set.
  */
-static RADIX_TREE(dma_active_pfn, GFP_NOWAIT);
+static RADIX_TREE(dma_active_cacheline, GFP_NOWAIT);
 static DEFINE_SPINLOCK(radix_lock);
-#define ACTIVE_PFN_MAX_OVERLAP ((1 << RADIX_TREE_MAX_TAGS) - 1)
+#define ACTIVE_CACHELINE_MAX_OVERLAP ((1 << RADIX_TREE_MAX_TAGS) - 1)
+#define CACHELINE_PER_PAGE_SHIFT (PAGE_SHIFT - L1_CACHE_SHIFT)
+#define CACHELINES_PER_PAGE (1 << CACHELINE_PER_PAGE_SHIFT)
 
-static int active_pfn_read_overlap(unsigned long pfn)
+static phys_addr_t to_cacheline_number(struct dma_debug_entry *entry)
+{
+       return (entry->pfn << CACHELINE_PER_PAGE_SHIFT) +
+               (entry->offset >> L1_CACHE_SHIFT);
+}
+
+static int active_cacheline_read_overlap(phys_addr_t cln)
 {
        int overlap = 0, i;
 
        for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--)
-               if (radix_tree_tag_get(&dma_active_pfn, pfn, i))
+               if (radix_tree_tag_get(&dma_active_cacheline, cln, i))
                        overlap |= 1 << i;
        return overlap;
 }
 
-static int active_pfn_set_overlap(unsigned long pfn, int overlap)
+static int active_cacheline_set_overlap(phys_addr_t cln, int overlap)
 {
        int i;
 
-       if (overlap > ACTIVE_PFN_MAX_OVERLAP || overlap < 0)
+       if (overlap > ACTIVE_CACHELINE_MAX_OVERLAP || overlap < 0)
                return overlap;
 
        for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--)
                if (overlap & 1 << i)
-                       radix_tree_tag_set(&dma_active_pfn, pfn, i);
+                       radix_tree_tag_set(&dma_active_cacheline, cln, i);
                else
-                       radix_tree_tag_clear(&dma_active_pfn, pfn, i);
+                       radix_tree_tag_clear(&dma_active_cacheline, cln, i);
 
        return overlap;
 }
 
-static void active_pfn_inc_overlap(unsigned long pfn)
+static void active_cacheline_inc_overlap(phys_addr_t cln)
 {
-       int overlap = active_pfn_read_overlap(pfn);
+       int overlap = active_cacheline_read_overlap(cln);
 
-       overlap = active_pfn_set_overlap(pfn, ++overlap);
+       overlap = active_cacheline_set_overlap(cln, ++overlap);
 
        /* If we overflowed the overlap counter then we're potentially
         * leaking dma-mappings.  Otherwise, if maps and unmaps are
         * balanced then this overflow may cause false negatives in
-        * debug_dma_assert_idle() as the pfn may be marked idle
+        * debug_dma_assert_idle() as the cacheline may be marked idle
         * prematurely.
         */
-       WARN_ONCE(overlap > ACTIVE_PFN_MAX_OVERLAP,
-                 "DMA-API: exceeded %d overlapping mappings of pfn %lx\n",
-                 ACTIVE_PFN_MAX_OVERLAP, pfn);
+       WARN_ONCE(overlap > ACTIVE_CACHELINE_MAX_OVERLAP,
+                 "DMA-API: exceeded %d overlapping mappings of cacheline %pa\n",
+                 ACTIVE_CACHELINE_MAX_OVERLAP, &cln);
 }
 
-static int active_pfn_dec_overlap(unsigned long pfn)
+static int active_cacheline_dec_overlap(phys_addr_t cln)
 {
-       int overlap = active_pfn_read_overlap(pfn);
+       int overlap = active_cacheline_read_overlap(cln);
 
-       return active_pfn_set_overlap(pfn, --overlap);
+       return active_cacheline_set_overlap(cln, --overlap);
 }
 
-static int active_pfn_insert(struct dma_debug_entry *entry)
+static int active_cacheline_insert(struct dma_debug_entry *entry)
 {
+       phys_addr_t cln = to_cacheline_number(entry);
        unsigned long flags;
        int rc;
 
+       /* If the device is not writing memory then we don't have any
+        * concerns about the cpu consuming stale data.  This mitigates
+        * legitimate usages of overlapping mappings.
+        */
+       if (entry->direction == DMA_TO_DEVICE)
+               return 0;
+
        spin_lock_irqsave(&radix_lock, flags);
-       rc = radix_tree_insert(&dma_active_pfn, entry->pfn, entry);
+       rc = radix_tree_insert(&dma_active_cacheline, cln, entry);
        if (rc == -EEXIST)
-               active_pfn_inc_overlap(entry->pfn);
+               active_cacheline_inc_overlap(cln);
        spin_unlock_irqrestore(&radix_lock, flags);
 
        return rc;
 }
 
-static void active_pfn_remove(struct dma_debug_entry *entry)
+static void active_cacheline_remove(struct dma_debug_entry *entry)
 {
+       phys_addr_t cln = to_cacheline_number(entry);
        unsigned long flags;
 
+       /* ...mirror the insert case */
+       if (entry->direction == DMA_TO_DEVICE)
+               return;
+
        spin_lock_irqsave(&radix_lock, flags);
        /* since we are counting overlaps the final put of the
-        * entry->pfn will occur when the overlap count is 0.
-        * active_pfn_dec_overlap() returns -1 in that case
+        * cacheline will occur when the overlap count is 0.
+        * active_cacheline_dec_overlap() returns -1 in that case
         */
-       if (active_pfn_dec_overlap(entry->pfn) < 0)
-               radix_tree_delete(&dma_active_pfn, entry->pfn);
+       if (active_cacheline_dec_overlap(cln) < 0)
+               radix_tree_delete(&dma_active_cacheline, cln);
        spin_unlock_irqrestore(&radix_lock, flags);
 }
 
 /**
  * debug_dma_assert_idle() - assert that a page is not undergoing dma
- * @page: page to lookup in the dma_active_pfn tree
+ * @page: page to lookup in the dma_active_cacheline tree
  *
  * Place a call to this routine in cases where the cpu touching the page
  * before the dma completes (page is dma_unmapped) will lead to data
@@ -536,22 +559,38 @@ static void active_pfn_remove(struct dma_debug_entry *entry)
  */
 void debug_dma_assert_idle(struct page *page)
 {
+       static struct dma_debug_entry *ents[CACHELINES_PER_PAGE];
+       struct dma_debug_entry *entry = NULL;
+       void **results = (void **) &ents;
+       unsigned int nents, i;
        unsigned long flags;
-       struct dma_debug_entry *entry;
+       phys_addr_t cln;
 
        if (!page)
                return;
 
+       cln = (phys_addr_t) page_to_pfn(page) << CACHELINE_PER_PAGE_SHIFT;
        spin_lock_irqsave(&radix_lock, flags);
-       entry = radix_tree_lookup(&dma_active_pfn, page_to_pfn(page));
+       nents = radix_tree_gang_lookup(&dma_active_cacheline, results, cln,
+                                      CACHELINES_PER_PAGE);
+       for (i = 0; i < nents; i++) {
+               phys_addr_t ent_cln = to_cacheline_number(ents[i]);
+
+               if (ent_cln == cln) {
+                       entry = ents[i];
+                       break;
+               } else if (ent_cln >= cln + CACHELINES_PER_PAGE)
+                       break;
+       }
        spin_unlock_irqrestore(&radix_lock, flags);
 
        if (!entry)
                return;
 
+       cln = to_cacheline_number(entry);
        err_printk(entry->dev, entry,
-                  "DMA-API: cpu touching an active dma mapped page "
-                  "[pfn=0x%lx]\n", entry->pfn);
+                  "DMA-API: cpu touching an active dma mapped cacheline [cln=%pa]\n",
+                  &cln);
 }
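
For the common configuration of 4 KiB pages (PAGE_SHIFT = 12) and 64-byte cachelines (L1_CACHE_SHIFT = 6), CACHELINES_PER_PAGE is 1 << (12 - 6) = 64, and to_cacheline_number() packs pfn and in-page offset into one radix-tree key: pfn 0x1234 with offset 0x80 maps to (0x1234 << 6) + (0x80 >> 6) = 0x48d00 + 2 = 0x48d02. Since a page's 64 keys are contiguous, the gang lookup in debug_dma_assert_idle() scans at most CACHELINES_PER_PAGE slots starting at pfn << 6 to decide whether any cacheline of the page is still DMA-active.
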
 
 /*
@@ -568,9 +607,9 @@ static void add_dma_entry(struct dma_debug_entry *entry)
        hash_bucket_add(bucket, entry);
        put_hash_bucket(bucket, &flags);
 
-       rc = active_pfn_insert(entry);
+       rc = active_cacheline_insert(entry);
        if (rc == -ENOMEM) {
-               pr_err("DMA-API: pfn tracking ENOMEM, dma-debug disabled\n");
+               pr_err("DMA-API: cacheline tracking ENOMEM, dma-debug disabled\n");
                global_disable = true;
        }
 
@@ -631,7 +670,7 @@ static void dma_entry_free(struct dma_debug_entry *entry)
 {
        unsigned long flags;
 
-       active_pfn_remove(entry);
+       active_cacheline_remove(entry);
 
        /*
         * add to beginning of the list - this way the entries are
index 7811ed3b4e701c2e0d82368a8bae457279ca3246..bd4a8dfdf0b8052cdaedd0b3478eea4138ac100a 100644 (file)
@@ -1253,8 +1253,10 @@ unsigned long radix_tree_locate_item(struct radix_tree_root *root, void *item)
 
                node = indirect_to_ptr(node);
                max_index = radix_tree_maxindex(node->height);
-               if (cur_index > max_index)
+               if (cur_index > max_index) {
+                       rcu_read_unlock();
                        break;
+               }
 
                cur_index = __locate(node, item, cur_index, &found_index);
                rcu_read_unlock();
index 4df39b1bde91ff9c9300db00f9df485733c064db..1546655a2d78afe4dc1c954b5e0ccff77ba3437f 100644 (file)
@@ -1961,7 +1961,7 @@ out:
        return ret;
 }
 
-#define VM_NO_THP (VM_SPECIAL|VM_MIXEDMAP|VM_HUGETLB|VM_SHARED|VM_MAYSHARE)
+#define VM_NO_THP (VM_SPECIAL | VM_HUGETLB | VM_SHARED | VM_MAYSHARE)
 
 int hugepage_madvise(struct vm_area_struct *vma,
                     unsigned long *vm_flags, int advice)
index aa4c7c7250c11a95b9676b70c7cc38f987d88717..68710e80994afed815c58b59a1a3cf421df8101f 100644 (file)
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -444,7 +444,7 @@ static void break_cow(struct rmap_item *rmap_item)
 static struct page *page_trans_compound_anon(struct page *page)
 {
        if (PageTransCompound(page)) {
-               struct page *head = compound_trans_head(page);
+               struct page *head = compound_head(page);
                /*
                 * head may actually be split and freed from under
                 * us but it's ok here.
index ce7a8cc7b40417556c7e2cffd4f5172ababb57c5..5b6b0039f725032de5d63376aa0388e49bd3192d 100644 (file)
@@ -1127,8 +1127,8 @@ skip_node:
         * skipping css reference should be safe.
         */
        if (next_css) {
-               if ((next_css->flags & CSS_ONLINE) &&
-                               (next_css == &root->css || css_tryget(next_css)))
+               if ((next_css == &root->css) ||
+                   ((next_css->flags & CSS_ONLINE) && css_tryget(next_css)))
                        return mem_cgroup_from_css(next_css);
 
                prev_css = next_css;
@@ -6595,6 +6595,7 @@ static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
 {
        struct mem_cgroup *memcg = mem_cgroup_from_css(css);
        struct mem_cgroup_event *event, *tmp;
+       struct cgroup_subsys_state *iter;
 
        /*
         * Unregister events and notify userspace.
@@ -6611,7 +6612,14 @@ static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
        kmem_cgroup_css_offline(memcg);
 
        mem_cgroup_invalidate_reclaim_iterators(memcg);
-       mem_cgroup_reparent_charges(memcg);
+
+       /*
+        * This requires that offlining is serialized.  Right now that is
+        * guaranteed because css_killed_work_fn() holds the cgroup_mutex.
+        */
+       css_for_each_descendant_post(iter, css)
+               mem_cgroup_reparent_charges(mem_cgroup_from_css(iter));
+
        mem_cgroup_destroy_all_caches(memcg);
        vmpressure_cleanup(&memcg->vmpressure);
 }
index 2f2f34a4e77de18e47bc815e3fd90ace33967e55..90002ea43638cd0dd669d12092837714ec3113f3 100644 (file)
@@ -1651,7 +1651,7 @@ int soft_offline_page(struct page *page, int flags)
 {
        int ret;
        unsigned long pfn = page_to_pfn(page);
-       struct page *hpage = compound_trans_head(page);
+       struct page *hpage = compound_head(page);
 
        if (PageHWPoison(page)) {
                pr_info("soft offline: %#lx page already poisoned\n", pfn);
index e3758a09a009747bd17cb75442d0fcae69a74cc4..3bac76ae4b30ec8a62042bdff9644e87ee181d3c 100644 (file)
@@ -369,9 +369,11 @@ void prep_compound_page(struct page *page, unsigned long order)
        __SetPageHead(page);
        for (i = 1; i < nr_pages; i++) {
                struct page *p = page + i;
-               __SetPageTail(p);
                set_page_count(p, 0);
                p->first_page = page;
+               /* Make sure p->first_page is always valid for PageTail() */
+               smp_wmb();
+               __SetPageTail(p);
        }
 }
 
@@ -1236,6 +1238,15 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
        }
        local_irq_restore(flags);
 }
+static bool gfp_thisnode_allocation(gfp_t gfp_mask)
+{
+       return (gfp_mask & GFP_THISNODE) == GFP_THISNODE;
+}
+#else
+static bool gfp_thisnode_allocation(gfp_t gfp_mask)
+{
+       return false;
+}
 #endif
 
 /*
@@ -1572,7 +1583,13 @@ again:
                                          get_pageblock_migratetype(page));
        }
 
-       __mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order));
+       /*
+        * NOTE: GFP_THISNODE allocations do not partake in the kswapd
+        * aging protocol, so they can't be fair.
+        */
+       if (!gfp_thisnode_allocation(gfp_flags))
+               __mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order));
+
        __count_zone_vm_events(PGALLOC, zone, 1 << order);
        zone_statistics(preferred_zone, zone, gfp_flags);
        local_irq_restore(flags);
@@ -1944,8 +1961,12 @@ zonelist_scan:
                 * ultimately fall back to remote zones that do not
                 * partake in the fairness round-robin cycle of this
                 * zonelist.
+                *
+                * NOTE: GFP_THISNODE allocations do not partake in
+                * the kswapd aging protocol, so they can't be fair.
                 */
-               if (alloc_flags & ALLOC_WMARK_LOW) {
+               if ((alloc_flags & ALLOC_WMARK_LOW) &&
+                   !gfp_thisnode_allocation(gfp_mask)) {
                        if (zone_page_state(zone, NR_ALLOC_BATCH) <= 0)
                                continue;
                        if (!zone_local(preferred_zone, zone))
@@ -2501,8 +2522,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
         * allowed per node queues are empty and that nodes are
         * over allocated.
         */
-       if (IS_ENABLED(CONFIG_NUMA) &&
-                       (gfp_mask & GFP_THISNODE) == GFP_THISNODE)
+       if (gfp_thisnode_allocation(gfp_mask))
                goto nopage;
 
 restart:
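
gfp_thisnode_allocation() needs the full-equality test because GFP_THISNODE is a composite mask on NUMA builds (__GFP_THISNODE | __GFP_NORETRY | __GFP_NOWARN): a plain non-zero AND would misclassify, say, an allocation that passes only __GFP_NOWARN. On !NUMA builds GFP_THISNODE is 0 and (gfp_mask & 0) == 0 holds for every mask, which is why the #else stub returning false above is required rather than cosmetic.
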
index b31ba67d440ac997a05de372e831046bf98d9070..0092097b3f4ce5e22844759a9e264ef143d05c7c 100644 (file)
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -98,7 +98,7 @@ static void put_compound_page(struct page *page)
        }
 
        /* __split_huge_page_refcount can run under us */
-       page_head = compound_trans_head(page);
+       page_head = compound_head(page);
 
        /*
         * THP can not break up slab pages so avoid taking
@@ -253,7 +253,7 @@ bool __get_page_tail(struct page *page)
         */
        unsigned long flags;
        bool got;
-       struct page *page_head = compound_trans_head(page);
+       struct page *page_head = compound_head(page);
 
        /* Ref to put_compound_page() comment. */
        if (!__compound_tail_refcounted(page_head)) {
index 8be757cca2ec444171982cb8c33ef58ab27310be..081e81fd017fa53f7a6ed3afd341601b43377531 100644 (file)
@@ -121,13 +121,9 @@ static void raw_rcv(struct sk_buff *oskb, void *data)
        if (!ro->recv_own_msgs && oskb->sk == sk)
                return;
 
-       /* do not pass frames with DLC > 8 to a legacy socket */
-       if (!ro->fd_frames) {
-               struct canfd_frame *cfd = (struct canfd_frame *)oskb->data;
-
-               if (unlikely(cfd->len > CAN_MAX_DLEN))
-                       return;
-       }
+       /* do not pass non-CAN2.0 frames to a legacy socket */
+       if (!ro->fd_frames && oskb->len != CAN_MTU)
+               return;
 
        /* clone the given skb to be able to enqueue it into the rcv queue */
        skb = skb_clone(oskb, GFP_ATOMIC);
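
Comparing the skb length against CAN_MTU is both stricter and simpler than the old DLC check: a classic frame is carried in exactly CAN_MTU bytes (sizeof(struct can_frame), 16) and a CAN FD frame in CANFD_MTU bytes (sizeof(struct canfd_frame), 72), so one comparison keeps FD frames away from legacy sockets. A sketch of the dispatch a receiver effectively performs (the deliver_* helpers are hypothetical):

    /* Sketch: the frame kind is encoded in the skb length. */
    if (skb->len == CAN_MTU)		/* 16: struct can_frame   */
    	deliver_classic(skb);
    else if (skb->len == CANFD_MTU)	/* 72: struct canfd_frame */
    	deliver_fd(skb);		/* only if ro->fd_frames   */
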
@@ -738,9 +734,7 @@ static int raw_recvmsg(struct kiocb *iocb, struct socket *sock,
                       struct msghdr *msg, size_t size, int flags)
 {
        struct sock *sk = sock->sk;
-       struct raw_sock *ro = raw_sk(sk);
        struct sk_buff *skb;
-       int rxmtu;
        int err = 0;
        int noblock;
 
@@ -751,20 +745,10 @@ static int raw_recvmsg(struct kiocb *iocb, struct socket *sock,
        if (!skb)
                return err;
 
-       /*
-        * when serving a legacy socket the DLC <= 8 is already checked inside
-        * raw_rcv(). Now check if we need to pass a canfd_frame to a legacy
-        * socket and cut the possible CANFD_MTU/CAN_MTU length to CAN_MTU
-        */
-       if (!ro->fd_frames)
-               rxmtu = CAN_MTU;
-       else
-               rxmtu = skb->len;
-
-       if (size < rxmtu)
+       if (size < skb->len)
                msg->msg_flags |= MSG_TRUNC;
        else
-               size = rxmtu;
+               size = skb->len;
 
        err = memcpy_toiovec(msg->msg_iov, skb->data, size);
        if (err < 0) {
index b9e9e0d38672a8ca9a17f8d6ec8daf88b8a09a65..e16129019c6658ae7b1ab697692f8e6484c8cd90 100644 (file)
@@ -766,9 +766,6 @@ static void neigh_periodic_work(struct work_struct *work)
        nht = rcu_dereference_protected(tbl->nht,
                                        lockdep_is_held(&tbl->lock));
 
-       if (atomic_read(&tbl->entries) < tbl->gc_thresh1)
-               goto out;
-
        /*
         *      periodically recompute ReachableTime from random function
         */
@@ -781,6 +778,9 @@ static void neigh_periodic_work(struct work_struct *work)
                                neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
        }
 
+       if (atomic_read(&tbl->entries) < tbl->gc_thresh1)
+               goto out;
+
        for (i = 0 ; i < (1 << nht->hash_shift); i++) {
                np = &nht->hash_buckets[i];
 
@@ -3046,7 +3046,7 @@ int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
        if (!t)
                goto err;
 
-       for (i = 0; i < ARRAY_SIZE(t->neigh_vars); i++) {
+       for (i = 0; i < NEIGH_VAR_GC_INTERVAL; i++) {
                t->neigh_vars[i].data += (long) p;
                t->neigh_vars[i].extra1 = dev;
                t->neigh_vars[i].extra2 = p;
index 5976ef0846bdda08db6289bb91f05a63b85e6e3a..5d6236d9fdce41eeea2e41433e8f51803b2be68a 100644 (file)
@@ -707,9 +707,6 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
        new->mark               = old->mark;
        new->skb_iif            = old->skb_iif;
        __nf_copy(new, old);
-#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
-       new->nf_trace           = old->nf_trace;
-#endif
 #ifdef CONFIG_NET_SCHED
        new->tc_index           = old->tc_index;
 #ifdef CONFIG_NET_CLS_ACT
index 327060c6c874b337cf6edf9bd37c5b8181c0beee..7ae0d7f6dbd0bff10516e9415050d7aa41a07da0 100644 (file)
@@ -297,7 +297,7 @@ static bool seq_nr_after(u16 a, u16 b)
 
 void hsr_register_frame_in(struct node_entry *node, enum hsr_dev_idx dev_idx)
 {
-       if ((dev_idx < 0) || (dev_idx >= HSR_MAX_DEV)) {
+       if ((dev_idx < 0) || (dev_idx >= HSR_MAX_SLAVE)) {
                WARN_ONCE(1, "%s: Invalid dev_idx (%d)\n", __func__, dev_idx);
                return;
        }
index ecd2c3f245ce2b2e0b79f17417c5e6ad8c70abf6..19ab78aca547fc7fb45e56607e7adafd0036d947 100644 (file)
@@ -1296,8 +1296,11 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb,
 
        segs = ERR_PTR(-EPROTONOSUPPORT);
 
-       /* Note : following gso_segment() might change skb->encapsulation */
-       udpfrag = !skb->encapsulation && proto == IPPROTO_UDP;
+       if (skb->encapsulation &&
+           skb_shinfo(skb)->gso_type & (SKB_GSO_SIT|SKB_GSO_IPIP))
+               udpfrag = proto == IPPROTO_UDP && encap;
+       else
+               udpfrag = proto == IPPROTO_UDP && !skb->encapsulation;
 
        ops = rcu_dereference(inet_offloads[proto]);
        if (likely(ops && ops->callbacks.gso_segment))
index 8971780aec7c5fadb4223c0391b05aa9c881ba34..73c6b63bba74e57b70589ff548fccf0db79cc9a4 100644 (file)
@@ -422,9 +422,6 @@ static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
        to->tc_index = from->tc_index;
 #endif
        nf_copy(to, from);
-#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
-       to->nf_trace = from->nf_trace;
-#endif
 #if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
        to->ipvs_property = from->ipvs_property;
 #endif
index 50228be5c17bfc2c02d6539eea210a537bcc421b..78a89e61925d6ae27937098f906145d2d8c48f5d 100644 (file)
@@ -93,13 +93,14 @@ static void tunnel_dst_reset(struct ip_tunnel *t)
        tunnel_dst_set(t, NULL);
 }
 
-static void tunnel_dst_reset_all(struct ip_tunnel *t)
+void ip_tunnel_dst_reset_all(struct ip_tunnel *t)
 {
        int i;
 
        for_each_possible_cpu(i)
                __tunnel_dst_set(per_cpu_ptr(t->dst_cache, i), NULL);
 }
+EXPORT_SYMBOL(ip_tunnel_dst_reset_all);
 
 static struct rtable *tunnel_rtable_get(struct ip_tunnel *t, u32 cookie)
 {
@@ -119,52 +120,6 @@ static struct rtable *tunnel_rtable_get(struct ip_tunnel *t, u32 cookie)
        return (struct rtable *)dst;
 }
 
-/* Often modified stats are per cpu, other are shared (netdev->stats) */
-struct rtnl_link_stats64 *ip_tunnel_get_stats64(struct net_device *dev,
-                                               struct rtnl_link_stats64 *tot)
-{
-       int i;
-
-       for_each_possible_cpu(i) {
-               const struct pcpu_sw_netstats *tstats =
-                                                  per_cpu_ptr(dev->tstats, i);
-               u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
-               unsigned int start;
-
-               do {
-                       start = u64_stats_fetch_begin_bh(&tstats->syncp);
-                       rx_packets = tstats->rx_packets;
-                       tx_packets = tstats->tx_packets;
-                       rx_bytes = tstats->rx_bytes;
-                       tx_bytes = tstats->tx_bytes;
-               } while (u64_stats_fetch_retry_bh(&tstats->syncp, start));
-
-               tot->rx_packets += rx_packets;
-               tot->tx_packets += tx_packets;
-               tot->rx_bytes   += rx_bytes;
-               tot->tx_bytes   += tx_bytes;
-       }
-
-       tot->multicast = dev->stats.multicast;
-
-       tot->rx_crc_errors = dev->stats.rx_crc_errors;
-       tot->rx_fifo_errors = dev->stats.rx_fifo_errors;
-       tot->rx_length_errors = dev->stats.rx_length_errors;
-       tot->rx_frame_errors = dev->stats.rx_frame_errors;
-       tot->rx_errors = dev->stats.rx_errors;
-
-       tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
-       tot->tx_carrier_errors = dev->stats.tx_carrier_errors;
-       tot->tx_dropped = dev->stats.tx_dropped;
-       tot->tx_aborted_errors = dev->stats.tx_aborted_errors;
-       tot->tx_errors = dev->stats.tx_errors;
-
-       tot->collisions  = dev->stats.collisions;
-
-       return tot;
-}
-EXPORT_SYMBOL_GPL(ip_tunnel_get_stats64);
-
 static bool ip_tunnel_key_match(const struct ip_tunnel_parm *p,
                                __be16 flags, __be32 key)
 {
@@ -759,7 +714,7 @@ static void ip_tunnel_update(struct ip_tunnel_net *itn,
                if (set_mtu)
                        dev->mtu = mtu;
        }
-       tunnel_dst_reset_all(t);
+       ip_tunnel_dst_reset_all(t);
        netdev_state_change(dev);
 }
 
@@ -1088,7 +1043,7 @@ void ip_tunnel_uninit(struct net_device *dev)
        if (itn->fb_tunnel_dev != dev)
                ip_tunnel_del(netdev_priv(dev));
 
-       tunnel_dst_reset_all(tunnel);
+       ip_tunnel_dst_reset_all(tunnel);
 }
 EXPORT_SYMBOL_GPL(ip_tunnel_uninit);
 
index 6156f4ef5e919ccdc470f8cdb5a27593dedbe2c3..6f847dd56dbc7fb53a7f3dc9dfc8330e507ec0e9 100644 (file)
@@ -108,7 +108,6 @@ int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto)
        nf_reset(skb);
        secpath_reset(skb);
        skb_clear_hash_if_not_l4(skb);
-       skb_dst_drop(skb);
        skb->vlan_tci = 0;
        skb_set_queue_mapping(skb, 0);
        skb->pkt_type = PACKET_HOST;
@@ -148,3 +147,49 @@ error:
        return ERR_PTR(err);
 }
 EXPORT_SYMBOL_GPL(iptunnel_handle_offloads);
+
+/* Often modified stats are per cpu, others are shared (netdev->stats) */
+struct rtnl_link_stats64 *ip_tunnel_get_stats64(struct net_device *dev,
+                                               struct rtnl_link_stats64 *tot)
+{
+       int i;
+
+       for_each_possible_cpu(i) {
+               const struct pcpu_sw_netstats *tstats =
+                                                  per_cpu_ptr(dev->tstats, i);
+               u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
+               unsigned int start;
+
+               do {
+                       start = u64_stats_fetch_begin_bh(&tstats->syncp);
+                       rx_packets = tstats->rx_packets;
+                       tx_packets = tstats->tx_packets;
+                       rx_bytes = tstats->rx_bytes;
+                       tx_bytes = tstats->tx_bytes;
+               } while (u64_stats_fetch_retry_bh(&tstats->syncp, start));
+
+               tot->rx_packets += rx_packets;
+               tot->tx_packets += tx_packets;
+               tot->rx_bytes   += rx_bytes;
+               tot->tx_bytes   += tx_bytes;
+       }
+
+       tot->multicast = dev->stats.multicast;
+
+       tot->rx_crc_errors = dev->stats.rx_crc_errors;
+       tot->rx_fifo_errors = dev->stats.rx_fifo_errors;
+       tot->rx_length_errors = dev->stats.rx_length_errors;
+       tot->rx_frame_errors = dev->stats.rx_frame_errors;
+       tot->rx_errors = dev->stats.rx_errors;
+
+       tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
+       tot->tx_carrier_errors = dev->stats.tx_carrier_errors;
+       tot->tx_dropped = dev->stats.tx_dropped;
+       tot->tx_aborted_errors = dev->stats.tx_aborted_errors;
+       tot->tx_errors = dev->stats.tx_errors;
+
+       tot->collisions  = dev->stats.collisions;
+
+       return tot;
+}
+EXPORT_SYMBOL_GPL(ip_tunnel_get_stats64);
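
The reader loop above takes tearing-free snapshots of the per-cpu counters through the u64_stats seqcount: fetch_begin/fetch_retry repeat the reads whenever a writer ran concurrently, which matters for 64-bit counters on 32-bit SMP. The writer side is not part of this hunk; as a sketch, it brackets its updates so readers can detect the overlap:

    /* Writer-side sketch: bump counters inside the update section. */
    u64_stats_update_begin(&tstats->syncp);
    tstats->rx_packets++;
    tstats->rx_bytes += skb->len;
    u64_stats_update_end(&tstats->syncp);
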
index d551e31b416e02e728a433293d25317e9c930dcb..7c676671329d9432eb2392f6b4fc649ebcdae97f 100644 (file)
@@ -1198,8 +1198,8 @@ static int snmp_translate(struct nf_conn *ct,
                map.to = NOCT1(&ct->tuplehash[!dir].tuple.dst.u3.ip);
        } else {
                /* DNAT replies */
-               map.from = NOCT1(&ct->tuplehash[dir].tuple.src.u3.ip);
-               map.to = NOCT1(&ct->tuplehash[!dir].tuple.dst.u3.ip);
+               map.from = NOCT1(&ct->tuplehash[!dir].tuple.src.u3.ip);
+               map.to = NOCT1(&ct->tuplehash[dir].tuple.dst.u3.ip);
        }
 
        if (map.from == map.to)
index 9f3a2db9109efda9a121135d46406f0beabb8805..97c8f5620c430930c0b9c7958db079a968bd807b 100644 (file)
@@ -1044,7 +1044,8 @@ void tcp_free_fastopen_req(struct tcp_sock *tp)
        }
 }
 
-static int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg, int *size)
+static int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg,
+                               int *copied, size_t size)
 {
        struct tcp_sock *tp = tcp_sk(sk);
        int err, flags;
@@ -1059,11 +1060,12 @@ static int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg, int *size)
        if (unlikely(tp->fastopen_req == NULL))
                return -ENOBUFS;
        tp->fastopen_req->data = msg;
+       tp->fastopen_req->size = size;
 
        flags = (msg->msg_flags & MSG_DONTWAIT) ? O_NONBLOCK : 0;
        err = __inet_stream_connect(sk->sk_socket, msg->msg_name,
                                    msg->msg_namelen, flags);
-       *size = tp->fastopen_req->copied;
+       *copied = tp->fastopen_req->copied;
        tcp_free_fastopen_req(tp);
        return err;
 }
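
Recording the caller's size in tp->fastopen_req->size lets tcp_send_syn_data() (patched further down) cap the SYN payload at what was actually supplied rather than at the cookie-limited window. Userspace reaches this path with sendto() and MSG_FASTOPEN on a fresh, unconnected socket; a hedged sketch, with buf, len, and daddr assumed to be set up by the caller:

    /* Userspace sketch: data rides in the SYN once the kernel holds
     * a valid Fast Open cookie for the destination. */
    int s = socket(AF_INET, SOCK_STREAM, 0);
    ssize_t n = sendto(s, buf, len, MSG_FASTOPEN,
    		   (struct sockaddr *)&daddr, sizeof(daddr));
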
@@ -1083,7 +1085,7 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 
        flags = msg->msg_flags;
        if (flags & MSG_FASTOPEN) {
-               err = tcp_sendmsg_fastopen(sk, msg, &copied_syn);
+               err = tcp_sendmsg_fastopen(sk, msg, &copied_syn, size);
                if (err == -EINPROGRESS && copied_syn > 0)
                        goto out;
                else if (err)
index ad37bf18ae4b95a6870a8019ecc3ca9ff55d7679..2388275adb9bd0fcfb8ea7a97a22ac661b4df745 100644 (file)
@@ -290,8 +290,7 @@ bool tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight)
        left = tp->snd_cwnd - in_flight;
        if (sk_can_gso(sk) &&
            left * sysctl_tcp_tso_win_divisor < tp->snd_cwnd &&
-           left * tp->mss_cache < sk->sk_gso_max_size &&
-           left < sk->sk_gso_max_segs)
+           left < tp->xmit_size_goal_segs)
                return true;
        return left <= tcp_max_tso_deferred_mss(tp);
 }
index 227cba79fa6b1f490f27a3aaf013070fbf9c8015..eeaac399420de043bb466603fcb03ff83076438a 100644 (file)
@@ -1945,8 +1945,9 @@ void tcp_enter_loss(struct sock *sk, int how)
                if (skb == tcp_send_head(sk))
                        break;
 
-               if (TCP_SKB_CB(skb)->sacked & TCPCB_RETRANS)
+               if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS)
                        tp->undo_marker = 0;
+
                TCP_SKB_CB(skb)->sacked &= (~TCPCB_TAGBITS)|TCPCB_SACKED_ACKED;
                if (!(TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_ACKED) || how) {
                        TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_ACKED;
index 3be16727f058b191d305366f9cf5851f03c7c417..f0eb4e337ec88fc631a30adc0d168817e5812ced 100644 (file)
@@ -864,8 +864,8 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
 
                if (unlikely(skb->fclone == SKB_FCLONE_ORIG &&
                             fclone->fclone == SKB_FCLONE_CLONE))
-                       NET_INC_STATS_BH(sock_net(sk),
-                                        LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES);
+                       NET_INC_STATS(sock_net(sk),
+                                     LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES);
 
                if (unlikely(skb_cloned(skb)))
                        skb = pskb_copy(skb, gfp_mask);
@@ -2337,6 +2337,7 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
        struct tcp_sock *tp = tcp_sk(sk);
        struct inet_connection_sock *icsk = inet_csk(sk);
        unsigned int cur_mss;
+       int err;
 
        /* Inconclusive MTU probe */
        if (icsk->icsk_mtup.probe_size) {
@@ -2400,11 +2401,15 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
                     skb_headroom(skb) >= 0xFFFF)) {
                struct sk_buff *nskb = __pskb_copy(skb, MAX_TCP_HEADER,
                                                   GFP_ATOMIC);
-               return nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) :
-                             -ENOBUFS;
+               err = nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) :
+                            -ENOBUFS;
        } else {
-               return tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
+               err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
        }
+
+       if (likely(!err))
+               TCP_SKB_CB(skb)->sacked |= TCPCB_EVER_RETRANS;
+       return err;
 }
 
 int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
@@ -2908,7 +2913,12 @@ static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn)
        space = __tcp_mtu_to_mss(sk, inet_csk(sk)->icsk_pmtu_cookie) -
                MAX_TCP_OPTION_SPACE;
 
-       syn_data = skb_copy_expand(syn, skb_headroom(syn), space,
+       space = min_t(size_t, space, fo->size);
+
+       /* limit to order-0 allocations */
+       space = min_t(size_t, space, SKB_MAX_HEAD(MAX_TCP_HEADER));
+
+       syn_data = skb_copy_expand(syn, MAX_TCP_HEADER, space,
                                   sk->sk_allocation);
        if (syn_data == NULL)
                goto fallback;
index d92e5586783e518c1f5db2b1bd3261766ff7a913..438a73aa777cf560f38a87801b03b8ce20a315b1 100644 (file)
@@ -138,6 +138,7 @@ config INET6_XFRM_MODE_ROUTEOPTIMIZATION
 config IPV6_VTI
 tristate "Virtual (secure) IPv6: tunneling"
        select IPV6_TUNNEL
+       select NET_IP_TUNNEL
        depends on INET6_XFRM_MODE_TUNNEL
        ---help---
        Tunneling means encapsulating data of one protocol type within
index 140748debc4ade194e5e179636e94264da7e65a1..8af3eb57f4380fd7de7497ff98f40c88f2040e50 100644 (file)
@@ -212,7 +212,7 @@ int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
                found = (nexthdr == target);
 
                if ((!ipv6_ext_hdr(nexthdr)) || nexthdr == NEXTHDR_NONE) {
-                       if (target < 0)
+                       if (target < 0 || found)
                                break;
                        return -ENOENT;
                }
index 1e8683b135bb7b503d9ab97d5c3c165af2453fa1..59f95affceb0773d052184bdf5fec9f276433d63 100644 (file)
@@ -89,7 +89,7 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
        unsigned int unfrag_ip6hlen;
        u8 *prevhdr;
        int offset = 0;
-       bool tunnel;
+       bool encap, udpfrag;
        int nhoff;
 
        if (unlikely(skb_shinfo(skb)->gso_type &
@@ -110,8 +110,8 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
        if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h))))
                goto out;
 
-       tunnel = SKB_GSO_CB(skb)->encap_level > 0;
-       if (tunnel)
+       encap = SKB_GSO_CB(skb)->encap_level > 0;
+       if (encap)
                features = skb->dev->hw_enc_features & netif_skb_features(skb);
        SKB_GSO_CB(skb)->encap_level += sizeof(*ipv6h);
 
@@ -121,6 +121,12 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
 
        proto = ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr);
 
+       if (skb->encapsulation &&
+           skb_shinfo(skb)->gso_type & (SKB_GSO_SIT|SKB_GSO_IPIP))
+               udpfrag = proto == IPPROTO_UDP && encap;
+       else
+               udpfrag = proto == IPPROTO_UDP && !skb->encapsulation;
+
        ops = rcu_dereference(inet6_offloads[proto]);
        if (likely(ops && ops->callbacks.gso_segment)) {
                skb_reset_transport_header(skb);
@@ -133,13 +139,9 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
        for (skb = segs; skb; skb = skb->next) {
                ipv6h = (struct ipv6hdr *)(skb_mac_header(skb) + nhoff);
                ipv6h->payload_len = htons(skb->len - nhoff - sizeof(*ipv6h));
-               if (tunnel) {
-                       skb_reset_inner_headers(skb);
-                       skb->encapsulation = 1;
-               }
                skb->network_header = (u8 *)ipv6h - skb->head;
 
-               if (!tunnel && proto == IPPROTO_UDP) {
+               if (udpfrag) {
                        unfrag_ip6hlen = ip6_find_1stfragopt(skb, &prevhdr);
                        fptr = (struct frag_hdr *)((u8 *)ipv6h + unfrag_ip6hlen);
                        fptr->frag_off = htons(offset);
@@ -148,6 +150,8 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
                        offset += (ntohs(ipv6h->payload_len) -
                                   sizeof(struct frag_hdr));
                }
+               if (encap)
+                       skb_reset_inner_headers(skb);
        }
 
 out:
index 070a2fae2375cd5f6c4ad8109887dc6236ee5b08..16f91a2e788819b6b1755ef65a98c8cb699e0597 100644 (file)
@@ -530,9 +530,6 @@ static void ip6_copy_metadata(struct sk_buff *to, struct sk_buff *from)
        to->tc_index = from->tc_index;
 #endif
        nf_copy(to, from);
-#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
-       to->nf_trace = from->nf_trace;
-#endif
        skb_copy_secmark(to, from);
 }
 
index fb9beb78f00b23fc307384bf6aef21adf2ab6895..587bbdcb22b4c04c0186932d463bce29ca32b38e 100644 (file)
@@ -135,6 +135,7 @@ int ping_v6_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
        fl6.flowi6_proto = IPPROTO_ICMPV6;
        fl6.saddr = np->saddr;
        fl6.daddr = *daddr;
+       fl6.flowi6_mark = sk->sk_mark;
        fl6.fl6_icmp_type = user_icmph.icmp6_type;
        fl6.fl6_icmp_code = user_icmph.icmp6_code;
        security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
index 3dfbcf1dcb1cbdb38a2a0b17a328bb424b139eb0..b4d74c86586cd1a0256afb59870e32ebbebd56e8 100644 (file)
@@ -475,6 +475,7 @@ static void ipip6_tunnel_uninit(struct net_device *dev)
                ipip6_tunnel_unlink(sitn, tunnel);
                ipip6_tunnel_del_prl(tunnel, NULL);
        }
+       ip_tunnel_dst_reset_all(tunnel);
        dev_put(dev);
 }
 
@@ -1082,6 +1083,7 @@ static void ipip6_tunnel_update(struct ip_tunnel *t, struct ip_tunnel_parm *p)
                t->parms.link = p->link;
                ipip6_tunnel_bind_dev(t->dev);
        }
+       ip_tunnel_dst_reset_all(t);
        netdev_state_change(t->dev);
 }
 
@@ -1112,6 +1114,7 @@ static int ipip6_tunnel_update_6rd(struct ip_tunnel *t,
        t->ip6rd.relay_prefix = relay_prefix;
        t->ip6rd.prefixlen = ip6rd->prefixlen;
        t->ip6rd.relay_prefixlen = ip6rd->relay_prefixlen;
+       ip_tunnel_dst_reset_all(t);
        netdev_state_change(t->dev);
        return 0;
 }
@@ -1271,6 +1274,7 @@ ipip6_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
                        err = ipip6_tunnel_add_prl(t, &prl, cmd == SIOCCHGPRL);
                        break;
                }
+               ip_tunnel_dst_reset_all(t);
                netdev_state_change(dev);
                break;
 
@@ -1326,6 +1330,9 @@ static const struct net_device_ops ipip6_netdev_ops = {
 
 static void ipip6_dev_free(struct net_device *dev)
 {
+       struct ip_tunnel *tunnel = netdev_priv(dev);
+
+       free_percpu(tunnel->dst_cache);
        free_percpu(dev->tstats);
        free_netdev(dev);
 }
@@ -1375,6 +1382,12 @@ static int ipip6_tunnel_init(struct net_device *dev)
                u64_stats_init(&ipip6_tunnel_stats->syncp);
        }
 
+       tunnel->dst_cache = alloc_percpu(struct ip_tunnel_dst);
+       if (!tunnel->dst_cache) {
+               free_percpu(dev->tstats);
+               return -ENOMEM;
+       }
+
        return 0;
 }
 
@@ -1405,6 +1418,12 @@ static int __net_init ipip6_fb_tunnel_init(struct net_device *dev)
                u64_stats_init(&ipip6_fb_stats->syncp);
        }
 
+       tunnel->dst_cache = alloc_percpu(struct ip_tunnel_dst);
+       if (!tunnel->dst_cache) {
+               free_percpu(dev->tstats);
+               return -ENOMEM;
+       }
+
        dev_hold(dev);
        rcu_assign_pointer(sitn->tunnels_wc[0], tunnel);
        return 0;
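
The sit.c hunks above pair the new per-cpu dst_cache allocation with freeing
the already-allocated dev->tstats on failure. A stand-alone sketch of that
two-step init/unwind pattern, with calloc()/free() standing in for
alloc_percpu()/free_percpu(); all names are invented for illustration.

#include <stdio.h>
#include <stdlib.h>

struct fake_dev { void *tstats; void *dst_cache; };

static int fake_tunnel_init(struct fake_dev *dev)
{
        dev->tstats = calloc(1, 64);            /* like alloc_percpu() */
        if (!dev->tstats)
                return -1;

        dev->dst_cache = calloc(1, 64);
        if (!dev->dst_cache) {
                free(dev->tstats);              /* unwind the first step */
                dev->tstats = NULL;
                return -1;                      /* like -ENOMEM */
        }
        return 0;
}

int main(void)
{
        struct fake_dev dev = { 0 };
        printf("init: %d\n", fake_tunnel_init(&dev));
        free(dev.tstats);                       /* free(NULL) is a no-op */
        free(dev.dst_cache);
        return 0;
}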
index e7359f9eaa8d4dd14b706afc9c7241c85e52d056..b261ee8b83fc87ddabcb447d1235ea8b67429de2 100644 (file)
@@ -113,7 +113,7 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
                fptr = (struct frag_hdr *)(skb_network_header(skb) + unfrag_ip6hlen);
                fptr->nexthdr = nexthdr;
                fptr->reserved = 0;
-               ipv6_select_ident(fptr, (struct rt6_info *)skb_dst(skb));
+               fptr->identification = skb_shinfo(skb)->ip6_frag_id;
 
                /* Fragment the skb. ipv6 header and the remaining fields of the
                 * fragment header are updated in ipv6_gso_segment()
index 3701930c66493af53461a4f0c1849ad26450e092..5e44e3179e02aabaea7b6d455ec5ed656e3691b0 100644 (file)
@@ -1692,14 +1692,8 @@ void ieee80211_stop_queue_by_reason(struct ieee80211_hw *hw, int queue,
 void ieee80211_propagate_queue_wake(struct ieee80211_local *local, int queue);
 void ieee80211_add_pending_skb(struct ieee80211_local *local,
                               struct sk_buff *skb);
-void ieee80211_add_pending_skbs_fn(struct ieee80211_local *local,
-                                  struct sk_buff_head *skbs,
-                                  void (*fn)(void *data), void *data);
-static inline void ieee80211_add_pending_skbs(struct ieee80211_local *local,
-                                             struct sk_buff_head *skbs)
-{
-       ieee80211_add_pending_skbs_fn(local, skbs, NULL, NULL);
-}
+void ieee80211_add_pending_skbs(struct ieee80211_local *local,
+                               struct sk_buff_head *skbs);
 void ieee80211_flush_queues(struct ieee80211_local *local,
                            struct ieee80211_sub_if_data *sdata);
 
index fc1d82465b3ce1b1cdcc9edb4f5157618b70e8ce..245dce969b31165078c04fca9a5fc963457e9d68 100644 (file)
@@ -222,6 +222,7 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
        switch (vht_oper->chan_width) {
        case IEEE80211_VHT_CHANWIDTH_USE_HT:
                vht_chandef.width = chandef->width;
+               vht_chandef.center_freq1 = chandef->center_freq1;
                break;
        case IEEE80211_VHT_CHANWIDTH_80MHZ:
                vht_chandef.width = NL80211_CHAN_WIDTH_80;
@@ -271,6 +272,28 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
        ret = 0;
 
 out:
+       /*
+        * When tracking the current AP, don't do any further checks if the
+        * new chandef is identical to the one we're currently using for the
+        * connection. This keeps us from playing ping-pong with regulatory;
+        * without it, the following can happen (for example):
+        *  - connect to an AP with 80 MHz, world regdom allows 80 MHz
+        *  - AP advertises regdom US
+        *  - CRDA loads regdom US with 80 MHz prohibited (old database)
+        *  - the code below detects an unsupported channel, downgrades, and
+        *    we disconnect from the AP in the caller
+        *  - disconnect causes CRDA to reload world regdomain and the game
+        *    starts anew.
+        * (see https://bugzilla.kernel.org/show_bug.cgi?id=70881)
+        *
+        * It seems possible that there are still scenarios with CSA or real
+        * bandwidth changes where this could happen, but those cases are
+        * less common and wouldn't completely prevent using the AP.
+        */
+       if (tracking &&
+           cfg80211_chandef_identical(chandef, &sdata->vif.bss_conf.chandef))
+               return ret;
+
        /* don't print the message below for VHT mismatch if VHT is disabled */
        if (ret & IEEE80211_STA_DISABLE_VHT)
                vht_chandef = *chandef;
@@ -3753,6 +3776,7 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
                chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
                if (WARN_ON(!chanctx_conf)) {
                        rcu_read_unlock();
+                       sta_info_free(local, new_sta);
                        return -EINVAL;
                }
                rate_flags = ieee80211_chandef_rate_flags(&chanctx_conf->def);
index c24ca0d0f4697ea7040c813dbf24d1c32f707329..3e57f96c9666daf4b420cfd663864878bef34204 100644 (file)
@@ -1128,6 +1128,13 @@ static void sta_ps_end(struct sta_info *sta)
               sta->sta.addr, sta->sta.aid);
 
        if (test_sta_flag(sta, WLAN_STA_PS_DRIVER)) {
+               /*
+                * Clear the flag only if the other one is still set
+                * so that the TX path won't start TX'ing new frames
+                * directly ... In the case that the driver flag isn't
+                * set, ieee80211_sta_ps_deliver_wakeup() will clear it.
+                */
+               clear_sta_flag(sta, WLAN_STA_PS_STA);
                ps_dbg(sta->sdata, "STA %pM aid %d driver-ps-blocked\n",
                       sta->sta.addr, sta->sta.aid);
                return;
index decd30c1e29053d9a5e3ea56c7ccb66a7407578d..a023b432143b6f4b9db102b7a86c8ca417e1b3d2 100644 (file)
@@ -91,7 +91,7 @@ static int sta_info_hash_del(struct ieee80211_local *local,
        return -ENOENT;
 }
 
-static void cleanup_single_sta(struct sta_info *sta)
+static void __cleanup_single_sta(struct sta_info *sta)
 {
        int ac, i;
        struct tid_ampdu_tx *tid_tx;
@@ -99,7 +99,8 @@ static void cleanup_single_sta(struct sta_info *sta)
        struct ieee80211_local *local = sdata->local;
        struct ps_data *ps;
 
-       if (test_sta_flag(sta, WLAN_STA_PS_STA)) {
+       if (test_sta_flag(sta, WLAN_STA_PS_STA) ||
+           test_sta_flag(sta, WLAN_STA_PS_DRIVER)) {
                if (sta->sdata->vif.type == NL80211_IFTYPE_AP ||
                    sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
                        ps = &sdata->bss->ps;
@@ -109,6 +110,7 @@ static void cleanup_single_sta(struct sta_info *sta)
                        return;
 
                clear_sta_flag(sta, WLAN_STA_PS_STA);
+               clear_sta_flag(sta, WLAN_STA_PS_DRIVER);
 
                atomic_dec(&ps->num_sta_ps);
                sta_info_recalc_tim(sta);
@@ -139,7 +141,14 @@ static void cleanup_single_sta(struct sta_info *sta)
                ieee80211_purge_tx_queue(&local->hw, &tid_tx->pending);
                kfree(tid_tx);
        }
+}
 
+static void cleanup_single_sta(struct sta_info *sta)
+{
+       struct ieee80211_sub_if_data *sdata = sta->sdata;
+       struct ieee80211_local *local = sdata->local;
+
+       __cleanup_single_sta(sta);
        sta_info_free(local, sta);
 }
 
@@ -330,6 +339,7 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
        rcu_read_unlock();
 
        spin_lock_init(&sta->lock);
+       spin_lock_init(&sta->ps_lock);
        INIT_WORK(&sta->drv_unblock_wk, sta_unblock);
        INIT_WORK(&sta->ampdu_mlme.work, ieee80211_ba_session_work);
        mutex_init(&sta->ampdu_mlme.mtx);
@@ -487,21 +497,26 @@ static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU)
                goto out_err;
        }
 
-       /* notify driver */
-       err = sta_info_insert_drv_state(local, sdata, sta);
-       if (err)
-               goto out_err;
-
        local->num_sta++;
        local->sta_generation++;
        smp_mb();
 
+       /* simplify things and don't accept BA sessions yet */
+       set_sta_flag(sta, WLAN_STA_BLOCK_BA);
+
        /* make the station visible */
        sta_info_hash_add(local, sta);
 
        list_add_rcu(&sta->list, &local->sta_list);
 
+       /* notify driver */
+       err = sta_info_insert_drv_state(local, sdata, sta);
+       if (err)
+               goto out_remove;
+
        set_sta_flag(sta, WLAN_STA_INSERTED);
+       /* accept BA sessions now */
+       clear_sta_flag(sta, WLAN_STA_BLOCK_BA);
 
        ieee80211_recalc_min_chandef(sdata);
        ieee80211_sta_debugfs_add(sta);
@@ -522,6 +537,12 @@ static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU)
                mesh_accept_plinks_update(sdata);
 
        return 0;
+ out_remove:
+       sta_info_hash_del(local, sta);
+       list_del_rcu(&sta->list);
+       local->num_sta--;
+       synchronize_net();
+       __cleanup_single_sta(sta);
  out_err:
        mutex_unlock(&local->sta_mtx);
        rcu_read_lock();
@@ -1071,10 +1092,14 @@ struct ieee80211_sta *ieee80211_find_sta(struct ieee80211_vif *vif,
 }
 EXPORT_SYMBOL(ieee80211_find_sta);
 
-static void clear_sta_ps_flags(void *_sta)
+/* powersave support code */
+void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
 {
-       struct sta_info *sta = _sta;
        struct ieee80211_sub_if_data *sdata = sta->sdata;
+       struct ieee80211_local *local = sdata->local;
+       struct sk_buff_head pending;
+       int filtered = 0, buffered = 0, ac;
+       unsigned long flags;
        struct ps_data *ps;
 
        if (sdata->vif.type == NL80211_IFTYPE_AP ||
@@ -1085,20 +1110,6 @@ static void clear_sta_ps_flags(void *_sta)
        else
                return;
 
-       clear_sta_flag(sta, WLAN_STA_PS_DRIVER);
-       if (test_and_clear_sta_flag(sta, WLAN_STA_PS_STA))
-               atomic_dec(&ps->num_sta_ps);
-}
-
-/* powersave support code */
-void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
-{
-       struct ieee80211_sub_if_data *sdata = sta->sdata;
-       struct ieee80211_local *local = sdata->local;
-       struct sk_buff_head pending;
-       int filtered = 0, buffered = 0, ac;
-       unsigned long flags;
-
        clear_sta_flag(sta, WLAN_STA_SP);
 
        BUILD_BUG_ON(BITS_TO_LONGS(IEEE80211_NUM_TIDS) > 1);
@@ -1109,6 +1120,8 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
 
        skb_queue_head_init(&pending);
 
+       /* sync with ieee80211_tx_h_unicast_ps_buf */
+       spin_lock(&sta->ps_lock);
        /* Send all buffered frames to the station */
        for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
                int count = skb_queue_len(&pending), tmp;
@@ -1127,7 +1140,12 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
                buffered += tmp - count;
        }
 
-       ieee80211_add_pending_skbs_fn(local, &pending, clear_sta_ps_flags, sta);
+       ieee80211_add_pending_skbs(local, &pending);
+       clear_sta_flag(sta, WLAN_STA_PS_DRIVER);
+       clear_sta_flag(sta, WLAN_STA_PS_STA);
+       spin_unlock(&sta->ps_lock);
+
+       atomic_dec(&ps->num_sta_ps);
 
        /* This station just woke up and isn't aware of our SMPS state */
        if (!ieee80211_smps_is_restrictive(sta->known_smps_mode,
index d77ff70906303855458734a5bfac0526460056eb..d3a6d8208f2f85f7db331238f41c0da2938f0f8d 100644 (file)
@@ -267,6 +267,7 @@ struct ieee80211_tx_latency_stat {
  * @drv_unblock_wk: used for driver PS unblocking
  * @listen_interval: listen interval of this station, when we're acting as AP
  * @_flags: STA flags, see &enum ieee80211_sta_info_flags, do not use directly
+ * @ps_lock: used for powersave (when mac80211 is the AP) related locking
  * @ps_tx_buf: buffers (per AC) of frames to transmit to this station
  *     when it leaves power saving state or polls
  * @tx_filtered: buffers (per AC) of frames we already tried to
@@ -356,10 +357,8 @@ struct sta_info {
        /* use the accessors defined below */
        unsigned long _flags;
 
-       /*
-        * STA powersave frame queues, no more than the internal
-        * locking required.
-        */
+       /* STA powersave lock and frame queues */
+       spinlock_t ps_lock;
        struct sk_buff_head ps_tx_buf[IEEE80211_NUM_ACS];
        struct sk_buff_head tx_filtered[IEEE80211_NUM_ACS];
        unsigned long driver_buffered_tids;
index 97a02d3f7d87720795e1518d27fce0bbaed9bc4f..4080c615636fabf3d430ecd898d4349ccd213464 100644 (file)
@@ -478,6 +478,20 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
                       sta->sta.addr, sta->sta.aid, ac);
                if (tx->local->total_ps_buffered >= TOTAL_MAX_TX_BUFFER)
                        purge_old_ps_buffers(tx->local);
+
+               /* sync with ieee80211_sta_ps_deliver_wakeup */
+               spin_lock(&sta->ps_lock);
+               /*
+                * STA woke up in the meantime and all the frames on ps_tx_buf
+                * have been queued to the pending queue. No reordering can
+                * happen; go ahead and Tx the packet.
+                */
+               if (!test_sta_flag(sta, WLAN_STA_PS_STA) &&
+                   !test_sta_flag(sta, WLAN_STA_PS_DRIVER)) {
+                       spin_unlock(&sta->ps_lock);
+                       return TX_CONTINUE;
+               }
+
                if (skb_queue_len(&sta->ps_tx_buf[ac]) >= STA_MAX_TX_BUFFER) {
                        struct sk_buff *old = skb_dequeue(&sta->ps_tx_buf[ac]);
                        ps_dbg(tx->sdata,
@@ -492,6 +506,7 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
                info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
                info->flags &= ~IEEE80211_TX_TEMPORARY_FLAGS;
                skb_queue_tail(&sta->ps_tx_buf[ac], tx->skb);
+               spin_unlock(&sta->ps_lock);
 
                if (!timer_pending(&local->sta_cleanup))
                        mod_timer(&local->sta_cleanup,
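
Taken together, the sta_info.c and tx.c hunks close a race using the new
ps_lock: the TX path tests the powersave flags and buffers frames under the
lock, while the wakeup path flushes the buffers and clears the flags under the
same lock, so no frame can be queued after the flush. A reduced pthread sketch
of that discipline; all names are invented for illustration.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t ps_lock = PTHREAD_MUTEX_INITIALIZER;
static int ps_flags = 1;        /* stand-in for WLAN_STA_PS_STA/PS_DRIVER */
static int buffered;            /* stand-in for the ps_tx_buf queues */

/* TX path: decide under the lock whether to buffer or send directly */
static void tx_frame(void)
{
        pthread_mutex_lock(&ps_lock);
        if (!ps_flags) {                /* station already woke up */
                pthread_mutex_unlock(&ps_lock);
                puts("tx: send directly");
                return;
        }
        buffered++;                     /* frame parked on the ps queue */
        pthread_mutex_unlock(&ps_lock);
}

/* Wakeup path: flush the queue and clear the flags inside the same lock,
 * so no new frame can sneak onto the queue after the flush */
static void deliver_wakeup(void)
{
        pthread_mutex_lock(&ps_lock);
        printf("wakeup: flushing %d buffered frame(s)\n", buffered);
        buffered = 0;
        ps_flags = 0;
        pthread_mutex_unlock(&ps_lock);
}

int main(void)
{
        tx_frame();             /* buffered */
        deliver_wakeup();       /* flush + clear, atomically w.r.t. TX */
        tx_frame();             /* now sent directly */
        return 0;
}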
index 676dc0967f377f1251a761bf2dff0c35c8e92346..b8700d417a9cf26735a581fe25eb2619cf4a37da 100644 (file)
@@ -435,9 +435,8 @@ void ieee80211_add_pending_skb(struct ieee80211_local *local,
        spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
 }
 
-void ieee80211_add_pending_skbs_fn(struct ieee80211_local *local,
-                                  struct sk_buff_head *skbs,
-                                  void (*fn)(void *data), void *data)
+void ieee80211_add_pending_skbs(struct ieee80211_local *local,
+                               struct sk_buff_head *skbs)
 {
        struct ieee80211_hw *hw = &local->hw;
        struct sk_buff *skb;
@@ -461,9 +460,6 @@ void ieee80211_add_pending_skbs_fn(struct ieee80211_local *local,
                __skb_queue_tail(&local->pending[queue], skb);
        }
 
-       if (fn)
-               fn(data);
-
        for (i = 0; i < hw->queues; i++)
                __ieee80211_wake_queue(hw, i,
                        IEEE80211_QUEUE_STOP_REASON_SKB_ADD);
@@ -1740,6 +1736,26 @@ int ieee80211_reconfig(struct ieee80211_local *local)
        ieee80211_wake_queues_by_reason(hw, IEEE80211_MAX_QUEUE_MAP,
                                        IEEE80211_QUEUE_STOP_REASON_SUSPEND);
 
+       /*
+        * Reconfigure sched scan if it was interrupted by FW restart or
+        * suspend.
+        */
+       mutex_lock(&local->mtx);
+       sched_scan_sdata = rcu_dereference_protected(local->sched_scan_sdata,
+                                               lockdep_is_held(&local->mtx));
+       if (sched_scan_sdata && local->sched_scan_req)
+               /*
+                * Sched scan stopped, but we don't want to report it. Instead,
+                * we're trying to reschedule.
+                */
+               if (__ieee80211_request_sched_scan_start(sched_scan_sdata,
+                                                        local->sched_scan_req))
+                       sched_scan_stopped = true;
+       mutex_unlock(&local->mtx);
+
+       if (sched_scan_stopped)
+               cfg80211_sched_scan_stopped(local->hw.wiphy);
+
        /*
         * If this is for hw restart things are still running.
         * We may want to change that later, however.
@@ -1768,26 +1784,6 @@ int ieee80211_reconfig(struct ieee80211_local *local)
        WARN_ON(1);
 #endif
 
-       /*
-        * Reconfigure sched scan if it was interrupted by FW restart or
-        * suspend.
-        */
-       mutex_lock(&local->mtx);
-       sched_scan_sdata = rcu_dereference_protected(local->sched_scan_sdata,
-                                               lockdep_is_held(&local->mtx));
-       if (sched_scan_sdata && local->sched_scan_req)
-               /*
-                * Sched scan stopped, but we don't want to report it. Instead,
-                * we're trying to reschedule.
-                */
-               if (__ieee80211_request_sched_scan_start(sched_scan_sdata,
-                                                        local->sched_scan_req))
-                       sched_scan_stopped = true;
-       mutex_unlock(&local->mtx);
-
-       if (sched_scan_stopped)
-               cfg80211_sched_scan_stopped(local->hw.wiphy);
-
        return 0;
 }
 
index 21211c60ca988992034bfc330977e846c3fc7010..d51422c778dee359bec450c75b77bfb24ca712f4 100644 (file)
@@ -154,6 +154,11 @@ u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata,
                return IEEE80211_AC_BE;
        }
 
+       if (skb->protocol == sdata->control_port_protocol) {
+               skb->priority = 7;
+               return ieee80211_downgrade_queue(sdata, skb);
+       }
+
        /* use the data classifier to determine what 802.1d tag the
         * data frame has */
        rcu_read_lock();
index bb322d0beb484f3c66a334a5e0b24651f2a1ffca..b9f0e03743228ec852eee97eb6bb9ef1e0ab73e9 100644 (file)
@@ -1310,27 +1310,22 @@ ctnetlink_change_status(struct nf_conn *ct, const struct nlattr * const cda[])
 }
 
 static int
-ctnetlink_change_nat(struct nf_conn *ct, const struct nlattr * const cda[])
+ctnetlink_setup_nat(struct nf_conn *ct, const struct nlattr * const cda[])
 {
 #ifdef CONFIG_NF_NAT_NEEDED
        int ret;
 
-       if (cda[CTA_NAT_DST]) {
-               ret = ctnetlink_parse_nat_setup(ct,
-                                               NF_NAT_MANIP_DST,
-                                               cda[CTA_NAT_DST]);
-               if (ret < 0)
-                       return ret;
-       }
-       if (cda[CTA_NAT_SRC]) {
-               ret = ctnetlink_parse_nat_setup(ct,
-                                               NF_NAT_MANIP_SRC,
-                                               cda[CTA_NAT_SRC]);
-               if (ret < 0)
-                       return ret;
-       }
-       return 0;
+       ret = ctnetlink_parse_nat_setup(ct, NF_NAT_MANIP_DST,
+                                       cda[CTA_NAT_DST]);
+       if (ret < 0)
+               return ret;
+
+       ret = ctnetlink_parse_nat_setup(ct, NF_NAT_MANIP_SRC,
+                                       cda[CTA_NAT_SRC]);
+       return ret;
 #else
+       if (!cda[CTA_NAT_DST] && !cda[CTA_NAT_SRC])
+               return 0;
        return -EOPNOTSUPP;
 #endif
 }
@@ -1659,11 +1654,9 @@ ctnetlink_create_conntrack(struct net *net, u16 zone,
                        goto err2;
        }
 
-       if (cda[CTA_NAT_SRC] || cda[CTA_NAT_DST]) {
-               err = ctnetlink_change_nat(ct, cda);
-               if (err < 0)
-                       goto err2;
-       }
+       err = ctnetlink_setup_nat(ct, cda);
+       if (err < 0)
+               goto err2;
 
        nf_ct_acct_ext_add(ct, GFP_ATOMIC);
        nf_ct_tstamp_ext_add(ct, GFP_ATOMIC);
index d3f5cd6dd962b195ea85775cd0432cf204e697b2..52ca952b802c5e3ea41c83823b90f74365b8fc76 100644 (file)
@@ -432,15 +432,15 @@ nf_nat_setup_info(struct nf_conn *ct,
 }
 EXPORT_SYMBOL(nf_nat_setup_info);
 
-unsigned int
-nf_nat_alloc_null_binding(struct nf_conn *ct, unsigned int hooknum)
+static unsigned int
+__nf_nat_alloc_null_binding(struct nf_conn *ct, enum nf_nat_manip_type manip)
 {
        /* Force range to this IP; let proto decide mapping for
         * per-proto parts (hence not IP_NAT_RANGE_PROTO_SPECIFIED).
         * Use reply in case it's already been mangled (eg local packet).
         */
        union nf_inet_addr ip =
-               (HOOK2MANIP(hooknum) == NF_NAT_MANIP_SRC ?
+               (manip == NF_NAT_MANIP_SRC ?
                ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3 :
                ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3);
        struct nf_nat_range range = {
@@ -448,7 +448,13 @@ nf_nat_alloc_null_binding(struct nf_conn *ct, unsigned int hooknum)
                .min_addr       = ip,
                .max_addr       = ip,
        };
-       return nf_nat_setup_info(ct, &range, HOOK2MANIP(hooknum));
+       return nf_nat_setup_info(ct, &range, manip);
+}
+
+unsigned int
+nf_nat_alloc_null_binding(struct nf_conn *ct, unsigned int hooknum)
+{
+       return __nf_nat_alloc_null_binding(ct, HOOK2MANIP(hooknum));
 }
 EXPORT_SYMBOL_GPL(nf_nat_alloc_null_binding);
 
@@ -702,9 +708,9 @@ static const struct nla_policy nat_nla_policy[CTA_NAT_MAX+1] = {
 
 static int
 nfnetlink_parse_nat(const struct nlattr *nat,
-                   const struct nf_conn *ct, struct nf_nat_range *range)
+                   const struct nf_conn *ct, struct nf_nat_range *range,
+                   const struct nf_nat_l3proto *l3proto)
 {
-       const struct nf_nat_l3proto *l3proto;
        struct nlattr *tb[CTA_NAT_MAX+1];
        int err;
 
@@ -714,38 +720,46 @@ nfnetlink_parse_nat(const struct nlattr *nat,
        if (err < 0)
                return err;
 
-       rcu_read_lock();
-       l3proto = __nf_nat_l3proto_find(nf_ct_l3num(ct));
-       if (l3proto == NULL) {
-               err = -EAGAIN;
-               goto out;
-       }
        err = l3proto->nlattr_to_range(tb, range);
        if (err < 0)
-               goto out;
+               return err;
 
        if (!tb[CTA_NAT_PROTO])
-               goto out;
+               return 0;
 
-       err = nfnetlink_parse_nat_proto(tb[CTA_NAT_PROTO], ct, range);
-out:
-       rcu_read_unlock();
-       return err;
+       return nfnetlink_parse_nat_proto(tb[CTA_NAT_PROTO], ct, range);
 }
 
+/* This function is called under rcu_read_lock() */
 static int
 nfnetlink_parse_nat_setup(struct nf_conn *ct,
                          enum nf_nat_manip_type manip,
                          const struct nlattr *attr)
 {
        struct nf_nat_range range;
+       const struct nf_nat_l3proto *l3proto;
        int err;
 
-       err = nfnetlink_parse_nat(attr, ct, &range);
+       /* Should not happen, restricted to creating new conntracks
+        * via ctnetlink.
+        */
+       if (WARN_ON_ONCE(nf_nat_initialized(ct, manip)))
+               return -EEXIST;
+
+       /* Make sure that L3 NAT is there by the time we call
+        * nf_nat_setup_info to attach the null binding; otherwise this
+        * may oops.
+        */
+       l3proto = __nf_nat_l3proto_find(nf_ct_l3num(ct));
+       if (l3proto == NULL)
+               return -EAGAIN;
+
+       /* No NAT information has been passed, allocate the null-binding */
+       if (attr == NULL)
+               return __nf_nat_alloc_null_binding(ct, manip);
+
+       err = nfnetlink_parse_nat(attr, ct, &range, l3proto);
        if (err < 0)
                return err;
-       if (nf_nat_initialized(ct, manip))
-               return -EEXIST;
 
        return nf_nat_setup_info(ct, &range, manip);
 }
index e8254ad2e5a9f37e84694293c8ba4069b5e5f35e..425cf39af8907f1d0618b0904121073ea4dc116a 100644 (file)
@@ -116,7 +116,7 @@ static void nft_meta_get_eval(const struct nft_expr *expr,
                                 skb->sk->sk_socket->file->f_cred->fsgid);
                read_unlock_bh(&skb->sk->sk_callback_lock);
                break;
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
        case NFT_META_RTCLASSID: {
                const struct dst_entry *dst = skb_dst(skb);
 
@@ -199,7 +199,7 @@ static int nft_meta_init_validate_get(uint32_t key)
        case NFT_META_OIFTYPE:
        case NFT_META_SKUID:
        case NFT_META_SKGID:
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
        case NFT_META_RTCLASSID:
 #endif
 #ifdef CONFIG_NETWORK_SECMARK
index a2aeb318678f9e0e577801f6e42bda4df66ea805..85daa84bfdfee3d65296fb62c723a78e180c4497 100644 (file)
@@ -135,7 +135,8 @@ nft_payload_select_ops(const struct nft_ctx *ctx,
        if (len == 0 || len > FIELD_SIZEOF(struct nft_data, data))
                return ERR_PTR(-EINVAL);
 
-       if (len <= 4 && IS_ALIGNED(offset, len) && base != NFT_PAYLOAD_LL_HEADER)
+       if (len <= 4 && is_power_of_2(len) && IS_ALIGNED(offset, len) &&
+           base != NFT_PAYLOAD_LL_HEADER)
                return &nft_payload_fast_ops;
        else
                return &nft_payload_ops;
index 8a310f239c93ed769f0eeadc1ce30c97fcd63de8..b718a52a46544036ba81a4105da149d8261d30c3 100644 (file)
@@ -21,9 +21,9 @@ static void nft_reject_inet_eval(const struct nft_expr *expr,
 {
        switch (pkt->ops->pf) {
        case NFPROTO_IPV4:
-               nft_reject_ipv4_eval(expr, data, pkt);
+               return nft_reject_ipv4_eval(expr, data, pkt);
        case NFPROTO_IPV6:
-               nft_reject_ipv6_eval(expr, data, pkt);
+               return nft_reject_ipv6_eval(expr, data, pkt);
        }
 }
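
The nft_reject_inet hunk above stops an unintended fallthrough from the IPv4
case into the IPv6 case by returning the void eval calls, an idiom gcc accepts
and the kernel relies on here. A tiny stand-alone illustration with invented
names.

#include <stdio.h>

static void handle_v4(int pkt) { printf("v4: %d\n", pkt); }
static void handle_v6(int pkt) { printf("v6: %d\n", pkt); }

/* Without break or return, case 4 would fall through into case 6,
 * which is exactly the bug the hunk above fixes. */
static void dispatch(int family, int pkt)
{
        switch (family) {
        case 4:
                return handle_v4(pkt);  /* return stops the fallthrough */
        case 6:
                return handle_v6(pkt);
        }
}

int main(void)
{
        dispatch(4, 1);
        dispatch(6, 2);
        return 0;
}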
 
index fdf51353cf78ac80958cadfa0e58e159970aa8bf..04748ab649c25bbb2d2d7cee6fbc192fb267dfbc 100644 (file)
@@ -1489,8 +1489,8 @@ static int netlink_connect(struct socket *sock, struct sockaddr *addr,
        if (addr->sa_family != AF_NETLINK)
                return -EINVAL;
 
-       /* Only superuser is allowed to send multicasts */
-       if (nladdr->nl_groups && !netlink_capable(sock, NL_CFG_F_NONROOT_SEND))
+       if ((nladdr->nl_groups || nladdr->nl_pid) &&
+           !netlink_capable(sock, NL_CFG_F_NONROOT_SEND))
                return -EPERM;
 
        if (!nlk->portid)
index 46bda010bf11740c1a2fcd9002a193a7ab70801a..56db888b1cd56785e23f0e0272d4ae3af87e32e6 100644 (file)
@@ -301,7 +301,7 @@ static int nci_open_device(struct nci_dev *ndev)
        rc = __nci_request(ndev, nci_reset_req, 0,
                           msecs_to_jiffies(NCI_RESET_TIMEOUT));
 
-       if (ndev->ops->setup(ndev))
+       if (ndev->ops->setup)
                ndev->ops->setup(ndev);
 
        if (!rc) {
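
The NCI hunk above fixes a pointer test that accidentally invoked the optional
setup hook inside the condition. A stand-alone sketch of the check-then-call
idiom; names are invented.

#include <stdio.h>

struct fake_ops { void (*setup)(void); };

static void do_setup(void) { puts("setup ran"); }

/* Call an optional hook only if it is present; the broken code was
 * calling the hook inside the presence test itself. */
static void open_device(const struct fake_ops *ops)
{
        if (ops->setup)         /* test the pointer, don't call it */
                ops->setup();
}

int main(void)
{
        struct fake_ops with = { do_setup }, without = { 0 };
        open_device(&with);
        open_device(&without);  /* safely does nothing */
        return 0;
}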
index 1cb413fead89522a64931a4e2472b39c4d6a720d..4f505a006896578ebdac1392af1dc064500665c9 100644 (file)
@@ -334,18 +334,6 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt)
                        qdisc_put_rtab(qdisc_get_rtab(&qopt->peakrate,
                                                      tb[TCA_TBF_PTAB]));
 
-       if (q->qdisc != &noop_qdisc) {
-               err = fifo_set_limit(q->qdisc, qopt->limit);
-               if (err)
-                       goto done;
-       } else if (qopt->limit > 0) {
-               child = fifo_create_dflt(sch, &bfifo_qdisc_ops, qopt->limit);
-               if (IS_ERR(child)) {
-                       err = PTR_ERR(child);
-                       goto done;
-               }
-       }
-
        buffer = min_t(u64, PSCHED_TICKS2NS(qopt->buffer), ~0U);
        mtu = min_t(u64, PSCHED_TICKS2NS(qopt->mtu), ~0U);
 
@@ -390,6 +378,18 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt)
                goto done;
        }
 
+       if (q->qdisc != &noop_qdisc) {
+               err = fifo_set_limit(q->qdisc, qopt->limit);
+               if (err)
+                       goto done;
+       } else if (qopt->limit > 0) {
+               child = fifo_create_dflt(sch, &bfifo_qdisc_ops, qopt->limit);
+               if (IS_ERR(child)) {
+                       err = PTR_ERR(child);
+                       goto done;
+               }
+       }
+
        sch_tree_lock(sch);
        if (child) {
                qdisc_tree_decrease_qlen(q->qdisc, q->qdisc->q.qlen);
index f558433537b8f829a3f6147256abb7c5d23e4b47..ee13d28d39d10702096f733ee8d88aa862459952 100644 (file)
@@ -1239,78 +1239,107 @@ void sctp_assoc_update(struct sctp_association *asoc,
 }
 
 /* Update the retran path for sending a retransmitted packet.
- * Round-robin through the active transports, else round-robin
- * through the inactive transports as this is the next best thing
- * we can try.
+ * See also RFC4960, 6.4. Multi-Homed SCTP Endpoints:
+ *
+ *   When there is outbound data to send and the primary path
+ *   becomes inactive (e.g., due to failures), or where the
+ *   SCTP user explicitly requests to send data to an
+ *   inactive destination transport address, before reporting
+ *   an error to its ULP, the SCTP endpoint should try to send
+ *   the data to an alternate active destination transport
+ *   address if one exists.
+ *
+ *   When retransmitting data that timed out, if the endpoint
+ *   is multihomed, it should consider each source-destination
+ *   address pair in its retransmission selection policy.
+ *   When retransmitting timed-out data, the endpoint should
+ *   attempt to pick the most divergent source-destination
+ *   pair from the original source-destination pair to which
+ *   the packet was transmitted.
+ *
+ *   Note: Rules for picking the most divergent source-destination
+ *   pair are an implementation decision and are not specified
+ *   within this document.
+ *
+ * Our basic strategy is to round-robin transports in priorities
+ * according to sctp_trans_state_to_prio_map[], e.g., if no such
+ * transport with state SCTP_ACTIVE exists, round-robin through
+ * SCTP_UNKNOWN, etc. You get the picture.
  */
-void sctp_assoc_update_retran_path(struct sctp_association *asoc)
+static const u8 sctp_trans_state_to_prio_map[] = {
+       [SCTP_ACTIVE]   = 3,    /* best case */
+       [SCTP_UNKNOWN]  = 2,
+       [SCTP_PF]       = 1,
+       [SCTP_INACTIVE] = 0,    /* worst case */
+};
+
+static u8 sctp_trans_score(const struct sctp_transport *trans)
 {
-       struct sctp_transport *t, *next;
-       struct list_head *head = &asoc->peer.transport_addr_list;
-       struct list_head *pos;
+       return sctp_trans_state_to_prio_map[trans->state];
+}
 
-       if (asoc->peer.transport_count == 1)
-               return;
+static struct sctp_transport *sctp_trans_elect_best(struct sctp_transport *curr,
+                                                   struct sctp_transport *best)
+{
+       if (best == NULL)
+               return curr;
 
-       /* Find the next transport in a round-robin fashion. */
-       t = asoc->peer.retran_path;
-       pos = &t->transports;
-       next = NULL;
+       return sctp_trans_score(curr) > sctp_trans_score(best) ? curr : best;
+}
 
-       while (1) {
-               /* Skip the head. */
-               if (pos->next == head)
-                       pos = head->next;
-               else
-                       pos = pos->next;
+void sctp_assoc_update_retran_path(struct sctp_association *asoc)
+{
+       struct sctp_transport *trans = asoc->peer.retran_path;
+       struct sctp_transport *trans_next = NULL;
 
-               t = list_entry(pos, struct sctp_transport, transports);
+       /* We're done as we only have the one and only path. */
+       if (asoc->peer.transport_count == 1)
+               return;
+       /* If active_path and retran_path are the same and active,
+        * then this is the only active path. Use it.
+        */
+       if (asoc->peer.active_path == asoc->peer.retran_path &&
+           asoc->peer.active_path->state == SCTP_ACTIVE)
+               return;
 
-               /* We have exhausted the list, but didn't find any
-                * other active transports.  If so, use the next
-                * transport.
-                */
-               if (t == asoc->peer.retran_path) {
-                       t = next;
+       /* Iterate from retran_path's successor back to retran_path. */
+       for (trans = list_next_entry(trans, transports); 1;
+            trans = list_next_entry(trans, transports)) {
+               /* Manually skip the head element. */
+               if (&trans->transports == &asoc->peer.transport_addr_list)
+                       continue;
+               if (trans->state == SCTP_UNCONFIRMED)
+                       continue;
+               trans_next = sctp_trans_elect_best(trans, trans_next);
+               /* Active is good enough for immediate return. */
+               if (trans_next->state == SCTP_ACTIVE)
                        break;
-               }
-
-               /* Try to find an active transport. */
-
-               if ((t->state == SCTP_ACTIVE) ||
-                   (t->state == SCTP_UNKNOWN)) {
+               /* We've reached the end, time to update path. */
+               if (trans == asoc->peer.retran_path)
                        break;
-               } else {
-                       /* Keep track of the next transport in case
-                        * we don't find any active transport.
-                        */
-                       if (t->state != SCTP_UNCONFIRMED && !next)
-                               next = t;
-               }
        }
 
-       if (t)
-               asoc->peer.retran_path = t;
-       else
-               t = asoc->peer.retran_path;
+       if (trans_next != NULL)
+               asoc->peer.retran_path = trans_next;
 
-       pr_debug("%s: association:%p addr:%pISpc\n", __func__, asoc,
-                &t->ipaddr.sa);
+       pr_debug("%s: association:%p updated new path to addr:%pISpc\n",
+                __func__, asoc, &asoc->peer.retran_path->ipaddr.sa);
 }
 
-/* Choose the transport for sending retransmit packet.  */
-struct sctp_transport *sctp_assoc_choose_alter_transport(
-       struct sctp_association *asoc, struct sctp_transport *last_sent_to)
+struct sctp_transport *
+sctp_assoc_choose_alter_transport(struct sctp_association *asoc,
+                                 struct sctp_transport *last_sent_to)
 {
        /* If this is the first time packet is sent, use the active path,
         * else use the retran path. If the last packet was sent over the
         * retran path, update the retran path and use it.
         */
-       if (!last_sent_to)
+       if (last_sent_to == NULL) {
                return asoc->peer.active_path;
-       else {
+       } else {
                if (last_sent_to == asoc->peer.retran_path)
                        sctp_assoc_update_retran_path(asoc);
+
                return asoc->peer.retran_path;
        }
 }
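
A compact stand-alone sketch of the election logic introduced above: score each
transport by state priority, keep the best seen so far, and stop early once an
ACTIVE transport turns up. Names and values here are illustrative stand-ins for
the kernel structures.

#include <stdio.h>

enum state { INACTIVE, PF, UNKNOWN, ACTIVE, NSTATES };

static const unsigned char prio[NSTATES] = {
        [ACTIVE] = 3, [UNKNOWN] = 2, [PF] = 1, [INACTIVE] = 0,
};

struct trans { const char *name; enum state st; };

static const struct trans *elect_best(const struct trans *curr,
                                      const struct trans *best)
{
        if (!best)
                return curr;
        return prio[curr->st] > prio[best->st] ? curr : best;
}

int main(void)
{
        struct trans t[] = {
                { "a", INACTIVE }, { "b", UNKNOWN }, { "c", PF },
        };
        const struct trans *best = NULL;

        for (unsigned int i = 0; i < sizeof(t) / sizeof(t[0]); i++) {
                best = elect_best(&t[i], best);
                if (best->st == ACTIVE)         /* good enough, stop early */
                        break;
        }
        printf("retran path -> %s\n", best->name);      /* prints "b" */
        return 0;
}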
index bd859154000e47cccdae4f9f5e4353ba3cb83fd0..5d6883ff00c3b7056639f06254caffcb14349e27 100644 (file)
@@ -495,11 +495,12 @@ static void sctp_do_8_2_transport_strike(sctp_cmd_seq_t *commands,
        }
 
        /* If the transport error count is greater than the pf_retrans
-        * threshold, and less than pathmaxrtx, then mark this transport
-        * as Partially Failed, ee SCTP Quick Failover Draft, secon 5.1,
-        * point 1
+        * threshold, and less than pathmaxrxt, and if the current state
+        * is not SCTP_UNCONFIRMED, then mark this transport as Partially
+        * Failed, see SCTP Quick Failover Draft, section 5.1
         */
        if ((transport->state != SCTP_PF) &&
+          (transport->state != SCTP_UNCONFIRMED) &&
           (asoc->pf_retrans < transport->pathmaxrxt) &&
           (transport->error_count > asoc->pf_retrans)) {
 
index 591b44d3b7de6bb39faa994cddc72851422ec08f..ae65b6b5973a9bceca7825037317f9fc9f39e2db 100644 (file)
@@ -758,6 +758,13 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(struct net *net,
                struct sctp_chunk auth;
                sctp_ierror_t ret;
 
+               /* Make sure that we and the peer are AUTH capable */
+               if (!net->sctp.auth_enable || !new_asoc->peer.auth_capable) {
+                       kfree_skb(chunk->auth_chunk);
+                       sctp_association_free(new_asoc);
+                       return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+               }
+
                /* set-up our fake chunk so that we can process it */
                auth.skb = chunk->auth_chunk;
                auth.asoc = chunk->asoc;
index a38c89969c686128df1a556598b248a70eec665f..574b86193b15a8251dc85555cc03a8e5f9af1768 100644 (file)
@@ -610,8 +610,13 @@ static struct notifier_block notifier = {
 
 int tipc_bearer_setup(void)
 {
+       int err;
+
+       err = register_netdevice_notifier(&notifier);
+       if (err)
+               return err;
        dev_add_pack(&tipc_packet_type);
-       return register_netdevice_notifier(&notifier);
+       return 0;
 }
 
 void tipc_bearer_cleanup(void)
index c301a9a592d82d570050116df07e54a4551da537..e74eef2e749011188a3d420f0bd8201c3d58f5c6 100644 (file)
@@ -181,7 +181,7 @@ static struct sk_buff *cfg_set_own_addr(void)
        if (tipc_own_addr)
                return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
                                                   " (cannot change node address once assigned)");
-       tipc_core_start_net(addr);
+       tipc_net_start(addr);
        return tipc_cfg_reply_none();
 }
 
index f9e88d8b04ca182b2e8c217e579647fbf9581e4e..80c20647b3d29fd75d2ebb8c760c2aa75214c515 100644 (file)
@@ -76,38 +76,14 @@ struct sk_buff *tipc_buf_acquire(u32 size)
        return skb;
 }
 
-/**
- * tipc_core_stop_net - shut down TIPC networking sub-systems
- */
-static void tipc_core_stop_net(void)
-{
-       tipc_net_stop();
-       tipc_bearer_cleanup();
-}
-
-/**
- * start_net - start TIPC networking sub-systems
- */
-int tipc_core_start_net(unsigned long addr)
-{
-       int res;
-
-       tipc_net_start(addr);
-       res = tipc_bearer_setup();
-       if (res < 0)
-               goto err;
-       return res;
-
-err:
-       tipc_core_stop_net();
-       return res;
-}
-
 /**
  * tipc_core_stop - switch TIPC from SINGLE NODE to NOT RUNNING mode
  */
 static void tipc_core_stop(void)
 {
+       tipc_handler_stop();
+       tipc_net_stop();
+       tipc_bearer_cleanup();
        tipc_netlink_stop();
        tipc_cfg_stop();
        tipc_subscr_stop();
@@ -122,30 +98,65 @@ static void tipc_core_stop(void)
  */
 static int tipc_core_start(void)
 {
-       int res;
+       int err;
 
        get_random_bytes(&tipc_random, sizeof(tipc_random));
 
-       res = tipc_handler_start();
-       if (!res)
-               res = tipc_ref_table_init(tipc_max_ports, tipc_random);
-       if (!res)
-               res = tipc_nametbl_init();
-       if (!res)
-               res = tipc_netlink_start();
-       if (!res)
-               res = tipc_socket_init();
-       if (!res)
-               res = tipc_register_sysctl();
-       if (!res)
-               res = tipc_subscr_start();
-       if (!res)
-               res = tipc_cfg_init();
-       if (res) {
-               tipc_handler_stop();
-               tipc_core_stop();
-       }
-       return res;
+       err = tipc_handler_start();
+       if (err)
+               goto out_handler;
+
+       err = tipc_ref_table_init(tipc_max_ports, tipc_random);
+       if (err)
+               goto out_reftbl;
+
+       err = tipc_nametbl_init();
+       if (err)
+               goto out_nametbl;
+
+       err = tipc_netlink_start();
+       if (err)
+               goto out_netlink;
+
+       err = tipc_socket_init();
+       if (err)
+               goto out_socket;
+
+       err = tipc_register_sysctl();
+       if (err)
+               goto out_sysctl;
+
+       err = tipc_subscr_start();
+       if (err)
+               goto out_subscr;
+
+       err = tipc_cfg_init();
+       if (err)
+               goto out_cfg;
+
+       err = tipc_bearer_setup();
+       if (err)
+               goto out_bearer;
+
+       return 0;
+out_bearer:
+       tipc_cfg_stop();
+out_cfg:
+       tipc_subscr_stop();
+out_subscr:
+       tipc_unregister_sysctl();
+out_sysctl:
+       tipc_socket_stop();
+out_socket:
+       tipc_netlink_stop();
+out_netlink:
+       tipc_nametbl_stop();
+out_nametbl:
+       tipc_ref_table_stop();
+out_reftbl:
+       tipc_handler_stop();
+out_handler:
+       return err;
 }
 
 static int __init tipc_init(void)
@@ -174,8 +185,6 @@ static int __init tipc_init(void)
 
 static void __exit tipc_exit(void)
 {
-       tipc_handler_stop();
-       tipc_core_stop_net();
        tipc_core_stop();
        pr_info("Deactivated\n");
 }
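
tipc_core_start() above is rewritten into the canonical goto-unwind ladder:
each failing step jumps to a label that tears down everything already started,
in reverse order. A stand-alone miniature of the same shape, with invented
subsystems.

#include <stdio.h>

/* Stand-in subsystem starters; any of them may fail */
static int start_a(void) { puts("a up");   return 0; }
static int start_b(void) { puts("b up");   return 0; }
static int start_c(void) { puts("c fail"); return -1; }

static void stop_a(void) { puts("a down"); }
static void stop_b(void) { puts("b down"); }

static int core_start(void)
{
        int err;

        err = start_a();
        if (err)
                goto out_a;
        err = start_b();
        if (err)
                goto out_b;
        err = start_c();
        if (err)
                goto out_c;
        return 0;

out_c:
        stop_b();       /* unwind in exact reverse order */
out_b:
        stop_a();
out_a:
        return err;
}

int main(void)
{
        printf("core_start: %d\n", core_start());
        return 0;
}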
index 5569d96b4da3f3652bd418656a2f7e81822feda4..4dfe137587bbd35a519b87a107f4e665a625bdaa 100644 (file)
@@ -90,7 +90,6 @@ extern int tipc_random __read_mostly;
 /*
  * Routines available to privileged subsystems
  */
-int tipc_core_start_net(unsigned long);
 int tipc_handler_start(void);
 void tipc_handler_stop(void);
 int tipc_netlink_start(void);
index 92a1533af4e0a689f93f30c5b560368f1a548e37..48302be175ce328d5b6d61d52a16c55ab31952a6 100644 (file)
@@ -945,9 +945,6 @@ void tipc_nametbl_stop(void)
 {
        u32 i;
 
-       if (!table.types)
-               return;
-
        /* Verify name table is empty, then release it */
        write_lock_bh(&tipc_nametbl_lock);
        for (i = 0; i < TIPC_NAMETBL_SIZE; i++) {
index 9f72a6376362e613cb87185acbb3581174c45592..3aaf73de9e2d017e96b3cc1124d5c9420d827abd 100644 (file)
@@ -83,8 +83,6 @@ static struct genl_ops tipc_genl_ops[] = {
        },
 };
 
-static int tipc_genl_family_registered;
-
 int tipc_netlink_start(void)
 {
        int res;
@@ -94,16 +92,10 @@ int tipc_netlink_start(void)
                pr_err("Failed to register netlink interface\n");
                return res;
        }
-
-       tipc_genl_family_registered = 1;
        return 0;
 }
 
 void tipc_netlink_stop(void)
 {
-       if (!tipc_genl_family_registered)
-               return;
-
        genl_unregister_family(&tipc_genl_family);
-       tipc_genl_family_registered = 0;
 }
index 2a2a938dc22cb3c14def50169afffa37ea5bec86..de3d593e2fee08c384ca2970d7a0fc695ff4d5fc 100644 (file)
@@ -126,9 +126,6 @@ int tipc_ref_table_init(u32 requested_size, u32 start)
  */
 void tipc_ref_table_stop(void)
 {
-       if (!tipc_ref_table.entries)
-               return;
-
        vfree(tipc_ref_table.entries);
        tipc_ref_table.entries = NULL;
 }
index b635ca347a870dc55187c221b3df1f2dc295a333..373979789a73538bdd55dac4bde74505dc4310a9 100644 (file)
@@ -573,7 +573,6 @@ int tipc_server_start(struct tipc_server *s)
                kmem_cache_destroy(s->rcvbuf_cache);
                return ret;
        }
-       s->enabled = 1;
        return ret;
 }
 
@@ -583,10 +582,6 @@ void tipc_server_stop(struct tipc_server *s)
        int total = 0;
        int id;
 
-       if (!s->enabled)
-               return;
-
-       s->enabled = 0;
        spin_lock_bh(&s->idr_lock);
        for (id = 0; total < s->idr_in_use; id++) {
                con = idr_find(&s->conn_idr, id);
index 98b23f20bc0f5cbb1631111177bd0d88adfcaa68..be817b0b547e87148a103284adc5285a66147c28 100644 (file)
@@ -56,7 +56,6 @@
  * @name: server name
  * @imp: message importance
  * @type: socket type
- * @enabled: identify whether server is launched or not
  */
 struct tipc_server {
        struct idr conn_idr;
@@ -74,7 +73,6 @@ struct tipc_server {
        const char name[TIPC_SERVER_NAME_LEN];
        int imp;
        int type;
-       int enabled;
 };
 
 int tipc_conn_sendmsg(struct tipc_server *s, int conid,
index aab4948f0affa995adc2603bd09db4b5d354b266..a4cf274455aa42922c3e27947e0788da8e3ac794 100644 (file)
@@ -70,8 +70,6 @@ static const struct proto_ops msg_ops;
 static struct proto tipc_proto;
 static struct proto tipc_proto_kern;
 
-static int sockets_enabled;
-
 /*
  * Revised TIPC socket locking policy:
  *
@@ -2027,8 +2025,6 @@ int tipc_socket_init(void)
                proto_unregister(&tipc_proto);
                goto out;
        }
-
-       sockets_enabled = 1;
  out:
        return res;
 }
@@ -2038,10 +2034,6 @@ int tipc_socket_init(void)
  */
 void tipc_socket_stop(void)
 {
-       if (!sockets_enabled)
-               return;
-
-       sockets_enabled = 0;
        sock_unregister(tipc_family_ops.family);
        proto_unregister(&tipc_proto);
 }
index 9b897fca7487dd4fe8d8364368e8516ab8f369c0..f0541370e68eb9071caa429772a0a55d952c27d4 100644 (file)
@@ -1700,7 +1700,7 @@ static void reg_process_hint(struct regulatory_request *reg_request)
                return;
        case NL80211_REGDOM_SET_BY_USER:
                treatment = reg_process_hint_user(reg_request);
-               if (treatment == REG_REQ_OK ||
+               if (treatment == REG_REQ_IGNORE ||
                    treatment == REG_REQ_ALREADY_SET)
                        return;
                schedule_delayed_work(&reg_timeout, msecs_to_jiffies(3142));
@@ -2373,6 +2373,7 @@ static int reg_set_rd_country_ie(const struct ieee80211_regdomain *rd,
 int set_regdom(const struct ieee80211_regdomain *rd)
 {
        struct regulatory_request *lr;
+       bool user_reset = false;
        int r;
 
        if (!reg_is_valid_request(rd->alpha2)) {
@@ -2389,6 +2390,7 @@ int set_regdom(const struct ieee80211_regdomain *rd)
                break;
        case NL80211_REGDOM_SET_BY_USER:
                r = reg_set_rd_user(rd, lr);
+               user_reset = true;
                break;
        case NL80211_REGDOM_SET_BY_DRIVER:
                r = reg_set_rd_driver(rd, lr);
@@ -2402,8 +2404,14 @@ int set_regdom(const struct ieee80211_regdomain *rd)
        }
 
        if (r) {
-               if (r == -EALREADY)
+               switch (r) {
+               case -EALREADY:
                        reg_set_request_processed();
+                       break;
+               default:
+                       /* Back to world regulatory in case of errors */
+                       restore_regulatory_settings(user_reset);
+               }
 
                kfree(rd);
                return r;
index 4b98b25793c5c23fa1799463b45ad71a030fceb9..1d5c7bf29938231fa06428dc6236e36c1719860e 100644 (file)
@@ -1158,7 +1158,7 @@ static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
        if (hlist_unhashed(&pol->bydst))
                return NULL;
 
-       hlist_del(&pol->bydst);
+       hlist_del_init(&pol->bydst);
        hlist_del(&pol->byidx);
        list_del(&pol->walk.all);
        net->xfrm.policy_count[dir]--;
index a26b7aa794755f970756cadb3c817bfc500956d2..40f1b3e92e7812e83127064237db07f18048b0a5 100644 (file)
@@ -1159,6 +1159,11 @@ static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig, int *errp)
        }
        x->props.aalgo = orig->props.aalgo;
 
+       if (orig->aead) {
+               x->aead = xfrm_algo_aead_clone(orig->aead);
+               if (!x->aead)
+                       goto error;
+       }
        if (orig->ealg) {
                x->ealg = xfrm_algo_clone(orig->ealg);
                if (!x->ealg)
@@ -1201,6 +1206,9 @@ static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig, int *errp)
        x->props.flags = orig->props.flags;
        x->props.extra_flags = orig->props.extra_flags;
 
+       x->tfcpad = orig->tfcpad;
+       x->replay_maxdiff = orig->replay_maxdiff;
+       x->replay_maxage = orig->replay_maxage;
        x->curlft.add_time = orig->curlft.add_time;
        x->km.state = orig->km.state;
        x->km.seq = orig->km.seq;
@@ -1215,11 +1223,12 @@ out:
        return NULL;
 }
 
-/* net->xfrm.xfrm_state_lock is held */
 struct xfrm_state *xfrm_migrate_state_find(struct xfrm_migrate *m, struct net *net)
 {
        unsigned int h;
-       struct xfrm_state *x;
+       struct xfrm_state *x = NULL;
+
+       spin_lock_bh(&net->xfrm.xfrm_state_lock);
 
        if (m->reqid) {
                h = xfrm_dst_hash(net, &m->old_daddr, &m->old_saddr,
@@ -1236,7 +1245,7 @@ struct xfrm_state *xfrm_migrate_state_find(struct xfrm_migrate *m, struct net *n
                                             m->old_family))
                                continue;
                        xfrm_state_hold(x);
-                       return x;
+                       break;
                }
        } else {
                h = xfrm_src_hash(net, &m->old_daddr, &m->old_saddr,
@@ -1251,11 +1260,13 @@ struct xfrm_state *xfrm_migrate_state_find(struct xfrm_migrate *m, struct net *n
                                             m->old_family))
                                continue;
                        xfrm_state_hold(x);
-                       return x;
+                       break;
                }
        }
 
-       return NULL;
+       spin_unlock_bh(&net->xfrm.xfrm_state_lock);
+
+       return x;
 }
 EXPORT_SYMBOL(xfrm_migrate_state_find);
 
@@ -1451,7 +1462,7 @@ xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src, int n,
 {
        int err = 0;
        struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
-       struct net *net = xs_net(*dst);
+       struct net *net = xs_net(*src);
 
        if (!afinfo)
                return -EAFNOSUPPORT;
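
The xfrm_state.c hunk above moves xfrm_state_lock inside
xfrm_migrate_state_find() and turns the early returns into break, leaving the
function with exactly one unlock path. A small pthread sketch of that lookup
shape; names are invented.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static int table[] = { 3, 7, 42 };

/* The lookup takes its own lock and breaks out of the loop instead of
 * returning early, so the single unlock below always runs. */
static int *find_entry(int key)
{
        int *found = NULL;

        pthread_mutex_lock(&table_lock);
        for (unsigned int i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
                if (table[i] == key) {
                        found = &table[i];
                        break;          /* no return with the lock held */
                }
        }
        pthread_mutex_unlock(&table_lock);
        return found;
}

int main(void)
{
        printf("found: %s\n", find_entry(42) ? "yes" : "no");
        return 0;
}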
index 1ae3ec7c18b0de977b1b781c8fee72d8357543bc..c274179d60a2de65446a6c4d959501bafc9d4728 100644 (file)
 #include <linux/in6.h>
 #endif
 
-static inline int aead_len(struct xfrm_algo_aead *alg)
-{
-       return sizeof(*alg) + ((alg->alg_key_len + 7) / 8);
-}
-
 static int verify_one_alg(struct nlattr **attrs, enum xfrm_attr_type_t type)
 {
        struct nlattr *rt = attrs[type];
index ef474098d9f1d1e2609221d3d3571c1f29d1b599..17fa901418ae6a491ba61f3814a1143e2a62bfde 100644 (file)
@@ -257,7 +257,7 @@ case "$arg" in
                 && compr="lzop -9 -f"
                echo "$output_file" | grep -q "\.lz4$" \
                 && [ -x "`which lz4 2> /dev/null`" ] \
-                && compr="lz4 -9 -f"
+                && compr="lz4 -l -9 -f"
                echo "$output_file" | grep -q "\.cpio$" && compr="cat"
                shift
                ;;
index 10085de886fef49b78a12746a2e0a593545d56e0..276e84b8a8e57cb99cc5f1c956f1d4422657a0e7 100644 (file)
@@ -330,8 +330,7 @@ static void write_src(void)
                                printf("\tPTR\t_text + %#llx\n",
                                        table[i].addr - _text);
                        else
-                               printf("\tPTR\t_text - %#llx\n",
-                                       _text - table[i].addr);
+                               printf("\tPTR\t%#llx\n", table[i].addr);
                } else {
                        printf("\tPTR\t%#llx\n", table[i].addr);
                }
index 40610984a1b54b06c579f953dabeb5ddb1304525..99a45fdc1bbfa9a15e7bbd6e4f9ec14a8a8c2357 100644 (file)
@@ -1502,6 +1502,16 @@ static int addend_386_rel(struct elf_info *elf, Elf_Shdr *sechdr, Elf_Rela *r)
 #define R_ARM_JUMP24   29
 #endif
 
+#ifndef        R_ARM_THM_CALL
+#define        R_ARM_THM_CALL          10
+#endif
+#ifndef        R_ARM_THM_JUMP24
+#define        R_ARM_THM_JUMP24        30
+#endif
+#ifndef        R_ARM_THM_JUMP19
+#define        R_ARM_THM_JUMP19        51
+#endif
+
 static int addend_arm_rel(struct elf_info *elf, Elf_Shdr *sechdr, Elf_Rela *r)
 {
        unsigned int r_typ = ELF_R_TYPE(r->r_info);
@@ -1515,6 +1525,9 @@ static int addend_arm_rel(struct elf_info *elf, Elf_Shdr *sechdr, Elf_Rela *r)
        case R_ARM_PC24:
        case R_ARM_CALL:
        case R_ARM_JUMP24:
+       case R_ARM_THM_CALL:
+       case R_ARM_THM_JUMP24:
+       case R_ARM_THM_JUMP19:
                /* From ARM ABI: ((S + A) | T) - P */
                r->r_addend = (int)(long)(elf->hdr +
                              sechdr->sh_offset +
index d46cbc5e335e9c330ccd74a08fcbf78aeafe8c96..2fb2576dc6448b3c1020dc311e0d81bfe77fb768 100644 (file)
@@ -1000,7 +1000,11 @@ static int keyring_detect_cycle_iterator(const void *object,
 
        kenter("{%d}", key->serial);
 
-       BUG_ON(key != ctx->match_data);
+       /* We might get a keyring with matching index-key that is nonetheless a
+        * different keyring. */
+       if (key != ctx->match_data)
+               return 0;
+
        ctx->result = ERR_PTR(-EDEADLK);
        return 1;
 }
index df3652ad15ef36224fdbc121ee1018116bab28c0..8ed0bcc0138637e69e19ff7056b99a86fac09fa6 100644 (file)
@@ -1026,6 +1026,9 @@ static void ad1884_fixup_thinkpad(struct hda_codec *codec,
                spec->gen.keep_eapd_on = 1;
                spec->gen.vmaster_mute.hook = ad_vmaster_eapd_hook;
                spec->eapd_nid = 0x12;
+               /* Analog PC Beeper - allow firmware/ACPI beeps */
+               spec->beep_amp = HDA_COMPOSE_AMP_VAL(0x20, 3, 3, HDA_INPUT);
+               spec->gen.beep_nid = 0; /* no digital beep */
        }
 }
 
@@ -1092,6 +1095,7 @@ static int patch_ad1884(struct hda_codec *codec)
        spec = codec->spec;
 
        spec->gen.mixer_nid = 0x20;
+       spec->gen.mixer_merge_nid = 0x21;
        spec->gen.beep_nid = 0x10;
        set_beep_amp(spec, 0x10, 0, HDA_OUTPUT);
 
index ec304f3ae3b41d4a8a1295c448ff35e18eb58977..850296a1e0ff10db1bc699389b9912068f5b2761 100644 (file)
@@ -4253,6 +4253,7 @@ static const struct hda_fixup alc269_fixups[] = {
 };
 
 static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+       SND_PCI_QUIRK(0x1025, 0x0283, "Acer TravelMate 8371", ALC269_FIXUP_INV_DMIC),
        SND_PCI_QUIRK(0x1025, 0x029b, "Acer 1810TZ", ALC269_FIXUP_INV_DMIC),
        SND_PCI_QUIRK(0x1025, 0x0349, "Acer AOD260", ALC269_FIXUP_INV_DMIC),
        SND_PCI_QUIRK(0x1025, 0x047c, "Acer AC700", ALC269_FIXUP_ACER_AC700),
@@ -5163,7 +5164,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1028, 0x0625, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x0626, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x0628, "Dell", ALC668_FIXUP_AUTO_MUTE),
-       SND_PCI_QUIRK(0x1028, 0x064e, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
+       SND_PCI_QUIRK(0x1028, 0x064e, "Dell", ALC668_FIXUP_AUTO_MUTE),
        SND_PCI_QUIRK(0x103c, 0x1632, "HP RP5800", ALC662_FIXUP_HP_RP5800),
        SND_PCI_QUIRK(0x1043, 0x11cd, "Asus N550", ALC662_FIXUP_BASS_1A_CHMAP),
        SND_PCI_QUIRK(0x1043, 0x1477, "ASUS N56VZ", ALC662_FIXUP_BASS_CHMAP),
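
Both Realtek hunks are plain table edits: each SND_PCI_QUIRK() row keys a fixup to a PCI subsystem vendor/device pair, and the driver applies the first matching entry. A generic sketch of that lookup pattern, with invented constants:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Match a device's PCI subsystem vendor/device pair against a static
 * table and pick the associated fixup.  Constants and the loop are
 * illustrative only. */
struct pci_quirk {
	uint16_t subvendor;
	uint16_t subdevice;
	const char *name;
	int fixup;
};

enum { FIXUP_NONE, FIXUP_AUTO_MUTE, FIXUP_MIC_NO_PRESENCE };

static const struct pci_quirk quirks[] = {
	{ 0x1028, 0x0628, "Dell", FIXUP_AUTO_MUTE },
	{ 0x1028, 0x064e, "Dell", FIXUP_AUTO_MUTE },
};

static int lookup_fixup(uint16_t sv, uint16_t sd)
{
	for (size_t i = 0; i < sizeof(quirks) / sizeof(quirks[0]); i++)
		if (quirks[i].subvendor == sv && quirks[i].subdevice == sd)
			return quirks[i].fixup;
	return FIXUP_NONE;
}

int main(void)
{
	printf("fixup=%d\n", lookup_fixup(0x1028, 0x064e));
	return 0;
}
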
index 44b0ba4feab3bd1b43100feb457e82e290ea26dc..1bed780e21d96945d1cf913ce23fe31409df0cc4 100644 (file)
@@ -883,6 +883,7 @@ static void volume_control_quirks(struct usb_mixer_elem_info *cval,
                }
                break;
 
+       case USB_ID(0x046d, 0x0807): /* Logitech Webcam C500 */
        case USB_ID(0x046d, 0x0808):
        case USB_ID(0x046d, 0x0809):
        case USB_ID(0x046d, 0x081b): /* HD Webcam c310 */
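
The mixer quirk keys devices by a packed 32-bit ID with the vendor in the high half and the product in the low half, which is the shape of the kernel's USB_ID() macro. A sketch under that assumption:

#include <stdio.h>
#include <stdint.h>

/* Same shape as the kernel's USB_ID(): vendor in bits 31..16,
 * product in bits 15..0. */
#define USB_ID(vendor, product) (((uint32_t)(vendor) << 16) | (product))

int main(void)
{
	uint32_t id = USB_ID(0x046d, 0x0807);   /* Logitech Webcam C500 */
	printf("vendor=%04x product=%04x\n", id >> 16, id & 0xffff);
	return 0;
}
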
index da8b7aa3d3518d1e52532af42974fc2e3981376f..07b0b7542511e9912e830b11885e1212bbf63a76 100644 (file)
@@ -87,8 +87,8 @@ endif # BUILD_SRC
 # We process the rest of the Makefile if this is the final invocation of make
 ifeq ($(skip-makefile),)
 
-srctree                := $(if $(BUILD_SRC),$(BUILD_SRC),$(CURDIR))
-objtree                := $(CURDIR)
+srctree                := $(realpath $(if $(BUILD_SRC),$(BUILD_SRC),$(CURDIR)))
+objtree                := $(realpath $(CURDIR))
 src            := $(srctree)
 obj            := $(objtree)
 
@@ -112,7 +112,7 @@ export Q VERBOSE
 
 LIBLOCKDEP_VERSION = $(LL_VERSION).$(LL_PATCHLEVEL).$(LL_EXTRAVERSION)
 
-INCLUDES = -I. -I/usr/local/include -I./uinclude $(CONFIG_INCLUDES)
+INCLUDES = -I. -I/usr/local/include -I./uinclude -I./include $(CONFIG_INCLUDES)
 
 # Set compile option CFLAGS if not set elsewhere
 CFLAGS ?= -g -DCONFIG_LOCKDEP -DCONFIG_STACKTRACE -DCONFIG_PROVE_LOCKING -DBITS_PER_LONG=__WORDSIZE -DLIBLOCKDEP_VERSION='"$(LIBLOCKDEP_VERSION)"' -rdynamic -O0 -g
index f8465a811aa554e6510477ac0c733b2eb02bb5f4..23bd69cb5ade7014e8630e87ad89a16e2d95be71 100644 (file)
@@ -418,7 +418,7 @@ int pthread_rwlock_unlock(pthread_rwlock_t *rwlock)
 
 __attribute__((constructor)) static void init_preload(void)
 {
-       if (__init_state != done)
+       if (__init_state == done)
                return;
 
 #ifndef __GLIBC__
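
The preload.c test was inverted: the constructor returned early unless initialization had already finished, so the setup never ran. The fix turns it into the usual idempotent guard, sketched below with illustrative names:

#include <stdio.h>

/* Idempotent initializer in the style of the liblockdep preload
 * constructor: run the setup exactly once, and return early on any
 * later call once the state has reached "done".  The inverted test the
 * patch removes would have skipped setup on the first call instead. */
enum init_state { uninitialized, in_progress, done };

static enum init_state init_state = uninitialized;

static void init_preload_sketch(void)
{
	if (init_state == done)     /* already initialized: nothing to do */
		return;
	init_state = in_progress;
	/* ... resolve real pthread symbols, set up lockdep state ... */
	init_state = done;
}

int main(void)
{
	init_preload_sketch();      /* performs setup */
	init_preload_sketch();      /* no-op */
	printf("state=%d\n", init_state);
	return 0;
}
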
old mode 100644 (file)
new mode 100755 (executable)
diff --git a/tools/lib/lockdep/uinclude/asm/hash.h b/tools/lib/lockdep/uinclude/asm/hash.h
new file mode 100644 (file)
index 0000000..d82b170
--- /dev/null
+++ b/tools/lib/lockdep/uinclude/asm/hash.h
@@ -0,0 +1,6 @@
+#ifndef __ASM_GENERIC_HASH_H
+#define __ASM_GENERIC_HASH_H
+
+/* Stub */
+
+#endif /* __ASM_GENERIC_HASH_H */
index 4c99fcb5da2769ef48067b5e6adf05806e758396..042ee8e463c98bf03d9a92e4d18dd3c01ec32162 100644 (file)
@@ -13,4 +13,9 @@ static inline int rcu_is_cpu_idle(void)
        return 1;
 }
 
+static inline bool rcu_is_watching(void)
+{
+       return false;
+}
+
 #endif
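
The two new uinclude files follow the same pattern: a userspace stand-in header that satisfies kernel includes with the minimal behaviour lockdep needs. Returning false from rcu_is_watching() agrees with the neighbouring rcu_is_cpu_idle() stub returning 1, since watching is the inverse of idle from RCU's point of view. A sketch of such a stub header (guard names are illustrative):

/* Userspace stand-in for a kernel header: provide just enough for the
 * consumer to compile.  Not the real lockdep stubs. */
#ifndef UAPI_STUB_RCU_H
#define UAPI_STUB_RCU_H

#include <stdbool.h>

/* In userspace there is no RCU: report the CPU as idle and RCU as not
 * watching.  The two answers must agree, since "watching" is simply
 * the inverse of "idle" from RCU's point of view. */
static inline int rcu_is_cpu_idle(void)
{
	return 1;
}

static inline bool rcu_is_watching(void)
{
	return false;
}

#endif /* UAPI_STUB_RCU_H */
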