Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
author    Linus Torvalds <torvalds@linux-foundation.org>
          Thu, 14 Jun 2012 12:33:55 +0000 (15:33 +0300)
committer Linus Torvalds <torvalds@linux-foundation.org>
          Thu, 14 Jun 2012 12:33:55 +0000 (15:33 +0300)

Pull networking fixes from David S. Miller:

This has the fix for the wireless issues I ran into the other week as
well as:

 1) Fix CAN c_can driver transmit handling resulting in BUG check
    triggers, from AnilKumar Ch.

 2) Fix packet drop monitor sleeping in atomic context, from Eric
    Dumazet.

 3) Fix mv643xx_eth driver build regression, from Andrew Lunn.

 4) Inetpeer freeing needs an RCU grace period in order to avoid races
    during tree invalidation.  From Eric Dumazet.
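    (A short sketch of this RCU-deferred-free pattern follows the list
    below.)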

 5) Fix endianness bugs in xt_HMARK netfilter module, from Hans
    Schillstrom.

 6) Add proper module refcounting to l2tp_eth to avoid crash on module
    unload, from Eric Dumazet.

 7) Fix truncation of neighbour entry dumps due to logic errors in
    neigh_dump_info() and friends, from Eric Dumazet.

 8) The conversion of fib6_age() to dst_neigh_lookup() accidentally
    reversed the logic of a flags test, fix from Thomas Graf.

 9) Fix checksum configuration in newer sky2 chips, from Stephen
    Hemminger.

10) Revert BQL support in NIU driver, doesn't work.

11) l2tp_ip_sendmsg() illegally uses a route without a proper reference.
    From Eric Dumazet.

12) be2net driver references an SKB after it's potentially been freed,
    also from Eric Dumazet.

13) Fix RCU stalls in dummy net driver init.  Also from Eric Dumazet.

14) lpc_eth has several bugs in its transmit engine leading to packet
    leaks and improper queue wakes, from Eric Dumazet.

15) Apply short DMA workaround to more tg3 chips, from Matt Carlson.

16) Add tilegx network driver.

17) Bonding queue mapping for a packet can get corrupted, fix from Eric
    Dumazet.

18) Fix bug in netpoll_send_udp() SKB management that can leave garbage
    in the payload in certain situations.  From Eric Dumazet.

19) bnx2x driver interprets chip RX checksum offload incorrectly in
    encapsulation situations.  Fix from Eric Dumazet.
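
As background for fix 4: the inetpeer tree is walked by lockless RCU
readers, so a node unlinked from the tree must not be kfree()d until a
grace period has elapsed.  A minimal sketch of that pattern follows; the
names (struct peer_node, peer_tree_lock, peer_unlink_and_free) are
illustrative stand-ins, not the actual inetpeer structures:

#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct peer_node {
	struct peer_node __rcu *child;
	unsigned long key;
	struct rcu_head rcu;		/* storage used by kfree_rcu() */
};

static DEFINE_SPINLOCK(peer_tree_lock);

static void peer_unlink_and_free(struct peer_node __rcu **slot,
				 struct peer_node *p)
{
	spin_lock_bh(&peer_tree_lock);
	rcu_assign_pointer(*slot, NULL);	/* unlink under the lock */
	spin_unlock_bh(&peer_tree_lock);

	/* Readers inside rcu_read_lock() sections may still hold 'p',
	 * so defer the kfree() until a grace period has passed.
	 */
	kfree_rcu(p, rcu);
}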

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (75 commits)
  bnx2x: fix checksum validation
  netpoll: fix netpoll_send_udp() bugs
  bonding: Fix corrupted queue_mapping
  bonding:record primary when modify it via sysfs
  tilegx network driver: initial support
  tg3: Apply short DMA frag workaround to 5906
  net: stmmac: Fix clock en-/disable calls
  lpc_eth: fix tx completion
  lpc_eth: add missing ndo_change_mtu()
  dummy: fix rcu_sched self-detected stalls
  net: Reorder initialization in ip_route_output to fix gcc warning
  virtio-net: fix a race on 32bit arches
  r8169: avoid NAPI scheduling delay.
  net: Make linux/tcp.h C++ friendly (trivial)
  netdev: fix drivers/net/phy/ kernel-doc warnings
  net/core: fix kernel-doc warnings
  be2net: fix a race in be_xmit()
  l2tp: fix a race in l2tp_ip_sendmsg()
  mac80211: add back channel change flag
  NFC: Fix possible NULL ptr deref when getting the name of a socket
  ...

91 files changed:
Documentation/networking/stmmac.txt
MAINTAINERS
drivers/bcma/driver_chipcommon_pmu.c
drivers/bcma/driver_pci.c
drivers/bcma/sprom.c
drivers/net/bonding/bond_main.c
drivers/net/bonding/bond_sysfs.c
drivers/net/can/c_can/c_can.c
drivers/net/can/c_can/c_can.h
drivers/net/can/cc770/cc770_platform.c
drivers/net/dummy.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
drivers/net/ethernet/broadcom/tg3.c
drivers/net/ethernet/emulex/benet/be_main.c
drivers/net/ethernet/intel/e1000e/ethtool.c
drivers/net/ethernet/intel/e1000e/mac.c
drivers/net/ethernet/intel/e1000e/netdev.c
drivers/net/ethernet/intel/e1000e/phy.c
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
drivers/net/ethernet/marvell/mv643xx_eth.c
drivers/net/ethernet/marvell/sky2.c
drivers/net/ethernet/nxp/lpc_eth.c
drivers/net/ethernet/realtek/r8169.c
drivers/net/ethernet/stmicro/stmmac/Kconfig
drivers/net/ethernet/stmicro/stmmac/stmmac.h
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
drivers/net/ethernet/sun/niu.c
drivers/net/ethernet/tile/Kconfig
drivers/net/ethernet/tile/Makefile
drivers/net/ethernet/tile/tilegx.c [new file with mode: 0644]
drivers/net/hyperv/hyperv_net.h
drivers/net/hyperv/netvsc.c
drivers/net/phy/icplus.c
drivers/net/phy/mdio_bus.c
drivers/net/usb/sierra_net.c
drivers/net/virtio_net.c
drivers/net/wireless/b43/b43.h
drivers/net/wireless/b43/main.c
drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
drivers/net/wireless/ipw2x00/ipw2100.c
drivers/net/wireless/iwlwifi/iwl-6000.c
drivers/net/wireless/iwlwifi/iwl-agn-sta.c
drivers/net/wireless/iwlwifi/iwl-drv.c
drivers/net/wireless/iwlwifi/iwl-eeprom.c
drivers/net/wireless/iwlwifi/iwl-mac80211.c
drivers/net/wireless/iwlwifi/iwl-prph.h
drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
drivers/net/wireless/mac80211_hwsim.c
drivers/net/wireless/mwifiex/cfg80211.c
drivers/net/wireless/mwifiex/fw.h
drivers/net/wireless/mwifiex/uap_cmd.c
drivers/net/wireless/rt2x00/rt2x00.h
drivers/net/wireless/rt2x00/rt2x00mac.c
drivers/net/wireless/rt2x00/rt2x00queue.c
drivers/net/wireless/rtl818x/rtl8187/leds.c
include/linux/netfilter/xt_HMARK.h
include/linux/tcp.h
include/net/inetpeer.h
include/net/route.h
include/net/sch_generic.h
net/appletalk/ddp.c
net/bluetooth/af_bluetooth.c
net/core/drop_monitor.c
net/core/filter.c
net/core/neighbour.c
net/core/netpoll.c
net/core/skbuff.c
net/ipv4/inetpeer.c
net/ipv4/ip_forward.c
net/ipv4/ipmr.c
net/ipv6/ip6_fib.c
net/ipv6/ip6_output.c
net/ipv6/ip6mr.c
net/l2tp/l2tp_eth.c
net/l2tp/l2tp_ip.c
net/mac80211/agg-rx.c
net/mac80211/cfg.c
net/mac80211/iface.c
net/mac80211/mlme.c
net/mac80211/offchannel.c
net/mac80211/sta_info.c
net/mac80211/tx.c
net/mac80211/util.c
net/netfilter/nf_conntrack_h323_main.c
net/netfilter/xt_HMARK.c
net/nfc/llcp/sock.c
net/wireless/ibss.c
net/wireless/util.c

diff --git a/Documentation/networking/stmmac.txt b/Documentation/networking/stmmac.txt
index ab1e8d7004c5238f9d4b30ec0137fd5f3e298226..5cb9a1972460fdcd2909f3a9fd06dd4c265a2921 100644
@@ -10,8 +10,8 @@ Currently this network device driver is for all STM embedded MAC/GMAC
 (i.e. 7xxx/5xxx SoCs), SPEAr (arm), Loongson1B (mips) and XLINX XC2V3000
 FF1152AMT0221 D1215994A VIRTEX FPGA board.
 
-DWC Ether MAC 10/100/1000 Universal version 3.60a (and older) and DWC Ether MAC 10/100
-Universal version 4.0 have been used for developing this driver.
+DWC Ether MAC 10/100/1000 Universal version 3.60a (and older) and DWC Ether
+MAC 10/100 Universal version 4.0 have been used for developing this driver.
 
 This driver supports both the platform bus and PCI.
 
@@ -54,27 +54,27 @@ net_device structure enabling the scatter/gather feature.
 When one or more packets are received, an interrupt happens. The interrupts
 are not queued so the driver has to scan all the descriptors in the ring during
 the receive process.
-This is based on NAPI so the interrupt handler signals only if there is work to be
-done, and it exits.
+This is based on NAPI so the interrupt handler signals only if there is work
+to be done, and it exits.
 Then the poll method will be scheduled at some future point.
 The incoming packets are stored, by the DMA, in a list of pre-allocated socket
 buffers in order to avoid the memcpy (Zero-copy).
 
 4.3) Timer-Driven Interrupt
-Instead of having the device that asynchronously notifies the frame receptions, the
-driver configures a timer to generate an interrupt at regular intervals.
-Based on the granularity of the timer, the frames that are received by the device
-will experience different levels of latency. Some NICs have dedicated timer
-device to perform this task. STMMAC can use either the RTC device or the TMU
-channel 2  on STLinux platforms.
+Instead of having the device that asynchronously notifies the frame receptions,
+the driver configures a timer to generate an interrupt at regular intervals.
+Based on the granularity of the timer, the frames that are received by the
+device will experience different levels of latency. Some NICs have dedicated
+timer device to perform this task. STMMAC can use either the RTC device or the
+TMU channel 2  on STLinux platforms.
 The timer's frequency can be passed to the driver as a parameter; when changing it,
 take care of both hardware capability and network stability/performance impact.
-Several performance tests on STM platforms showed this optimisation allows to spare
-the CPU while having the maximum throughput.
+Several performance tests on STM platforms showed this optimisation allows to
+spare the CPU while having the maximum throughput.
 
 4.4) WOL
-Wake up on Lan feature through Magic and Unicast frames are supported for the GMAC
-core.
+Wake up on Lan feature through Magic and Unicast frames are supported for the
+GMAC core.
 
 4.5) DMA descriptors
 Driver handles both normal and enhanced descriptors. The latter has been only
@@ -106,7 +106,8 @@ Several driver's information can be passed through the platform
 These are included in the include/linux/stmmac.h header file
 and detailed below as well:
 
- struct plat_stmmacenet_data {
+struct plat_stmmacenet_data {
+       char *phy_bus_name;
        int bus_id;
        int phy_addr;
        int interface;
@@ -124,19 +125,24 @@ and detailed below as well:
        void (*bus_setup)(void __iomem *ioaddr);
        int (*init)(struct platform_device *pdev);
        void (*exit)(struct platform_device *pdev);
+       void *custom_cfg;
+       void *custom_data;
        void *bsp_priv;
  };
 
 Where:
+ o phy_bus_name: phy bus name to attach to the stmmac.
  o bus_id: bus identifier.
  o phy_addr: the physical address can be passed from the platform.
            If it is set to -1 the driver will automatically
            detect it at run-time by probing all the 32 addresses.
  o interface: PHY device's interface.
  o mdio_bus_data: specific platform fields for the MDIO bus.
- o pbl: the Programmable Burst Length is maximum number of beats to
+ o dma_cfg: internal DMA parameters
+   o pbl: the Programmable Burst Length is maximum number of beats to
        be transferred in one DMA transaction.
        GMAC also enables the 4xPBL by default.
+   o fixed_burst/mixed_burst/burst_len
  o clk_csr: fixed CSR Clock range selection.
  o has_gmac: uses the GMAC core.
  o enh_desc: if sets the MAC will use the enhanced descriptor structure.
@@ -160,8 +166,9 @@ Where:
             this is sometime necessary on some platforms (e.g. ST boxes)
             where the HW needs to have set some PIO lines or system cfg
             registers.
- o custom_cfg: this is a custom configuration that can be passed while
-             initialising the resources.
+ o custom_cfg/custom_data: this is a custom configuration that can be passed
+                          while initialising the resources.
+ o bsp_priv: another private pointer.
 
 For the MDIO bus we have:
 
@@ -180,7 +187,6 @@ Where:
  o irqs: list of IRQs, one per PHY.
  o probed_phy_irq: if irqs is NULL, use this for probed PHY.
 
-
 For DMA engine we have the following internal fields that should be
 tuned according to the HW capabilities.
 
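
To make the platform-data documentation above concrete, here is a
minimal sketch of how a board file might fill in the structure.  The
field values are illustrative only, chosen to match the descriptions
above rather than any real board:

#include <linux/stmmac.h>
#include <linux/phy.h>

static struct stmmac_dma_cfg board_dma_cfg = {
	.pbl = 32,		/* Programmable Burst Length */
};

static struct plat_stmmacenet_data board_stmmac_pdata = {
	.bus_id    = 0,
	.phy_addr  = -1,	/* -1: probe all 32 MDIO addresses at run-time */
	.interface = PHY_INTERFACE_MODE_GMII,
	.dma_cfg   = &board_dma_cfg,
	.has_gmac  = 1,		/* the GMAC core is present */
	.enh_desc  = 1,		/* use the enhanced descriptor structure */
};
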
diff --git a/MAINTAINERS b/MAINTAINERS
index 14bc7071f9dfe692b495b50159d1799621d8ba04..f6e62def61cd07f1e45b18138d9890573e831bf2 100644
@@ -1800,6 +1800,9 @@ F:        include/linux/cfag12864b.h
 CFG80211 and NL80211
 M:     Johannes Berg <johannes@sipsolutions.net>
 L:     linux-wireless@vger.kernel.org
+W:     http://wireless.kernel.org/
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211-next.git
 S:     Maintained
 F:     include/linux/nl80211.h
 F:     include/net/cfg80211.h
@@ -4349,7 +4352,8 @@ MAC80211
 M:     Johannes Berg <johannes@sipsolutions.net>
 L:     linux-wireless@vger.kernel.org
 W:     http://linuxwireless.org/
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211-next.git
 S:     Maintained
 F:     Documentation/networking/mac80211-injection.txt
 F:     include/net/mac80211.h
@@ -4360,7 +4364,8 @@ M:        Stefano Brivio <stefano.brivio@polimi.it>
 M:     Mattias Nissler <mattias.nissler@gmx.de>
 L:     linux-wireless@vger.kernel.org
 W:     http://linuxwireless.org/en/developers/Documentation/mac80211/RateControl/PID
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211-next.git
 S:     Maintained
 F:     net/mac80211/rc80211_pid*
 
@@ -5711,6 +5716,9 @@ F:        include/linux/remoteproc.h
 RFKILL
 M:     Johannes Berg <johannes@sipsolutions.net>
 L:     linux-wireless@vger.kernel.org
+W:     http://wireless.kernel.org/
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211-next.git
 S:     Maintained
 F:     Documentation/rfkill.txt
 F:     net/rfkill/
diff --git a/drivers/bcma/driver_chipcommon_pmu.c b/drivers/bcma/driver_chipcommon_pmu.c
index a058842f14fdf54b92b495f01c34a02ed2b74bcc..61ce4054b3c33b3ed70e0de39d0e19a9f5167cf0 100644
@@ -139,7 +139,9 @@ void bcma_pmu_workarounds(struct bcma_drv_cc *cc)
                bcma_chipco_chipctl_maskset(cc, 0, ~0, 0x7);
                break;
        case 0x4331:
-               /* BCM4331 workaround is SPROM-related, we put it in sprom.c */
+       case 43431:
+               /* Ext PA lines must be enabled for tx on BCM4331 */
+               bcma_chipco_bcm4331_ext_pa_lines_ctl(cc, true);
                break;
        case 43224:
                if (bus->chipinfo.rev == 0) {
diff --git a/drivers/bcma/driver_pci.c b/drivers/bcma/driver_pci.c
index 9a96f14c8f474fba41442bbdcbe3bc910a31efc6..c32ebd537abe3a3e5f8f5e777f119c8768c9ea38 100644
@@ -232,17 +232,19 @@ void __devinit bcma_core_pci_init(struct bcma_drv_pci *pc)
 int bcma_core_pci_irq_ctl(struct bcma_drv_pci *pc, struct bcma_device *core,
                          bool enable)
 {
-       struct pci_dev *pdev = pc->core->bus->host_pci;
+       struct pci_dev *pdev;
        u32 coremask, tmp;
        int err = 0;
 
-       if (core->bus->hosttype != BCMA_HOSTTYPE_PCI) {
+       if (!pc || core->bus->hosttype != BCMA_HOSTTYPE_PCI) {
                /* This bcma device is not on a PCI host-bus. So the IRQs are
                 * not routed through the PCI core.
                 * So we must not enable routing through the PCI core. */
                goto out;
        }
 
+       pdev = pc->core->bus->host_pci;
+
        err = pci_read_config_dword(pdev, BCMA_PCI_IRQMASK, &tmp);
        if (err)
                goto out;
diff --git a/drivers/bcma/sprom.c b/drivers/bcma/sprom.c
index c7f93359acb09affe99a398f45af7974ccdc30e3..f16f42d36071371414574305d6f057f9240f3bfd 100644
@@ -579,13 +579,13 @@ int bcma_sprom_get(struct bcma_bus *bus)
        if (!sprom)
                return -ENOMEM;
 
-       if (bus->chipinfo.id == 0x4331)
+       if (bus->chipinfo.id == 0x4331 || bus->chipinfo.id == 43431)
                bcma_chipco_bcm4331_ext_pa_lines_ctl(&bus->drv_cc, false);
 
        pr_debug("SPROM offset 0x%x\n", offset);
        bcma_sprom_read(bus, offset, sprom);
 
-       if (bus->chipinfo.id == 0x4331)
+       if (bus->chipinfo.id == 0x4331 || bus->chipinfo.id == 43431)
                bcma_chipco_bcm4331_ext_pa_lines_ctl(&bus->drv_cc, true);
 
        err = bcma_sprom_valid(sprom);
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 2ee8cf9e8a3b9fe8e728e1bc6d2334793712deb6..b9c2ae62166ddb3e8647a2c756e2ab966d2d8063 100644
@@ -76,6 +76,7 @@
 #include <net/route.h>
 #include <net/net_namespace.h>
 #include <net/netns/generic.h>
+#include <net/pkt_sched.h>
 #include "bonding.h"
 #include "bond_3ad.h"
 #include "bond_alb.h"
@@ -381,8 +382,6 @@ struct vlan_entry *bond_next_vlan(struct bonding *bond, struct vlan_entry *curr)
        return next;
 }
 
-#define bond_queue_mapping(skb) (*(u16 *)((skb)->cb))
-
 /**
  * bond_dev_queue_xmit - Prepare skb for xmit.
  *
@@ -395,7 +394,9 @@ int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb,
 {
        skb->dev = slave_dev;
 
-       skb->queue_mapping = bond_queue_mapping(skb);
+       BUILD_BUG_ON(sizeof(skb->queue_mapping) !=
+                    sizeof(qdisc_skb_cb(skb)->bond_queue_mapping));
+       skb->queue_mapping = qdisc_skb_cb(skb)->bond_queue_mapping;
 
        if (unlikely(netpoll_tx_running(slave_dev)))
                bond_netpoll_send_skb(bond_get_slave_by_dev(bond, slave_dev), skb);
@@ -4171,7 +4172,7 @@ static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb)
        /*
         * Save the original txq to restore before passing to the driver
         */
-       bond_queue_mapping(skb) = skb->queue_mapping;
+       qdisc_skb_cb(skb)->bond_queue_mapping = skb->queue_mapping;
 
        if (unlikely(txq >= dev->real_num_tx_queues)) {
                do {
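
The change above works because skb->cb is scratch space that the qdisc
layer reuses between ndo_select_queue() and the actual transmit, so
bonding's original trick of casting skb->cb could be silently
overwritten.  This merge instead reserves a bond_queue_mapping field in
struct qdisc_skb_cb (include/net/sch_generic.h, also changed here).  A
sketch of the resulting save/restore pair; the helper names are
hypothetical, not from the patch:

#include <linux/skbuff.h>
#include <net/sch_generic.h>

/* Called from ndo_select_queue(): remember the queue chosen there. */
static inline void bond_save_txq(struct sk_buff *skb)
{
	qdisc_skb_cb(skb)->bond_queue_mapping = skb->queue_mapping;
}

/* Called just before handing the skb to the slave device. */
static inline void bond_restore_txq(struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(skb->queue_mapping) !=
		     sizeof(qdisc_skb_cb(skb)->bond_queue_mapping));
	skb->queue_mapping = qdisc_skb_cb(skb)->bond_queue_mapping;
}
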
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index aef42f045320ae86eed4014835f90ccd786d1586..485bedb8278c1cd7bc7f85322232caced433c3f6 100644
@@ -1082,8 +1082,12 @@ static ssize_t bonding_store_primary(struct device *d,
                }
        }
 
-       pr_info("%s: Unable to set %.*s as primary slave.\n",
-               bond->dev->name, (int)strlen(buf) - 1, buf);
+       strncpy(bond->params.primary, ifname, IFNAMSIZ);
+       bond->params.primary[IFNAMSIZ - 1] = 0;
+
+       pr_info("%s: Recording %s as primary, "
+               "but it has not been enslaved to %s yet.\n",
+               bond->dev->name, ifname, bond->dev->name);
 out:
        write_unlock_bh(&bond->curr_slave_lock);
        read_unlock(&bond->lock);
diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
index 536bda072a1677a18a396125f7df714c172ced54..8dc84d66eea1b446b66b7c8a12f394abba619d57 100644
@@ -686,7 +686,7 @@ static int c_can_get_berr_counter(const struct net_device *dev,
  *
  * We iterate from priv->tx_echo to priv->tx_next and check if the
  * packet has been transmitted, echo it back to the CAN framework.
- * If we discover a not yet transmitted package, stop looking for more.
+ * If we discover a not yet transmitted packet, stop looking for more.
  */
 static void c_can_do_tx(struct net_device *dev)
 {
@@ -698,7 +698,7 @@ static void c_can_do_tx(struct net_device *dev)
        for (/* nix */; (priv->tx_next - priv->tx_echo) > 0; priv->tx_echo++) {
                msg_obj_no = get_tx_echo_msg_obj(priv);
                val = c_can_read_reg32(priv, &priv->regs->txrqst1);
-               if (!(val & (1 << msg_obj_no))) {
+               if (!(val & (1 << (msg_obj_no - 1)))) {
                        can_get_echo_skb(dev,
                                        msg_obj_no - C_CAN_MSG_OBJ_TX_FIRST);
                        stats->tx_bytes += priv->read_reg(priv,
@@ -706,6 +706,8 @@ static void c_can_do_tx(struct net_device *dev)
                                        & IF_MCONT_DLC_MASK;
                        stats->tx_packets++;
                        c_can_inval_msg_object(dev, 0, msg_obj_no);
+               } else {
+                       break;
                }
        }
 
@@ -950,7 +952,7 @@ static int c_can_poll(struct napi_struct *napi, int quota)
        struct net_device *dev = napi->dev;
        struct c_can_priv *priv = netdev_priv(dev);
 
-       irqstatus = priv->read_reg(priv, &priv->regs->interrupt);
+       irqstatus = priv->irqstatus;
        if (!irqstatus)
                goto end;
 
@@ -1028,12 +1030,11 @@ end:
 
 static irqreturn_t c_can_isr(int irq, void *dev_id)
 {
-       u16 irqstatus;
        struct net_device *dev = (struct net_device *)dev_id;
        struct c_can_priv *priv = netdev_priv(dev);
 
-       irqstatus = priv->read_reg(priv, &priv->regs->interrupt);
-       if (!irqstatus)
+       priv->irqstatus = priv->read_reg(priv, &priv->regs->interrupt);
+       if (!priv->irqstatus)
                return IRQ_NONE;
 
        /* disable all interrupts and schedule the NAPI */
@@ -1063,10 +1064,11 @@ static int c_can_open(struct net_device *dev)
                goto exit_irq_fail;
        }
 
+       napi_enable(&priv->napi);
+
        /* start the c_can controller */
        c_can_start(dev);
 
-       napi_enable(&priv->napi);
        netif_start_queue(dev);
 
        return 0;
diff --git a/drivers/net/can/c_can/c_can.h b/drivers/net/can/c_can/c_can.h
index 9b7fbef3d09a1248cda69974ac9c3cc4cf9e464e..5f32d34af507e7a9d51c4b37f8add3cd6ff0ddcb 100644
@@ -76,6 +76,7 @@ struct c_can_priv {
        unsigned int tx_next;
        unsigned int tx_echo;
        void *priv;             /* for board-specific data */
+       u16 irqstatus;
 };
 
 struct net_device *alloc_c_can_dev(void);
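
The c_can change above is needed because the interrupt register is
read-to-clear: the old code read it once in the ISR and again in the
NAPI poll, and the second read returned zero so pending work was
dropped.  The shape of the fix as a self-contained sketch;
my_read_int_reg() and my_disable_ints() are hypothetical stand-ins for
the driver's register accessors:

#include <linux/interrupt.h>
#include <linux/netdevice.h>

struct my_can_priv {
	struct napi_struct napi;
	u16 irqstatus;		/* cached by the ISR for the poll loop */
};

extern u16 my_read_int_reg(struct my_can_priv *priv);
extern void my_disable_ints(struct my_can_priv *priv);

static irqreturn_t my_isr(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct my_can_priv *priv = netdev_priv(dev);

	priv->irqstatus = my_read_int_reg(priv);	/* reading clears it */
	if (!priv->irqstatus)
		return IRQ_NONE;			/* not our interrupt */

	my_disable_ints(priv);
	napi_schedule(&priv->napi);
	return IRQ_HANDLED;
}

static int my_poll(struct napi_struct *napi, int quota)
{
	struct my_can_priv *priv = netdev_priv(napi->dev);
	u16 irqstatus = priv->irqstatus;	/* reuse the cached value */

	/* ... handle irqstatus, complete NAPI, re-enable interrupts ... */
	return 0;
}
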
diff --git a/drivers/net/can/cc770/cc770_platform.c b/drivers/net/can/cc770/cc770_platform.c
index 53115eee80758fd99e45a5a6258986fa81b9f8d5..688371cda37afc51ff125efa547e819126e4ca24 100644
@@ -154,7 +154,7 @@ static int __devinit cc770_get_platform_data(struct platform_device *pdev,
        struct cc770_platform_data *pdata = pdev->dev.platform_data;
 
        priv->can.clock.freq = pdata->osc_freq;
-       if (priv->cpu_interface | CPUIF_DSC)
+       if (priv->cpu_interface & CPUIF_DSC)
                priv->can.clock.freq /= 2;
        priv->clkout = pdata->cor;
        priv->bus_config = pdata->bcr;
diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c
index 442d91a2747b9d8136dd5809ec3d192fc8c1e04c..bab0158f1cc3180f112c3d296cb5ffb22bddeb0d 100644
@@ -187,8 +187,10 @@ static int __init dummy_init_module(void)
        rtnl_lock();
        err = __rtnl_link_register(&dummy_link_ops);
 
-       for (i = 0; i < numdummies && !err; i++)
+       for (i = 0; i < numdummies && !err; i++) {
                err = dummy_init_one();
+               cond_resched();
+       }
        if (err < 0)
                __rtnl_link_unregister(&dummy_link_ops);
        rtnl_unlock();
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index e30e2a2f354c8fc30f2c59750c5b1ce63fa66676..7de824184979788b2f740b6e824c271c60ff45d9 100644
@@ -747,21 +747,6 @@ struct bnx2x_fastpath {
 
 #define ETH_RX_ERROR_FALGS             ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG
 
-#define BNX2X_IP_CSUM_ERR(cqe) \
-                       (!((cqe)->fast_path_cqe.status_flags & \
-                          ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG) && \
-                        ((cqe)->fast_path_cqe.type_error_flags & \
-                         ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG))
-
-#define BNX2X_L4_CSUM_ERR(cqe) \
-                       (!((cqe)->fast_path_cqe.status_flags & \
-                          ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG) && \
-                        ((cqe)->fast_path_cqe.type_error_flags & \
-                         ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
-
-#define BNX2X_RX_CSUM_OK(cqe) \
-                       (!(BNX2X_L4_CSUM_ERR(cqe) || BNX2X_IP_CSUM_ERR(cqe)))
-
 #define BNX2X_PRS_FLAG_OVERETH_IPV4(flags) \
                                (((le16_to_cpu(flags) & \
                                   PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) >> \
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index ad0743bf4bdece7ac3cbc17e98f881e92530b22f..cbc56f274e0cd33298007c7cb457884ad9831580 100644
@@ -617,6 +617,25 @@ static int bnx2x_alloc_rx_data(struct bnx2x *bp,
        return 0;
 }
 
+static void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
+                               struct bnx2x_fastpath *fp)
+{
+       /* Do nothing if no IP/L4 csum validation was done */
+
+       if (cqe->fast_path_cqe.status_flags &
+           (ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG |
+            ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG))
+               return;
+
+       /* If both IP/L4 validation were done, check if an error was found. */
+
+       if (cqe->fast_path_cqe.type_error_flags &
+           (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
+            ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
+               fp->eth_q_stats.hw_csum_err++;
+       else
+               skb->ip_summed = CHECKSUM_UNNECESSARY;
+}
 
 int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
 {
@@ -806,13 +825,9 @@ reuse_rx:
 
                skb_checksum_none_assert(skb);
 
-               if (bp->dev->features & NETIF_F_RXCSUM) {
+               if (bp->dev->features & NETIF_F_RXCSUM)
+                       bnx2x_csum_validate(skb, cqe, fp);
 
-                       if (likely(BNX2X_RX_CSUM_OK(cqe)))
-                               skb->ip_summed = CHECKSUM_UNNECESSARY;
-                       else
-                               fp->eth_q_stats.hw_csum_err++;
-               }
 
                skb_record_rx_queue(skb, fp->rx_queue);
 
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index edeeb516807a1399ceb8f8a9846bbd105fcce7c3..e47ff8be1d7b5c27be543c7d41f4584d56054644 100644
@@ -14275,7 +14275,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
                }
        }
 
-       if (tg3_flag(tp, 5755_PLUS))
+       if (tg3_flag(tp, 5755_PLUS) ||
+           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
                tg3_flag_set(tp, SHORT_DMA_BUG);
 
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 08efd308d78ae40640f73953a41531aabba2c054..fdb50cec6b515aa50271b079ea62f73e1755c582 100644
@@ -736,6 +736,8 @@ static netdev_tx_t be_xmit(struct sk_buff *skb,
 
        copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
        if (copied) {
+               int gso_segs = skb_shinfo(skb)->gso_segs;
+
                /* record the sent skb in the sent_skb table */
                BUG_ON(txo->sent_skb_list[start]);
                txo->sent_skb_list[start] = skb;
@@ -753,8 +755,7 @@ static netdev_tx_t be_xmit(struct sk_buff *skb,
 
                be_txq_notify(adapter, txq->id, wrb_cnt);
 
-               be_tx_stats_update(txo, wrb_cnt, copied,
-                               skb_shinfo(skb)->gso_segs, stopped);
+               be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
        } else {
                txq->head = start;
                dev_kfree_skb_any(skb);
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index d863075df7a407cc59e194feebd652c622729384..905e2147d9182f4c8d9b26de66820fdc130debf0 100644
@@ -258,7 +258,8 @@ static int e1000_set_settings(struct net_device *netdev,
         * When SoL/IDER sessions are active, autoneg/speed/duplex
         * cannot be changed
         */
-       if (hw->phy.ops.check_reset_block(hw)) {
+       if (hw->phy.ops.check_reset_block &&
+           hw->phy.ops.check_reset_block(hw)) {
                e_err("Cannot change link characteristics when SoL/IDER is active.\n");
                return -EINVAL;
        }
@@ -1615,7 +1616,8 @@ static int e1000_loopback_test(struct e1000_adapter *adapter, u64 *data)
         * PHY loopback cannot be performed if SoL/IDER
         * sessions are active
         */
-       if (hw->phy.ops.check_reset_block(hw)) {
+       if (hw->phy.ops.check_reset_block &&
+           hw->phy.ops.check_reset_block(hw)) {
                e_err("Cannot do PHY loopback test when SoL/IDER is active.\n");
                *data = 0;
                goto out;
diff --git a/drivers/net/ethernet/intel/e1000e/mac.c b/drivers/net/ethernet/intel/e1000e/mac.c
index 026e8b3ab52eee6be5ecadf5ee0de2745eae5faf..a13439928488c7aeab640d0a99f0833a5e74f203 100644
@@ -709,7 +709,7 @@ s32 e1000e_setup_link_generic(struct e1000_hw *hw)
         * In the case of the phy reset being blocked, we already have a link.
         * We do not need to set it up again.
         */
-       if (hw->phy.ops.check_reset_block(hw))
+       if (hw->phy.ops.check_reset_block && hw->phy.ops.check_reset_block(hw))
                return 0;
 
        /*
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index a4b0435b00dc83078776346d9a510923b1cc20ac..31d37a2b5ba818e1366945b630f8b187ec9fb15b 100644
@@ -6237,7 +6237,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
                adapter->hw.phy.ms_type = e1000_ms_hw_default;
        }
 
-       if (hw->phy.ops.check_reset_block(hw))
+       if (hw->phy.ops.check_reset_block && hw->phy.ops.check_reset_block(hw))
                e_info("PHY reset is blocked due to SOL/IDER session.\n");
 
        /* Set initial default active device features */
@@ -6404,7 +6404,7 @@ err_register:
        if (!(adapter->flags & FLAG_HAS_AMT))
                e1000e_release_hw_control(adapter);
 err_eeprom:
-       if (!hw->phy.ops.check_reset_block(hw))
+       if (hw->phy.ops.check_reset_block && !hw->phy.ops.check_reset_block(hw))
                e1000_phy_hw_reset(&adapter->hw);
 err_hw_init:
        kfree(adapter->tx_ring);
diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c
index 0334d013bc3c828fc2256ae117287809f46a9d2c..b860d4f7ea2a950a7b24d0db8ca6f15446f1bfd3 100644
@@ -2155,9 +2155,11 @@ s32 e1000e_phy_hw_reset_generic(struct e1000_hw *hw)
        s32 ret_val;
        u32 ctrl;
 
-       ret_val = phy->ops.check_reset_block(hw);
-       if (ret_val)
-               return 0;
+       if (phy->ops.check_reset_block) {
+               ret_val = phy->ops.check_reset_block(hw);
+               if (ret_val)
+                       return 0;
+       }
 
        ret_val = phy->ops.acquire(hw);
        if (ret_val)
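
All four e1000e hunks above apply the same rule: an ops callback that
not every PHY implements must be NULL-checked before every call.  A
small wrapper (hypothetical, not part of the patch) would express the
pattern once, with each call site becoming
"if (e1000_check_reset_block_safe(hw)) ...":

/* Drops into the e1000e driver, next to the other PHY helpers. */
static inline s32 e1000_check_reset_block_safe(struct e1000_hw *hw)
{
	/* A missing callback means the reset is never blocked. */
	if (hw->phy.ops.check_reset_block)
		return hw->phy.ops.check_reset_block(hw);
	return 0;
}
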
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index bf20457ea23aba4a249837aca419d4250a079fcf..17ad6a3c1be126f2d25cf7e8ce448a7385ec8fc2 100644
@@ -1390,6 +1390,8 @@ static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
                                     union ixgbe_adv_rx_desc *rx_desc,
                                     struct sk_buff *skb)
 {
+       struct net_device *dev = rx_ring->netdev;
+
        ixgbe_update_rsc_stats(rx_ring, skb);
 
        ixgbe_rx_hash(rx_ring, rx_desc, skb);
@@ -1401,14 +1403,15 @@ static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
                ixgbe_ptp_rx_hwtstamp(rx_ring->q_vector, skb);
 #endif
 
-       if (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) {
+       if ((dev->features & NETIF_F_HW_VLAN_RX) &&
+           ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) {
                u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
                __vlan_hwaccel_put_tag(skb, vid);
        }
 
        skb_record_rx_queue(skb, rx_ring->queue_index);
 
-       skb->protocol = eth_type_trans(skb, rx_ring->netdev);
+       skb->protocol = eth_type_trans(skb, dev);
 }
 
 static void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector,
@@ -3607,10 +3610,6 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
        if (hw->mac.type == ixgbe_mac_82598EB)
                netif_set_gso_max_size(adapter->netdev, 32768);
 
-
-       /* Enable VLAN tag insert/strip */
-       adapter->netdev->features |= NETIF_F_HW_VLAN_RX;
-
        hw->mac.ops.set_vfta(&adapter->hw, 0, 0, true);
 
 #ifdef IXGBE_FCOE
@@ -6701,11 +6700,6 @@ static netdev_features_t ixgbe_fix_features(struct net_device *netdev,
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
 
-#ifdef CONFIG_DCB
-       if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
-               features &= ~NETIF_F_HW_VLAN_RX;
-#endif
-
        /* return error if RXHASH is being enabled when RSS is not supported */
        if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED))
                features &= ~NETIF_F_RXHASH;
@@ -6718,7 +6712,6 @@ static netdev_features_t ixgbe_fix_features(struct net_device *netdev,
        if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE))
                features &= ~NETIF_F_LRO;
 
-
        return features;
 }
 
@@ -6766,6 +6759,11 @@ static int ixgbe_set_features(struct net_device *netdev,
                need_reset = true;
        }
 
+       if (features & NETIF_F_HW_VLAN_RX)
+               ixgbe_vlan_strip_enable(adapter);
+       else
+               ixgbe_vlan_strip_disable(adapter);
+
        if (changed & NETIF_F_RXALL)
                need_reset = true;
 
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 04d901d0ff635f284185175bdc3698bc7940038b..f0f06b2bc28b3f951933c19bde6e788adcc7c5bf 100644
@@ -436,7 +436,9 @@ struct mv643xx_eth_private {
        /*
         * Hardware-specific parameters.
         */
+#if defined(CONFIG_HAVE_CLK)
        struct clk *clk;
+#endif
        unsigned int t_clk;
 };
 
@@ -2895,17 +2897,17 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
        mp->dev = dev;
 
        /*
-        * Get the clk rate, if there is one, otherwise use the default.
+        * Start with a default rate, and if there is a clock, allow
+        * it to override the default.
         */
+       mp->t_clk = 133000000;
+#if defined(CONFIG_HAVE_CLK)
        mp->clk = clk_get(&pdev->dev, (pdev->id ? "1" : "0"));
        if (!IS_ERR(mp->clk)) {
                clk_prepare_enable(mp->clk);
                mp->t_clk = clk_get_rate(mp->clk);
-       } else {
-               mp->t_clk = 133000000;
-               printk(KERN_WARNING "Unable to get clock");
        }
-
+#endif
        set_params(mp, pd);
        netif_set_real_num_tx_queues(dev, mp->txq_count);
        netif_set_real_num_rx_queues(dev, mp->rxq_count);
@@ -2995,10 +2997,13 @@ static int mv643xx_eth_remove(struct platform_device *pdev)
                phy_detach(mp->phy);
        cancel_work_sync(&mp->tx_timeout_task);
 
+#if defined(CONFIG_HAVE_CLK)
        if (!IS_ERR(mp->clk)) {
                clk_disable_unprepare(mp->clk);
                clk_put(mp->clk);
        }
+#endif
+
        free_netdev(mp->dev);
 
        platform_set_drvdata(pdev, NULL);
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index cace36f2ab921772417b515182762c54412f13fa..28a54451a3e5060344c91af03cc1d51d557bae59 100644
@@ -4381,10 +4381,12 @@ static int sky2_set_features(struct net_device *dev, netdev_features_t features)
        struct sky2_port *sky2 = netdev_priv(dev);
        netdev_features_t changed = dev->features ^ features;
 
-       if (changed & NETIF_F_RXCSUM) {
-               bool on = features & NETIF_F_RXCSUM;
-               sky2_write32(sky2->hw, Q_ADDR(rxqaddr[sky2->port], Q_CSR),
-                            on ? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM);
+       if ((changed & NETIF_F_RXCSUM) &&
+           !(sky2->hw->flags & SKY2_HW_NEW_LE)) {
+               sky2_write32(sky2->hw,
+                            Q_ADDR(rxqaddr[sky2->port], Q_CSR),
+                            (features & NETIF_F_RXCSUM)
+                            ? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM);
        }
 
        if (changed & NETIF_F_RXHASH)
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
index 8d2666fcffd7eea6e2d913497be0f7ab080439c3..083d6715335cdb76200b905af3ee3ff4f25495f7 100644
@@ -946,16 +946,16 @@ static void __lpc_handle_xmit(struct net_device *ndev)
                        /* Update stats */
                        ndev->stats.tx_packets++;
                        ndev->stats.tx_bytes += skb->len;
-
-                       /* Free buffer */
-                       dev_kfree_skb_irq(skb);
                }
+               dev_kfree_skb_irq(skb);
 
                txcidx = readl(LPC_ENET_TXCONSUMEINDEX(pldat->net_base));
        }
 
-       if (netif_queue_stopped(ndev))
-               netif_wake_queue(ndev);
+       if (pldat->num_used_tx_buffs <= ENET_TX_DESC/2) {
+               if (netif_queue_stopped(ndev))
+                       netif_wake_queue(ndev);
+       }
 }
 
 static int __lpc_handle_recv(struct net_device *ndev, int budget)
@@ -1320,6 +1320,7 @@ static const struct net_device_ops lpc_netdev_ops = {
        .ndo_set_rx_mode        = lpc_eth_set_multicast_list,
        .ndo_do_ioctl           = lpc_eth_ioctl,
        .ndo_set_mac_address    = lpc_set_mac_address,
+       .ndo_change_mtu         = eth_change_mtu,
 };
 
 static int lpc_eth_drv_probe(struct platform_device *pdev)
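
The wake logic above is one half of a stop/wake hysteresis: the
transmit path stops the queue when the descriptor ring fills, and the
completion handler now wakes it only once at least half the ring
(ENET_TX_DESC/2) is free again, so the queue no longer bounces between
stopped and awake on every completion.  A sketch of the transmit-side
half, using a stand-in struct and an illustrative ring size; the real
driver's bookkeeping is more involved:

#include <linux/netdevice.h>

#define ENET_TX_DESC 16		/* illustrative; the driver defines this */

struct netdata_local_sketch {
	unsigned int num_used_tx_buffs;
	/* ... DMA ring bookkeeping elided ... */
};

static netdev_tx_t my_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct netdata_local_sketch *pldat = netdev_priv(ndev);

	if (pldat->num_used_tx_buffs >= ENET_TX_DESC - 1) {
		/* Ring full: block the stack until completions drain it. */
		netif_stop_queue(ndev);
		return NETDEV_TX_BUSY;
	}

	/* ... fill a descriptor, kick the DMA engine ... */
	pldat->num_used_tx_buffs++;
	return NETDEV_TX_OK;
}
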
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 9757ce3543a08746150e73cc933bf25537f71b12..7260aa79466a06055c56ad37d1a97119f4399031 100644
@@ -5889,11 +5889,7 @@ static void rtl_slow_event_work(struct rtl8169_private *tp)
        if (status & LinkChg)
                __rtl8169_check_link_status(dev, tp, tp->mmio_addr, true);
 
-       napi_disable(&tp->napi);
-       rtl_irq_disable(tp);
-
-       napi_enable(&tp->napi);
-       napi_schedule(&tp->napi);
+       rtl_irq_enable_all(tp);
 }
 
 static void rtl_task(struct work_struct *work)
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index 036428348faa3e5b58cd261b9f0ec45e0ad3e4fc..9f448279e12a52ea7965c3500bd910c859afbb6b 100644
@@ -13,7 +13,7 @@ config STMMAC_ETH
 if STMMAC_ETH
 
 config STMMAC_PLATFORM
-       tristate "STMMAC platform bus support"
+       bool "STMMAC Platform bus support"
        depends on STMMAC_ETH
        default y
        ---help---
@@ -26,7 +26,7 @@ config STMMAC_PLATFORM
          If unsure, say N.
 
 config STMMAC_PCI
-       tristate "STMMAC support on PCI bus (EXPERIMENTAL)"
+       bool "STMMAC PCI bus support (EXPERIMENTAL)"
        depends on STMMAC_ETH && PCI && EXPERIMENTAL
        ---help---
          This is to select the Synopsys DWMAC available on PCI devices,
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index 6b5d060ee9def7dd5fb5a8edac7037cc71bd869b..dc20c56efc9d6dcca84d8981d6648af10e5d66d3 100644
@@ -26,6 +26,7 @@
 #include <linux/clk.h>
 #include <linux/stmmac.h>
 #include <linux/phy.h>
+#include <linux/pci.h>
 #include "common.h"
 #ifdef CONFIG_STMMAC_TIMER
 #include "stmmac_timer.h"
@@ -95,7 +96,6 @@ extern int stmmac_mdio_register(struct net_device *ndev);
 extern void stmmac_set_ethtool_ops(struct net_device *netdev);
 extern const struct stmmac_desc_ops enh_desc_ops;
 extern const struct stmmac_desc_ops ndesc_ops;
-
 int stmmac_freeze(struct net_device *ndev);
 int stmmac_restore(struct net_device *ndev);
 int stmmac_resume(struct net_device *ndev);
@@ -109,7 +109,7 @@ struct stmmac_priv *stmmac_dvr_probe(struct device *device,
 static inline int stmmac_clk_enable(struct stmmac_priv *priv)
 {
        if (!IS_ERR(priv->stmmac_clk))
-               return clk_enable(priv->stmmac_clk);
+               return clk_prepare_enable(priv->stmmac_clk);
 
        return 0;
 }
@@ -119,7 +119,7 @@ static inline void stmmac_clk_disable(struct stmmac_priv *priv)
        if (IS_ERR(priv->stmmac_clk))
                return;
 
-       clk_disable(priv->stmmac_clk);
+       clk_disable_unprepare(priv->stmmac_clk);
 }
 static inline int stmmac_clk_get(struct stmmac_priv *priv)
 {
@@ -143,3 +143,60 @@ static inline int stmmac_clk_get(struct stmmac_priv *priv)
        return 0;
 }
 #endif /* CONFIG_HAVE_CLK */
+
+
+#ifdef CONFIG_STMMAC_PLATFORM
+extern struct platform_driver stmmac_pltfr_driver;
+static inline int stmmac_register_platform(void)
+{
+       int err;
+
+       err = platform_driver_register(&stmmac_pltfr_driver);
+       if (err)
+               pr_err("stmmac: failed to register the platform driver\n");
+
+       return err;
+}
+static inline void stmmac_unregister_platform(void)
+{
+       platform_driver_unregister(&stmmac_pltfr_driver);
+}
+#else
+static inline int stmmac_register_platform(void)
+{
+       pr_debug("stmmac: do not register the platf driver\n");
+
+       return -EINVAL;
+}
+static inline void stmmac_unregister_platform(void)
+{
+}
+#endif /* CONFIG_STMMAC_PLATFORM */
+
+#ifdef CONFIG_STMMAC_PCI
+extern struct pci_driver stmmac_pci_driver;
+static inline int stmmac_register_pci(void)
+{
+       int err;
+
+       err = pci_register_driver(&stmmac_pci_driver);
+       if (err)
+               pr_err("stmmac: failed to register the PCI driver\n");
+
+       return err;
+}
+static inline void stmmac_unregister_pci(void)
+{
+       pci_unregister_driver(&stmmac_pci_driver);
+}
+#else
+static inline int stmmac_register_pci(void)
+{
+       pr_debug("stmmac: do not register the PCI driver\n");
+
+       return -EINVAL;
+}
+static inline void stmmac_unregister_pci(void)
+{
+}
+#endif /* CONFIG_STMMAC_PCI */
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 70966330f44eca825d9456f5cea6723328c5e71b..51b3b68528ee8a429385f5083355125f2b4260c4 100644
@@ -833,8 +833,9 @@ static u32 stmmac_get_synopsys_id(struct stmmac_priv *priv)
 
 /**
  * stmmac_selec_desc_mode
- * @dev : device pointer
- * Description: select the Enhanced/Alternate or Normal descriptors */
+ * @priv : private structure
+ * Description: select the Enhanced/Alternate or Normal descriptors
+ */
 static void stmmac_selec_desc_mode(struct stmmac_priv *priv)
 {
        if (priv->plat->enh_desc) {
@@ -1861,6 +1862,8 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
 /**
  * stmmac_dvr_probe
  * @device: device pointer
+ * @plat_dat: platform data pointer
+ * @addr: iobase memory address
  * Description: this is the main probe function used to
  * call the alloc_etherdev, allocate the priv structure.
  */
@@ -2090,6 +2093,34 @@ int stmmac_restore(struct net_device *ndev)
 }
 #endif /* CONFIG_PM */
 
+/* Driver can be configured w/ and w/o both PCI and Platform drivers
+ * depending on the configuration selected.
+ */
+static int __init stmmac_init(void)
+{
+       int err_plt = 0;
+       int err_pci = 0;
+
+       err_plt = stmmac_register_platform();
+       err_pci = stmmac_register_pci();
+
+       if ((err_pci) && (err_plt)) {
+               pr_err("stmmac: driver registration failed\n");
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static void __exit stmmac_exit(void)
+{
+       stmmac_unregister_platform();
+       stmmac_unregister_pci();
+}
+
+module_init(stmmac_init);
+module_exit(stmmac_exit);
+
 #ifndef MODULE
 static int __init stmmac_cmdline_opt(char *str)
 {
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index 58fab5303e9cb45daaf4c134900dbb4ea47b60d8..cf826e6b6aa1d21eee5703d1b7edbcaef7b8e1c2 100644
@@ -179,7 +179,7 @@ static DEFINE_PCI_DEVICE_TABLE(stmmac_id_table) = {
 
 MODULE_DEVICE_TABLE(pci, stmmac_id_table);
 
-static struct pci_driver stmmac_driver = {
+struct pci_driver stmmac_pci_driver = {
        .name = STMMAC_RESOURCE_NAME,
        .id_table = stmmac_id_table,
        .probe = stmmac_pci_probe,
@@ -190,33 +190,6 @@ static struct pci_driver stmmac_driver = {
 #endif
 };
 
-/**
- * stmmac_init_module - Entry point for the driver
- * Description: This function is the entry point for the driver.
- */
-static int __init stmmac_init_module(void)
-{
-       int ret;
-
-       ret = pci_register_driver(&stmmac_driver);
-       if (ret < 0)
-               pr_err("%s: ERROR: driver registration failed\n", __func__);
-
-       return ret;
-}
-
-/**
- * stmmac_cleanup_module - Cleanup routine for the driver
- * Description: This function is the cleanup routine for the driver.
- */
-static void __exit stmmac_cleanup_module(void)
-{
-       pci_unregister_driver(&stmmac_driver);
-}
-
-module_init(stmmac_init_module);
-module_exit(stmmac_cleanup_module);
-
 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet PCI driver");
 MODULE_AUTHOR("Rayagond Kokatanur <rayagond.kokatanur@vayavyalabs.com>");
 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 3dd8f08038086896121e9e0031b7ea5af32778b1..680d2b8dfe27990852849744426e115b27e41e4f 100644
@@ -255,7 +255,7 @@ static const struct of_device_id stmmac_dt_ids[] = {
 };
 MODULE_DEVICE_TABLE(of, stmmac_dt_ids);
 
-static struct platform_driver stmmac_driver = {
+struct platform_driver stmmac_pltfr_driver = {
        .probe = stmmac_pltfr_probe,
        .remove = stmmac_pltfr_remove,
        .driver = {
@@ -266,8 +266,6 @@ static struct platform_driver stmmac_driver = {
                   },
 };
 
-module_platform_driver(stmmac_driver);
-
 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet PLATFORM driver");
 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
 MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index 703c8cce2a2cfae470546a3043fa69dc676ea84f..8c726b7004d32a3ab1c8ba3c677135e82b2e07dc 100644
@@ -3598,7 +3598,6 @@ static int release_tx_packet(struct niu *np, struct tx_ring_info *rp, int idx)
 static void niu_tx_work(struct niu *np, struct tx_ring_info *rp)
 {
        struct netdev_queue *txq;
-       unsigned int tx_bytes;
        u16 pkt_cnt, tmp;
        int cons, index;
        u64 cs;
@@ -3621,18 +3620,12 @@ static void niu_tx_work(struct niu *np, struct tx_ring_info *rp)
        netif_printk(np, tx_done, KERN_DEBUG, np->dev,
                     "%s() pkt_cnt[%u] cons[%d]\n", __func__, pkt_cnt, cons);
 
-       tx_bytes = 0;
-       tmp = pkt_cnt;
-       while (tmp--) {
-               tx_bytes += rp->tx_buffs[cons].skb->len;
+       while (pkt_cnt--)
                cons = release_tx_packet(np, rp, cons);
-       }
 
        rp->cons = cons;
        smp_mb();
 
-       netdev_tx_completed_queue(txq, pkt_cnt, tx_bytes);
-
 out:
        if (unlikely(netif_tx_queue_stopped(txq) &&
                     (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)))) {
@@ -4333,7 +4326,6 @@ static void niu_free_channels(struct niu *np)
                        struct tx_ring_info *rp = &np->tx_rings[i];
 
                        niu_free_tx_ring_info(np, rp);
-                       netdev_tx_reset_queue(netdev_get_tx_queue(np->dev, i));
                }
                kfree(np->tx_rings);
                np->tx_rings = NULL;
@@ -6739,8 +6731,6 @@ static netdev_tx_t niu_start_xmit(struct sk_buff *skb,
                prod = NEXT_TX(rp, prod);
        }
 
-       netdev_tx_sent_queue(txq, skb->len);
-
        if (prod < rp->prod)
                rp->wrap_bit ^= TX_RING_KICK_WRAP;
        rp->prod = prod;
diff --git a/drivers/net/ethernet/tile/Kconfig b/drivers/net/ethernet/tile/Kconfig
index 2d9218f86bca7fdbb71e61515efc0974b28fa6ad..098b1c42b39368faef868e50fdbb3174a6ccf8d9 100644
@@ -7,6 +7,8 @@ config TILE_NET
        depends on TILE
        default y
        select CRC32
+       select TILE_GXIO_MPIPE if TILEGX
+       select HIGH_RES_TIMERS if TILEGX
        ---help---
          This is a standard Linux network device driver for the
          on-chip Tilera Gigabit Ethernet and XAUI interfaces.
diff --git a/drivers/net/ethernet/tile/Makefile b/drivers/net/ethernet/tile/Makefile
index f634f142cab417b48e846de6a464032ee858733c..0ef9eefd32110560be6a51c141154aed71e758cc 100644
@@ -4,7 +4,7 @@
 
 obj-$(CONFIG_TILE_NET) += tile_net.o
 ifdef CONFIG_TILEGX
-tile_net-objs := tilegx.o mpipe.o iorpc_mpipe.o dma_queue.o
+tile_net-y := tilegx.o
 else
-tile_net-objs := tilepro.o
+tile_net-y := tilepro.o
 endif
diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c
new file mode 100644
index 0000000..83b4b38
--- /dev/null
+++ b/drivers/net/ethernet/tile/tilegx.c
@@ -0,0 +1,1898 @@
+/*
+ * Copyright 2012 Tilera Corporation. All Rights Reserved.
+ *
+ *   This program is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License
+ *   as published by the Free Software Foundation, version 2.
+ *
+ *   This program is distributed in the hope that it will be useful, but
+ *   WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ *   NON INFRINGEMENT.  See the GNU General Public License for
+ *   more details.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/moduleparam.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>      /* printk() */
+#include <linux/slab.h>        /* kmalloc() */
+#include <linux/errno.h>       /* error codes */
+#include <linux/types.h>       /* size_t */
+#include <linux/interrupt.h>
+#include <linux/in.h>
+#include <linux/irq.h>
+#include <linux/netdevice.h>   /* struct device, and other headers */
+#include <linux/etherdevice.h> /* eth_type_trans */
+#include <linux/skbuff.h>
+#include <linux/ioctl.h>
+#include <linux/cdev.h>
+#include <linux/hugetlb.h>
+#include <linux/in6.h>
+#include <linux/timer.h>
+#include <linux/hrtimer.h>
+#include <linux/ktime.h>
+#include <linux/io.h>
+#include <linux/ctype.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+
+#include <asm/checksum.h>
+#include <asm/homecache.h>
+#include <gxio/mpipe.h>
+#include <arch/sim.h>
+
+/* Default transmit lockup timeout period, in jiffies. */
+#define TILE_NET_TIMEOUT (5 * HZ)
+
+/* The maximum number of distinct channels (idesc.channel is 5 bits). */
+#define TILE_NET_CHANNELS 32
+
+/* Maximum number of idescs to handle per "poll". */
+#define TILE_NET_BATCH 128
+
+/* Maximum number of packets to handle per "poll". */
+#define TILE_NET_WEIGHT 64
+
+/* Number of entries in each iqueue. */
+#define IQUEUE_ENTRIES 512
+
+/* Number of entries in each equeue. */
+#define EQUEUE_ENTRIES 2048
+
+/* Total header bytes per equeue slot.  Must be big enough for 2 bytes
+ * of NET_IP_ALIGN alignment, plus 14 bytes (?) of L2 header, plus up to
+ * 60 bytes of actual TCP header.  We round up to align to cache lines.
+ */
+#define HEADER_BYTES 128
+
+/* Maximum completions per cpu per device (must be a power of two).
+ * ISSUE: What is the right number here?  If this is too small, then
+ * egress might block waiting for free space in a completions array.
+ * ISSUE: At the least, allocate these only for initialized echannels.
+ */
+#define TILE_NET_MAX_COMPS 64
+
+#define MAX_FRAGS (MAX_SKB_FRAGS + 1)
+
+/* Size of completions data to allocate.
+ * ISSUE: Probably more than needed since we don't use all the channels.
+ */
+#define COMPS_SIZE (TILE_NET_CHANNELS * sizeof(struct tile_net_comps))
+
+/* Size of NotifRing data to allocate. */
+#define NOTIF_RING_SIZE (IQUEUE_ENTRIES * sizeof(gxio_mpipe_idesc_t))
+
+/* Timeout to wake the per-device TX timer after we stop the queue.
+ * We don't want the timeout too short (adds overhead, and might end
+ * up causing stop/wake/stop/wake cycles) or too long (affects performance).
+ * For the 10 Gb NIC, 30 usec means roughly 30+ 1500-byte packets.
+ */
+#define TX_TIMER_DELAY_USEC 30
+
+/* Timeout to wake the per-cpu egress timer to free completions. */
+#define EGRESS_TIMER_DELAY_USEC 1000
+
+MODULE_AUTHOR("Tilera Corporation");
+MODULE_LICENSE("GPL");
+
+/* A "packet fragment" (a chunk of memory). */
+struct frag {
+       void *buf;
+       size_t length;
+};
+
+/* A single completion. */
+struct tile_net_comp {
+       /* The "complete_count" when the completion will be complete. */
+       s64 when;
+       /* The buffer to be freed when the completion is complete. */
+       struct sk_buff *skb;
+};
+
+/* The completions for a given cpu and echannel. */
+struct tile_net_comps {
+       /* The completions. */
+       struct tile_net_comp comp_queue[TILE_NET_MAX_COMPS];
+       /* The number of completions used. */
+       unsigned long comp_next;
+       /* The number of completions freed. */
+       unsigned long comp_last;
+};
+
+/* The transmit wake timer for a given cpu and echannel. */
+struct tile_net_tx_wake {
+       struct hrtimer timer;
+       struct net_device *dev;
+};
+
+/* Info for a specific cpu. */
+struct tile_net_info {
+       /* The NAPI struct. */
+       struct napi_struct napi;
+       /* Packet queue. */
+       gxio_mpipe_iqueue_t iqueue;
+       /* Our cpu. */
+       int my_cpu;
+       /* True if iqueue is valid. */
+       bool has_iqueue;
+       /* NAPI flags. */
+       bool napi_added;
+       bool napi_enabled;
+       /* Number of small sk_buffs which must still be provided. */
+       unsigned int num_needed_small_buffers;
+       /* Number of large sk_buffs which must still be provided. */
+       unsigned int num_needed_large_buffers;
+       /* A timer for handling egress completions. */
+       struct hrtimer egress_timer;
+       /* True if "egress_timer" is scheduled. */
+       bool egress_timer_scheduled;
+       /* Comps for each egress channel. */
+       struct tile_net_comps *comps_for_echannel[TILE_NET_CHANNELS];
+       /* Transmit wake timer for each egress channel. */
+       struct tile_net_tx_wake tx_wake[TILE_NET_CHANNELS];
+};
+
+/* Info for egress on a particular egress channel. */
+struct tile_net_egress {
+       /* The "equeue". */
+       gxio_mpipe_equeue_t *equeue;
+       /* The headers for TSO. */
+       unsigned char *headers;
+};
+
+/* Info for a specific device. */
+struct tile_net_priv {
+       /* Our network device. */
+       struct net_device *dev;
+       /* The primary link. */
+       gxio_mpipe_link_t link;
+       /* The primary channel, if open, else -1. */
+       int channel;
+       /* The "loopify" egress link, if needed. */
+       gxio_mpipe_link_t loopify_link;
+       /* The "loopify" egress channel, if open, else -1. */
+       int loopify_channel;
+       /* The egress channel (channel or loopify_channel). */
+       int echannel;
+       /* Total stats. */
+       struct net_device_stats stats;
+};
+
+/* Egress info, indexed by "priv->echannel" (lazily created as needed). */
+static struct tile_net_egress egress_for_echannel[TILE_NET_CHANNELS];
+
+/* Devices currently associated with each channel.
+ * NOTE: The array entry can become NULL after ifconfig down, but
+ * we do not free the underlying net_device structures, so it is
+ * safe to use a pointer after reading it from this array.
+ */
+static struct net_device *tile_net_devs_for_channel[TILE_NET_CHANNELS];
+
+/* A mutex for "tile_net_devs_for_channel". */
+static DEFINE_MUTEX(tile_net_devs_for_channel_mutex);
+
+/* The per-cpu info. */
+static DEFINE_PER_CPU(struct tile_net_info, per_cpu_info);
+
+/* The "context" for all devices. */
+static gxio_mpipe_context_t context;
+
+/* Buffer sizes and mpipe enum codes for buffer stacks.
+ * See arch/tile/include/gxio/mpipe.h for the set of possible values.
+ */
+#define BUFFER_SIZE_SMALL_ENUM GXIO_MPIPE_BUFFER_SIZE_128
+#define BUFFER_SIZE_SMALL 128
+#define BUFFER_SIZE_LARGE_ENUM GXIO_MPIPE_BUFFER_SIZE_1664
+#define BUFFER_SIZE_LARGE 1664
+
+/* The small/large "buffer stacks". */
+static int small_buffer_stack = -1;
+static int large_buffer_stack = -1;
+
+/* Amount of memory allocated for each buffer stack. */
+static size_t buffer_stack_size;
+
+/* The actual memory allocated for the buffer stacks. */
+static void *small_buffer_stack_va;
+static void *large_buffer_stack_va;
+
+/* The buckets. */
+static int first_bucket = -1;
+static int num_buckets = 1;
+
+/* The ingress irq. */
+static int ingress_irq = -1;
+
+/* Text value of tile_net.cpus if passed as a module parameter. */
+static char *network_cpus_string;
+
+/* The actual cpus in "network_cpus". */
+static struct cpumask network_cpus_map;
+
+/* If "loopify=LINK" was specified, this is "LINK". */
+static char *loopify_link_name;
+
+/* If "tile_net.custom" was specified, this is non-NULL. */
+static char *custom_str;
+
+/* The "tile_net.cpus" argument specifies the cpus that are dedicated
+ * to handle ingress packets.
+ *
+ * The parameter should be in the form "tile_net.cpus=m-n[,x-y]", where
+ * m, n, x, y are integer numbers that represent the cpus that can be
+ * neither a dedicated cpu nor a dataplane cpu.
+ */
+static bool network_cpus_init(void)
+{
+       char buf[1024];
+       int rc;
+
+       if (network_cpus_string == NULL)
+               return false;
+
+       rc = cpulist_parse_crop(network_cpus_string, &network_cpus_map);
+       if (rc != 0) {
+               pr_warn("tile_net.cpus=%s: malformed cpu list\n",
+                       network_cpus_string);
+               return false;
+       }
+
+       /* Remove dedicated cpus. */
+       cpumask_and(&network_cpus_map, &network_cpus_map, cpu_possible_mask);
+
+       if (cpumask_empty(&network_cpus_map)) {
+               pr_warn("Ignoring empty tile_net.cpus='%s'.\n",
+                       network_cpus_string);
+               return false;
+       }
+
+       cpulist_scnprintf(buf, sizeof(buf), &network_cpus_map);
+       pr_info("Linux network CPUs: %s\n", buf);
+       return true;
+}
+
+module_param_named(cpus, network_cpus_string, charp, 0444);
+MODULE_PARM_DESC(cpus, "cpulist of cores that handle network interrupts");
+
+/* The "tile_net.loopify=LINK" argument causes the named device to
+ * actually use "loop0" for ingress, and "loop1" for egress.  This
+ * allows an app to sit between the actual link and linux, passing
+ * (some) packets along to linux, and forwarding (some) packets sent
+ * out by linux.
+ */
+module_param_named(loopify, loopify_link_name, charp, 0444);
+MODULE_PARM_DESC(loopify, "name the device to use loop0/1 for ingress/egress");
+
+/* The "tile_net.custom" argument causes us to ignore the "conventional"
+ * classifier metadata, in particular, the "l2_offset".
+ */
+module_param_named(custom, custom_str, charp, 0444);
+MODULE_PARM_DESC(custom, "indicates a (heavily) customized classifier");
+
+/* Atomically update a statistics field.
+ * Note that on TILE-Gx, this operation is fire-and-forget on the
+ * issuing core (single-cycle dispatch) and takes only a few cycles
+ * longer than a regular store when the request reaches the home cache.
+ * No expensive bus management overhead is required.
+ */
+static void tile_net_stats_add(unsigned long value, unsigned long *field)
+{
+       BUILD_BUG_ON(sizeof(atomic_long_t) != sizeof(unsigned long));
+       atomic_long_add(value, (atomic_long_t *)field);
+}
+
+/* Allocate and push a buffer. */
+static bool tile_net_provide_buffer(bool small)
+{
+       int stack = small ? small_buffer_stack : large_buffer_stack;
+       const unsigned long buffer_alignment = 128;
+       struct sk_buff *skb;
+       int len;
+
+       len = sizeof(struct sk_buff **) + buffer_alignment;
+       len += (small ? BUFFER_SIZE_SMALL : BUFFER_SIZE_LARGE);
+       skb = dev_alloc_skb(len);
+       if (skb == NULL)
+               return false;
+
+       /* Make room for a back-pointer to 'skb' and guarantee alignment. */
+       skb_reserve(skb, sizeof(struct sk_buff **));
+       skb_reserve(skb, -(long)skb->data & (buffer_alignment - 1));
+
+       /* Save a back-pointer to 'skb'. */
+       *(struct sk_buff **)(skb->data - sizeof(struct sk_buff **)) = skb;
+
+       /* Make sure "skb" and the back-pointer have been flushed. */
+       wmb();
+
+       gxio_mpipe_push_buffer(&context, stack,
+                              (void *)va_to_tile_io_addr(skb->data));
+
+       return true;
+}
+
+/* Convert a raw mpipe buffer to its matching skb pointer. */
+static struct sk_buff *mpipe_buf_to_skb(void *va)
+{
+       /* Acquire the associated "skb". */
+       struct sk_buff **skb_ptr = va - sizeof(*skb_ptr);
+       struct sk_buff *skb = *skb_ptr;
+
+       /* Paranoia. */
+       if (skb->data != va) {
+               /* Panic here since there's a reasonable chance
+                * that corrupt buffers means generic memory
+                * corruption, with unpredictable system effects.
+                */
+               panic("Corrupt linux buffer! va=%p, skb=%p, skb->data=%p",
+                     va, skb, skb->data);
+       }
+
+       return skb;
+}
+
+static void tile_net_pop_all_buffers(int stack)
+{
+       for (;;) {
+               tile_io_addr_t addr =
+                       (tile_io_addr_t)gxio_mpipe_pop_buffer(&context, stack);
+               if (addr == 0)
+                       break;
+               dev_kfree_skb_irq(mpipe_buf_to_skb(tile_io_addr_to_va(addr)));
+       }
+}
+
+/* Provide linux buffers to mPIPE. */
+static void tile_net_provide_needed_buffers(void)
+{
+       struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
+
+       while (info->num_needed_small_buffers != 0) {
+               if (!tile_net_provide_buffer(true))
+                       goto oops;
+               info->num_needed_small_buffers--;
+       }
+
+       while (info->num_needed_large_buffers != 0) {
+               if (!tile_net_provide_buffer(false))
+                       goto oops;
+               info->num_needed_large_buffers--;
+       }
+
+       return;
+
+oops:
+       /* Add a description to the page allocation failure dump. */
+       pr_notice("Tile %d still needs some buffers\n", info->my_cpu);
+}
+
+static inline bool filter_packet(struct net_device *dev, void *buf)
+{
+       /* Filter packets received before we're up. */
+       if (dev == NULL || !(dev->flags & IFF_UP))
+               return true;
+
+       /* Filter out packets that aren't for us. */
+       if (!(dev->flags & IFF_PROMISC) &&
+           !is_multicast_ether_addr(buf) &&
+           compare_ether_addr(dev->dev_addr, buf) != 0)
+               return true;
+
+       return false;
+}
+
+static void tile_net_receive_skb(struct net_device *dev, struct sk_buff *skb,
+                                gxio_mpipe_idesc_t *idesc, unsigned long len)
+{
+       struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
+       struct tile_net_priv *priv = netdev_priv(dev);
+
+       /* Encode the actual packet length. */
+       skb_put(skb, len);
+
+       skb->protocol = eth_type_trans(skb, dev);
+
+       /* Acknowledge "good" hardware checksums. */
+       if (idesc->cs && idesc->csum_seed_val == 0xFFFF)
+               skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+       netif_receive_skb(skb);
+
+       /* Update stats. */
+       tile_net_stats_add(1, &priv->stats.rx_packets);
+       tile_net_stats_add(len, &priv->stats.rx_bytes);
+
+       /* Need a new buffer. */
+       if (idesc->size == BUFFER_SIZE_SMALL_ENUM)
+               info->num_needed_small_buffers++;
+       else
+               info->num_needed_large_buffers++;
+}
+
+/* Handle a packet.  Return true if "processed", false if "filtered". */
+static bool tile_net_handle_packet(gxio_mpipe_idesc_t *idesc)
+{
+       struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
+       struct net_device *dev = tile_net_devs_for_channel[idesc->channel];
+       uint8_t l2_offset;
+       void *va;
+       void *buf;
+       unsigned long len;
+       bool filter;
+
+       /* Drop packets for which no buffer was available.
+        * NOTE: This happens under heavy load.
+        */
+       if (idesc->be) {
+               struct tile_net_priv *priv = netdev_priv(dev);
+               tile_net_stats_add(1, &priv->stats.rx_dropped);
+               gxio_mpipe_iqueue_consume(&info->iqueue, idesc);
+               if (net_ratelimit())
+                       pr_info("Dropping packet (insufficient buffers).\n");
+               return false;
+       }
+
+       /* Get the "l2_offset", if allowed. */
+       l2_offset = custom_str ? 0 : gxio_mpipe_idesc_get_l2_offset(idesc);
+
+       /* Get the raw buffer VA (includes "headroom"). */
+       va = tile_io_addr_to_va((unsigned long)(long)idesc->va);
+
+       /* Get the actual packet start/length. */
+       buf = va + l2_offset;
+       len = idesc->l2_size - l2_offset;
+
+       /* Point "va" at the raw buffer. */
+       va -= NET_IP_ALIGN;
+
+       filter = filter_packet(dev, buf);
+       if (filter) {
+               gxio_mpipe_iqueue_drop(&info->iqueue, idesc);
+       } else {
+               struct sk_buff *skb = mpipe_buf_to_skb(va);
+
+               /* Skip headroom, and any custom header. */
+               skb_reserve(skb, NET_IP_ALIGN + l2_offset);
+
+               tile_net_receive_skb(dev, skb, idesc, len);
+       }
+
+       gxio_mpipe_iqueue_consume(&info->iqueue, idesc);
+       return !filter;
+}
+
+/* Handle some packets for the current CPU.
+ *
+ * This function handles up to TILE_NET_BATCH idescs per call.
+ *
+ * ISSUE: Since we do not provide new buffers until this function is
+ * complete, we must initially provide enough buffers for each network
+ * cpu to fill its iqueue and also its batched idescs.
+ *
+ * ISSUE: The "rotting packet" race condition occurs if a packet
+ * arrives after the queue appears to be empty, and before the
+ * hypervisor interrupt is re-enabled.
+ */
+static int tile_net_poll(struct napi_struct *napi, int budget)
+{
+       struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
+       unsigned int work = 0;
+       gxio_mpipe_idesc_t *idesc;
+       int i, n;
+
+       /* Process packets. */
+       while ((n = gxio_mpipe_iqueue_try_peek(&info->iqueue, &idesc)) > 0) {
+               for (i = 0; i < n; i++) {
+                       if (i == TILE_NET_BATCH)
+                               goto done;
+                       if (tile_net_handle_packet(idesc + i)) {
+                               if (++work >= budget)
+                                       goto done;
+                       }
+               }
+       }
+
+       /* There are no packets left. */
+       napi_complete(&info->napi);
+
+       /* Re-enable hypervisor interrupts. */
+       gxio_mpipe_enable_notif_ring_interrupt(&context, info->iqueue.ring);
+
+       /* HACK: Avoid the "rotting packet" problem. */
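+       /* (A packet that arrived before the interrupt was re-enabled
+        * raises no interrupt of its own; the extra peek catches it.)
+        */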
+       if (gxio_mpipe_iqueue_try_peek(&info->iqueue, &idesc) > 0)
+               napi_schedule(&info->napi);
+
+       /* ISSUE: Handle completions? */
+
+done:
+       tile_net_provide_needed_buffers();
+
+       return work;
+}
+
+/* Handle an ingress interrupt on the current cpu. */
+static irqreturn_t tile_net_handle_ingress_irq(int irq, void *unused)
+{
+       struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
+       napi_schedule(&info->napi);
+       return IRQ_HANDLED;
+}
+
+/* Free some completions.  This must be called with interrupts blocked. */
+static int tile_net_free_comps(gxio_mpipe_equeue_t *equeue,
+                               struct tile_net_comps *comps,
+                               int limit, bool force_update)
+{
+       int n = 0;
+       while (comps->comp_last < comps->comp_next) {
+               unsigned int cid = comps->comp_last % TILE_NET_MAX_COMPS;
+               struct tile_net_comp *comp = &comps->comp_queue[cid];
+               if (!gxio_mpipe_equeue_is_complete(equeue, comp->when,
+                                                  force_update || n == 0))
+                       break;
+               dev_kfree_skb_irq(comp->skb);
+               comps->comp_last++;
+               if (++n == limit)
+                       break;
+       }
+       return n;
+}
+
+/* Add a completion.  This must be called with interrupts blocked.
+ * tile_net_equeue_try_reserve() will have ensured a free completion entry.
+ */
+static void add_comp(gxio_mpipe_equeue_t *equeue,
+                    struct tile_net_comps *comps,
+                    uint64_t when, struct sk_buff *skb)
+{
+       int cid = comps->comp_next % TILE_NET_MAX_COMPS;
+       comps->comp_queue[cid].when = when;
+       comps->comp_queue[cid].skb = skb;
+       comps->comp_next++;
+}
+
+static void tile_net_schedule_tx_wake_timer(struct net_device *dev)
+{
+       struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
+       struct tile_net_priv *priv = netdev_priv(dev);
+
+       hrtimer_start(&info->tx_wake[priv->echannel].timer,
+                     ktime_set(0, TX_TIMER_DELAY_USEC * 1000UL),
+                     HRTIMER_MODE_REL_PINNED);
+}
+
+static enum hrtimer_restart tile_net_handle_tx_wake_timer(struct hrtimer *t)
+{
+       struct tile_net_tx_wake *tx_wake =
+               container_of(t, struct tile_net_tx_wake, timer);
+       netif_wake_subqueue(tx_wake->dev, smp_processor_id());
+       return HRTIMER_NORESTART;
+}
+
+/* Make sure the egress timer is scheduled. */
+static void tile_net_schedule_egress_timer(void)
+{
+       struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
+
+       if (!info->egress_timer_scheduled) {
+               hrtimer_start(&info->egress_timer,
+                             ktime_set(0, EGRESS_TIMER_DELAY_USEC * 1000UL),
+                             HRTIMER_MODE_REL_PINNED);
+               info->egress_timer_scheduled = true;
+       }
+}
+
+/* The "function" for "info->egress_timer".
+ *
+ * This timer will reschedule itself as long as there are any pending
+ * completions expected for this tile.
+ */
+static enum hrtimer_restart tile_net_handle_egress_timer(struct hrtimer *t)
+{
+       struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
+       unsigned long irqflags;
+       bool pending = false;
+       int i;
+
+       local_irq_save(irqflags);
+
+       /* The timer is no longer scheduled. */
+       info->egress_timer_scheduled = false;
+
+       /* Free all possible comps for this tile. */
+       for (i = 0; i < TILE_NET_CHANNELS; i++) {
+               struct tile_net_egress *egress = &egress_for_echannel[i];
+               struct tile_net_comps *comps = info->comps_for_echannel[i];
+               if (comps->comp_last >= comps->comp_next)
+                       continue;
+               tile_net_free_comps(egress->equeue, comps, -1, true);
+               pending = pending || (comps->comp_last < comps->comp_next);
+       }
+
+       /* Reschedule timer if needed. */
+       if (pending)
+               tile_net_schedule_egress_timer();
+
+       local_irq_restore(irqflags);
+
+       return HRTIMER_NORESTART;
+}
+
+/* Helper function for "tile_net_update()".
+ * "dev" (i.e. arg) is the device being brought up or down,
+ * or NULL if all devices are now down.
+ */
+static void tile_net_update_cpu(void *arg)
+{
+       struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
+       struct net_device *dev = arg;
+
+       if (!info->has_iqueue)
+               return;
+
+       if (dev != NULL) {
+               if (!info->napi_added) {
+                       netif_napi_add(dev, &info->napi,
+                                      tile_net_poll, TILE_NET_WEIGHT);
+                       info->napi_added = true;
+               }
+               if (!info->napi_enabled) {
+                       napi_enable(&info->napi);
+                       info->napi_enabled = true;
+               }
+               enable_percpu_irq(ingress_irq, 0);
+       } else {
+               disable_percpu_irq(ingress_irq);
+               if (info->napi_enabled) {
+                       napi_disable(&info->napi);
+                       info->napi_enabled = false;
+               }
+               /* FIXME: Drain the iqueue. */
+       }
+}
+
+/* Helper function for tile_net_open() and tile_net_stop().
+ * Always called under tile_net_devs_for_channel_mutex.
+ */
+static int tile_net_update(struct net_device *dev)
+{
+       static gxio_mpipe_rules_t rules;  /* too big to fit on the stack */
+       bool saw_channel = false;
+       int channel;
+       int rc;
+       int cpu;
+
+       gxio_mpipe_rules_init(&rules, &context);
+
+       for (channel = 0; channel < TILE_NET_CHANNELS; channel++) {
+               if (tile_net_devs_for_channel[channel] == NULL)
+                       continue;
+               if (!saw_channel) {
+                       saw_channel = true;
+                       gxio_mpipe_rules_begin(&rules, first_bucket,
+                                              num_buckets, NULL);
+                       gxio_mpipe_rules_set_headroom(&rules, NET_IP_ALIGN);
+               }
+               gxio_mpipe_rules_add_channel(&rules, channel);
+       }
+
+       /* NOTE: This can fail if there is no classifier.
+        * ISSUE: Can anything else cause it to fail?
+        */
+       rc = gxio_mpipe_rules_commit(&rules);
+       if (rc != 0) {
+               netdev_warn(dev, "gxio_mpipe_rules_commit failed: %d\n", rc);
+               return -EIO;
+       }
+
+       /* Update all cpus, sequentially (to protect "netif_napi_add()"). */
+       for_each_online_cpu(cpu)
+               smp_call_function_single(cpu, tile_net_update_cpu,
+                                        (saw_channel ? dev : NULL), 1);
+
+       /* HACK: Allow packets to flow in the simulator. */
+       if (saw_channel)
+               sim_enable_mpipe_links(0, -1);
+
+       return 0;
+}
+
+/* Allocate and initialize mpipe buffer stacks, and register them in
+ * the mPIPE TLBs, for both small and large packet sizes.
+ * This routine supports tile_net_init_mpipe(), below.
+ */
+static int init_buffer_stacks(struct net_device *dev, int num_buffers)
+{
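+       /* A pseudo-PTE whose home is the "hash-for-home" cache; it is
+        * passed to gxio_mpipe_register_client_memory() below so mPIPE
+        * can access the buffer stack memory.
+        */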
+       pte_t hash_pte = pte_set_home((pte_t) { 0 }, PAGE_HOME_HASH);
+       int rc;
+
+       /* Compute stack bytes; we round up to 64KB and then use
+        * alloc_pages() so we get the required 64KB alignment as well.
+        */
+       buffer_stack_size =
+               ALIGN(gxio_mpipe_calc_buffer_stack_bytes(num_buffers),
+                     64 * 1024);
+
+       /* Allocate two buffer stack indices. */
+       rc = gxio_mpipe_alloc_buffer_stacks(&context, 2, 0, 0);
+       if (rc < 0) {
+               netdev_err(dev, "gxio_mpipe_alloc_buffer_stacks failed: %d\n",
+                          rc);
+               return rc;
+       }
+       small_buffer_stack = rc;
+       large_buffer_stack = rc + 1;
+
+       /* Allocate the small buffer stack. */
+       small_buffer_stack_va =
+               alloc_pages_exact(buffer_stack_size, GFP_KERNEL);
+       if (small_buffer_stack_va == NULL) {
+               netdev_err(dev,
+                          "Could not alloc %zd bytes for buffer stacks\n",
+                          buffer_stack_size);
+               return -ENOMEM;
+       }
+       rc = gxio_mpipe_init_buffer_stack(&context, small_buffer_stack,
+                                         BUFFER_SIZE_SMALL_ENUM,
+                                         small_buffer_stack_va,
+                                         buffer_stack_size, 0);
+       if (rc != 0) {
+               netdev_err(dev, "gxio_mpipe_init_buffer_stack: %d\n", rc);
+               return rc;
+       }
+       rc = gxio_mpipe_register_client_memory(&context, small_buffer_stack,
+                                              hash_pte, 0);
+       if (rc != 0) {
+               netdev_err(dev,
+                          "gxio_mpipe_register_buffer_memory failed: %d\n",
+                          rc);
+               return rc;
+       }
+
+       /* Allocate the large buffer stack. */
+       large_buffer_stack_va =
+               alloc_pages_exact(buffer_stack_size, GFP_KERNEL);
+       if (large_buffer_stack_va == NULL) {
+               netdev_err(dev,
+                          "Could not alloc %zd bytes for buffer stacks\n",
+                          buffer_stack_size);
+               return -ENOMEM;
+       }
+       rc = gxio_mpipe_init_buffer_stack(&context, large_buffer_stack,
+                                         BUFFER_SIZE_LARGE_ENUM,
+                                         large_buffer_stack_va,
+                                         buffer_stack_size, 0);
+       if (rc != 0) {
+               netdev_err(dev, "gxio_mpipe_init_buffer_stack failed: %d\n",
+                          rc);
+               return rc;
+       }
+       rc = gxio_mpipe_register_client_memory(&context, large_buffer_stack,
+                                              hash_pte, 0);
+       if (rc != 0) {
+               netdev_err(dev,
+                          "gxio_mpipe_register_buffer_memory failed: %d\n",
+                          rc);
+               return rc;
+       }
+
+       return 0;
+}
+
+/* Allocate per-cpu resources (memory for completions and idescs).
+ * This routine supports tile_net_init_mpipe(), below.
+ */
+static int alloc_percpu_mpipe_resources(struct net_device *dev,
+                                       int cpu, int ring)
+{
+       struct tile_net_info *info = &per_cpu(per_cpu_info, cpu);
+       int order, i, rc;
+       struct page *page;
+       void *addr;
+
+       /* Allocate the "comps". */
+       order = get_order(COMPS_SIZE);
+       page = homecache_alloc_pages(GFP_KERNEL, order, cpu);
+       if (page == NULL) {
+               netdev_err(dev, "Failed to alloc %zd bytes comps memory\n",
+                          COMPS_SIZE);
+               return -ENOMEM;
+       }
+       addr = pfn_to_kaddr(page_to_pfn(page));
+       memset(addr, 0, COMPS_SIZE);
+       for (i = 0; i < TILE_NET_CHANNELS; i++)
+               info->comps_for_echannel[i] =
+                       addr + i * sizeof(struct tile_net_comps);
+
+       /* If this is a network cpu, create an iqueue. */
+       if (cpu_isset(cpu, network_cpus_map)) {
+               order = get_order(NOTIF_RING_SIZE);
+               page = homecache_alloc_pages(GFP_KERNEL, order, cpu);
+               if (page == NULL) {
+                       netdev_err(dev,
+                                  "Failed to alloc %zd bytes iqueue memory\n",
+                                  NOTIF_RING_SIZE);
+                       return -ENOMEM;
+               }
+               addr = pfn_to_kaddr(page_to_pfn(page));
+               rc = gxio_mpipe_iqueue_init(&info->iqueue, &context, ring++,
+                                           addr, NOTIF_RING_SIZE, 0);
+               if (rc < 0) {
+                       netdev_err(dev,
+                                  "gxio_mpipe_iqueue_init failed: %d\n", rc);
+                       return rc;
+               }
+               info->has_iqueue = true;
+       }
+
+       return ring;
+}
+
+/* Initialize NotifGroup and buckets.
+ * This routine supports tile_net_init_mpipe(), below.
+ */
+static int init_notif_group_and_buckets(struct net_device *dev,
+                                       int ring, int network_cpus_count)
+{
+       int group, rc;
+
+       /* Allocate one NotifGroup. */
+       rc = gxio_mpipe_alloc_notif_groups(&context, 1, 0, 0);
+       if (rc < 0) {
+               netdev_err(dev, "gxio_mpipe_alloc_notif_groups failed: %d\n",
+                          rc);
+               return rc;
+       }
+       group = rc;
+
+       /* Initialize the global num_buckets value; using more buckets
+        * lets mPIPE spread flows across more NotifRings (and cpus).
+        */
+       if (network_cpus_count > 4)
+               num_buckets = 256;
+       else if (network_cpus_count > 1)
+               num_buckets = 16;
+
+       /* Allocate some buckets, and set global first_bucket value. */
+       rc = gxio_mpipe_alloc_buckets(&context, num_buckets, 0, 0);
+       if (rc < 0) {
+               netdev_err(dev, "gxio_mpipe_alloc_buckets failed: %d\n", rc);
+               return rc;
+       }
+       first_bucket = rc;
+
+       /* Init group and buckets. */
+       rc = gxio_mpipe_init_notif_group_and_buckets(
+               &context, group, ring, network_cpus_count,
+               first_bucket, num_buckets,
+               GXIO_MPIPE_BUCKET_STICKY_FLOW_LOCALITY);
+       if (rc != 0) {
+               netdev_err(dev,
+                          "gxio_mpipe_init_notif_group_and_buckets failed: %d\n",
+                          rc);
+               return rc;
+       }
+
+       return 0;
+}
+
+/* Create an irq and register it, then activate the irq and request
+ * interrupts on all cores.  Note that "ingress_irq" being initialized
+ * is how we know not to call tile_net_init_mpipe() again.
+ * This routine supports tile_net_init_mpipe(), below.
+ */
+static int tile_net_setup_interrupts(struct net_device *dev)
+{
+       int cpu, rc;
+
+       rc = create_irq();
+       if (rc < 0) {
+               netdev_err(dev, "create_irq failed: %d\n", rc);
+               return rc;
+       }
+       ingress_irq = rc;
+       tile_irq_activate(ingress_irq, TILE_IRQ_PERCPU);
+       rc = request_irq(ingress_irq, tile_net_handle_ingress_irq,
+                        0, NULL, NULL);
+       if (rc != 0) {
+               netdev_err(dev, "request_irq failed: %d\n", rc);
+               destroy_irq(ingress_irq);
+               ingress_irq = -1;
+               return rc;
+       }
+
+       for_each_online_cpu(cpu) {
+               struct tile_net_info *info = &per_cpu(per_cpu_info, cpu);
+               if (info->has_iqueue) {
+                       gxio_mpipe_request_notif_ring_interrupt(
+                               &context, cpu_x(cpu), cpu_y(cpu),
+                               1, ingress_irq, info->iqueue.ring);
+               }
+       }
+
+       return 0;
+}
+
+/* Undo any state set up partially by a failed call to tile_net_init_mpipe. */
+static void tile_net_init_mpipe_fail(void)
+{
+       int cpu;
+
+       /* Do cleanups that require the mpipe context first. */
+       if (small_buffer_stack >= 0)
+               tile_net_pop_all_buffers(small_buffer_stack);
+       if (large_buffer_stack >= 0)
+               tile_net_pop_all_buffers(large_buffer_stack);
+
+       /* Destroy mpipe context so the hardware no longer owns any memory. */
+       gxio_mpipe_destroy(&context);
+
+       for_each_online_cpu(cpu) {
+               struct tile_net_info *info = &per_cpu(per_cpu_info, cpu);
+               free_pages((unsigned long)(info->comps_for_echannel[0]),
+                          get_order(COMPS_SIZE));
+               info->comps_for_echannel[0] = NULL;
+               free_pages((unsigned long)(info->iqueue.idescs),
+                          get_order(NOTIF_RING_SIZE));
+               info->iqueue.idescs = NULL;
+       }
+
+       if (small_buffer_stack_va)
+               free_pages_exact(small_buffer_stack_va, buffer_stack_size);
+       if (large_buffer_stack_va)
+               free_pages_exact(large_buffer_stack_va, buffer_stack_size);
+
+       small_buffer_stack_va = NULL;
+       large_buffer_stack_va = NULL;
+       large_buffer_stack = -1;
+       small_buffer_stack = -1;
+       first_bucket = -1;
+}
+
+/* The first time any tilegx network device is opened, we initialize
+ * the global mpipe state.  If this step fails, we fail to open the
+ * device, but if it succeeds, we never need to do it again, and since
+ * tile_net can't be unloaded, we never undo it.
+ *
+ * Note that some resources in this path (buffer stack indices,
+ * bindings from init_buffer_stack, etc.) are hypervisor resources
+ * that are freed implicitly by gxio_mpipe_destroy().
+ */
+static int tile_net_init_mpipe(struct net_device *dev)
+{
+       int i, num_buffers, rc;
+       int cpu;
+       int first_ring, ring;
+       int network_cpus_count = cpus_weight(network_cpus_map);
+
+       if (!hash_default) {
+               netdev_err(dev, "Networking requires hash_default!\n");
+               return -EIO;
+       }
+
+       rc = gxio_mpipe_init(&context, 0);
+       if (rc != 0) {
+               netdev_err(dev, "gxio_mpipe_init failed: %d\n", rc);
+               return -EIO;
+       }
+
+       /* Set up the buffer stacks. */
+       num_buffers =
+               network_cpus_count * (IQUEUE_ENTRIES + TILE_NET_BATCH);
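+       /* (Per the ISSUE note above tile_net_poll(): enough buffers
+        * for each network cpu to fill its iqueue and its batched
+        * idescs.)
+        */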
+       rc = init_buffer_stacks(dev, num_buffers);
+       if (rc != 0)
+               goto fail;
+
+       /* Provide initial buffers. */
+       rc = -ENOMEM;
+       for (i = 0; i < num_buffers; i++) {
+               if (!tile_net_provide_buffer(true)) {
+                       netdev_err(dev, "Cannot allocate initial sk_bufs!\n");
+                       goto fail;
+               }
+       }
+       for (i = 0; i < num_buffers; i++) {
+               if (!tile_net_provide_buffer(false)) {
+                       netdev_err(dev, "Cannot allocate initial sk_bufs!\n");
+                       goto fail;
+               }
+       }
+
+       /* Allocate one NotifRing for each network cpu. */
+       rc = gxio_mpipe_alloc_notif_rings(&context, network_cpus_count, 0, 0);
+       if (rc < 0) {
+               netdev_err(dev, "gxio_mpipe_alloc_notif_rings failed %d\n",
+                          rc);
+               goto fail;
+       }
+
+       /* Init NotifRings per-cpu. */
+       first_ring = rc;
+       ring = first_ring;
+       for_each_online_cpu(cpu) {
+               rc = alloc_percpu_mpipe_resources(dev, cpu, ring);
+               if (rc < 0)
+                       goto fail;
+               ring = rc;
+       }
+
+       /* Initialize NotifGroup and buckets. */
+       rc = init_notif_group_and_buckets(dev, first_ring, network_cpus_count);
+       if (rc != 0)
+               goto fail;
+
+       /* Create and enable interrupts. */
+       rc = tile_net_setup_interrupts(dev);
+       if (rc != 0)
+               goto fail;
+
+       return 0;
+
+fail:
+       tile_net_init_mpipe_fail();
+       return rc;
+}
+
+/* Create persistent egress info for a given egress channel.
+ * Note that this may be shared between, say, "gbe0" and "xgbe0".
+ * ISSUE: Defer header allocation until TSO is actually needed?
+ */
+static int tile_net_init_egress(struct net_device *dev, int echannel)
+{
+       struct page *headers_page, *edescs_page, *equeue_page;
+       gxio_mpipe_edesc_t *edescs;
+       gxio_mpipe_equeue_t *equeue;
+       unsigned char *headers;
+       int headers_order, edescs_order, equeue_order;
+       size_t edescs_size;
+       int edma;
+       int rc = -ENOMEM;
+
+       /* Only initialize once. */
+       if (egress_for_echannel[echannel].equeue != NULL)
+               return 0;
+
+       /* Allocate memory for the "headers". */
+       headers_order = get_order(EQUEUE_ENTRIES * HEADER_BYTES);
+       headers_page = alloc_pages(GFP_KERNEL, headers_order);
+       if (headers_page == NULL) {
+               netdev_warn(dev,
+                           "Could not alloc %zd bytes for TSO headers.\n",
+                           PAGE_SIZE << headers_order);
+               goto fail;
+       }
+       headers = pfn_to_kaddr(page_to_pfn(headers_page));
+
+       /* Allocate memory for the "edescs". */
+       edescs_size = EQUEUE_ENTRIES * sizeof(*edescs);
+       edescs_order = get_order(edescs_size);
+       edescs_page = alloc_pages(GFP_KERNEL, edescs_order);
+       if (edescs_page == NULL) {
+               netdev_warn(dev,
+                           "Could not alloc %zd bytes for eDMA ring.\n",
+                           edescs_size);
+               goto fail_headers;
+       }
+       edescs = pfn_to_kaddr(page_to_pfn(edescs_page));
+
+       /* Allocate memory for the "equeue". */
+       equeue_order = get_order(sizeof(*equeue));
+       equeue_page = alloc_pages(GFP_KERNEL, equeue_order);
+       if (equeue_page == NULL) {
+               netdev_warn(dev,
+                           "Could not alloc %zd bytes for equeue info.\n",
+                           PAGE_SIZE << equeue_order);
+               goto fail_edescs;
+       }
+       equeue = pfn_to_kaddr(page_to_pfn(equeue_page));
+
+       /* Allocate an edma ring.  Note that in practice this can't
+        * fail, which is good, because none of the failure paths below
+        * would free the ring, so it would simply leak.
+        */
+       rc = gxio_mpipe_alloc_edma_rings(&context, 1, 0, 0);
+       if (rc < 0) {
+               netdev_warn(dev, "gxio_mpipe_alloc_edma_rings failed: %d\n",
+                           rc);
+               goto fail_equeue;
+       }
+       edma = rc;
+
+       /* Initialize the equeue. */
+       rc = gxio_mpipe_equeue_init(equeue, &context, edma, echannel,
+                                   edescs, edescs_size, 0);
+       if (rc != 0) {
+               netdev_err(dev, "gxio_mpipe_equeue_init failed: %d\n", rc);
+               goto fail_equeue;
+       }
+
+       /* Done. */
+       egress_for_echannel[echannel].equeue = equeue;
+       egress_for_echannel[echannel].headers = headers;
+       return 0;
+
+fail_equeue:
+       __free_pages(equeue_page, equeue_order);
+
+fail_edescs:
+       __free_pages(edescs_page, edescs_order);
+
+fail_headers:
+       __free_pages(headers_page, headers_order);
+
+fail:
+       return rc;
+}
+
+/* Return channel number for a newly-opened link. */
+static int tile_net_link_open(struct net_device *dev, gxio_mpipe_link_t *link,
+                             const char *link_name)
+{
+       int rc = gxio_mpipe_link_open(link, &context, link_name, 0);
+       if (rc < 0) {
+               netdev_err(dev, "Failed to open '%s'\n", link_name);
+               return rc;
+       }
+       rc = gxio_mpipe_link_channel(link);
+       if (rc < 0 || rc >= TILE_NET_CHANNELS) {
+               netdev_err(dev, "gxio_mpipe_link_channel bad value: %d\n", rc);
+               gxio_mpipe_link_close(link);
+               return -EINVAL;
+       }
+       return rc;
+}
+
+/* Help the kernel activate the given network interface. */
+static int tile_net_open(struct net_device *dev)
+{
+       struct tile_net_priv *priv = netdev_priv(dev);
+       int cpu, rc;
+
+       mutex_lock(&tile_net_devs_for_channel_mutex);
+
+       /* Do one-time initialization the first time any device is opened. */
+       if (ingress_irq < 0) {
+               rc = tile_net_init_mpipe(dev);
+               if (rc != 0)
+                       goto fail;
+       }
+
+       /* Determine if this is the "loopify" device. */
+       if (unlikely((loopify_link_name != NULL) &&
+                    !strcmp(dev->name, loopify_link_name))) {
+               rc = tile_net_link_open(dev, &priv->link, "loop0");
+               if (rc < 0)
+                       goto fail;
+               priv->channel = rc;
+               rc = tile_net_link_open(dev, &priv->loopify_link, "loop1");
+               if (rc < 0)
+                       goto fail;
+               priv->loopify_channel = rc;
+               priv->echannel = rc;
+       } else {
+               rc = tile_net_link_open(dev, &priv->link, dev->name);
+               if (rc < 0)
+                       goto fail;
+               priv->channel = rc;
+               priv->echannel = rc;
+       }
+
+       /* Initialize egress info (if needed).  Once ever, per echannel. */
+       rc = tile_net_init_egress(dev, priv->echannel);
+       if (rc != 0)
+               goto fail;
+
+       tile_net_devs_for_channel[priv->channel] = dev;
+
+       rc = tile_net_update(dev);
+       if (rc != 0)
+               goto fail;
+
+       mutex_unlock(&tile_net_devs_for_channel_mutex);
+
+       /* Initialize the transmit wake timer for this device for each cpu. */
+       for_each_online_cpu(cpu) {
+               struct tile_net_info *info = &per_cpu(per_cpu_info, cpu);
+               struct tile_net_tx_wake *tx_wake =
+                       &info->tx_wake[priv->echannel];
+
+               hrtimer_init(&tx_wake->timer, CLOCK_MONOTONIC,
+                            HRTIMER_MODE_REL);
+               tx_wake->timer.function = tile_net_handle_tx_wake_timer;
+               tx_wake->dev = dev;
+       }
+
+       for_each_online_cpu(cpu)
+               netif_start_subqueue(dev, cpu);
+       netif_carrier_on(dev);
+       return 0;
+
+fail:
+       if (priv->loopify_channel >= 0) {
+               if (gxio_mpipe_link_close(&priv->loopify_link) != 0)
+                       netdev_warn(dev, "Failed to close loopify link!\n");
+               priv->loopify_channel = -1;
+       }
+       if (priv->channel >= 0) {
+               if (gxio_mpipe_link_close(&priv->link) != 0)
+                       netdev_warn(dev, "Failed to close link!\n");
+               /* Clear the channel table entry before forgetting the
+                * channel; indexing the table with -1 would write out
+                * of bounds.
+                */
+               tile_net_devs_for_channel[priv->channel] = NULL;
+               priv->channel = -1;
+       }
+       priv->echannel = -1;
+       mutex_unlock(&tile_net_devs_for_channel_mutex);
+
+       /* Don't return raw gxio error codes to generic Linux. */
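+       /* (Values at or below -512 are assumed to be gxio-specific
+        * codes with no errno equivalent, and are mapped to -EIO.)
+        */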
+       return (rc > -512) ? rc : -EIO;
+}
+
+/* Help the kernel deactivate the given network interface. */
+static int tile_net_stop(struct net_device *dev)
+{
+       struct tile_net_priv *priv = netdev_priv(dev);
+       int cpu;
+
+       for_each_online_cpu(cpu) {
+               struct tile_net_info *info = &per_cpu(per_cpu_info, cpu);
+               struct tile_net_tx_wake *tx_wake =
+                       &info->tx_wake[priv->echannel];
+
+               hrtimer_cancel(&tx_wake->timer);
+               netif_stop_subqueue(dev, cpu);
+       }
+
+       mutex_lock(&tile_net_devs_for_channel_mutex);
+       tile_net_devs_for_channel[priv->channel] = NULL;
+       (void)tile_net_update(dev);
+       if (priv->loopify_channel >= 0) {
+               if (gxio_mpipe_link_close(&priv->loopify_link) != 0)
+                       netdev_warn(dev, "Failed to close loopify link!\n");
+               priv->loopify_channel = -1;
+       }
+       if (priv->channel >= 0) {
+               if (gxio_mpipe_link_close(&priv->link) != 0)
+                       netdev_warn(dev, "Failed to close link!\n");
+               priv->channel = -1;
+       }
+       priv->echannel = -1;
+       mutex_unlock(&tile_net_devs_for_channel_mutex);
+
+       return 0;
+}
+
+/* Determine the VA for a fragment. */
+static inline void *tile_net_frag_buf(skb_frag_t *f)
+{
+       unsigned long pfn = page_to_pfn(skb_frag_page(f));
+       return pfn_to_kaddr(pfn) + f->page_offset;
+}
+
+/* Acquire a completion entry and an egress slot, or if we can't,
+ * stop the queue and schedule the tx_wake timer.
+ */
+static s64 tile_net_equeue_try_reserve(struct net_device *dev,
+                                      struct tile_net_comps *comps,
+                                      gxio_mpipe_equeue_t *equeue,
+                                      int num_edescs)
+{
+       /* Try to acquire a completion entry. */
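+       /* (There is room if fewer than TILE_NET_MAX_COMPS - 1 entries
+        * are outstanding; failing that, try to retire up to 32 old
+        * completions to make room.)
+        */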
+       if (comps->comp_next - comps->comp_last < TILE_NET_MAX_COMPS - 1 ||
+           tile_net_free_comps(equeue, comps, 32, false) != 0) {
+
+               /* Try to acquire an egress slot. */
+               s64 slot = gxio_mpipe_equeue_try_reserve(equeue, num_edescs);
+               if (slot >= 0)
+                       return slot;
+
+               /* Freeing some completions gives the equeue time to drain. */
+               tile_net_free_comps(equeue, comps, TILE_NET_MAX_COMPS, false);
+
+               slot = gxio_mpipe_equeue_try_reserve(equeue, num_edescs);
+               if (slot >= 0)
+                       return slot;
+       }
+
+       /* Still nothing; give up and stop the queue for a short while. */
+       netif_stop_subqueue(dev, smp_processor_id());
+       tile_net_schedule_tx_wake_timer(dev);
+       return -1;
+}
+
+/* Determine how many edescs are needed for TSO.
+ *
+ * Sometimes, if "sendfile()" requires copying, we will be called with
+ * "data" containing the header and payload, with "frags" being empty.
+ * Sometimes, for example when using NFS over TCP, a single segment can
+ * span 3 fragments.  This requires special care.
+ */
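+/* (Worked example with made-up numbers: 3000 bytes of payload split
+ * across two 1500-byte frags, with gso_size = 1000, yields three
+ * segments and seven edescs: one header edesc per segment plus 1, 2,
+ * and 1 payload edescs, since segment 1 straddles the two frags.)
+ */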
+static int tso_count_edescs(struct sk_buff *skb)
+{
+       struct skb_shared_info *sh = skb_shinfo(skb);
+       unsigned int data_len = skb->data_len;
+       unsigned int p_len = sh->gso_size;
+       long f_id = -1;    /* id of the current fragment */
+       long f_size = -1;  /* size of the current fragment */
+       long f_used = -1;  /* bytes used from the current fragment */
+       long n;            /* size of the current piece of payload */
+       int num_edescs = 0;
+       int segment;
+
+       for (segment = 0; segment < sh->gso_segs; segment++) {
+
+               unsigned int p_used = 0;
+
+               /* One edesc for header and for each piece of the payload. */
+               for (num_edescs++; p_used < p_len; num_edescs++) {
+
+                       /* Advance as needed. */
+                       while (f_used >= f_size) {
+                               f_id++;
+                               f_size = sh->frags[f_id].size;
+                               f_used = 0;
+                       }
+
+                       /* Use bytes from the current fragment. */
+                       n = p_len - p_used;
+                       if (n > f_size - f_used)
+                               n = f_size - f_used;
+                       f_used += n;
+                       p_used += n;
+               }
+
+               /* The last segment may be less than gso_size. */
+               data_len -= p_len;
+               if (data_len < p_len)
+                       p_len = data_len;
+       }
+
+       return num_edescs;
+}
+
+/* Prepare modified copies of the skbuff headers.
+ * FIXME: add support for IPv6.
+ */
+static void tso_headers_prepare(struct sk_buff *skb, unsigned char *headers,
+                               s64 slot)
+{
+       struct skb_shared_info *sh = skb_shinfo(skb);
+       struct iphdr *ih;
+       struct tcphdr *th;
+       unsigned int data_len = skb->data_len;
+       unsigned char *data = skb->data;
+       unsigned int ih_off, th_off, sh_len, p_len;
+       unsigned int isum_seed, tsum_seed, id, seq;
+       long f_id = -1;    /* id of the current fragment */
+       long f_size = -1;  /* size of the current fragment */
+       long f_used = -1;  /* bytes used from the current fragment */
+       long n;            /* size of the current piece of payload */
+       int segment;
+
+       /* Locate original headers and compute various lengths. */
+       ih = ip_hdr(skb);
+       th = tcp_hdr(skb);
+       ih_off = skb_network_offset(skb);
+       th_off = skb_transport_offset(skb);
+       sh_len = th_off + tcp_hdrlen(skb);
+       p_len = sh->gso_size;
+
+       /* Set up seed values for IP and TCP csum and initialize id and seq. */
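+       /* (The seeds are ones-complement negations of the original
+        * check/length/id fields; each per-segment checksum below then
+        * reduces to "seed + new field values", folded by csum_long(),
+        * i.e. an RFC 1624-style incremental update.)
+        */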
+       isum_seed = ((0xFFFF - ih->check) +
+                    (0xFFFF - ih->tot_len) +
+                    (0xFFFF - ih->id));
+       tsum_seed = th->check + (0xFFFF ^ htons(skb->len));
+       id = ntohs(ih->id);
+       seq = ntohl(th->seq);
+
+       /* Prepare all the headers. */
+       for (segment = 0; segment < sh->gso_segs; segment++) {
+               unsigned char *buf;
+               unsigned int p_used = 0;
+
+               /* Copy to the header memory for this segment. */
+               buf = headers + (slot % EQUEUE_ENTRIES) * HEADER_BYTES +
+                       NET_IP_ALIGN;
+               memcpy(buf, data, sh_len);
+
+               /* Update copied ip header. */
+               ih = (struct iphdr *)(buf + ih_off);
+               ih->tot_len = htons(sh_len + p_len - ih_off);
+               ih->id = htons(id);
+               ih->check = csum_long(isum_seed + ih->tot_len +
+                                     ih->id) ^ 0xffff;
+
+               /* Update copied tcp header. */
+               th = (struct tcphdr *)(buf + th_off);
+               th->seq = htonl(seq);
+               th->check = csum_long(tsum_seed + htons(sh_len + p_len));
+               if (segment != sh->gso_segs - 1) {
+                       th->fin = 0;
+                       th->psh = 0;
+               }
+
+               /* Skip past the header. */
+               slot++;
+
+               /* Skip past the payload. */
+               while (p_used < p_len) {
+
+                       /* Advance as needed. */
+                       while (f_used >= f_size) {
+                               f_id++;
+                               f_size = sh->frags[f_id].size;
+                               f_used = 0;
+                       }
+
+                       /* Use bytes from the current fragment. */
+                       n = p_len - p_used;
+                       if (n > f_size - f_used)
+                               n = f_size - f_used;
+                       f_used += n;
+                       p_used += n;
+
+                       slot++;
+               }
+
+               id++;
+               seq += p_len;
+
+               /* The last segment may be less than gso_size. */
+               data_len -= p_len;
+               if (data_len < p_len)
+                       p_len = data_len;
+       }
+
+       /* Flush the headers so they are ready for hardware DMA. */
+       wmb();
+}
+
+/* Pass all the data to mpipe for egress. */
+static void tso_egress(struct net_device *dev, gxio_mpipe_equeue_t *equeue,
+                      struct sk_buff *skb, unsigned char *headers, s64 slot)
+{
+       struct tile_net_priv *priv = netdev_priv(dev);
+       struct skb_shared_info *sh = skb_shinfo(skb);
+       unsigned int data_len = skb->data_len;
+       unsigned int p_len = sh->gso_size;
+       gxio_mpipe_edesc_t edesc_head = { { 0 } };
+       gxio_mpipe_edesc_t edesc_body = { { 0 } };
+       long f_id = -1;    /* id of the current fragment */
+       long f_size = -1;  /* size of the current fragment */
+       long f_used = -1;  /* bytes used from the current fragment */
+       long n;            /* size of the current piece of payload */
+       unsigned long tx_packets = 0, tx_bytes = 0;
+       unsigned int csum_start, sh_len;
+       int segment;
+
+       /* Prepare to egress the headers: set up header edesc. */
+       csum_start = skb_checksum_start_offset(skb);
+       sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+       edesc_head.csum = 1;
+       edesc_head.csum_start = csum_start;
+       edesc_head.csum_dest = csum_start + skb->csum_offset;
+       edesc_head.xfer_size = sh_len;
+
+       /* This is only used to specify the TLB. */
+       edesc_head.stack_idx = large_buffer_stack;
+       edesc_body.stack_idx = large_buffer_stack;
+
+       /* Egress all the edescs. */
+       for (segment = 0; segment < sh->gso_segs; segment++) {
+               void *va;
+               unsigned char *buf;
+               unsigned int p_used = 0;
+
+               /* Egress the header. */
+               buf = headers + (slot % EQUEUE_ENTRIES) * HEADER_BYTES +
+                       NET_IP_ALIGN;
+               edesc_head.va = va_to_tile_io_addr(buf);
+               gxio_mpipe_equeue_put_at(equeue, edesc_head, slot);
+               slot++;
+
+               /* Egress the payload. */
+               while (p_used < p_len) {
+
+                       /* Advance as needed. */
+                       while (f_used >= f_size) {
+                               f_id++;
+                               f_size = sh->frags[f_id].size;
+                               f_used = 0;
+                       }
+
+                       va = tile_net_frag_buf(&sh->frags[f_id]) + f_used;
+
+                       /* Use bytes from the current fragment. */
+                       n = p_len - p_used;
+                       if (n > f_size - f_used)
+                               n = f_size - f_used;
+                       f_used += n;
+                       p_used += n;
+
+                       /* Egress a piece of the payload. */
+                       edesc_body.va = va_to_tile_io_addr(va);
+                       edesc_body.xfer_size = n;
+                       edesc_body.bound = !(p_used < p_len);
+                       gxio_mpipe_equeue_put_at(equeue, edesc_body, slot);
+                       slot++;
+               }
+
+               tx_packets++;
+               tx_bytes += sh_len + p_len;
+
+               /* The last segment may be less than gso_size. */
+               data_len -= p_len;
+               if (data_len < p_len)
+                       p_len = data_len;
+       }
+
+       /* Update stats. */
+       tile_net_stats_add(tx_packets, &priv->stats.tx_packets);
+       tile_net_stats_add(tx_bytes, &priv->stats.tx_bytes);
+}
+
+/* Do "TSO" handling for egress.
+ *
+ * Normally drivers set NETIF_F_TSO only to support hardware TSO;
+ * otherwise the stack uses scatter-gather to implement GSO in software.
+ * In our testing, enabling GSO support (via NETIF_F_SG) drops network
+ * performance down to around 7.5 Gbps on the 10G interfaces, although
+ * it also drops cpu utilization way down, to under 8%.  But
+ * implementing "TSO" in the driver brings performance back up to line
+ * rate, while dropping cpu usage even further, to less than 4%.  In
+ * practice, profiling of GSO shows that skb_segment() is what causes
+ * the performance overheads; we benefit in the driver from using
+ * preallocated memory to duplicate the TCP/IP headers.
+ */
+static int tile_net_tx_tso(struct sk_buff *skb, struct net_device *dev)
+{
+       struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
+       struct tile_net_priv *priv = netdev_priv(dev);
+       int channel = priv->echannel;
+       struct tile_net_egress *egress = &egress_for_echannel[channel];
+       struct tile_net_comps *comps = info->comps_for_echannel[channel];
+       gxio_mpipe_equeue_t *equeue = egress->equeue;
+       unsigned long irqflags;
+       int num_edescs;
+       s64 slot;
+
+       /* Determine how many mpipe edescs are needed. */
+       num_edescs = tso_count_edescs(skb);
+
+       local_irq_save(irqflags);
+
+       /* Try to acquire a completion entry and an egress slot. */
+       slot = tile_net_equeue_try_reserve(dev, comps, equeue, num_edescs);
+       if (slot < 0) {
+               local_irq_restore(irqflags);
+               return NETDEV_TX_BUSY;
+       }
+
+       /* Set up copies of header data properly. */
+       tso_headers_prepare(skb, egress->headers, slot);
+
+       /* Actually pass the data to the network hardware. */
+       tso_egress(dev, equeue, skb, egress->headers, slot);
+
+       /* Add a completion record. */
+       add_comp(equeue, comps, slot + num_edescs - 1, skb);
+
+       local_irq_restore(irqflags);
+
+       /* Make sure the egress timer is scheduled. */
+       tile_net_schedule_egress_timer();
+
+       return NETDEV_TX_OK;
+}
+
+/* Analyze the body and frags for a transmit request. */
+static unsigned int tile_net_tx_frags(struct frag *frags,
+                                      struct sk_buff *skb,
+                                      void *b_data, unsigned int b_len)
+{
+       unsigned int i, n = 0;
+
+       struct skb_shared_info *sh = skb_shinfo(skb);
+
+       if (b_len != 0) {
+               frags[n].buf = b_data;
+               frags[n++].length = b_len;
+       }
+
+       for (i = 0; i < sh->nr_frags; i++) {
+               skb_frag_t *f = &sh->frags[i];
+               frags[n].buf = tile_net_frag_buf(f);
+               frags[n++].length = skb_frag_size(f);
+       }
+
+       return n;
+}
+
+/* Help the kernel transmit a packet. */
+static int tile_net_tx(struct sk_buff *skb, struct net_device *dev)
+{
+       struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
+       struct tile_net_priv *priv = netdev_priv(dev);
+       struct tile_net_egress *egress = &egress_for_echannel[priv->echannel];
+       gxio_mpipe_equeue_t *equeue = egress->equeue;
+       struct tile_net_comps *comps =
+               info->comps_for_echannel[priv->echannel];
+       unsigned int len = skb->len;
+       unsigned char *data = skb->data;
+       unsigned int num_edescs;
+       struct frag frags[MAX_FRAGS];
+       gxio_mpipe_edesc_t edescs[MAX_FRAGS];
+       unsigned long irqflags;
+       gxio_mpipe_edesc_t edesc = { { 0 } };
+       unsigned int i;
+       s64 slot;
+
+       if (skb_is_gso(skb))
+               return tile_net_tx_tso(skb, dev);
+
+       num_edescs = tile_net_tx_frags(frags, skb, data, skb_headlen(skb));
+
+       /* This is only used to specify the TLB. */
+       edesc.stack_idx = large_buffer_stack;
+
+       /* Prepare the edescs. */
+       for (i = 0; i < num_edescs; i++) {
+               edesc.xfer_size = frags[i].length;
+               edesc.va = va_to_tile_io_addr(frags[i].buf);
+               edescs[i] = edesc;
+       }
+
+       /* Mark the final edesc. */
+       edescs[num_edescs - 1].bound = 1;
+
+       /* Add checksum info to the initial edesc, if needed. */
+       if (skb->ip_summed == CHECKSUM_PARTIAL) {
+               unsigned int csum_start = skb_checksum_start_offset(skb);
+               edescs[0].csum = 1;
+               edescs[0].csum_start = csum_start;
+               edescs[0].csum_dest = csum_start + skb->csum_offset;
+       }
+
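+       /* Block interrupts: the per-cpu completion state is shared
+        * with the egress timer, which runs in interrupt context.
+        */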
+       local_irq_save(irqflags);
+
+       /* Try to acquire a completion entry and an egress slot. */
+       slot = tile_net_equeue_try_reserve(dev, comps, equeue, num_edescs);
+       if (slot < 0) {
+               local_irq_restore(irqflags);
+               return NETDEV_TX_BUSY;
+       }
+
+       for (i = 0; i < num_edescs; i++)
+               gxio_mpipe_equeue_put_at(equeue, edescs[i], slot++);
+
+       /* Add a completion record. */
+       add_comp(equeue, comps, slot - 1, skb);
+
+       /* NOTE: Use ETH_ZLEN for short packets (e.g. 42 < 60). */
+       tile_net_stats_add(1, &priv->stats.tx_packets);
+       tile_net_stats_add(max_t(unsigned int, len, ETH_ZLEN),
+                          &priv->stats.tx_bytes);
+
+       local_irq_restore(irqflags);
+
+       /* Make sure the egress timer is scheduled. */
+       tile_net_schedule_egress_timer();
+
+       return NETDEV_TX_OK;
+}
+
+/* Return subqueue id on this core (one per core). */
+static u16 tile_net_select_queue(struct net_device *dev, struct sk_buff *skb)
+{
+       return smp_processor_id();
+}
+
+/* Deal with a transmit timeout. */
+static void tile_net_tx_timeout(struct net_device *dev)
+{
+       int cpu;
+
+       for_each_online_cpu(cpu)
+               netif_wake_subqueue(dev, cpu);
+}
+
+/* Ioctl commands. */
+static int tile_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+       return -EOPNOTSUPP;
+}
+
+/* Get system network statistics for device. */
+static struct net_device_stats *tile_net_get_stats(struct net_device *dev)
+{
+       struct tile_net_priv *priv = netdev_priv(dev);
+       return &priv->stats;
+}
+
+/* Change the MTU. */
+static int tile_net_change_mtu(struct net_device *dev, int new_mtu)
+{
+       if ((new_mtu < 68) || (new_mtu > 1500))
+               return -EINVAL;
+       dev->mtu = new_mtu;
+       return 0;
+}
+
+/* Change the Ethernet address of the NIC.
+ *
+ * The hypervisor driver does not support changing MAC address.  However,
+ * the hardware does not do anything with the MAC address, so the address
+ * which gets used on outgoing packets, and which is accepted on incoming
+ * packets, is completely up to us.
+ *
+ * Returns 0 on success, negative on failure.
+ */
+static int tile_net_set_mac_address(struct net_device *dev, void *p)
+{
+       struct sockaddr *addr = p;
+
+       if (!is_valid_ether_addr(addr->sa_data))
+               return -EINVAL;
+       memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+       return 0;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/* Polling 'interrupt' - used by things like netconsole to send skbs
+ * without having to re-enable interrupts. It's not called while
+ * the interrupt routine is executing.
+ */
+static void tile_net_netpoll(struct net_device *dev)
+{
+       disable_percpu_irq(ingress_irq);
+       tile_net_handle_ingress_irq(ingress_irq, NULL);
+       enable_percpu_irq(ingress_irq, 0);
+}
+#endif
+
+static const struct net_device_ops tile_net_ops = {
+       .ndo_open = tile_net_open,
+       .ndo_stop = tile_net_stop,
+       .ndo_start_xmit = tile_net_tx,
+       .ndo_select_queue = tile_net_select_queue,
+       .ndo_do_ioctl = tile_net_ioctl,
+       .ndo_get_stats = tile_net_get_stats,
+       .ndo_change_mtu = tile_net_change_mtu,
+       .ndo_tx_timeout = tile_net_tx_timeout,
+       .ndo_set_mac_address = tile_net_set_mac_address,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+       .ndo_poll_controller = tile_net_netpoll,
+#endif
+};
+
+/* The setup function.
+ *
+ * This uses ether_setup() to assign various fields in dev, including
+ * setting IFF_BROADCAST and IFF_MULTICAST, then sets some extra fields.
+ */
+static void tile_net_setup(struct net_device *dev)
+{
+       ether_setup(dev);
+       dev->netdev_ops = &tile_net_ops;
+       dev->watchdog_timeo = TILE_NET_TIMEOUT;
+       dev->features |= NETIF_F_LLTX;
+       dev->features |= NETIF_F_HW_CSUM;
+       dev->features |= NETIF_F_SG;
+       dev->features |= NETIF_F_TSO;
+       dev->mtu = 1500;
+}
+
+/* Allocate the device structure, register the device, and obtain the
+ * MAC address from the hypervisor.
+ */
+static void tile_net_dev_init(const char *name, const uint8_t *mac)
+{
+       int ret;
+       int i;
+       int nz_addr = 0;
+       struct net_device *dev;
+       struct tile_net_priv *priv;
+
+       /* HACK: Ignore "loop" links. */
+       if (strncmp(name, "loop", 4) == 0)
+               return;
+
+       /* Allocate the device structure.  Normally, "name" is a
+        * template, instantiated by register_netdev(), but not for us.
+        */
+       dev = alloc_netdev_mqs(sizeof(*priv), name, tile_net_setup,
+                              NR_CPUS, 1);
+       if (!dev) {
+               pr_err("alloc_netdev_mqs(%s) failed\n", name);
+               return;
+       }
+
+       /* Initialize "priv". */
+       priv = netdev_priv(dev);
+       memset(priv, 0, sizeof(*priv));
+       priv->dev = dev;
+       priv->channel = -1;
+       priv->loopify_channel = -1;
+       priv->echannel = -1;
+
+       /* Get the MAC address and set it in the device struct; this must
+        * be done before the device is opened.  If the MAC is all zeroes,
+        * we use a random address, since we're probably on the simulator.
+        */
+       for (i = 0; i < 6; i++)
+               nz_addr |= mac[i];
+
+       if (nz_addr) {
+               memcpy(dev->dev_addr, mac, 6);
+               dev->addr_len = 6;
+       } else {
+               random_ether_addr(dev->dev_addr);
+       }
+
+       /* Register the network device. */
+       ret = register_netdev(dev);
+       if (ret) {
+               netdev_err(dev, "register_netdev failed %d\n", ret);
+               free_netdev(dev);
+               return;
+       }
+}
+
+/* Per-cpu module initialization. */
+static void tile_net_init_module_percpu(void *unused)
+{
+       struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
+       int my_cpu = smp_processor_id();
+
+       info->has_iqueue = false;
+
+       info->my_cpu = my_cpu;
+
+       /* Initialize the egress timer. */
+       hrtimer_init(&info->egress_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+       info->egress_timer.function = tile_net_handle_egress_timer;
+}
+
+/* Module initialization. */
+static int __init tile_net_init_module(void)
+{
+       int i;
+       char name[GXIO_MPIPE_LINK_NAME_LEN];
+       uint8_t mac[6];
+
+       pr_info("Tilera Network Driver\n");
+
+       mutex_init(&tile_net_devs_for_channel_mutex);
+
+       /* Initialize each CPU. */
+       on_each_cpu(tile_net_init_module_percpu, NULL, 1);
+
+       /* Find out what devices we have, and initialize them. */
+       for (i = 0; gxio_mpipe_link_enumerate_mac(i, name, mac) >= 0; i++)
+               tile_net_dev_init(name, mac);
+
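+       /* Default to all online cpus if "tile_net.cpus" is absent or bad. */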
+       if (!network_cpus_init())
+               network_cpus_map = *cpu_online_mask;
+
+       return 0;
+}
+
+module_init(tile_net_init_module);
index 4ffcd57b011b142fd367530ba032b3495a74a657..2857ab078aac1f9940f406f49916706dc3255441 100644 (file)
@@ -478,6 +478,7 @@ struct netvsc_device {
        u32 nvsp_version;
 
        atomic_t num_outstanding_sends;
+       wait_queue_head_t wait_drain;
        bool start_remove;
        bool destroy;
        /*
index 8b919471472fb1dba4d34ffcf0bfe5b4c723af7b..0c569831db5aeb0de77ef9a4d81d6d7e7f3281b8 100644 (file)
@@ -42,6 +42,7 @@ static struct netvsc_device *alloc_net_device(struct hv_device *device)
        if (!net_device)
                return NULL;
 
+       init_waitqueue_head(&net_device->wait_drain);
        net_device->start_remove = false;
        net_device->destroy = false;
        net_device->dev = device;
@@ -387,12 +388,8 @@ int netvsc_device_remove(struct hv_device *device)
        spin_unlock_irqrestore(&device->channel->inbound_lock, flags);
 
        /* Wait for all send completions */
-       while (atomic_read(&net_device->num_outstanding_sends)) {
-               dev_info(&device->device,
-                       "waiting for %d requests to complete...\n",
-                       atomic_read(&net_device->num_outstanding_sends));
-               udelay(100);
-       }
+       wait_event(net_device->wait_drain,
+                  atomic_read(&net_device->num_outstanding_sends) == 0);
 
        netvsc_disconnect_vsp(net_device);
 
@@ -486,6 +483,9 @@ static void netvsc_send_completion(struct hv_device *device,
                num_outstanding_sends =
                        atomic_dec_return(&net_device->num_outstanding_sends);
 
+               if (net_device->destroy && num_outstanding_sends == 0)
+                       wake_up(&net_device->wait_drain);
+
                if (netif_queue_stopped(ndev) && !net_device->start_remove &&
                        (hv_ringbuf_avail_percent(&device->channel->outbound)
                        > RING_AVAIL_PERCENT_HIWATER ||
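
The netvsc hunks above replace a udelay() polling loop with a sleeping wait: teardown blocks on a waitqueue until the outstanding-send counter drains to zero, and the completion path issues a wake_up() only when a teardown is pending. A minimal sketch of this drain pattern, with hypothetical names (the waitqueue must be set up with init_waitqueue_head() before first use):

#include <linux/wait.h>
#include <linux/atomic.h>

struct drain_ctx {
        atomic_t                outstanding;
        wait_queue_head_t       wait_drain;     /* init_waitqueue_head() first */
        bool                    destroy;
};

/* completion side: drop the count, wake the remover only at zero */
static void complete_one(struct drain_ctx *ctx)
{
        if (atomic_dec_return(&ctx->outstanding) == 0 && ctx->destroy)
                wake_up(&ctx->wait_drain);
}

/* teardown side: sleep until every in-flight send has completed */
static void drain_all(struct drain_ctx *ctx)
{
        ctx->destroy = true;
        wait_event(ctx->wait_drain,
                   atomic_read(&ctx->outstanding) == 0);
}
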
index 5ac46f5226f3c5b4b1d35e3450ec922326902896..47f8e8939266fd64ce097e49362a8e13a7019ef1 100644 (file)
@@ -41,6 +41,8 @@ MODULE_LICENSE("GPL");
 #define IP1001_APS_ON                  11      /* IP1001 APS Mode  bit */
 #define IP101A_G_APS_ON                        2       /* IP101A/G APS Mode bit */
 #define IP101A_G_IRQ_CONF_STATUS       0x11    /* Conf Info IRQ & Status Reg */
+#define        IP101A_G_IRQ_PIN_USED           (1<<15) /* INTR pin used */
+#define        IP101A_G_IRQ_DEFAULT            IP101A_G_IRQ_PIN_USED
 
 static int ip175c_config_init(struct phy_device *phydev)
 {
@@ -136,6 +138,11 @@ static int ip1001_config_init(struct phy_device *phydev)
        if (c < 0)
                return c;
 
+       /* INTR pin used: speed/link/duplex will cause an interrupt */
+       c = phy_write(phydev, IP101A_G_IRQ_CONF_STATUS, IP101A_G_IRQ_DEFAULT);
+       if (c < 0)
+               return c;
+
        if (phydev->interface == PHY_INTERFACE_MODE_RGMII) {
                /* Additional delay (2ns) used to adjust RX clock phase
                 * at RGMII interface */
index 683ef1ce55196315a90f69f35f015b4773899830..5061608f408c67a41ab6a0432f10a206032af56a 100644 (file)
@@ -96,7 +96,7 @@ static int of_mdio_bus_match(struct device *dev, void *mdio_bus_np)
 }
 /**
  * of_mdio_find_bus - Given an mii_bus node, find the mii_bus.
- * @mdio_np: Pointer to the mii_bus.
+ * @mdio_bus_np: Pointer to the mii_bus.
  *
  * Returns a pointer to the mii_bus, or NULL if none found.
  *
index 3faef5670d1ff60547ec9fcbb42eb956fa7b58df..d75d1f56becff95ae9cf7b8f8dc08990d142d06c 100644 (file)
@@ -946,7 +946,7 @@ struct sk_buff *sierra_net_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
 }
 
 static const u8 sierra_net_ifnum_list[] = { 7, 10, 11 };
-static const struct sierra_net_info_data sierra_net_info_data_68A3 = {
+static const struct sierra_net_info_data sierra_net_info_data_direct_ip = {
        .rx_urb_size = 8 * 1024,
        .whitelist = {
                .infolen = ARRAY_SIZE(sierra_net_ifnum_list),
@@ -954,7 +954,7 @@ static const struct sierra_net_info_data sierra_net_info_data_68A3 = {
        }
 };
 
-static const struct driver_info sierra_net_info_68A3 = {
+static const struct driver_info sierra_net_info_direct_ip = {
        .description = "Sierra Wireless USB-to-WWAN Modem",
        .flags = FLAG_WWAN | FLAG_SEND_ZLP,
        .bind = sierra_net_bind,
@@ -962,12 +962,18 @@ static const struct driver_info sierra_net_info_68A3 = {
        .status = sierra_net_status,
        .rx_fixup = sierra_net_rx_fixup,
        .tx_fixup = sierra_net_tx_fixup,
-       .data = (unsigned long)&sierra_net_info_data_68A3,
+       .data = (unsigned long)&sierra_net_info_data_direct_ip,
 };
 
 static const struct usb_device_id products[] = {
        {USB_DEVICE(0x1199, 0x68A3), /* Sierra Wireless USB-to-WWAN modem */
-       .driver_info = (unsigned long) &sierra_net_info_68A3},
+       .driver_info = (unsigned long) &sierra_net_info_direct_ip},
+       {USB_DEVICE(0x0F3D, 0x68A3), /* AT&T Direct IP modem */
+       .driver_info = (unsigned long) &sierra_net_info_direct_ip},
+       {USB_DEVICE(0x1199, 0x68AA), /* Sierra Wireless Direct IP LTE modem */
+       .driver_info = (unsigned long) &sierra_net_info_direct_ip},
+       {USB_DEVICE(0x0F3D, 0x68AA), /* AT&T Direct IP LTE modem */
+       .driver_info = (unsigned long) &sierra_net_info_direct_ip},
 
        {}, /* last item */
 };
index 5214b1eceb9516282cb9ae8b38f79a606da0ecb7..f18149ae2588e682e661100f026f7bcf60a16d06 100644 (file)
@@ -42,7 +42,8 @@ module_param(gso, bool, 0444);
 #define VIRTNET_DRIVER_VERSION "1.0.0"
 
 struct virtnet_stats {
-       struct u64_stats_sync syncp;
+       struct u64_stats_sync tx_syncp;
+       struct u64_stats_sync rx_syncp;
        u64 tx_bytes;
        u64 tx_packets;
 
@@ -300,10 +301,10 @@ static void receive_buf(struct net_device *dev, void *buf, unsigned int len)
 
        hdr = skb_vnet_hdr(skb);
 
-       u64_stats_update_begin(&stats->syncp);
+       u64_stats_update_begin(&stats->rx_syncp);
        stats->rx_bytes += skb->len;
        stats->rx_packets++;
-       u64_stats_update_end(&stats->syncp);
+       u64_stats_update_end(&stats->rx_syncp);
 
        if (hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
                pr_debug("Needs csum!\n");
@@ -565,10 +566,10 @@ static unsigned int free_old_xmit_skbs(struct virtnet_info *vi)
        while ((skb = virtqueue_get_buf(vi->svq, &len)) != NULL) {
                pr_debug("Sent skb %p\n", skb);
 
-               u64_stats_update_begin(&stats->syncp);
+               u64_stats_update_begin(&stats->tx_syncp);
                stats->tx_bytes += skb->len;
                stats->tx_packets++;
-               u64_stats_update_end(&stats->syncp);
+               u64_stats_update_end(&stats->tx_syncp);
 
                tot_sgs += skb_vnet_hdr(skb)->num_sg;
                dev_kfree_skb_any(skb);
@@ -703,12 +704,16 @@ static struct rtnl_link_stats64 *virtnet_stats(struct net_device *dev,
                u64 tpackets, tbytes, rpackets, rbytes;
 
                do {
-                       start = u64_stats_fetch_begin(&stats->syncp);
+                       start = u64_stats_fetch_begin(&stats->tx_syncp);
                        tpackets = stats->tx_packets;
                        tbytes   = stats->tx_bytes;
+               } while (u64_stats_fetch_retry(&stats->tx_syncp, start));
+
+               do {
+                       start = u64_stats_fetch_begin(&stats->rx_syncp);
                        rpackets = stats->rx_packets;
                        rbytes   = stats->rx_bytes;
-               } while (u64_stats_fetch_retry(&stats->syncp, start));
+               } while (u64_stats_fetch_retry(&stats->rx_syncp, start));
 
                tot->rx_packets += rpackets;
                tot->tx_packets += tpackets;
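
On 32-bit arches u64_stats_sync is a seqcount, and a seqcount tolerates only one writer context; the rx path (NAPI poll) and the tx-completion path can update stats concurrently, so sharing one syncp between them races. The patch gives each direction its own syncp. A minimal sketch of the resulting reader, with hypothetical names:

#include <linux/u64_stats_sync.h>

struct dir_stats {
        struct u64_stats_sync rx_syncp; /* written only from NAPI poll */
        struct u64_stats_sync tx_syncp; /* written only from tx completion */
        u64 rx_bytes, tx_bytes;
};

/* reader: retry each direction independently under its own seqcount */
static u64 read_rx_bytes(struct dir_stats *s)
{
        unsigned int start;
        u64 v;

        do {
                start = u64_stats_fetch_begin(&s->rx_syncp);
                v = s->rx_bytes;
        } while (u64_stats_fetch_retry(&s->rx_syncp, start));
        return v;
}
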
index 67c13af6f206e04475996642b00698ac9e9d12f5..c06b6cb5c91ea6c64c14a4b38faf7c5a2dd74e1b 100644 (file)
@@ -877,6 +877,10 @@ struct b43_wl {
         * from the mac80211 subsystem. */
        u16 mac80211_initially_registered_queues;
 
+       /* Set this when we call ieee80211_register_hw() and check it
+        * before calling ieee80211_unregister_hw(). */
+       bool hw_registred;
+
        /* We can only have one operating interface (802.11 core)
         * at a time. General information about this interface follows.
         */
index 5a39b226b2e3193958bf29472c3112c7361de0e5..acd03a4f973079dbcba384f9a9bde0567a888cc6 100644 (file)
@@ -2437,6 +2437,7 @@ start_ieee80211:
        err = ieee80211_register_hw(wl->hw);
        if (err)
                goto err_one_core_detach;
+       wl->hw_registred = true;
        b43_leds_register(wl->current_dev);
        goto out;
 
@@ -5299,6 +5300,7 @@ static struct b43_wl *b43_wireless_init(struct b43_bus_dev *dev)
 
        hw->queues = modparam_qos ? B43_QOS_QUEUE_NUM : 1;
        wl->mac80211_initially_registered_queues = hw->queues;
+       wl->hw_registred = false;
        hw->max_rates = 2;
        SET_IEEE80211_DEV(hw, dev->dev);
        if (is_valid_ether_addr(sprom->et1mac))
@@ -5370,12 +5372,15 @@ static void b43_bcma_remove(struct bcma_device *core)
         * as the ieee80211 unreg will destroy the workqueue. */
        cancel_work_sync(&wldev->restart_work);
 
-       /* Restore the queues count before unregistering, because firmware detect
-        * might have modified it. Restoring is important, so the networking
-        * stack can properly free resources. */
-       wl->hw->queues = wl->mac80211_initially_registered_queues;
-       b43_leds_stop(wldev);
-       ieee80211_unregister_hw(wl->hw);
+       B43_WARN_ON(!wl);
+       if (wl->current_dev == wldev && wl->hw_registred) {
+               /* Restore the queues count before unregistering, because firmware detect
+                * might have modified it. Restoring is important, so the networking
+                * stack can properly free resources. */
+               wl->hw->queues = wl->mac80211_initially_registered_queues;
+               b43_leds_stop(wldev);
+               ieee80211_unregister_hw(wl->hw);
+       }
 
        b43_one_core_detach(wldev->dev);
 
@@ -5446,7 +5451,7 @@ static void b43_ssb_remove(struct ssb_device *sdev)
        cancel_work_sync(&wldev->restart_work);
 
        B43_WARN_ON(!wl);
-       if (wl->current_dev == wldev) {
+       if (wl->current_dev == wldev && wl->hw_registred) {
                /* Restore the queues count before unregistering, because firmware detect
                 * might have modified it. Restoring is important, so the networking
                 * stack can properly free resources. */
index e2480d19627679c89cb166f9a4954db74dd21ff8..8e7e6928c93699bf9b7df35f9efc4c9b26928481 100644 (file)
@@ -89,9 +89,9 @@ int brcmf_sdio_intr_register(struct brcmf_sdio_dev *sdiodev)
        data |= 1 << SDIO_FUNC_1 | 1 << SDIO_FUNC_2 | 1;
        brcmf_sdio_regwb(sdiodev, SDIO_CCCR_IENx, data, &ret);
 
-       /* redirect, configure ane enable io for interrupt signal */
+       /* redirect, configure and enable io for interrupt signal */
        data = SDIO_SEPINT_MASK | SDIO_SEPINT_OE;
-       if (sdiodev->irq_flags | IRQF_TRIGGER_HIGH)
+       if (sdiodev->irq_flags & IRQF_TRIGGER_HIGH)
                data |= SDIO_SEPINT_ACT_HI;
        brcmf_sdio_regwb(sdiodev, SDIO_CCCR_BRCM_SEPINT, data, &ret);
 
index 9cfae0c08707d95e21a68bdd8822bc4f966e223f..95aa8e1683ecb4bdeb663e0cf605d699407b96f0 100644 (file)
@@ -1903,14 +1903,6 @@ static void ipw2100_down(struct ipw2100_priv *priv)
        netif_stop_queue(priv->net_dev);
 }
 
-/* Called by register_netdev() */
-static int ipw2100_net_init(struct net_device *dev)
-{
-       struct ipw2100_priv *priv = libipw_priv(dev);
-
-       return ipw2100_up(priv, 1);
-}
-
 static int ipw2100_wdev_init(struct net_device *dev)
 {
        struct ipw2100_priv *priv = libipw_priv(dev);
@@ -6087,7 +6079,6 @@ static const struct net_device_ops ipw2100_netdev_ops = {
        .ndo_stop               = ipw2100_close,
        .ndo_start_xmit         = libipw_xmit,
        .ndo_change_mtu         = libipw_change_mtu,
-       .ndo_init               = ipw2100_net_init,
        .ndo_tx_timeout         = ipw2100_tx_timeout,
        .ndo_set_mac_address    = ipw2100_set_address,
        .ndo_validate_addr      = eth_validate_addr,
@@ -6329,6 +6320,10 @@ static int ipw2100_pci_init_one(struct pci_dev *pci_dev,
        printk(KERN_INFO DRV_NAME
               ": Detected Intel PRO/Wireless 2100 Network Connection\n");
 
+       err = ipw2100_up(priv, 1);
+       if (err)
+               goto fail;
+
        err = ipw2100_wdev_init(dev);
        if (err)
                goto fail;
@@ -6338,12 +6333,7 @@ static int ipw2100_pci_init_one(struct pci_dev *pci_dev,
         * network device we would call ipw2100_up.  This introduced a race
         * condition with newer hotplug configurations (network was coming
         * up and making calls before the device was initialized).
-        *
-        * If we called ipw2100_up before we registered the device, then the
-        * device name wasn't registered.  So, we instead use the net_dev->init
-        * member to call a function that then just turns and calls ipw2100_up.
-        * net_dev->init is called after name allocation but before the
-        * notifier chain is called */
+        */
        err = register_netdev(dev);
        if (err) {
                printk(KERN_WARNING DRV_NAME
index 19f7ee84ae89e2b76ba493016cb508745eaae173..e5e8ada4aaf6ad93cd109ee2112921ed136b3c00 100644 (file)
 #define IWL6000_UCODE_API_MAX 6
 #define IWL6050_UCODE_API_MAX 5
 #define IWL6000G2_UCODE_API_MAX 6
+#define IWL6035_UCODE_API_MAX 6
 
 /* Oldest version we won't warn about */
 #define IWL6000_UCODE_API_OK 4
 #define IWL6000G2_UCODE_API_OK 5
 #define IWL6050_UCODE_API_OK 5
 #define IWL6000G2B_UCODE_API_OK 6
+#define IWL6035_UCODE_API_OK 6
 
 /* Lowest firmware API version supported */
 #define IWL6000_UCODE_API_MIN 4
 #define IWL6050_UCODE_API_MIN 4
-#define IWL6000G2_UCODE_API_MIN 4
+#define IWL6000G2_UCODE_API_MIN 5
+#define IWL6035_UCODE_API_MIN 6
 
 /* EEPROM versions */
 #define EEPROM_6000_TX_POWER_VERSION   (4)
@@ -227,9 +230,25 @@ const struct iwl_cfg iwl6030_2bg_cfg = {
        IWL_DEVICE_6030,
 };
 
+#define IWL_DEVICE_6035                                                \
+       .fw_name_pre = IWL6030_FW_PRE,                          \
+       .ucode_api_max = IWL6035_UCODE_API_MAX,                 \
+       .ucode_api_ok = IWL6035_UCODE_API_OK,                   \
+       .ucode_api_min = IWL6035_UCODE_API_MIN,                 \
+       .device_family = IWL_DEVICE_FAMILY_6030,                \
+       .max_inst_size = IWL60_RTC_INST_SIZE,                   \
+       .max_data_size = IWL60_RTC_DATA_SIZE,                   \
+       .eeprom_ver = EEPROM_6030_EEPROM_VERSION,               \
+       .eeprom_calib_ver = EEPROM_6030_TX_POWER_VERSION,       \
+       .base_params = &iwl6000_g2_base_params,                 \
+       .bt_params = &iwl6000_bt_params,                        \
+       .need_temp_offset_calib = true,                         \
+       .led_mode = IWL_LED_RF_STATE,                           \
+       .adv_pm = true
+
 const struct iwl_cfg iwl6035_2agn_cfg = {
        .name = "Intel(R) Centrino(R) Advanced-N 6235 AGN",
-       IWL_DEVICE_6030,
+       IWL_DEVICE_6035,
        .ht_params = &iwl6000_ht_params,
 };
 
index aea07aab3c9e82c44f6b417d20794f4523e6ae70..eb6a8eaf42fc54b54ddfc57926f6f29c60371bdc 100644 (file)
@@ -1267,7 +1267,7 @@ int iwl_remove_dynamic_key(struct iwl_priv *priv,
                key_flags |= STA_KEY_MULTICAST_MSK;
 
        sta_cmd.key.key_flags = key_flags;
-       sta_cmd.key.key_offset = WEP_INVALID_OFFSET;
+       sta_cmd.key.key_offset = keyconf->hw_key_idx;
        sta_cmd.sta.modify_mask = STA_MODIFY_KEY_MASK;
        sta_cmd.mode = STA_CONTROL_MODIFY_MSK;
 
index d742900969eabc913feb5e3643b3cfdc0ddf3840..fac67a526a30880199bb12b8fb7310e6a3471a75 100644 (file)
@@ -861,13 +861,18 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
 
        /* We have our copies now, allow OS release its copies */
        release_firmware(ucode_raw);
-       complete(&drv->request_firmware_complete);
 
        drv->op_mode = iwl_dvm_ops.start(drv->trans, drv->cfg, &drv->fw);
 
        if (!drv->op_mode)
-               goto out_free_fw;
+               goto out_unbind;
 
+       /*
+        * Complete the firmware request last so that
+        * a driver unbind (stop) doesn't run while we
+        * are doing the start() above.
+        */
+       complete(&drv->request_firmware_complete);
        return;
 
  try_again:
index 50c58911e7188c3a7a2f29a6d4bf55f7a9e05953..b8e2b223ac36b6c1634e78256253d6095bcce08a 100644 (file)
@@ -568,28 +568,28 @@ static int iwl_find_otp_image(struct iwl_trans *trans,
  * iwl_get_max_txpower_avg - get the highest tx power from all chains.
  *     find the highest tx power from all chains for the channel
  */
-static s8 iwl_get_max_txpower_avg(const struct iwl_cfg *cfg,
+static s8 iwl_get_max_txpower_avg(struct iwl_priv *priv,
                struct iwl_eeprom_enhanced_txpwr *enhanced_txpower,
                int element, s8 *max_txpower_in_half_dbm)
 {
        s8 max_txpower_avg = 0; /* (dBm) */
 
        /* Take the highest tx power from any valid chains */
-       if ((cfg->valid_tx_ant & ANT_A) &&
+       if ((priv->hw_params.valid_tx_ant & ANT_A) &&
            (enhanced_txpower[element].chain_a_max > max_txpower_avg))
                max_txpower_avg = enhanced_txpower[element].chain_a_max;
-       if ((cfg->valid_tx_ant & ANT_B) &&
+       if ((priv->hw_params.valid_tx_ant & ANT_B) &&
            (enhanced_txpower[element].chain_b_max > max_txpower_avg))
                max_txpower_avg = enhanced_txpower[element].chain_b_max;
-       if ((cfg->valid_tx_ant & ANT_C) &&
+       if ((priv->hw_params.valid_tx_ant & ANT_C) &&
            (enhanced_txpower[element].chain_c_max > max_txpower_avg))
                max_txpower_avg = enhanced_txpower[element].chain_c_max;
-       if (((cfg->valid_tx_ant == ANT_AB) |
-           (cfg->valid_tx_ant == ANT_BC) |
-           (cfg->valid_tx_ant == ANT_AC)) &&
+       if (((priv->hw_params.valid_tx_ant == ANT_AB) |
+           (priv->hw_params.valid_tx_ant == ANT_BC) |
+           (priv->hw_params.valid_tx_ant == ANT_AC)) &&
            (enhanced_txpower[element].mimo2_max > max_txpower_avg))
                max_txpower_avg =  enhanced_txpower[element].mimo2_max;
-       if ((cfg->valid_tx_ant == ANT_ABC) &&
+       if ((priv->hw_params.valid_tx_ant == ANT_ABC) &&
            (enhanced_txpower[element].mimo3_max > max_txpower_avg))
                max_txpower_avg = enhanced_txpower[element].mimo3_max;
 
@@ -691,7 +691,7 @@ static void iwl_eeprom_enhanced_txpower(struct iwl_priv *priv)
                                 ((txp->delta_20_in_40 & 0xf0) >> 4),
                                 (txp->delta_20_in_40 & 0x0f));
 
-               max_txp_avg = iwl_get_max_txpower_avg(priv->cfg, txp_array, idx,
+               max_txp_avg = iwl_get_max_txpower_avg(priv, txp_array, idx,
                                                      &max_txp_avg_halfdbm);
 
                /*
index ab2f4d7500a40df03d68293986d8c6ae46fc9969..3ee23134c02b9b96ee184a5999e9577bf6452511 100644 (file)
@@ -199,6 +199,7 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv,
                            WIPHY_FLAG_DISABLE_BEACON_HINTS |
                            WIPHY_FLAG_IBSS_RSN;
 
+#ifdef CONFIG_PM_SLEEP
        if (priv->fw->img[IWL_UCODE_WOWLAN].sec[0].len &&
            priv->trans->ops->wowlan_suspend &&
            device_can_wakeup(priv->trans->dev)) {
@@ -217,6 +218,7 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv,
                hw->wiphy->wowlan.pattern_max_len =
                                        IWLAGN_WOWLAN_MAX_PATTERN_LEN;
        }
+#endif
 
        if (iwlwifi_mod_params.power_save)
                hw->wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT;
@@ -249,6 +251,7 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv,
        ret = ieee80211_register_hw(priv->hw);
        if (ret) {
                IWL_ERR(priv, "Failed to register hw (error %d)\n", ret);
+               iwl_leds_exit(priv);
                return ret;
        }
        priv->mac80211_registered = 1;
index 3b1069290fa9a9cf646a2f04325d6c1862b70a7a..dfd54662e3e675bc2c560894cd47d002cbf5ff76 100644 (file)
 #define SCD_TXFACT             (SCD_BASE + 0x10)
 #define SCD_ACTIVE             (SCD_BASE + 0x14)
 #define SCD_QUEUECHAIN_SEL     (SCD_BASE + 0xe8)
+#define SCD_CHAINEXT_EN                (SCD_BASE + 0x244)
 #define SCD_AGGR_SEL           (SCD_BASE + 0x248)
 #define SCD_INTERRUPT_MASK     (SCD_BASE + 0x108)
 
index ec6fb395b84d0aca4e7d7bfcf2e1729c3959dc5a..79c6b91417f9430c2982b4e51aefd8f3f7dc47b3 100644 (file)
@@ -1058,6 +1058,11 @@ static void iwl_tx_start(struct iwl_trans *trans)
        iwl_write_prph(trans, SCD_DRAM_BASE_ADDR,
                       trans_pcie->scd_bc_tbls.dma >> 10);
 
+       /* The chain extension of the SCD doesn't work well. This feature is
+        * enabled by default by the HW, so we need to disable it manually.
+        */
+       iwl_write_prph(trans, SCD_CHAINEXT_EN, 0);
+
        /* Enable DMA channel */
        for (chan = 0; chan < FH_TCSR_CHNL_NUM ; chan++)
                iwl_write_direct32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
index fb787df0166699f9c31e7adf8ff25c924c08f7a8..a0b7cfd3468532e705eeedaf4cf93cdb1e5be536 100644 (file)
@@ -1555,6 +1555,7 @@ static int hwsim_tx_info_frame_received_nl(struct sk_buff *skb_2,
                        hdr = (struct ieee80211_hdr *) skb->data;
                        mac80211_hwsim_monitor_ack(data2->hw, hdr->addr2);
                }
+               txi->flags |= IEEE80211_TX_STAT_ACK;
        }
        ieee80211_tx_status_irqsafe(data2->hw, skb);
        return 0;
@@ -1721,6 +1722,24 @@ static void hwsim_exit_netlink(void)
                       "unregister family %i\n", ret);
 }
 
+static const struct ieee80211_iface_limit hwsim_if_limits[] = {
+       { .max = 1, .types = BIT(NL80211_IFTYPE_ADHOC) },
+       { .max = 2048,  .types = BIT(NL80211_IFTYPE_STATION) |
+                                BIT(NL80211_IFTYPE_P2P_CLIENT) |
+#ifdef CONFIG_MAC80211_MESH
+                                BIT(NL80211_IFTYPE_MESH_POINT) |
+#endif
+                                BIT(NL80211_IFTYPE_AP) |
+                                BIT(NL80211_IFTYPE_P2P_GO) },
+};
+
+static const struct ieee80211_iface_combination hwsim_if_comb = {
+       .limits = hwsim_if_limits,
+       .n_limits = ARRAY_SIZE(hwsim_if_limits),
+       .max_interfaces = 2048,
+       .num_different_channels = 1,
+};
+
 static int __init init_mac80211_hwsim(void)
 {
        int i, err = 0;
@@ -1782,6 +1801,9 @@ static int __init init_mac80211_hwsim(void)
                hw->wiphy->n_addresses = 2;
                hw->wiphy->addresses = data->addresses;
 
+               hw->wiphy->iface_combinations = &hwsim_if_comb;
+               hw->wiphy->n_iface_combinations = 1;
+
                if (fake_hw_scan) {
                        hw->wiphy->max_scan_ssids = 255;
                        hw->wiphy->max_scan_ie_len = IEEE80211_MAX_DATA_LEN;
index 87671446e24b00203e0398142894e8fec547d308..015fec3371a05af4185b7f8cef8ac8815f9bbdb2 100644 (file)
@@ -948,6 +948,19 @@ static int mwifiex_cfg80211_start_ap(struct wiphy *wiphy,
                bss_cfg->ssid.ssid_len = params->ssid_len;
        }
 
+       switch (params->hidden_ssid) {
+       case NL80211_HIDDEN_SSID_NOT_IN_USE:
+               bss_cfg->bcast_ssid_ctl = 1;
+               break;
+       case NL80211_HIDDEN_SSID_ZERO_LEN:
+               bss_cfg->bcast_ssid_ctl = 0;
+               break;
+       case NL80211_HIDDEN_SSID_ZERO_CONTENTS:
+               /* firmware doesn't support this type of hidden SSID */
+       default:
+               return -EINVAL;
+       }
+
        if (mwifiex_set_secure_params(priv, bss_cfg, params)) {
                kfree(bss_cfg);
                wiphy_err(wiphy, "Failed to parse secuirty parameters!\n");
index 9f674bbebe65afc2f3fa2edf001af0d414cd8b4d..561452a5c818f4a3d6ea653d029cd32fdab80587 100644 (file)
@@ -122,6 +122,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
 #define TLV_TYPE_CHANNELBANDLIST    (PROPRIETARY_TLV_BASE_ID + 42)
 #define TLV_TYPE_UAP_BEACON_PERIOD  (PROPRIETARY_TLV_BASE_ID + 44)
 #define TLV_TYPE_UAP_DTIM_PERIOD    (PROPRIETARY_TLV_BASE_ID + 45)
+#define TLV_TYPE_UAP_BCAST_SSID     (PROPRIETARY_TLV_BASE_ID + 48)
 #define TLV_TYPE_UAP_RTS_THRESHOLD  (PROPRIETARY_TLV_BASE_ID + 51)
 #define TLV_TYPE_UAP_WPA_PASSPHRASE (PROPRIETARY_TLV_BASE_ID + 60)
 #define TLV_TYPE_UAP_ENCRY_PROTOCOL (PROPRIETARY_TLV_BASE_ID + 64)
@@ -1209,6 +1210,11 @@ struct host_cmd_tlv_ssid {
        u8 ssid[0];
 } __packed;
 
+struct host_cmd_tlv_bcast_ssid {
+       struct host_cmd_tlv tlv;
+       u8 bcast_ctl;
+} __packed;
+
 struct host_cmd_tlv_beacon_period {
        struct host_cmd_tlv tlv;
        __le16 period;
index 76dfbc42a732fc92530fc68a7b8573ea0f54d437..8173ab66066ddf3e83e402cc45f92f808f4f26c3 100644 (file)
@@ -132,6 +132,7 @@ mwifiex_uap_bss_param_prepare(u8 *tlv, void *cmd_buf, u16 *param_size)
        struct host_cmd_tlv_dtim_period *dtim_period;
        struct host_cmd_tlv_beacon_period *beacon_period;
        struct host_cmd_tlv_ssid *ssid;
+       struct host_cmd_tlv_bcast_ssid *bcast_ssid;
        struct host_cmd_tlv_channel_band *chan_band;
        struct host_cmd_tlv_frag_threshold *frag_threshold;
        struct host_cmd_tlv_rts_threshold *rts_threshold;
@@ -153,6 +154,14 @@ mwifiex_uap_bss_param_prepare(u8 *tlv, void *cmd_buf, u16 *param_size)
                cmd_size += sizeof(struct host_cmd_tlv) +
                            bss_cfg->ssid.ssid_len;
                tlv += sizeof(struct host_cmd_tlv) + bss_cfg->ssid.ssid_len;
+
+               bcast_ssid = (struct host_cmd_tlv_bcast_ssid *)tlv;
+               bcast_ssid->tlv.type = cpu_to_le16(TLV_TYPE_UAP_BCAST_SSID);
+               bcast_ssid->tlv.len =
+                               cpu_to_le16(sizeof(bcast_ssid->bcast_ctl));
+               bcast_ssid->bcast_ctl = bss_cfg->bcast_ssid_ctl;
+               cmd_size += sizeof(struct host_cmd_tlv_bcast_ssid);
+               tlv += sizeof(struct host_cmd_tlv_bcast_ssid);
        }
        if (bss_cfg->channel && bss_cfg->channel <= MAX_CHANNEL_BAND_BG) {
                chan_band = (struct host_cmd_tlv_channel_band *)tlv;
@@ -416,6 +425,7 @@ int mwifiex_uap_set_channel(struct mwifiex_private *priv, int channel)
        if (!bss_cfg)
                return -ENOMEM;
 
+       mwifiex_set_sys_config_invalid_data(bss_cfg);
        bss_cfg->band_cfg = BAND_CONFIG_MANUAL;
        bss_cfg->channel = channel;
 
index ca36cccaba31d87b1920f4238aad0be364fce060..8f754025b06ead9b9a2c9d5be62d5d19b871690f 100644 (file)
@@ -396,8 +396,7 @@ struct rt2x00_intf {
         * for hardware which doesn't support hardware
         * sequence counting.
         */
-       spinlock_t seqlock;
-       u16 seqno;
+       atomic_t seqno;
 };
 
 static inline struct rt2x00_intf* vif_to_intf(struct ieee80211_vif *vif)
index b49773ef72f2d0c32e3e4e5f55f2ebac3bd540bc..dd24b2663b5e528e04a0814726ec0f06ceae6cf3 100644 (file)
@@ -277,7 +277,6 @@ int rt2x00mac_add_interface(struct ieee80211_hw *hw,
        else
                rt2x00dev->intf_sta_count++;
 
-       spin_lock_init(&intf->seqlock);
        mutex_init(&intf->beacon_skb_mutex);
        intf->beacon = entry;
 
index 4c662eccf53c60e32e352ac96259268103b539f7..2fd830103415dca94a8d582ec1de91c93b074cb0 100644 (file)
@@ -207,6 +207,7 @@ static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev,
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
        struct rt2x00_intf *intf = vif_to_intf(tx_info->control.vif);
+       u16 seqno;
 
        if (!(tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ))
                return;
@@ -238,15 +239,13 @@ static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev,
         * sequence counting per-frame, since those will override the
         * sequence counter given by mac80211.
         */
-       spin_lock(&intf->seqlock);
-
        if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
-               intf->seqno += 0x10;
-       hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
-       hdr->seq_ctrl |= cpu_to_le16(intf->seqno);
-
-       spin_unlock(&intf->seqlock);
+               seqno = atomic_add_return(0x10, &intf->seqno);
+       else
+               seqno = atomic_read(&intf->seqno);
 
+       hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
+       hdr->seq_ctrl |= cpu_to_le16(seqno);
 }
 
 static void rt2x00queue_create_tx_descriptor_plcp(struct rt2x00_dev *rt2x00dev,
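
The seqlock above protected a plain read-modify-write of a 16-bit counter; atomic_add_return() makes the increment lock-free, and frames that are not first fragments simply read the current value. A hedged sketch of the pattern, with hypothetical names (the 802.11 sequence counter occupies bits 4..15 of seq_ctrl, hence the step of 0x10):

#include <linux/ieee80211.h>
#include <linux/atomic.h>

static __le16 assign_seq_ctrl(atomic_t *seqno, __le16 seq_ctrl,
                              bool first_fragment)
{
        u16 val;

        if (first_fragment)
                val = (u16)atomic_add_return(0x10, seqno);
        else
                val = (u16)atomic_read(seqno);

        /* keep mac80211's fragment bits, replace the sequence bits */
        seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
        seq_ctrl |= cpu_to_le16(val);
        return seq_ctrl;
}
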
index 2e0de2f5f0f92d22a1e384dab601c72912216f9a..c2d5b495c179a1021dd4cd4221c0032f3a99e34a 100644 (file)
@@ -117,7 +117,7 @@ static void rtl8187_led_brightness_set(struct led_classdev *led_dev,
                        radio_on = true;
                } else if (radio_on) {
                        radio_on = false;
-                       cancel_delayed_work_sync(&priv->led_on);
+                       cancel_delayed_work(&priv->led_on);
                        ieee80211_queue_delayed_work(hw, &priv->led_off, 0);
                }
        } else if (radio_on) {
index abb1650940d21f11babe11aaf3730e434c5f0e38..826fc580757778dd1bfa6345edc680e0406bc6db 100644 (file)
@@ -27,7 +27,12 @@ union hmark_ports {
                __u16   src;
                __u16   dst;
        } p16;
+       struct {
+               __be16  src;
+               __be16  dst;
+       } b16;
        __u32   v32;
+       __be32  b32;
 };
 
 struct xt_hmark_info {
index 4c5b63283377449ff94252f1ff64e2a8b16fddcf..5f359dbfcdce5bbf40b9fe5cada0d59d60981e5f 100644 (file)
@@ -69,16 +69,16 @@ union tcp_word_hdr {
 #define tcp_flag_word(tp) ( ((union tcp_word_hdr *)(tp))->words [3]) 
 
 enum { 
-       TCP_FLAG_CWR = __cpu_to_be32(0x00800000),
-       TCP_FLAG_ECE = __cpu_to_be32(0x00400000),
-       TCP_FLAG_URG = __cpu_to_be32(0x00200000),
-       TCP_FLAG_ACK = __cpu_to_be32(0x00100000),
-       TCP_FLAG_PSH = __cpu_to_be32(0x00080000),
-       TCP_FLAG_RST = __cpu_to_be32(0x00040000),
-       TCP_FLAG_SYN = __cpu_to_be32(0x00020000),
-       TCP_FLAG_FIN = __cpu_to_be32(0x00010000),
-       TCP_RESERVED_BITS = __cpu_to_be32(0x0F000000),
-       TCP_DATA_OFFSET = __cpu_to_be32(0xF0000000)
+       TCP_FLAG_CWR = __constant_cpu_to_be32(0x00800000),
+       TCP_FLAG_ECE = __constant_cpu_to_be32(0x00400000),
+       TCP_FLAG_URG = __constant_cpu_to_be32(0x00200000),
+       TCP_FLAG_ACK = __constant_cpu_to_be32(0x00100000),
+       TCP_FLAG_PSH = __constant_cpu_to_be32(0x00080000),
+       TCP_FLAG_RST = __constant_cpu_to_be32(0x00040000),
+       TCP_FLAG_SYN = __constant_cpu_to_be32(0x00020000),
+       TCP_FLAG_FIN = __constant_cpu_to_be32(0x00010000),
+       TCP_RESERVED_BITS = __constant_cpu_to_be32(0x0F000000),
+       TCP_DATA_OFFSET = __constant_cpu_to_be32(0xF0000000)
 }; 
 
 /*
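
The switch to __constant_cpu_to_be32() above matters because enumerator values must be integer constant expressions: the plain __cpu_to_be32() form can route through __builtin_constant_p() and an inline-function fallback, which g++ rejects inside an enum. A hand-rolled illustration of a swap macro that always folds (a demonstration only, not the kernel macro; the values assume a little-endian target):

#define CONST_SWAB32(x) ((((x) & 0x000000ffUL) << 24) | \
                         (((x) & 0x0000ff00UL) <<  8) | \
                         (((x) & 0x00ff0000UL) >>  8) | \
                         (((x) & 0xff000000UL) >> 24))

/* valid in both C and C++, because each initializer is a pure
 * integer constant expression */
enum {
        DEMO_FLAG_SYN = CONST_SWAB32(0x00020000UL),
        DEMO_FLAG_FIN = CONST_SWAB32(0x00010000UL),
};
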
index b94765e38e8074aa5a877c90b395f0bc8cb6bb4a..2040bff945d4562e0c0129d078a26a0a1be34672 100644 (file)
@@ -40,7 +40,10 @@ struct inet_peer {
        u32                     pmtu_orig;
        u32                     pmtu_learned;
        struct inetpeer_addr_base redirect_learned;
-       struct list_head        gc_list;
+       union {
+               struct list_head        gc_list;
+               struct rcu_head     gc_rcu;
+       };
        /*
         * Once inet_peer is queued for deletion (refcnt == -1), following fields
         * are not available: rid, ip_id_count, tcp_ts, tcp_ts_stamp
index ed2b78e2375d0de3ad537f4b901630d0ffa7b69d..98705468ac0329c884bfd0e2663422b73de565b8 100644 (file)
@@ -130,9 +130,9 @@ static inline struct rtable *ip_route_output(struct net *net, __be32 daddr,
 {
        struct flowi4 fl4 = {
                .flowi4_oif = oif,
+               .flowi4_tos = tos,
                .daddr = daddr,
                .saddr = saddr,
-               .flowi4_tos = tos,
        };
        return ip_route_output_key(net, &fl4);
 }
index 55ce96b53b092e3ca04db6eaa7d6888fb9f478bc..9d7d54a00e63f28feb80942bcaf7afb7ee68cd5c 100644 (file)
@@ -220,13 +220,16 @@ struct tcf_proto {
 
 struct qdisc_skb_cb {
        unsigned int            pkt_len;
-       unsigned char           data[24];
+       u16                     bond_queue_mapping;
+       u16                     _pad;
+       unsigned char           data[20];
 };
 
 static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
 {
        struct qdisc_skb_cb *qcb;
-       BUILD_BUG_ON(sizeof(skb->cb) < sizeof(unsigned int) + sz);
+
+       BUILD_BUG_ON(sizeof(skb->cb) < offsetof(struct qdisc_skb_cb, data) + sz);
        BUILD_BUG_ON(sizeof(qcb->data) < sz);
 }
 
index 0301b328cf0fe04cf39f302ab6061bdbc288c42b..86852963b7f708b92e4596c63a2a2960d8676cda 100644 (file)
@@ -1208,9 +1208,7 @@ static int atalk_connect(struct socket *sock, struct sockaddr *uaddr,
        if (addr->sat_addr.s_node == ATADDR_BCAST &&
            !sock_flag(sk, SOCK_BROADCAST)) {
 #if 1
-               printk(KERN_WARNING "%s is broken and did not set "
-                                   "SO_BROADCAST. It will break when 2.2 is "
-                                   "released.\n",
+               pr_warn("atalk_connect: %s is broken and did not set SO_BROADCAST.\n",
                        current->comm);
 #else
                return -EACCES;
index 46e7f86acfc99f820b66564f553dc64fe8fbcbac..3e18af4dadc442573960b94abefed550212b9b47 100644 (file)
@@ -210,7 +210,7 @@ struct sock *bt_accept_dequeue(struct sock *parent, struct socket *newsock)
                }
 
                if (sk->sk_state == BT_CONNECTED || !newsock ||
-                   test_bit(BT_DEFER_SETUP, &bt_sk(parent)->flags)) {
+                   test_bit(BT_SK_DEFER_SETUP, &bt_sk(parent)->flags)) {
                        bt_accept_unlink(sk);
                        if (newsock)
                                sock_graft(sk, newsock);
index ea5fb9fcc3f5937777db311ea88a75ae3f4b81f4..d23b6682f4e95cfd029cd19db31252184ec03d2d 100644 (file)
@@ -36,9 +36,6 @@
 #define TRACE_ON 1
 #define TRACE_OFF 0
 
-static void send_dm_alert(struct work_struct *unused);
-
-
 /*
  * Globals, our netlink socket pointer
  * and the work handle that will send up
@@ -48,11 +45,10 @@ static int trace_state = TRACE_OFF;
 static DEFINE_MUTEX(trace_state_mutex);
 
 struct per_cpu_dm_data {
-       struct work_struct dm_alert_work;
-       struct sk_buff __rcu *skb;
-       atomic_t dm_hit_count;
-       struct timer_list send_timer;
-       int cpu;
+       spinlock_t              lock;
+       struct sk_buff          *skb;
+       struct work_struct      dm_alert_work;
+       struct timer_list       send_timer;
 };
 
 struct dm_hw_stat_delta {
@@ -78,13 +74,13 @@ static int dm_delay = 1;
 static unsigned long dm_hw_check_delta = 2*HZ;
 static LIST_HEAD(hw_stats_list);
 
-static void reset_per_cpu_data(struct per_cpu_dm_data *data)
+static struct sk_buff *reset_per_cpu_data(struct per_cpu_dm_data *data)
 {
        size_t al;
        struct net_dm_alert_msg *msg;
        struct nlattr *nla;
        struct sk_buff *skb;
-       struct sk_buff *oskb = rcu_dereference_protected(data->skb, 1);
+       unsigned long flags;
 
        al = sizeof(struct net_dm_alert_msg);
        al += dm_hit_limit * sizeof(struct net_dm_drop_point);
@@ -99,65 +95,40 @@ static void reset_per_cpu_data(struct per_cpu_dm_data *data)
                                  sizeof(struct net_dm_alert_msg));
                msg = nla_data(nla);
                memset(msg, 0, al);
-       } else
-               schedule_work_on(data->cpu, &data->dm_alert_work);
-
-       /*
-        * Don't need to lock this, since we are guaranteed to only
-        * run this on a single cpu at a time.
-        * Note also that we only update data->skb if the old and new skb
-        * pointers don't match.  This ensures that we don't continually call
-        * synchornize_rcu if we repeatedly fail to alloc a new netlink message.
-        */
-       if (skb != oskb) {
-               rcu_assign_pointer(data->skb, skb);
-
-               synchronize_rcu();
-
-               atomic_set(&data->dm_hit_count, dm_hit_limit);
+       } else {
+               mod_timer(&data->send_timer, jiffies + HZ / 10);
        }
 
+       spin_lock_irqsave(&data->lock, flags);
+       swap(data->skb, skb);
+       spin_unlock_irqrestore(&data->lock, flags);
+
+       return skb;
 }
 
-static void send_dm_alert(struct work_struct *unused)
+static void send_dm_alert(struct work_struct *work)
 {
        struct sk_buff *skb;
-       struct per_cpu_dm_data *data = &get_cpu_var(dm_cpu_data);
+       struct per_cpu_dm_data *data;
 
-       WARN_ON_ONCE(data->cpu != smp_processor_id());
+       data = container_of(work, struct per_cpu_dm_data, dm_alert_work);
 
-       /*
-        * Grab the skb we're about to send
-        */
-       skb = rcu_dereference_protected(data->skb, 1);
-
-       /*
-        * Replace it with a new one
-        */
-       reset_per_cpu_data(data);
+       skb = reset_per_cpu_data(data);
 
-       /*
-        * Ship it!
-        */
        if (skb)
                genlmsg_multicast(skb, 0, NET_DM_GRP_ALERT, GFP_KERNEL);
-
-       put_cpu_var(dm_cpu_data);
 }
 
 /*
  * This is the timer function to delay the sending of an alert
  * in the event that more drops will arrive during the
- * hysteresis period.  Note that it operates under the timer interrupt
- * so we don't need to disable preemption here
+ * hysteresis period.
  */
-static void sched_send_work(unsigned long unused)
+static void sched_send_work(unsigned long _data)
 {
-       struct per_cpu_dm_data *data =  &get_cpu_var(dm_cpu_data);
-
-       schedule_work_on(smp_processor_id(), &data->dm_alert_work);
+       struct per_cpu_dm_data *data = (struct per_cpu_dm_data *)_data;
 
-       put_cpu_var(dm_cpu_data);
+       schedule_work(&data->dm_alert_work);
 }
 
 static void trace_drop_common(struct sk_buff *skb, void *location)
@@ -167,33 +138,28 @@ static void trace_drop_common(struct sk_buff *skb, void *location)
        struct nlattr *nla;
        int i;
        struct sk_buff *dskb;
-       struct per_cpu_dm_data *data = &get_cpu_var(dm_cpu_data);
-
+       struct per_cpu_dm_data *data;
+       unsigned long flags;
 
-       rcu_read_lock();
-       dskb = rcu_dereference(data->skb);
+       local_irq_save(flags);
+       data = &__get_cpu_var(dm_cpu_data);
+       spin_lock(&data->lock);
+       dskb = data->skb;
 
        if (!dskb)
                goto out;
 
-       if (!atomic_add_unless(&data->dm_hit_count, -1, 0)) {
-               /*
-                * we're already at zero, discard this hit
-                */
-               goto out;
-       }
-
        nlh = (struct nlmsghdr *)dskb->data;
        nla = genlmsg_data(nlmsg_data(nlh));
        msg = nla_data(nla);
        for (i = 0; i < msg->entries; i++) {
                if (!memcmp(&location, msg->points[i].pc, sizeof(void *))) {
                        msg->points[i].count++;
-                       atomic_inc(&data->dm_hit_count);
                        goto out;
                }
        }
-
+       if (msg->entries == dm_hit_limit)
+               goto out;
        /*
         * We need to create a new entry
         */
@@ -205,13 +171,11 @@ static void trace_drop_common(struct sk_buff *skb, void *location)
 
        if (!timer_pending(&data->send_timer)) {
                data->send_timer.expires = jiffies + dm_delay * HZ;
-               add_timer_on(&data->send_timer, smp_processor_id());
+               add_timer(&data->send_timer);
        }
 
 out:
-       rcu_read_unlock();
-       put_cpu_var(dm_cpu_data);
-       return;
+       spin_unlock_irqrestore(&data->lock, flags);
 }
 
 static void trace_kfree_skb_hit(void *ignore, struct sk_buff *skb, void *location)
@@ -418,11 +382,11 @@ static int __init init_net_drop_monitor(void)
 
        for_each_possible_cpu(cpu) {
                data = &per_cpu(dm_cpu_data, cpu);
-               data->cpu = cpu;
                INIT_WORK(&data->dm_alert_work, send_dm_alert);
                init_timer(&data->send_timer);
-               data->send_timer.data = cpu;
+               data->send_timer.data = (unsigned long)data;
                data->send_timer.function = sched_send_work;
+               spin_lock_init(&data->lock);
                reset_per_cpu_data(data);
        }
 
index a3eddb515d1b282dc9dd8c597e09d8476de7916d..d4ce2dc712e34b7b1cb974c5e938313f58e9a8aa 100644 (file)
@@ -616,9 +616,9 @@ static int __sk_prepare_filter(struct sk_filter *fp)
 /**
  *     sk_unattached_filter_create - create an unattached filter
  *     @fprog: the filter program
- *     @sk: the socket to use
+ *     @pfp: the unattached filter that is created
  *
- * Create a filter independent ofr any socket. We first run some
+ * Create a filter independent of any socket. We first run some
  * sanity checks on it to make sure it does not explode on us later.
  * If an error occurs or there is insufficient memory for the filter
  * a negative errno code is returned. On success the return is zero.
index eb09f8bbbf075bcc10f3335198dc8e097c2f9316..d81d026138f0810471ce4cf2540c0ec4229d853f 100644 (file)
@@ -2219,9 +2219,7 @@ static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
        rcu_read_lock_bh();
        nht = rcu_dereference_bh(tbl->nht);
 
-       for (h = 0; h < (1 << nht->hash_shift); h++) {
-               if (h < s_h)
-                       continue;
+       for (h = s_h; h < (1 << nht->hash_shift); h++) {
                if (h > s_h)
                        s_idx = 0;
                for (n = rcu_dereference_bh(nht->hash_buckets[h]), idx = 0;
@@ -2260,9 +2258,7 @@ static int pneigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
 
        read_lock_bh(&tbl->lock);
 
-       for (h = 0; h <= PNEIGH_HASHMASK; h++) {
-               if (h < s_h)
-                       continue;
+       for (h = s_h; h <= PNEIGH_HASHMASK; h++) {
                if (h > s_h)
                        s_idx = 0;
                for (n = tbl->phash_buckets[h], idx = 0; n; n = n->next) {
@@ -2297,7 +2293,7 @@ static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
        struct neigh_table *tbl;
        int t, family, s_t;
        int proxy = 0;
-       int err = 0;
+       int err;
 
        read_lock(&neigh_tbl_lock);
        family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family;
@@ -2311,7 +2307,7 @@ static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
 
        s_t = cb->args[0];
 
-       for (tbl = neigh_tables, t = 0; tbl && (err >= 0);
+       for (tbl = neigh_tables, t = 0; tbl;
             tbl = tbl->next, t++) {
                if (t < s_t || (family && tbl->family != family))
                        continue;
@@ -2322,6 +2318,8 @@ static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
                        err = pneigh_dump_table(tbl, skb, cb);
                else
                        err = neigh_dump_table(tbl, skb, cb);
+               if (err < 0)
+                       break;
        }
        read_unlock(&neigh_tbl_lock);
 
index 3d84fb9d88739629b32c77f1a1d77c7f55ad7630..f9f40b932e4b855fc1a4dc3b3c74620efdc4f970 100644 (file)
@@ -362,22 +362,23 @@ EXPORT_SYMBOL(netpoll_send_skb_on_dev);
 
 void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
 {
-       int total_len, eth_len, ip_len, udp_len;
+       int total_len, ip_len, udp_len;
        struct sk_buff *skb;
        struct udphdr *udph;
        struct iphdr *iph;
        struct ethhdr *eth;
 
        udp_len = len + sizeof(*udph);
-       ip_len = eth_len = udp_len + sizeof(*iph);
-       total_len = eth_len + ETH_HLEN + NET_IP_ALIGN;
+       ip_len = udp_len + sizeof(*iph);
+       total_len = ip_len + LL_RESERVED_SPACE(np->dev);
 
-       skb = find_skb(np, total_len, total_len - len);
+       skb = find_skb(np, total_len + np->dev->needed_tailroom,
+                      total_len - len);
        if (!skb)
                return;
 
        skb_copy_to_linear_data(skb, msg, len);
-       skb->len += len;
+       skb_put(skb, len);
 
        skb_push(skb, sizeof(*udph));
        skb_reset_transport_header(skb);
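
Two things are fixed above: the buffer now reserves the device's true link-layer headroom (LL_RESERVED_SPACE() plus needed_tailroom) rather than a bare ETH_HLEN, and the payload is appended with skb_put(), which advances skb->tail together with skb->len; the removed "skb->len += len" left tail behind, so the later skb_push() calls could overlap the payload. A hedged sketch of the sizing and fill, with hypothetical names:

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/udp.h>
#include <linux/ip.h>

static struct sk_buff *alloc_udp_payload(struct net_device *dev,
                                         const char *msg, int len)
{
        int udp_len   = len + sizeof(struct udphdr);
        int ip_len    = udp_len + sizeof(struct iphdr);
        /* device-specific hard header + alignment, not a bare ETH_HLEN */
        int total_len = ip_len + LL_RESERVED_SPACE(dev);
        struct sk_buff *skb;

        skb = alloc_skb(total_len + dev->needed_tailroom, GFP_ATOMIC);
        if (!skb)
                return NULL;

        skb_reserve(skb, total_len - len);      /* room for all headers */
        skb_copy_to_linear_data(skb, msg, len);
        skb_put(skb, len);                      /* moves tail with len */
        return skb;
}
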
index 016694d624843c8ca1df3013639ffd4f6ae75f39..d78671e9d545be838f9ab5140c9ee07fb1309c26 100644 (file)
@@ -3361,7 +3361,7 @@ EXPORT_SYMBOL(kfree_skb_partial);
  * @to: prior buffer
  * @from: buffer to add
  * @fragstolen: pointer to boolean
- *
+ * @delta_truesize: how much more was allocated than was requested
  */
 bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
                      bool *fragstolen, int *delta_truesize)
index d4d61b694fab9bc497b1cccb808a3a568ad30cc2..dfba343b25092de39c9a5d1eea5d43226690e984 100644 (file)
@@ -560,6 +560,17 @@ bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout)
 }
 EXPORT_SYMBOL(inet_peer_xrlim_allow);
 
+static void inetpeer_inval_rcu(struct rcu_head *head)
+{
+       struct inet_peer *p = container_of(head, struct inet_peer, gc_rcu);
+
+       spin_lock_bh(&gc_lock);
+       list_add_tail(&p->gc_list, &gc_list);
+       spin_unlock_bh(&gc_lock);
+
+       schedule_delayed_work(&gc_work, gc_delay);
+}
+
 void inetpeer_invalidate_tree(int family)
 {
        struct inet_peer *old, *new, *prev;
@@ -576,10 +587,7 @@ void inetpeer_invalidate_tree(int family)
        prev = cmpxchg(&base->root, old, new);
        if (prev == old) {
                base->total = 0;
-               spin_lock(&gc_lock);
-               list_add_tail(&prev->gc_list, &gc_list);
-               spin_unlock(&gc_lock);
-               schedule_delayed_work(&gc_work, gc_delay);
+               call_rcu(&prev->gc_rcu, inetpeer_inval_rcu);
        }
 
 out:
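
inetpeer_invalidate_tree() detaches the old tree root with cmpxchg() while lookups may still be traversing it under rcu_read_lock(), so the root must not reach the garbage-collection list until a grace period has elapsed; call_rcu() defers the list_add_tail() exactly that long, and the union lets gc_rcu reuse gc_list's storage since the two are never live at once. A minimal sketch of the deferral, with hypothetical names:

#include <linux/rcupdate.h>
#include <linux/list.h>
#include <linux/spinlock.h>

struct node {
        union {                         /* never used at the same time */
                struct list_head gc_list;
                struct rcu_head  gc_rcu;
        };
};

static LIST_HEAD(gc_pending);
static DEFINE_SPINLOCK(gc_pending_lock);

/* runs only after all pre-existing RCU readers have finished */
static void node_inval_rcu(struct rcu_head *head)
{
        struct node *n = container_of(head, struct node, gc_rcu);

        spin_lock_bh(&gc_pending_lock);
        list_add_tail(&n->gc_list, &gc_pending);
        spin_unlock_bh(&gc_pending_lock);
}

/* detach 'old' from the readers' view, then defer its queuing */
static void invalidate(struct node *old)
{
        call_rcu(&old->gc_rcu, node_inval_rcu);
}
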
index e5c44fc586abe7157f8b75b8f164a7222ae8548c..ab09b126423ce3e56fd1fee2f6bda54e4b851022 100644 (file)
@@ -44,6 +44,7 @@ static int ip_forward_finish(struct sk_buff *skb)
        struct ip_options *opt  = &(IPCB(skb)->opt);
 
        IP_INC_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTFORWDATAGRAMS);
+       IP_ADD_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTOCTETS, skb->len);
 
        if (unlikely(opt->optlen))
                ip_forward_options(skb);
index a9e519ad6db53d544c73d145724602cde1e3f48e..c94bbc6f2ba331bb9e261692151f55f78277258b 100644 (file)
@@ -1574,6 +1574,7 @@ static inline int ipmr_forward_finish(struct sk_buff *skb)
        struct ip_options *opt = &(IPCB(skb)->opt);
 
        IP_INC_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTFORWDATAGRAMS);
+       IP_ADD_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTOCTETS, skb->len);
 
        if (unlikely(opt->optlen))
                ip_forward_options(skb);
index 0c220a416626af196f534ab062920c169f6dcd7a..74c21b924a7900d7d3e68791750011e689b185e8 100644 (file)
@@ -1561,7 +1561,7 @@ static int fib6_age(struct rt6_info *rt, void *arg)
                                neigh_flags = neigh->flags;
                                neigh_release(neigh);
                        }
-                       if (neigh_flags & NTF_ROUTER) {
+                       if (!(neigh_flags & NTF_ROUTER)) {
                                RT6_TRACE("purging route %p via non-router but gateway\n",
                                          rt);
                                return -1;
index 17b8c67998bb80dc5e7052af210c7b64aa1471ee..decc21d19c53e4b0c073b44e02deba46b6eac432 100644 (file)
@@ -526,6 +526,7 @@ int ip6_forward(struct sk_buff *skb)
        hdr->hop_limit--;
 
        IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS);
+       IP6_ADD_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTOCTETS, skb->len);
        return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD, skb, skb->dev, dst->dev,
                       ip6_forward_finish);
 
index b15dc08643a42f5a45ea9bb0bd2bd0ec1865a377..461e47c8e95620456e83710eaf99643c1382c8fc 100644 (file)
@@ -1886,6 +1886,8 @@ static inline int ip6mr_forward2_finish(struct sk_buff *skb)
 {
        IP6_INC_STATS_BH(dev_net(skb_dst(skb)->dev), ip6_dst_idev(skb_dst(skb)),
                         IPSTATS_MIB_OUTFORWDATAGRAMS);
+       IP6_ADD_STATS_BH(dev_net(skb_dst(skb)->dev), ip6_dst_idev(skb_dst(skb)),
+                        IPSTATS_MIB_OUTOCTETS, skb->len);
        return dst_output(skb);
 }
 
index 443591d629caadff0da53b2c861b8748e61edf7e..185f12f4a5fa21a90bbce5641b7d751722d8c9ca 100644 (file)
@@ -162,6 +162,7 @@ static void l2tp_eth_delete(struct l2tp_session *session)
                if (dev) {
                        unregister_netdev(dev);
                        spriv->dev = NULL;
+                       module_put(THIS_MODULE);
                }
        }
 }
@@ -249,6 +250,7 @@ static int l2tp_eth_create(struct net *net, u32 tunnel_id, u32 session_id, u32 p
        if (rc < 0)
                goto out_del_dev;
 
+       __module_get(THIS_MODULE);
        /* Must be done after register_netdev() */
        strlcpy(session->ifname, dev->name, IFNAMSIZ);
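
The refcounting added above pins the module for the lifetime of the session's net_device: take a reference once register_netdev() has succeeded, drop it after unregister_netdev() in the delete path, so rmmod cannot free the ops while a device still uses them. A hedged sketch of the pairing, with hypothetical helpers:

static int my_dev_create(struct net_device *dev)
{
        int rc = register_netdev(dev);

        if (rc < 0)
                return rc;
        __module_get(THIS_MODULE);      /* device now pins this module */
        return 0;
}

static void my_dev_delete(struct net_device *dev)
{
        unregister_netdev(dev);
        module_put(THIS_MODULE);        /* last user gone, allow unload */
}
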
 
index 70614e7affabded003aef1bf5ed4c097d4b515fa..61d8b75d2686c0272a1618887c08c34de2b4abe5 100644 (file)
@@ -464,10 +464,12 @@ static int l2tp_ip_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m
                                           sk->sk_bound_dev_if);
                if (IS_ERR(rt))
                        goto no_route;
-               if (connected)
+               if (connected) {
                        sk_setup_caps(sk, &rt->dst);
-               else
-                       dst_release(&rt->dst); /* safe since we hold rcu_read_lock */
+               } else {
+                       skb_dst_set(skb, &rt->dst);
+                       goto xmit;
+               }
        }
 
        /* We dont need to clone dst here, it is guaranteed to not disappear.
@@ -475,6 +477,7 @@ static int l2tp_ip_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m
         */
        skb_dst_set_noref(skb, &rt->dst);
 
+xmit:
        /* Queue the packet to IP for output */
        rc = ip_queue_xmit(skb, &inet->cork.fl);
        rcu_read_unlock();
index 26ddb699d693dcbc2f2610fdd4a35b1784b04b8c..c649188314cce99c17fca198e75d597c24701197 100644 (file)
@@ -145,15 +145,20 @@ static void sta_rx_agg_session_timer_expired(unsigned long data)
        struct tid_ampdu_rx *tid_rx;
        unsigned long timeout;
 
+       rcu_read_lock();
        tid_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[*ptid]);
-       if (!tid_rx)
+       if (!tid_rx) {
+               rcu_read_unlock();
                return;
+       }
 
        timeout = tid_rx->last_rx + TU_TO_JIFFIES(tid_rx->timeout);
        if (time_is_after_jiffies(timeout)) {
                mod_timer(&tid_rx->session_timer, timeout);
+               rcu_read_unlock();
                return;
        }
+       rcu_read_unlock();
 
 #ifdef CONFIG_MAC80211_HT_DEBUG
        printk(KERN_DEBUG "rx session timer expired on tid %d\n", (u16)*ptid);
index 495831ee48f1b007abe235852968e9fd3bd188b5..e9cecca5c44d1a4b9134de59c6ee9afb7418e0e5 100644 (file)
@@ -533,16 +533,16 @@ static void ieee80211_get_et_stats(struct wiphy *wiphy,
                sinfo.filled = 0;
                sta_set_sinfo(sta, &sinfo);
 
-               if (sinfo.filled | STATION_INFO_TX_BITRATE)
+               if (sinfo.filled & STATION_INFO_TX_BITRATE)
                        data[i] = 100000 *
                                cfg80211_calculate_bitrate(&sinfo.txrate);
                i++;
-               if (sinfo.filled | STATION_INFO_RX_BITRATE)
+               if (sinfo.filled & STATION_INFO_RX_BITRATE)
                        data[i] = 100000 *
                                cfg80211_calculate_bitrate(&sinfo.rxrate);
                i++;
 
-               if (sinfo.filled | STATION_INFO_SIGNAL_AVG)
+               if (sinfo.filled & STATION_INFO_SIGNAL_AVG)
                        data[i] = (u8)sinfo.signal_avg;
                i++;
        } else {
index d4c19a7773db24b12bacf0407330ebccd75ba772..8664111d05663d47678f2088f4169a6ab80fdb96 100644 (file)
@@ -637,6 +637,18 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
                ieee80211_configure_filter(local);
                break;
        default:
+               mutex_lock(&local->mtx);
+               if (local->hw_roc_dev == sdata->dev &&
+                   local->hw_roc_channel) {
+                       /* ignore return value since this is racy */
+                       drv_cancel_remain_on_channel(local);
+                       ieee80211_queue_work(&local->hw, &local->hw_roc_done);
+               }
+               mutex_unlock(&local->mtx);
+
+               flush_work(&local->hw_roc_start);
+               flush_work(&local->hw_roc_done);
+
                flush_work(&sdata->work);
                /*
                 * When we get here, the interface is marked down.
index 04c3063089874fa7c8eb5ded233be3e5d7b86502..91d84cc77bbf7f9a3bda9b7ca1d04677b0672e7f 100644 (file)
@@ -1220,6 +1220,22 @@ static void ieee80211_sta_wmm_params(struct ieee80211_local *local,
        sdata->vif.bss_conf.qos = true;
 }
 
+static void __ieee80211_stop_poll(struct ieee80211_sub_if_data *sdata)
+{
+       lockdep_assert_held(&sdata->local->mtx);
+
+       sdata->u.mgd.flags &= ~(IEEE80211_STA_CONNECTION_POLL |
+                               IEEE80211_STA_BEACON_POLL);
+       ieee80211_run_deferred_scan(sdata->local);
+}
+
+static void ieee80211_stop_poll(struct ieee80211_sub_if_data *sdata)
+{
+       mutex_lock(&sdata->local->mtx);
+       __ieee80211_stop_poll(sdata);
+       mutex_unlock(&sdata->local->mtx);
+}
+
 static u32 ieee80211_handle_bss_capability(struct ieee80211_sub_if_data *sdata,
                                           u16 capab, bool erp_valid, u8 erp)
 {
@@ -1285,8 +1301,7 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata,
        sdata->u.mgd.flags |= IEEE80211_STA_RESET_SIGNAL_AVE;
 
        /* just to be sure */
-       sdata->u.mgd.flags &= ~(IEEE80211_STA_CONNECTION_POLL |
-                               IEEE80211_STA_BEACON_POLL);
+       ieee80211_stop_poll(sdata);
 
        ieee80211_led_assoc(local, 1);
 
@@ -1456,8 +1471,7 @@ static void ieee80211_reset_ap_probe(struct ieee80211_sub_if_data *sdata)
                return;
        }
 
-       ifmgd->flags &= ~(IEEE80211_STA_CONNECTION_POLL |
-                         IEEE80211_STA_BEACON_POLL);
+       __ieee80211_stop_poll(sdata);
 
        mutex_lock(&local->iflist_mtx);
        ieee80211_recalc_ps(local, -1);
@@ -1477,7 +1491,6 @@ static void ieee80211_reset_ap_probe(struct ieee80211_sub_if_data *sdata)
                  round_jiffies_up(jiffies +
                                   IEEE80211_CONNECTION_IDLE_TIME));
 out:
-       ieee80211_run_deferred_scan(local);
        mutex_unlock(&local->mtx);
 }
 
@@ -2408,7 +2421,11 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
                net_dbg_ratelimited("%s: cancelling probereq poll due to a received beacon\n",
                                    sdata->name);
 #endif
+               mutex_lock(&local->mtx);
                ifmgd->flags &= ~IEEE80211_STA_BEACON_POLL;
+               ieee80211_run_deferred_scan(local);
+               mutex_unlock(&local->mtx);
+
                mutex_lock(&local->iflist_mtx);
                ieee80211_recalc_ps(local, -1);
                mutex_unlock(&local->iflist_mtx);
@@ -2595,8 +2612,7 @@ static void ieee80211_sta_connection_lost(struct ieee80211_sub_if_data *sdata,
        struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
        u8 frame_buf[DEAUTH_DISASSOC_LEN];
 
-       ifmgd->flags &= ~(IEEE80211_STA_CONNECTION_POLL |
-                         IEEE80211_STA_BEACON_POLL);
+       ieee80211_stop_poll(sdata);
 
        ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH, reason,
                               false, frame_buf);
@@ -2874,8 +2890,7 @@ static void ieee80211_restart_sta_timer(struct ieee80211_sub_if_data *sdata)
        u32 flags;
 
        if (sdata->vif.type == NL80211_IFTYPE_STATION) {
-               sdata->u.mgd.flags &= ~(IEEE80211_STA_BEACON_POLL |
-                                       IEEE80211_STA_CONNECTION_POLL);
+               __ieee80211_stop_poll(sdata);
 
                /* let's probe the connection once */
                flags = sdata->local->hw.flags;
@@ -2944,7 +2959,10 @@ void ieee80211_sta_restart(struct ieee80211_sub_if_data *sdata)
        if (test_and_clear_bit(TMR_RUNNING_CHANSW, &ifmgd->timers_running))
                add_timer(&ifmgd->chswitch_timer);
        ieee80211_sta_reset_beacon_monitor(sdata);
+
+       mutex_lock(&sdata->local->mtx);
        ieee80211_restart_sta_timer(sdata);
+       mutex_unlock(&sdata->local->mtx);
 }
 #endif
 
@@ -3106,7 +3124,7 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
        }
 
        local->oper_channel = cbss->channel;
-       ieee80211_hw_config(local, 0);
+       ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
 
        if (!have_sta) {
                u32 rates = 0, basic_rates = 0;
index f054e94901a295443abcae7c3cfddda963f8d7c4..935aa4b6deee0220737ee4c1cebad69472dea343 100644 (file)
@@ -234,6 +234,22 @@ static void ieee80211_hw_roc_done(struct work_struct *work)
                return;
        }
 
+       /* was never transmitted */
+       if (local->hw_roc_skb) {
+               u64 cookie;
+
+               cookie = local->hw_roc_cookie ^ 2;
+
+               cfg80211_mgmt_tx_status(local->hw_roc_dev, cookie,
+                                       local->hw_roc_skb->data,
+                                       local->hw_roc_skb->len, false,
+                                       GFP_KERNEL);
+
+               kfree_skb(local->hw_roc_skb);
+               local->hw_roc_skb = NULL;
+               local->hw_roc_skb_for_status = NULL;
+       }
+
        if (!local->hw_roc_for_tx)
                cfg80211_remain_on_channel_expired(local->hw_roc_dev,
                                                   local->hw_roc_cookie,
index f5b1638fbf8092a5ac30eebaea5b46b0b840867a..de455f8bbb91c0ffbedd2c911623380946d07d0b 100644
@@ -378,7 +378,7 @@ static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU)
        /* make the station visible */
        sta_info_hash_add(local, sta);
 
-       list_add(&sta->list, &local->sta_list);
+       list_add_rcu(&sta->list, &local->sta_list);
 
        set_sta_flag(sta, WLAN_STA_INSERTED);
 
@@ -688,7 +688,7 @@ int __must_check __sta_info_destroy(struct sta_info *sta)
        if (ret)
                return ret;
 
-       list_del(&sta->list);
+       list_del_rcu(&sta->list);
 
        mutex_lock(&local->key_mtx);
        for (i = 0; i < NUM_DEFAULT_KEYS; i++)
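
Switching the sta_list manipulation from list_add()/list_del() to the _rcu
variants matters because readers that walk the list under rcu_read_lock() alone,
sharing no lock with the writer, must never observe a half-updated next/prev
pair. A minimal sketch of the general pattern (a generic item type, not the
mac80211 structures; actually freeing a removed entry must additionally wait out
a grace period, e.g. with kfree_rcu()):

	#include <linux/list.h>
	#include <linux/rculist.h>
	#include <linux/spinlock.h>
	#include <linux/types.h>

	struct item {
		struct list_head list;
		int val;
	};

	static LIST_HEAD(items);
	static DEFINE_SPINLOCK(items_lock);

	/* writer: publish with the RCU variant so a concurrent reader
	 * never sees the node half-linked */
	static void add_item(struct item *it)
	{
		spin_lock(&items_lock);
		list_add_rcu(&it->list, &items);
		spin_unlock(&items_lock);
	}

	/* reader: lockless traversal, valid only inside rcu_read_lock() */
	static bool have_val(int val)
	{
		struct item *it;
		bool found = false;

		rcu_read_lock();
		list_for_each_entry_rcu(it, &items, list) {
			if (it->val == val) {
				found = true;
				break;
			}
		}
		rcu_read_unlock();
		return found;
	}
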
index 847215bb2a6fc63c1ed010dbfc9bd2d35cb6b7c0..e453212fa17f741bc380b2cbdabe59ee4f6df5d7 100644
@@ -1737,7 +1737,7 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
        __le16 fc;
        struct ieee80211_hdr hdr;
        struct ieee80211s_hdr mesh_hdr __maybe_unused;
-       struct mesh_path __maybe_unused *mppath = NULL;
+       struct mesh_path __maybe_unused *mppath = NULL, *mpath = NULL;
        const u8 *encaps_data;
        int encaps_len, skip_header_bytes;
        int nh_pos, h_pos;
@@ -1803,8 +1803,11 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
                        goto fail;
                }
                rcu_read_lock();
-               if (!is_multicast_ether_addr(skb->data))
-                       mppath = mpp_path_lookup(skb->data, sdata);
+               if (!is_multicast_ether_addr(skb->data)) {
+                       mpath = mesh_path_lookup(skb->data, sdata);
+                       if (!mpath)
+                               mppath = mpp_path_lookup(skb->data, sdata);
+               }
 
                /*
                 * Use address extension if it is a packet from
index a44c6807df01914a04c5675d1422d765260a8c29..8dd4712620ff53832a212a3d69791e13cd59905f 100644
@@ -1271,7 +1271,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
                        enum ieee80211_sta_state state;
 
                        for (state = IEEE80211_STA_NOTEXIST;
-                            state < sta->sta_state - 1; state++)
+                            state < sta->sta_state; state++)
                                WARN_ON(drv_sta_state(local, sta->sdata, sta,
                                                      state, state + 1));
                }
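
The ieee80211_reconfig() change fixes an off-by-one in replaying station state
to the driver: with the states ordered NOTEXIST < NONE < AUTH < ASSOC <
AUTHORIZED, a station that was in ASSOC needs three transitions
(NOTEXIST->NONE, NONE->AUTH, AUTH->ASSOC), but the old bound
state < sta->sta_state - 1 stopped one short, leaving the driver at AUTH.
A standalone toy loop over hypothetical enum values (mirroring only the
ordering, not the real mac80211 definitions) makes the two bounds visible:

	#include <stdio.h>

	enum sta_state { NOTEXIST, NONE, AUTH, ASSOC, AUTHORIZED };

	int main(void)
	{
		enum sta_state target = ASSOC;
		int state;

		/* old bound: stops after NONE -> AUTH, one step short */
		for (state = NOTEXIST; state < (int)target - 1; state++)
			printf("old: %d -> %d\n", state, state + 1);

		/* fixed bound: last iteration performs AUTH -> ASSOC */
		for (state = NOTEXIST; state < (int)target; state++)
			printf("new: %d -> %d\n", state, state + 1);

		return 0;
	}
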
index 46d69d7f1bb4e15a3116c389131e23fd9a8d28d6..31f50bc3a3124a111ce0c2f75f4f9e22f095243f 100644
@@ -270,9 +270,8 @@ static int expect_rtp_rtcp(struct sk_buff *skb, struct nf_conn *ct,
                return 0;
 
        /* RTP port is even */
-       port &= htons(~1);
-       rtp_port = port;
-       rtcp_port = htons(ntohs(port) + 1);
+       rtp_port = port & ~htons(1);
+       rtcp_port = port | htons(1);
 
        /* Create expect for RTP */
        if ((rtp_exp = nf_ct_expect_alloc(ct)) == NULL)
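
The rewritten expect_rtp_rtcp() pairing works directly on the big-endian port:
because the RTP port is even, its low bit is clear in network byte order too, so
port & ~htons(1) and port | htons(1) yield the RTP and RTCP ports on any host,
avoiding both the ntohs()/htons() round trip and the sparse-unfriendly
htons(~1). A small userspace check of the equivalence (arbitrary even port
chosen for illustration):

	#include <arpa/inet.h>
	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint16_t host = 5004;               /* even RTP port, host order */
		uint16_t be = htons(host);          /* as carried in the packet */

		uint16_t rtp_be  = be & ~htons(1);  /* clear the low port bit */
		uint16_t rtcp_be = be | htons(1);   /* set it: even port + 1 */

		/* identical to the host-order round trip, on any endianness */
		assert(rtp_be  == htons(host & ~1));
		assert(rtcp_be == htons(host + 1));

		printf("rtp=%u rtcp=%u\n", ntohs(rtp_be), ntohs(rtcp_be));
		return 0;
	}
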
index 0a96a43108edde1dcdc251a168f0547c9f277378..1686ca1b53a157d8568ae8ac104aeb0f9415d696 100644
@@ -32,13 +32,13 @@ MODULE_ALIAS("ipt_HMARK");
 MODULE_ALIAS("ip6t_HMARK");
 
 struct hmark_tuple {
-       u32                     src;
-       u32                     dst;
+       __be32                  src;
+       __be32                  dst;
        union hmark_ports       uports;
-       uint8_t                 proto;
+       u8                      proto;
 };
 
-static inline u32 hmark_addr6_mask(const __u32 *addr32, const __u32 *mask)
+static inline __be32 hmark_addr6_mask(const __be32 *addr32, const __be32 *mask)
 {
        return (addr32[0] & mask[0]) ^
               (addr32[1] & mask[1]) ^
@@ -46,8 +46,8 @@ static inline u32 hmark_addr6_mask(const __u32 *addr32, const __u32 *mask)
               (addr32[3] & mask[3]);
 }
 
-static inline u32
-hmark_addr_mask(int l3num, const __u32 *addr32, const __u32 *mask)
+static inline __be32
+hmark_addr_mask(int l3num, const __be32 *addr32, const __be32 *mask)
 {
        switch (l3num) {
        case AF_INET:
@@ -58,6 +58,22 @@ hmark_addr_mask(int l3num, const __u32 *addr32, const __u32 *mask)
        return 0;
 }
 
+static inline void hmark_swap_ports(union hmark_ports *uports,
+                                   const struct xt_hmark_info *info)
+{
+       union hmark_ports hp;
+       u16 src, dst;
+
+       hp.b32 = (uports->b32 & info->port_mask.b32) | info->port_set.b32;
+       src = ntohs(hp.b16.src);
+       dst = ntohs(hp.b16.dst);
+
+       if (dst > src)
+               uports->v32 = (dst << 16) | src;
+       else
+               uports->v32 = (src << 16) | dst;
+}
+
 static int
 hmark_ct_set_htuple(const struct sk_buff *skb, struct hmark_tuple *t,
                    const struct xt_hmark_info *info)
@@ -74,22 +90,19 @@ hmark_ct_set_htuple(const struct sk_buff *skb, struct hmark_tuple *t,
        otuple = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
        rtuple = &ct->tuplehash[IP_CT_DIR_REPLY].tuple;
 
-       t->src = hmark_addr_mask(otuple->src.l3num, otuple->src.u3.all,
-                                info->src_mask.all);
-       t->dst = hmark_addr_mask(otuple->src.l3num, rtuple->src.u3.all,
-                                info->dst_mask.all);
+       t->src = hmark_addr_mask(otuple->src.l3num, otuple->src.u3.ip6,
+                                info->src_mask.ip6);
+       t->dst = hmark_addr_mask(otuple->src.l3num, rtuple->src.u3.ip6,
+                                info->dst_mask.ip6);
 
        if (info->flags & XT_HMARK_FLAG(XT_HMARK_METHOD_L3))
                return 0;
 
        t->proto = nf_ct_protonum(ct);
        if (t->proto != IPPROTO_ICMP) {
-               t->uports.p16.src = otuple->src.u.all;
-               t->uports.p16.dst = rtuple->src.u.all;
-               t->uports.v32 = (t->uports.v32 & info->port_mask.v32) |
-                               info->port_set.v32;
-               if (t->uports.p16.dst < t->uports.p16.src)
-                       swap(t->uports.p16.dst, t->uports.p16.src);
+               t->uports.b16.src = otuple->src.u.all;
+               t->uports.b16.dst = rtuple->src.u.all;
+               hmark_swap_ports(&t->uports, info);
        }
 
        return 0;
@@ -98,15 +111,19 @@ hmark_ct_set_htuple(const struct sk_buff *skb, struct hmark_tuple *t,
 #endif
 }
 
+/* This hash function is endian independent, to ensure consistent hashing if
+ * the cluster is composed of big and little endian systems. */
 static inline u32
 hmark_hash(struct hmark_tuple *t, const struct xt_hmark_info *info)
 {
        u32 hash;
+       u32 src = ntohl(t->src);
+       u32 dst = ntohl(t->dst);
 
-       if (t->dst < t->src)
-               swap(t->src, t->dst);
+       if (dst < src)
+               swap(src, dst);
 
-       hash = jhash_3words(t->src, t->dst, t->uports.v32, info->hashrnd);
+       hash = jhash_3words(src, dst, t->uports.v32, info->hashrnd);
        hash = hash ^ (t->proto & info->proto_mask);
 
        return (((u64)hash * info->hmodulus) >> 32) + info->hoffset;
@@ -126,11 +143,7 @@ hmark_set_tuple_ports(const struct sk_buff *skb, unsigned int nhoff,
        if (skb_copy_bits(skb, nhoff, &t->uports, sizeof(t->uports)) < 0)
                return;
 
-       t->uports.v32 = (t->uports.v32 & info->port_mask.v32) |
-                       info->port_set.v32;
-
-       if (t->uports.p16.dst < t->uports.p16.src)
-               swap(t->uports.p16.dst, t->uports.p16.src);
+       hmark_swap_ports(&t->uports, info);
 }
 
 #if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
@@ -178,8 +191,8 @@ hmark_pkt_set_htuple_ipv6(const struct sk_buff *skb, struct hmark_tuple *t,
                        return -1;
        }
 noicmp:
-       t->src = hmark_addr6_mask(ip6->saddr.s6_addr32, info->src_mask.all);
-       t->dst = hmark_addr6_mask(ip6->daddr.s6_addr32, info->dst_mask.all);
+       t->src = hmark_addr6_mask(ip6->saddr.s6_addr32, info->src_mask.ip6);
+       t->dst = hmark_addr6_mask(ip6->daddr.s6_addr32, info->dst_mask.ip6);
 
        if (info->flags & XT_HMARK_FLAG(XT_HMARK_METHOD_L3))
                return 0;
@@ -255,11 +268,8 @@ hmark_pkt_set_htuple_ipv4(const struct sk_buff *skb, struct hmark_tuple *t,
                }
        }
 
-       t->src = (__force u32) ip->saddr;
-       t->dst = (__force u32) ip->daddr;
-
-       t->src &= info->src_mask.ip;
-       t->dst &= info->dst_mask.ip;
+       t->src = ip->saddr & info->src_mask.ip;
+       t->dst = ip->daddr & info->dst_mask.ip;
 
        if (info->flags & XT_HMARK_FLAG(XT_HMARK_METHOD_L3))
                return 0;
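
The common thread in the xt_HMARK changes above: feeding raw __be32/__be16
values to comparisons and to jhash makes the resulting mark depend on host byte
order, so a cluster mixing big- and little-endian machines would classify the
same flow differently. The fix converts to host order via ntohl()/ntohs()
before comparing and hashing, with hmark_swap_ports() canonicalising the port
pair. A userspace sketch of that canonicalisation (same logic on plain stdint
types, not the kernel code itself):

	#include <arpa/inet.h>
	#include <assert.h>
	#include <stdint.h>

	/* larger port in the high half, compared in host order, so every
	 * byte order agrees on the 32-bit word that gets hashed */
	static uint32_t canon_ports(uint16_t sport_be, uint16_t dport_be)
	{
		uint16_t src = ntohs(sport_be);
		uint16_t dst = ntohs(dport_be);

		if (dst > src)
			return ((uint32_t)dst << 16) | src;
		return ((uint32_t)src << 16) | dst;
	}

	int main(void)
	{
		/* both directions of a flow collapse to the same word */
		assert(canon_ports(htons(5004), htons(80)) ==
		       canon_ports(htons(80), htons(5004)));
		return 0;
	}
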
index 3f339b19d140d666328b5dfd462f5d27bafb94d5..17a707db40eb9865000cd073d3661fef1d1ced13 100644
@@ -292,6 +292,9 @@ static int llcp_sock_getname(struct socket *sock, struct sockaddr *addr,
 
        pr_debug("%p\n", sk);
 
+       if (llcp_sock == NULL)
+               return -EBADFD;
+
        addr->sa_family = AF_NFC;
        *len = sizeof(struct sockaddr_nfc_llcp);
 
index d2a19b0ff71f134544ac1afc65dd5d3fe3bc4891..89baa3328411485ac4748aa9e0f9d097fdd30a97 100644
@@ -42,6 +42,7 @@ void __cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid)
        cfg80211_hold_bss(bss_from_pub(bss));
        wdev->current_bss = bss_from_pub(bss);
 
+       wdev->sme_state = CFG80211_SME_CONNECTED;
        cfg80211_upload_connect_keys(wdev);
 
        nl80211_send_ibss_bssid(wiphy_to_dev(wdev->wiphy), dev, bssid,
@@ -60,7 +61,7 @@ void cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid, gfp_t gfp)
        struct cfg80211_event *ev;
        unsigned long flags;
 
-       CFG80211_DEV_WARN_ON(!wdev->ssid_len);
+       CFG80211_DEV_WARN_ON(wdev->sme_state != CFG80211_SME_CONNECTING);
 
        ev = kzalloc(sizeof(*ev), gfp);
        if (!ev)
@@ -115,9 +116,11 @@ int __cfg80211_join_ibss(struct cfg80211_registered_device *rdev,
 #ifdef CONFIG_CFG80211_WEXT
        wdev->wext.ibss.channel = params->channel;
 #endif
+       wdev->sme_state = CFG80211_SME_CONNECTING;
        err = rdev->ops->join_ibss(&rdev->wiphy, dev, params);
        if (err) {
                wdev->connect_keys = NULL;
+               wdev->sme_state = CFG80211_SME_IDLE;
                return err;
        }
 
@@ -169,6 +172,7 @@ static void __cfg80211_clear_ibss(struct net_device *dev, bool nowext)
        }
 
        wdev->current_bss = NULL;
+       wdev->sme_state = CFG80211_SME_IDLE;
        wdev->ssid_len = 0;
 #ifdef CONFIG_CFG80211_WEXT
        if (!nowext)
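
The util.c hunks below add used_iftypes/all_iftypes bookkeeping to close a hole
in cfg80211_can_change_interface(): a combination could pass every per-type
count check while simply never mentioning one of the interface types currently
in use, and was then accepted anyway. A toy version of the coverage test the
loop now applies before declaring a fit (made-up type IDs standing in for the
nl80211 values):

	#include <assert.h>
	#include <stdint.h>

	/* hypothetical IDs, for illustration only */
	enum { TYPE_STATION = 2, TYPE_AP = 3, TYPE_P2P_GO = 9 };

	int main(void)
	{
		uint32_t used_iftypes = (1u << TYPE_STATION) | (1u << TYPE_P2P_GO);

		/* a combination whose limits only cover STATION and AP ... */
		uint32_t all_iftypes = (1u << TYPE_STATION) | (1u << TYPE_AP);

		/* ... leaves P2P_GO unaccounted for, so it must be skipped
		 * even when the per-type maximums would have been satisfied */
		assert((all_iftypes & used_iftypes) != used_iftypes);
		return 0;
	}
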
index 55d99466babb1d8060dc09d841315e9d366ecc21..8f2d68fc3a444b3c90a699d29d50bf5e48cbb31f 100644
@@ -935,6 +935,7 @@ int cfg80211_can_change_interface(struct cfg80211_registered_device *rdev,
                                  enum nl80211_iftype iftype)
 {
        struct wireless_dev *wdev_iter;
+       u32 used_iftypes = BIT(iftype);
        int num[NUM_NL80211_IFTYPES];
        int total = 1;
        int i, j;
@@ -961,6 +962,7 @@ int cfg80211_can_change_interface(struct cfg80211_registered_device *rdev,
 
                num[wdev_iter->iftype]++;
                total++;
+               used_iftypes |= BIT(wdev_iter->iftype);
        }
        mutex_unlock(&rdev->devlist_mtx);
 
@@ -970,6 +972,7 @@ int cfg80211_can_change_interface(struct cfg80211_registered_device *rdev,
        for (i = 0; i < rdev->wiphy.n_iface_combinations; i++) {
                const struct ieee80211_iface_combination *c;
                struct ieee80211_iface_limit *limits;
+               u32 all_iftypes = 0;
 
                c = &rdev->wiphy.iface_combinations[i];
 
@@ -984,6 +987,7 @@ int cfg80211_can_change_interface(struct cfg80211_registered_device *rdev,
                        if (rdev->wiphy.software_iftypes & BIT(iftype))
                                continue;
                        for (j = 0; j < c->n_limits; j++) {
+                               all_iftypes |= limits[j].types;
                                if (!(limits[j].types & BIT(iftype)))
                                        continue;
                                if (limits[j].max < num[iftype])
@@ -991,7 +995,20 @@ int cfg80211_can_change_interface(struct cfg80211_registered_device *rdev,
                                limits[j].max -= num[iftype];
                        }
                }
-               /* yay, it fits */
+
+               /*
+                * Finally check that all iftypes that we're currently
+                * using are actually part of this combination. If they
+                * aren't then we can't use this combination and have
+                * to continue to the next.
+                */
+               if ((all_iftypes & used_iftypes) != used_iftypes)
+                       goto cont;
+
+               /*
+                * This combination covered all interface types and
+                * supported the requested numbers, so we're good.
+                */
                kfree(limits);
                return 0;
  cont: