Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
author    David S. Miller <davem@davemloft.net>
          Tue, 14 Jan 2014 22:37:09 +0000 (14:37 -0800)
committer David S. Miller <davem@davemloft.net>
          Tue, 14 Jan 2014 22:42:42 +0000 (14:42 -0800)
28 files changed:
MAINTAINERS
drivers/net/bonding/bond_main.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
drivers/net/ethernet/lantiq_etop.c
drivers/net/ethernet/mellanox/mlx4/en_tx.c
drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
drivers/net/ethernet/tile/tilegx.c
drivers/net/macvlan.c
drivers/net/tun.c
drivers/net/usb/usbnet.c
drivers/net/wireless/iwlwifi/pcie/drv.c
drivers/net/wireless/mac80211_hwsim.c
drivers/net/wireless/mwifiex/main.c
include/linux/netdevice.h
net/core/dev.c
net/core/flow_dissector.c
net/ipv6/addrconf.c
net/ipv6/ip6_vti.c
net/mac80211/iface.c
net/mac80211/tx.c
net/nfc/core.c
net/sched/sch_generic.c
net/tipc/link.c

diff --combined MAINTAINERS
index e11d4952bb26d0b61945a8163aa93edd2f16bd61,31a046213274e06e63e26da0514ede7ebc7c4443..b358a3f0cacd4ae3b91220b79750d33ca1dba306
@@@ -1368,6 -1368,9 +1368,9 @@@ T:      git git://git.xilinx.com/linux-xlnx.
  S:    Supported
  F:    arch/arm/mach-zynq/
  F:    drivers/cpuidle/cpuidle-zynq.c
+ N:    zynq
+ N:    xilinx
+ F:    drivers/clocksource/cadence_ttc_timer.c
  
  ARM SMMU DRIVER
  M:    Will Deacon <will.deacon@arm.com>
@@@ -1432,7 -1435,7 +1435,7 @@@ F:      Documentation/aoe
  F:    drivers/block/aoe/
  
  ATHEROS ATH GENERIC UTILITIES
 -M:    "Luis R. Rodriguez" <mcgrof@qca.qualcomm.com>
 +M:    "Luis R. Rodriguez" <mcgrof@do-not-panic.com>
  L:    linux-wireless@vger.kernel.org
  S:    Supported
  F:    drivers/net/wireless/ath/*
  ATHEROS ATH5K WIRELESS DRIVER
  M:    Jiri Slaby <jirislaby@gmail.com>
  M:    Nick Kossifidis <mickflemm@gmail.com>
 -M:    "Luis R. Rodriguez" <mcgrof@qca.qualcomm.com>
 +M:    "Luis R. Rodriguez" <mcgrof@do-not-panic.com>
  L:    linux-wireless@vger.kernel.org
  L:    ath5k-devel@lists.ath5k.org
  W:    http://wireless.kernel.org/en/users/Drivers/ath5k
@@@ -1455,6 -1458,17 +1458,6 @@@ T:     git git://github.com/kvalo/ath.gi
  S:    Supported
  F:    drivers/net/wireless/ath/ath6kl/
  
 -ATHEROS ATH9K WIRELESS DRIVER
 -M:    "Luis R. Rodriguez" <mcgrof@qca.qualcomm.com>
 -M:    Jouni Malinen <jouni@qca.qualcomm.com>
 -M:    Vasanthakumar Thiagarajan <vthiagar@qca.qualcomm.com>
 -M:    Senthil Balasubramanian <senthilb@qca.qualcomm.com>
 -L:    linux-wireless@vger.kernel.org
 -L:    ath9k-devel@lists.ath9k.org
 -W:    http://wireless.kernel.org/en/users/Drivers/ath9k
 -S:    Supported
 -F:    drivers/net/wireless/ath/ath9k/
 -
  WILOCITY WIL6210 WIRELESS DRIVER
  M:    Vladimir Kondratiev <qca_vkondrat@qca.qualcomm.com>
  L:    linux-wireless@vger.kernel.org
@@@ -2010,7 -2024,6 +2013,7 @@@ L:      linux-can@vger.kernel.or
  W:    http://gitorious.org/linux-can
  T:    git git://gitorious.org/linux-can/linux-can-next.git
  S:    Maintained
 +F:    Documentation/networking/can.txt
  F:    net/can/
  F:    include/linux/can/core.h
  F:    include/uapi/linux/can.h
@@@ -2815,8 -2828,10 +2818,10 @@@ F:    include/uapi/drm
  
  INTEL DRM DRIVERS (excluding Poulsbo, Moorestown and derivative chipsets)
  M:    Daniel Vetter <daniel.vetter@ffwll.ch>
+ M:    Jani Nikula <jani.nikula@linux.intel.com>
  L:    intel-gfx@lists.freedesktop.org
  L:    dri-devel@lists.freedesktop.org
+ Q:    http://patchwork.freedesktop.org/project/intel-gfx/
  T:    git git://people.freedesktop.org/~danvet/drm-intel
  S:    Supported
  F:    drivers/gpu/drm/i915/
@@@ -4459,7 -4474,7 +4464,7 @@@ M:      Deepak Saxena <dsaxena@plexity.net
  S:    Maintained
  F:    drivers/char/hw_random/ixp4xx-rng.c
  
 -INTEL ETHERNET DRIVERS (e100/e1000/e1000e/igb/igbvf/ixgb/ixgbe/ixgbevf/i40e)
 +INTEL ETHERNET DRIVERS (e100/e1000/e1000e/igb/igbvf/ixgb/ixgbe/ixgbevf/i40e/i40evf)
  M:    Jeff Kirsher <jeffrey.t.kirsher@intel.com>
  M:    Jesse Brandeburg <jesse.brandeburg@intel.com>
  M:    Bruce Allan <bruce.w.allan@intel.com>
@@@ -4468,7 -4483,6 +4473,7 @@@ M:      Don Skidmore <donald.c.skidmore@inte
  M:    Greg Rose <gregory.v.rose@intel.com>
  M:    Alex Duyck <alexander.h.duyck@intel.com>
  M:    John Ronciak <john.ronciak@intel.com>
 +M:    Mitch Williams <mitch.a.williams@intel.com>
  L:    e1000-devel@lists.sourceforge.net
  W:    http://www.intel.com/support/feedback.htm
  W:    http://e1000.sourceforge.net/
@@@ -4484,7 -4498,6 +4489,7 @@@ F:      Documentation/networking/ixgb.tx
  F:    Documentation/networking/ixgbe.txt
  F:    Documentation/networking/ixgbevf.txt
  F:    Documentation/networking/i40e.txt
 +F:    Documentation/networking/i40evf.txt
  F:    drivers/net/ethernet/intel/
  
  INTEL-MID GPIO DRIVER
@@@ -6975,14 -6988,6 +6980,14 @@@ T:    git git://linuxtv.org/anttip/media_t
  S:    Maintained
  F:    drivers/media/tuners/qt1010*
  
 +QUALCOMM ATHEROS ATH9K WIRELESS DRIVER
 +M:    QCA ath9k Development <ath9k-devel@qca.qualcomm.com>
 +L:    linux-wireless@vger.kernel.org
 +L:    ath9k-devel@lists.ath9k.org
 +W:    http://wireless.kernel.org/en/users/Drivers/ath9k
 +S:    Supported
 +F:    drivers/net/wireless/ath/ath9k/
 +
  QUALCOMM ATHEROS ATH10K WIRELESS DRIVER
  M:    Kalle Valo <kvalo@qca.qualcomm.com>
  L:    ath10k@lists.infradead.org
@@@ -8623,11 -8628,12 +8628,11 @@@ S:   Maintaine
  F:    sound/soc/codecs/twl4030*
  
  TI WILINK WIRELESS DRIVERS
 -M:    Luciano Coelho <luca@coelho.fi>
  L:    linux-wireless@vger.kernel.org
  W:    http://wireless.kernel.org/en/users/Drivers/wl12xx
  W:    http://wireless.kernel.org/en/users/Drivers/wl1251
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/luca/wl12xx.git
 -S:    Maintained
 +S:    Orphan
  F:    drivers/net/wireless/ti/
  F:    include/linux/wl12xx.h
  
index e06c4453eabb99e3c22347335d65853d84b2b4d4,4b8c58b0ec243575bbc17a547f7370b8d0b26d54..7069b846a6ceb2309943e7341a9e59c4e02ba69d
@@@ -113,7 -113,6 +113,7 @@@ static int all_slaves_active
  static struct bond_params bonding_defaults;
  static int resend_igmp = BOND_DEFAULT_RESEND_IGMP;
  static int packets_per_slave = 1;
 +static int lp_interval = BOND_ALB_DEFAULT_LP_INTERVAL;
  
  module_param(max_bonds, int, 0);
  MODULE_PARM_DESC(max_bonds, "Max number of bonded devices");
@@@ -190,10 -189,6 +190,10 @@@ module_param(packets_per_slave, int, 0)
  MODULE_PARM_DESC(packets_per_slave, "Packets to send per slave in balance-rr "
                                    "mode; 0 for a random slave, 1 packet per "
                                    "slave (default), >1 packets per slave.");
 +module_param(lp_interval, uint, 0);
 +MODULE_PARM_DESC(lp_interval, "The number of seconds between instances where "
 +                            "the bonding driver sends learning packets to "
 +                            "each slave's peer switch. The default is 1.");
  
  /*----------------------------- Global variables ----------------------------*/
  
@@@ -304,7 -299,7 +304,7 @@@ const char *bond_mode_name(int mode
   * @skb: hw accel VLAN tagged skb to transmit
   * @slave_dev: slave that is supposed to xmit this skbuff
   */
 -int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb,
 +void bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb,
                        struct net_device *slave_dev)
  {
        skb->dev = slave_dev;
                bond_netpoll_send_skb(bond_get_slave_by_dev(bond, slave_dev), skb);
        else
                dev_queue_xmit(skb);
 -
 -      return 0;
  }
  
  /*
@@@ -594,22 -591,33 +594,22 @@@ static int bond_set_allmulti(struct bon
   * device and retransmit an IGMP JOIN request to the current active
   * slave.
   */
 -static void bond_resend_igmp_join_requests(struct bonding *bond)
 +static void bond_resend_igmp_join_requests_delayed(struct work_struct *work)
  {
 +      struct bonding *bond = container_of(work, struct bonding,
 +                                          mcast_work.work);
 +
        if (!rtnl_trylock()) {
                queue_delayed_work(bond->wq, &bond->mcast_work, 1);
                return;
        }
        call_netdevice_notifiers(NETDEV_RESEND_IGMP, bond->dev);
 -      rtnl_unlock();
  
 -      /* We use curr_slave_lock to protect against concurrent access to
 -       * igmp_retrans from multiple running instances of this function and
 -       * bond_change_active_slave
 -       */
 -      write_lock_bh(&bond->curr_slave_lock);
        if (bond->igmp_retrans > 1) {
                bond->igmp_retrans--;
                queue_delayed_work(bond->wq, &bond->mcast_work, HZ/5);
        }
 -      write_unlock_bh(&bond->curr_slave_lock);
 -}
 -
 -static void bond_resend_igmp_join_requests_delayed(struct work_struct *work)
 -{
 -      struct bonding *bond = container_of(work, struct bonding,
 -                                          mcast_work.work);
 -
 -      bond_resend_igmp_join_requests(bond);
 +      rtnl_unlock();
  }
  
  /* Flush bond's hardware addresses from slave
@@@ -689,12 -697,14 +689,12 @@@ static void bond_set_dev_addr(struct ne
   *
   * Perform special MAC address swapping for fail_over_mac settings
   *
 - * Called with RTNL, bond->lock for read, curr_slave_lock for write_bh.
 + * Called with RTNL, curr_slave_lock for write_bh.
   */
  static void bond_do_fail_over_mac(struct bonding *bond,
                                  struct slave *new_active,
                                  struct slave *old_active)
        __releases(&bond->curr_slave_lock)
 -      __releases(&bond->lock)
 -      __acquires(&bond->lock)
        __acquires(&bond->curr_slave_lock)
  {
        u8 tmp_mac[ETH_ALEN];
        case BOND_FOM_ACTIVE:
                if (new_active) {
                        write_unlock_bh(&bond->curr_slave_lock);
 -                      read_unlock(&bond->lock);
                        bond_set_dev_addr(bond->dev, new_active->dev);
 -                      read_lock(&bond->lock);
                        write_lock_bh(&bond->curr_slave_lock);
                }
                break;
                        return;
  
                write_unlock_bh(&bond->curr_slave_lock);
 -              read_unlock(&bond->lock);
  
                if (old_active) {
                        memcpy(tmp_mac, new_active->dev->dev_addr, ETH_ALEN);
                        pr_err("%s: Error %d setting MAC of slave %s\n",
                               bond->dev->name, -rv, new_active->dev->name);
  out:
 -              read_lock(&bond->lock);
                write_lock_bh(&bond->curr_slave_lock);
                break;
        default:
@@@ -807,11 -821,7 +807,11 @@@ static struct slave *bond_find_best_sla
  
  static bool bond_should_notify_peers(struct bonding *bond)
  {
 -      struct slave *slave = bond->curr_active_slave;
 +      struct slave *slave;
 +
 +      rcu_read_lock();
 +      slave = rcu_dereference(bond->curr_active_slave);
 +      rcu_read_unlock();
  
        pr_debug("bond_should_notify_peers: bond %s slave %s\n",
                 bond->dev->name, slave ? slave->dev->name : "NULL");
   * because it is apparently the best available slave we have, even though its
   * updelay hasn't timed out yet.
   *
 - * If new_active is not NULL, caller must hold bond->lock for read and
 - * curr_slave_lock for write_bh.
 + * If new_active is not NULL, caller must hold curr_slave_lock for write_bh.
   */
  void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
  {
                        }
  
                        write_unlock_bh(&bond->curr_slave_lock);
 -                      read_unlock(&bond->lock);
  
                        call_netdevice_notifiers(NETDEV_BONDING_FAILOVER, bond->dev);
                        if (should_notify_peers)
                                call_netdevice_notifiers(NETDEV_NOTIFY_PEERS,
                                                         bond->dev);
  
 -                      read_lock(&bond->lock);
                        write_lock_bh(&bond->curr_slave_lock);
                }
        }
   * - The primary_slave has got its link back.
   * - A slave has got its link back and there's no old curr_active_slave.
   *
 - * Caller must hold bond->lock for read and curr_slave_lock for write_bh.
 + * Caller must hold curr_slave_lock for write_bh.
   */
  void bond_select_active_slave(struct bonding *bond)
  {
@@@ -1581,9 -1594,11 +1581,9 @@@ int bond_enslave(struct net_device *bon
        bond_set_carrier(bond);
  
        if (USES_PRIMARY(bond->params.mode)) {
 -              read_lock(&bond->lock);
                write_lock_bh(&bond->curr_slave_lock);
                bond_select_active_slave(bond);
                write_unlock_bh(&bond->curr_slave_lock);
 -              read_unlock(&bond->lock);
        }
  
        pr_info("%s: enslaving %s as a%s interface with a%s link.\n",
@@@ -1603,13 -1618,19 +1603,13 @@@ err_detach
                bond_hw_addr_flush(bond_dev, slave_dev);
  
        vlan_vids_del_by_dev(slave_dev, bond_dev);
 -      write_lock_bh(&bond->lock);
        if (bond->primary_slave == new_slave)
                bond->primary_slave = NULL;
        if (bond->curr_active_slave == new_slave) {
 -              bond_change_active_slave(bond, NULL);
 -              write_unlock_bh(&bond->lock);
 -              read_lock(&bond->lock);
                write_lock_bh(&bond->curr_slave_lock);
 +              bond_change_active_slave(bond, NULL);
                bond_select_active_slave(bond);
                write_unlock_bh(&bond->curr_slave_lock);
 -              read_unlock(&bond->lock);
 -      } else {
 -              write_unlock_bh(&bond->lock);
        }
        slave_disable_netpoll(new_slave);
  
@@@ -1637,7 -1658,7 +1637,7 @@@ err_free
  err_undo_flags:
        /* Enslave of first slave has failed and we need to fix master's mac */
        if (!bond_has_slaves(bond) &&
 -          ether_addr_equal(bond_dev->dev_addr, slave_dev->dev_addr))
 +          ether_addr_equal_64bits(bond_dev->dev_addr, slave_dev->dev_addr))
                eth_hw_addr_random(bond_dev);
  
        return res;
@@@ -1674,16 -1695,20 +1674,16 @@@ static int __bond_release_one(struct ne
        }
  
        block_netpoll_tx();
 -      write_lock_bh(&bond->lock);
  
        slave = bond_get_slave_by_dev(bond, slave_dev);
        if (!slave) {
                /* not a slave of this bond */
                pr_info("%s: %s not enslaved\n",
                        bond_dev->name, slave_dev->name);
 -              write_unlock_bh(&bond->lock);
                unblock_netpoll_tx();
                return -EINVAL;
        }
  
 -      write_unlock_bh(&bond->lock);
 -
        /* release the slave from its bond */
        bond->slave_cnt--;
  
        write_lock_bh(&bond->lock);
  
        /* Inform AD package of unbinding of slave. */
 -      if (bond->params.mode == BOND_MODE_8023AD) {
 -              /* must be called before the slave is
 -               * detached from the list
 -               */
 +      if (bond->params.mode == BOND_MODE_8023AD)
                bond_3ad_unbind_slave(slave);
 -      }
 +
 +      write_unlock_bh(&bond->lock);
  
        pr_info("%s: releasing %s interface %s\n",
                bond_dev->name,
        bond->current_arp_slave = NULL;
  
        if (!all && !bond->params.fail_over_mac) {
 -              if (ether_addr_equal(bond_dev->dev_addr, slave->perm_hwaddr) &&
 +              if (ether_addr_equal_64bits(bond_dev->dev_addr, slave->perm_hwaddr) &&
                    bond_has_slaves(bond))
                        pr_warn("%s: Warning: the permanent HWaddr of %s - %pM - is still in use by %s. Set the HWaddr of %s to a different address to avoid conflicts.\n",
                                   bond_dev->name, slave_dev->name,
        if (bond->primary_slave == slave)
                bond->primary_slave = NULL;
  
 -      if (oldcurrent == slave)
 +      if (oldcurrent == slave) {
 +              write_lock_bh(&bond->curr_slave_lock);
                bond_change_active_slave(bond, NULL);
 +              write_unlock_bh(&bond->curr_slave_lock);
 +      }
  
        if (bond_is_lb(bond)) {
                /* Must be called only after the slave has been
                 * detached from the list and the curr_active_slave
                 * has been cleared (if our_slave == old_current),
                 * but before a new active slave is selected.
                 */
 -              write_unlock_bh(&bond->lock);
                bond_alb_deinit_slave(bond, slave);
 -              write_lock_bh(&bond->lock);
        }
  
        if (all) {
                 * is no concern that another slave add/remove event
                 * will interfere.
                 */
 -              write_unlock_bh(&bond->lock);
 -              read_lock(&bond->lock);
                write_lock_bh(&bond->curr_slave_lock);
  
                bond_select_active_slave(bond);
  
                write_unlock_bh(&bond->curr_slave_lock);
 -              read_unlock(&bond->lock);
 -              write_lock_bh(&bond->lock);
        }
  
        if (!bond_has_slaves(bond)) {
                }
        }
  
 -      write_unlock_bh(&bond->lock);
        unblock_netpoll_tx();
        synchronize_rcu();
  
@@@ -1897,7 -1928,7 +1897,7 @@@ static int bond_miimon_inspect(struct b
  
        ignore_updelay = !bond->curr_active_slave ? true : false;
  
 -      bond_for_each_slave(bond, slave, iter) {
 +      bond_for_each_slave_rcu(bond, slave, iter) {
                slave->new_link = BOND_LINK_NOCHANGE;
  
                link_state = bond_check_dev_link(bond, slave->dev, 0);
@@@ -2088,42 -2119,48 +2088,42 @@@ do_failover
   * an acquisition of appropriate locks followed by a commit phase to
   * implement whatever link state changes are indicated.
   */
 -void bond_mii_monitor(struct work_struct *work)
 +static void bond_mii_monitor(struct work_struct *work)
  {
        struct bonding *bond = container_of(work, struct bonding,
                                            mii_work.work);
        bool should_notify_peers = false;
        unsigned long delay;
  
 -      read_lock(&bond->lock);
 -
        delay = msecs_to_jiffies(bond->params.miimon);
  
        if (!bond_has_slaves(bond))
                goto re_arm;
  
 +      rcu_read_lock();
 +
        should_notify_peers = bond_should_notify_peers(bond);
  
        if (bond_miimon_inspect(bond)) {
 -              read_unlock(&bond->lock);
 +              rcu_read_unlock();
  
                /* Race avoidance with bond_close cancel of workqueue */
                if (!rtnl_trylock()) {
 -                      read_lock(&bond->lock);
                        delay = 1;
                        should_notify_peers = false;
                        goto re_arm;
                }
  
 -              read_lock(&bond->lock);
 -
                bond_miimon_commit(bond);
  
 -              read_unlock(&bond->lock);
                rtnl_unlock();  /* might sleep, hold no other locks */
 -              read_lock(&bond->lock);
 -      }
 +      } else
 +              rcu_read_unlock();
  
  re_arm:
        if (bond->params.miimon)
                queue_delayed_work(bond->wq, &bond->mii_work, delay);
  
 -      read_unlock(&bond->lock);
 -
        if (should_notify_peers) {
                if (!rtnl_trylock())
                        return;
@@@ -2377,7 -2414,7 +2377,7 @@@ static bool bond_time_in_interval(struc
   * arp is transmitted to generate traffic. see activebackup_arp_monitor for
   * arp monitoring in active backup mode.
   */
 -void bond_loadbalance_arp_mon(struct work_struct *work)
 +static void bond_loadbalance_arp_mon(struct work_struct *work)
  {
        struct bonding *bond = container_of(work, struct bonding,
                                            arp_work.work);
        struct list_head *iter;
        int do_failover = 0;
  
 -      read_lock(&bond->lock);
 -
        if (!bond_has_slaves(bond))
                goto re_arm;
  
 -      oldcurrent = bond->curr_active_slave;
 +      rcu_read_lock();
 +
 +      oldcurrent = ACCESS_ONCE(bond->curr_active_slave);
        /* see if any of the previous devices are up now (i.e. they have
         * xmt and rcv traffic). the curr_active_slave does not come into
         * the picture unless it is null. also, slave->jiffies is not needed
         * TODO: what about up/down delay in arp mode? it wasn't here before
         *       so it can wait
         */
 -      bond_for_each_slave(bond, slave, iter) {
 +      bond_for_each_slave_rcu(bond, slave, iter) {
                unsigned long trans_start = dev_trans_start(slave->dev);
  
                if (slave->link != BOND_LINK_UP) {
                        bond_arp_send_all(bond, slave);
        }
  
 +      rcu_read_unlock();
 +
        if (do_failover) {
 +              /* bond_select_active_slave must be called with RTNL
 +               * held and curr_slave_lock taken for write.
 +               */
 +              if (!rtnl_trylock())
 +                      goto re_arm;
                block_netpoll_tx();
                write_lock_bh(&bond->curr_slave_lock);
  
  
                write_unlock_bh(&bond->curr_slave_lock);
                unblock_netpoll_tx();
 +              rtnl_unlock();
        }
  
  re_arm:
        if (bond->params.arp_interval)
                queue_delayed_work(bond->wq, &bond->arp_work,
                                   msecs_to_jiffies(bond->params.arp_interval));
 -
 -      read_unlock(&bond->lock);
  }
  
  /*
   * place for the slave.  Returns 0 if no changes are found, >0 if changes
   * to link states must be committed.
   *
 - * Called with bond->lock held for read.
 + * Called with rcu_read_lock held.
   */
  static int bond_ab_arp_inspect(struct bonding *bond)
  {
        struct slave *slave;
        int commit = 0;
  
 -      bond_for_each_slave(bond, slave, iter) {
 +      bond_for_each_slave_rcu(bond, slave, iter) {
                slave->new_link = BOND_LINK_NOCHANGE;
                last_rx = slave_last_rx(bond, slave);
  
   * Called to commit link state changes noted by inspection step of
   * active-backup mode ARP monitor.
   *
 - * Called with RTNL and bond->lock for read.
 + * Called with RTNL held.
   */
  static void bond_ab_arp_commit(struct bonding *bond)
  {
@@@ -2637,20 -2668,19 +2637,20 @@@ do_failover
  /*
   * Send ARP probes for active-backup mode ARP monitor.
   *
 - * Called with bond->lock held for read.
 + * Called with rcu_read_lock held.
   */
  static void bond_ab_arp_probe(struct bonding *bond)
  {
 -      struct slave *slave, *before = NULL, *new_slave = NULL;
 +      struct slave *slave, *before = NULL, *new_slave = NULL,
 +                   *curr_arp_slave = rcu_dereference(bond->current_arp_slave);
        struct list_head *iter;
        bool found = false;
  
        read_lock(&bond->curr_slave_lock);
  
 -      if (bond->current_arp_slave && bond->curr_active_slave)
 +      if (curr_arp_slave && bond->curr_active_slave)
                pr_info("PROBE: c_arp %s && cas %s BAD\n",
 -                      bond->current_arp_slave->dev->name,
 +                      curr_arp_slave->dev->name,
                        bond->curr_active_slave->dev->name);
  
        if (bond->curr_active_slave) {
         * for becoming the curr_active_slave
         */
  
 -      if (!bond->current_arp_slave) {
 -              bond->current_arp_slave = bond_first_slave(bond);
 -              if (!bond->current_arp_slave)
 +      if (!curr_arp_slave) {
 +              curr_arp_slave = bond_first_slave_rcu(bond);
 +              if (!curr_arp_slave)
                        return;
        }
  
 -      bond_set_slave_inactive_flags(bond->current_arp_slave);
 +      bond_set_slave_inactive_flags(curr_arp_slave);
  
 -      bond_for_each_slave(bond, slave, iter) {
 +      bond_for_each_slave_rcu(bond, slave, iter) {
                if (!found && !before && IS_UP(slave->dev))
                        before = slave;
  
                        pr_info("%s: backup interface %s is now down.\n",
                                bond->dev->name, slave->dev->name);
                }
 -              if (slave == bond->current_arp_slave)
 +              if (slave == curr_arp_slave)
                        found = true;
        }
  
        bond_set_slave_active_flags(new_slave);
        bond_arp_send_all(bond, new_slave);
        new_slave->jiffies = jiffies;
 -      bond->current_arp_slave = new_slave;
 -
 +      rcu_assign_pointer(bond->current_arp_slave, new_slave);
  }
  
 -void bond_activebackup_arp_mon(struct work_struct *work)
 +static void bond_activebackup_arp_mon(struct work_struct *work)
  {
        struct bonding *bond = container_of(work, struct bonding,
                                            arp_work.work);
        bool should_notify_peers = false;
        int delta_in_ticks;
  
 -      read_lock(&bond->lock);
 -
        delta_in_ticks = msecs_to_jiffies(bond->params.arp_interval);
  
        if (!bond_has_slaves(bond))
                goto re_arm;
  
 +      rcu_read_lock();
 +
        should_notify_peers = bond_should_notify_peers(bond);
  
        if (bond_ab_arp_inspect(bond)) {
 -              read_unlock(&bond->lock);
 +              rcu_read_unlock();
  
                /* Race avoidance with bond_close flush of workqueue */
                if (!rtnl_trylock()) {
 -                      read_lock(&bond->lock);
                        delta_in_ticks = 1;
                        should_notify_peers = false;
                        goto re_arm;
                }
  
 -              read_lock(&bond->lock);
 -
                bond_ab_arp_commit(bond);
  
 -              read_unlock(&bond->lock);
                rtnl_unlock();
 -              read_lock(&bond->lock);
 +              rcu_read_lock();
        }
  
        bond_ab_arp_probe(bond);
 +      rcu_read_unlock();
  
  re_arm:
        if (bond->params.arp_interval)
                queue_delayed_work(bond->wq, &bond->arp_work, delta_in_ticks);
  
 -      read_unlock(&bond->lock);
 -
        if (should_notify_peers) {
                if (!rtnl_trylock())
                        return;
@@@ -3514,7 -3550,7 +3514,7 @@@ unwind
   * it fails, it tries to find the first available slave for transmission.
   * The skb is consumed in all cases, thus the function is void.
   */
 -void bond_xmit_slave_id(struct bonding *bond, struct sk_buff *skb, int slave_id)
 +static void bond_xmit_slave_id(struct bonding *bond, struct sk_buff *skb, int slave_id)
  {
        struct list_head *iter;
        struct slave *slave;
@@@ -3671,28 -3707,33 +3671,29 @@@ static inline int bond_slave_override(s
                                      struct sk_buff *skb)
  {
        struct slave *slave = NULL;
 -      struct slave *check_slave;
        struct list_head *iter;
 -      int res = 1;
  
        if (!skb->queue_mapping)
                return 1;
  
        /* Find out if any slaves have the same mapping as this skb. */
 -      bond_for_each_slave_rcu(bond, check_slave, iter) {
 -              if (check_slave->queue_id == skb->queue_mapping) {
 -                      slave = check_slave;
 +      bond_for_each_slave_rcu(bond, slave, iter) {
 +              if (slave->queue_id == skb->queue_mapping) {
 +                      if (slave_can_tx(slave)) {
 +                              bond_dev_queue_xmit(bond, skb, slave->dev);
 +                              return 0;
 +                      }
 +                      /* If the slave isn't UP, use default transmit policy. */
                        break;
                }
        }
  
 -      /* If the slave isn't UP, use default transmit policy. */
 -      if (slave && slave->queue_id && IS_UP(slave->dev) &&
 -          (slave->link == BOND_LINK_UP)) {
 -              res = bond_dev_queue_xmit(bond, skb, slave->dev);
 -      }
 -
 -      return res;
 +      return 1;
  }
  
  
- static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb)
+ static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb,
+                            void *accel_priv)
  {
        /*
         * This helper function exists to help dev_pick_tx get the correct
@@@ -3933,29 -3974,6 +3934,29 @@@ static void bond_uninit(struct net_devi
  
  /*------------------------- Module initialization ---------------------------*/
  
 +int bond_parm_tbl_lookup(int mode, const struct bond_parm_tbl *tbl)
 +{
 +      int i;
 +
 +      for (i = 0; tbl[i].modename; i++)
 +              if (mode == tbl[i].mode)
 +                      return tbl[i].mode;
 +
 +      return -1;
 +}
 +
 +static int bond_parm_tbl_lookup_name(const char *modename,
 +                                   const struct bond_parm_tbl *tbl)
 +{
 +      int i;
 +
 +      for (i = 0; tbl[i].modename; i++)
 +              if (strcmp(modename, tbl[i].modename) == 0)
 +                      return tbl[i].mode;
 +
 +      return -1;
 +}
 +
  /*
   * Convert string input module parms.  Accept either the
   * number of the mode or its string name.  A bit complicated because
   */
  int bond_parse_parm(const char *buf, const struct bond_parm_tbl *tbl)
  {
 -      int modeint = -1, i, rv;
 -      char *p, modestr[BOND_MAX_MODENAME_LEN + 1] = { 0, };
 +      int modeint;
 +      char *p, modestr[BOND_MAX_MODENAME_LEN + 1];
  
        for (p = (char *)buf; *p; p++)
                if (!(isdigit(*p) || isspace(*p)))
                        break;
  
 -      if (*p)
 -              rv = sscanf(buf, "%20s", modestr);
 -      else
 -              rv = sscanf(buf, "%d", &modeint);
 -
 -      if (!rv)
 -              return -1;
 -
 -      for (i = 0; tbl[i].modename; i++) {
 -              if (modeint == tbl[i].mode)
 -                      return tbl[i].mode;
 -              if (strcmp(modestr, tbl[i].modename) == 0)
 -                      return tbl[i].mode;
 -      }
 +      if (*p && sscanf(buf, "%20s", modestr) != 0)
 +              return bond_parm_tbl_lookup_name(modestr, tbl);
 +      else if (sscanf(buf, "%d", &modeint) != 0)
 +              return bond_parm_tbl_lookup(modeint, tbl);
  
        return -1;
  }
@@@ -4078,8 -4106,8 +4079,8 @@@ static int bond_check_params(struct bon
                num_peer_notif = 1;
        }
  
 -      /* reset values for 802.3ad */
 -      if (bond_mode == BOND_MODE_8023AD) {
 +      /* reset values for 802.3ad/TLB/ALB */
 +      if (BOND_NO_USES_ARP(bond_mode)) {
                if (!miimon) {
                        pr_warning("Warning: miimon must be specified, otherwise bonding will not detect link failure, speed and duplex which are essential for 802.3ad operation\n");
                        pr_warning("Forcing miimon to 100msec\n");
                packets_per_slave = 1;
        }
  
 -      /* reset values for TLB/ALB */
 -      if ((bond_mode == BOND_MODE_TLB) ||
 -          (bond_mode == BOND_MODE_ALB)) {
 -              if (!miimon) {
 -                      pr_warning("Warning: miimon must be specified, otherwise bonding will not detect link failure and link speed which are essential for TLB/ALB load balancing\n");
 -                      pr_warning("Forcing miimon to 100msec\n");
 -                      miimon = BOND_DEFAULT_MIIMON;
 -              }
 -      }
 -
        if (bond_mode == BOND_MODE_ALB) {
                pr_notice("In ALB mode you might experience client disconnections upon reconnection of a link if the bonding module updelay parameter (%d msec) is incompatible with the forwarding delay time of the switch\n",
                          updelay);
                fail_over_mac_value = BOND_FOM_NONE;
        }
  
 +      if (lp_interval == 0) {
 +              pr_warning("Warning: lp_interval must be between 1 and %d, so it was reset to %d\n",
 +                         INT_MAX, BOND_ALB_DEFAULT_LP_INTERVAL);
 +              lp_interval = BOND_ALB_DEFAULT_LP_INTERVAL;
 +      }
 +
        /* fill params struct with the proper values */
        params->mode = bond_mode;
        params->xmit_policy = xmit_hashtype;
        params->all_slaves_active = all_slaves_active;
        params->resend_igmp = resend_igmp;
        params->min_links = min_links;
 -      params->lp_interval = BOND_ALB_DEFAULT_LP_INTERVAL;
 +      params->lp_interval = lp_interval;
        if (packets_per_slave > 1)
                params->packets_per_slave = reciprocal_value(packets_per_slave);
        else
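
Note on the recurring theme in the bond_main.c changes above: the monitor work items (MII, loadbalance ARP, active-backup ARP) stop taking the global bond->lock rwlock and instead inspect slaves under rcu_read_lock() via bond_for_each_slave_rcu(), taking RTNL plus curr_slave_lock only for the commit/failover phase. A minimal sketch of that reader/writer split, using hypothetical foo_* names rather than the driver's own:

#include <linux/rcupdate.h>
#include <linux/rtnetlink.h>

struct foo_slave { const char *name; };

struct foo {
	struct foo_slave __rcu *active;	/* published pointer */
};

/* Reader: cheap, lock-free, may run concurrently with the writer. */
static void foo_report_active(struct foo *f)
{
	struct foo_slave *s;

	rcu_read_lock();
	s = rcu_dereference(f->active);	/* valid only inside the lock */
	pr_debug("active slave: %s\n", s ? s->name : "NULL");
	rcu_read_unlock();
}

/* Writer: serialized by RTNL; publish, then wait out old readers. */
static void foo_change_active(struct foo *f, struct foo_slave *new)
{
	ASSERT_RTNL();
	rcu_assign_pointer(f->active, new);
	synchronize_rcu();	/* no reader still sees the old slave */
}

The same split explains the rtnl_trylock() dance in the monitors: inspection runs under RCU, and only when link changes must be committed does the work item attempt RTNL, re-arming itself after a short delay if the lock is contended.
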
index 0578fb90d8f3d46814ce1082a34979e24ddad6c9,ec6119089b82b8445cd573d961378c70b28632e5..9819a548e3b911fbd00cb7ca3ffc26286b06c828
@@@ -472,7 -472,7 +472,7 @@@ struct bnx2x_agg_info 
        u16                     vlan_tag;
        u16                     len_on_bd;
        u32                     rxhash;
 -      bool                    l4_rxhash;
 +      enum pkt_hash_types     rxhash_type;
        u16                     gro_size;
        u16                     full_page;
  };
@@@ -520,10 -520,12 +520,12 @@@ struct bnx2x_fastpath 
  #define BNX2X_FP_STATE_IDLE                 0
  #define BNX2X_FP_STATE_NAPI           (1 << 0)    /* NAPI owns this FP */
  #define BNX2X_FP_STATE_POLL           (1 << 1)    /* poll owns this FP */
- #define BNX2X_FP_STATE_NAPI_YIELD     (1 << 2)    /* NAPI yielded this FP */
- #define BNX2X_FP_STATE_POLL_YIELD     (1 << 3)    /* poll yielded this FP */
+ #define BNX2X_FP_STATE_DISABLED               (1 << 2)
+ #define BNX2X_FP_STATE_NAPI_YIELD     (1 << 3)    /* NAPI yielded this FP */
+ #define BNX2X_FP_STATE_POLL_YIELD     (1 << 4)    /* poll yielded this FP */
+ #define BNX2X_FP_OWNED        (BNX2X_FP_STATE_NAPI | BNX2X_FP_STATE_POLL)
  #define BNX2X_FP_YIELD        (BNX2X_FP_STATE_NAPI_YIELD | BNX2X_FP_STATE_POLL_YIELD)
- #define BNX2X_FP_LOCKED       (BNX2X_FP_STATE_NAPI | BNX2X_FP_STATE_POLL)
+ #define BNX2X_FP_LOCKED       (BNX2X_FP_OWNED | BNX2X_FP_STATE_DISABLED)
  #define BNX2X_FP_USER_PEND (BNX2X_FP_STATE_POLL | BNX2X_FP_STATE_POLL_YIELD)
        /* protect state */
        spinlock_t lock;
@@@ -613,7 -615,7 +615,7 @@@ static inline bool bnx2x_fp_lock_napi(s
  {
        bool rc = true;
  
-       spin_lock(&fp->lock);
+       spin_lock_bh(&fp->lock);
        if (fp->state & BNX2X_FP_LOCKED) {
                WARN_ON(fp->state & BNX2X_FP_STATE_NAPI);
                fp->state |= BNX2X_FP_STATE_NAPI_YIELD;
                /* we don't care if someone yielded */
                fp->state = BNX2X_FP_STATE_NAPI;
        }
-       spin_unlock(&fp->lock);
+       spin_unlock_bh(&fp->lock);
        return rc;
  }
  
@@@ -631,14 -633,16 +633,16 @@@ static inline bool bnx2x_fp_unlock_napi
  {
        bool rc = false;
  
-       spin_lock(&fp->lock);
+       spin_lock_bh(&fp->lock);
        WARN_ON(fp->state &
                (BNX2X_FP_STATE_POLL | BNX2X_FP_STATE_NAPI_YIELD));
  
        if (fp->state & BNX2X_FP_STATE_POLL_YIELD)
                rc = true;
-       fp->state = BNX2X_FP_STATE_IDLE;
-       spin_unlock(&fp->lock);
+       /* state ==> idle, unless currently disabled */
+       fp->state &= BNX2X_FP_STATE_DISABLED;
+       spin_unlock_bh(&fp->lock);
        return rc;
  }
  
@@@ -669,7 -673,9 +673,9 @@@ static inline bool bnx2x_fp_unlock_poll
  
        if (fp->state & BNX2X_FP_STATE_POLL_YIELD)
                rc = true;
-       fp->state = BNX2X_FP_STATE_IDLE;
+       /* state ==> idle, unless currently disabled */
+       fp->state &= BNX2X_FP_STATE_DISABLED;
        spin_unlock_bh(&fp->lock);
        return rc;
  }
  /* true if a socket is polling, even if it did not get the lock */
  static inline bool bnx2x_fp_ll_polling(struct bnx2x_fastpath *fp)
  {
-       WARN_ON(!(fp->state & BNX2X_FP_LOCKED));
+       WARN_ON(!(fp->state & BNX2X_FP_OWNED));
        return fp->state & BNX2X_FP_USER_PEND;
  }
+ /* false if fp is currently owned */
+ static inline bool bnx2x_fp_ll_disable(struct bnx2x_fastpath *fp)
+ {
+       int rc = true;
+       spin_lock_bh(&fp->lock);
+       if (fp->state & BNX2X_FP_OWNED)
+               rc = false;
+       fp->state |= BNX2X_FP_STATE_DISABLED;
+       spin_unlock_bh(&fp->lock);
+       return rc;
+ }
  #else
  static inline void bnx2x_fp_init_lock(struct bnx2x_fastpath *fp)
  {
@@@ -709,6 -729,10 +729,10 @@@ static inline bool bnx2x_fp_ll_polling(
  {
        return false;
  }
+ static inline bool bnx2x_fp_ll_disable(struct bnx2x_fastpath *fp)
+ {
+       return true;
+ }
  #endif /* CONFIG_NET_RX_BUSY_POLL */
  
  /* Use 2500 as a mini-jumbo MTU for FCoE */
@@@ -1549,7 -1573,6 +1573,7 @@@ struct bnx2x 
  #define INTERRUPTS_ENABLED_FLAG               (1 << 23)
  #define BC_SUPPORTS_RMMOD_CMD         (1 << 24)
  #define HAS_PHYS_PORT_ID              (1 << 25)
 +#define AER_ENABLED                   (1 << 26)
  
  #define BP_NOMCP(bp)                  ((bp)->flags & NO_MCP_FLAG)
  
@@@ -2057,6 -2080,7 +2081,6 @@@ int bnx2x_del_all_macs(struct bnx2x *bp
  void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p);
  void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
                    u8 vf_valid, int fw_sb_id, int igu_sb_id);
 -u32 bnx2x_get_pretend_reg(struct bnx2x *bp);
  int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port);
  int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port);
  int bnx2x_set_mult_gpio(struct bnx2x *bp, u8 pins, u32 mode);
@@@ -2439,8 -2463,7 +2463,8 @@@ void bnx2x_igu_clear_sb_gen(struct bnx2
  
  #define GOOD_ME_REG(me_reg) (((me_reg) & ME_REG_VF_VALID) && \
                            (!((me_reg) & ME_REG_VF_ERR)))
 -int bnx2x_nic_load_analyze_req(struct bnx2x *bp, u32 load_code);
 +int bnx2x_compare_fw_ver(struct bnx2x *bp, u32 load_code, bool print_err);
 +
  /* Congestion management fairness mode */
  #define CMNG_FNS_NONE                 0
  #define CMNG_FNS_MINMAX                       1
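
A note on the busy-poll changes above: the new BNX2X_FP_STATE_DISABLED bit lets bnx2x_napi_disable() mark a fastpath unusable without spinning with bottom halves disabled; the current owner (NAPI or a busy-polling socket) drops back to "idle unless disabled" on unlock, and the locks move to spin_lock_bh() because they are now contended from softirq context. A condensed sketch of that state machine (the YIELD bookkeeping of the real driver is omitted here):

#include <linux/spinlock.h>

#define FP_NAPI		(1 << 0)	/* NAPI owns the fastpath */
#define FP_POLL		(1 << 1)	/* busy-poll socket owns it */
#define FP_DISABLED	(1 << 2)	/* sticky: no new owners allowed */
#define FP_OWNED	(FP_NAPI | FP_POLL)

struct fp { spinlock_t lock; unsigned int state; };

static bool fp_lock_napi(struct fp *fp)
{
	bool got = false;

	spin_lock_bh(&fp->lock);
	if (!(fp->state & (FP_OWNED | FP_DISABLED))) {
		fp->state = FP_NAPI;
		got = true;
	}
	spin_unlock_bh(&fp->lock);
	return got;
}

static void fp_unlock_napi(struct fp *fp)
{
	spin_lock_bh(&fp->lock);
	fp->state &= FP_DISABLED;	/* idle, unless disable raced in */
	spin_unlock_bh(&fp->lock);
}

/* Returns false while an owner is still active; the caller sleeps
 * (usleep_range() in the driver) and retries until this succeeds. */
static bool fp_disable(struct fp *fp)
{
	bool idle;

	spin_lock_bh(&fp->lock);
	idle = !(fp->state & FP_OWNED);
	fp->state |= FP_DISABLED;
	spin_unlock_bh(&fp->lock);
	return idle;
}
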
index cdc12ea0fc3ff686c43bb0e0db2cb4864065c179,bf811565ee245a0472cffc0ea5f70b30da053beb..9d7419e0390bd526d52e850f0d54c3863c920ffd
  #include "bnx2x_init.h"
  #include "bnx2x_sp.h"
  
 +static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp);
 +static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp);
 +static int bnx2x_alloc_fp_mem(struct bnx2x *bp);
 +static int bnx2x_poll(struct napi_struct *napi, int budget);
 +
 +static void bnx2x_add_all_napi_cnic(struct bnx2x *bp)
 +{
 +      int i;
 +
 +      /* Add NAPI objects */
 +      for_each_rx_queue_cnic(bp, i) {
 +              netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
 +                             bnx2x_poll, NAPI_POLL_WEIGHT);
 +              napi_hash_add(&bnx2x_fp(bp, i, napi));
 +      }
 +}
 +
 +static void bnx2x_add_all_napi(struct bnx2x *bp)
 +{
 +      int i;
 +
 +      /* Add NAPI objects */
 +      for_each_eth_queue(bp, i) {
 +              netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
 +                             bnx2x_poll, NAPI_POLL_WEIGHT);
 +              napi_hash_add(&bnx2x_fp(bp, i, napi));
 +      }
 +}
 +
 +static int bnx2x_calc_num_queues(struct bnx2x *bp)
 +{
 +      return  bnx2x_num_queues ?
 +               min_t(int, bnx2x_num_queues, BNX2X_MAX_QUEUES(bp)) :
 +               min_t(int, netif_get_num_default_rss_queues(),
 +                     BNX2X_MAX_QUEUES(bp));
 +}
 +
  /**
   * bnx2x_move_fp - move content of the fastpath structure.
   *
@@@ -182,7 -145,7 +182,7 @@@ static void bnx2x_shrink_eth_fp(struct 
        }
  }
  
 -int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
 +int bnx2x_load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
  
  /* free skb in the packet ring at pos idx
   * return idx of last bd freed
@@@ -197,6 -160,7 +197,7 @@@ static u16 bnx2x_free_tx_pkt(struct bnx
        struct sk_buff *skb = tx_buf->skb;
        u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
        int nbd;
+       u16 split_bd_len = 0;
  
        /* prefetch skb end pointer to speedup dev_kfree_skb() */
        prefetch(&skb->end);
        DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d  buff @(%p)->skb %p\n",
           txdata->txq_index, idx, tx_buf, skb);
  
-       /* unmap first bd */
        tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
-       dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
-                        BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);
  
        nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
  #ifdef BNX2X_STOP_ON_ERROR
        --nbd;
        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
  
-       /* ...and the TSO split header bd since they have no mapping */
+       /* TSO headers+data bds share a common mapping. See bnx2x_tx_split() */
        if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
+               tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
+               split_bd_len = BD_UNMAP_LEN(tx_data_bd);
                --nbd;
                bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
        }
  
+       /* unmap first bd */
+       dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
+                        BD_UNMAP_LEN(tx_start_bd) + split_bd_len,
+                        DMA_TO_DEVICE);
        /* now free frags */
        while (nbd > 0) {
  
@@@ -391,7 -359,7 +396,7 @@@ static inline void bnx2x_update_sge_pro
   */
  static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
                            const struct eth_fast_path_rx_cqe *cqe,
 -                          bool *l4_rxhash)
 +                          enum pkt_hash_types *rxhash_type)
  {
        /* Get Toeplitz hash from CQE */
        if ((bp->dev->features & NETIF_F_RXHASH) &&
                enum eth_rss_hash_type htype;
  
                htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE;
 -              *l4_rxhash = (htype == TCP_IPV4_HASH_TYPE) ||
 -                           (htype == TCP_IPV6_HASH_TYPE);
 +              *rxhash_type = ((htype == TCP_IPV4_HASH_TYPE) ||
 +                              (htype == TCP_IPV6_HASH_TYPE)) ?
 +                             PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3;
 +
                return le32_to_cpu(cqe->rss_hash_result);
        }
 -      *l4_rxhash = false;
 +      *rxhash_type = PKT_HASH_TYPE_NONE;
        return 0;
  }
  
@@@ -459,7 -425,7 +464,7 @@@ static void bnx2x_tpa_start(struct bnx2
        tpa_info->tpa_state = BNX2X_TPA_START;
        tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
        tpa_info->placement_offset = cqe->placement_offset;
 -      tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->l4_rxhash);
 +      tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->rxhash_type);
        if (fp->mode == TPA_MODE_GRO) {
                u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
                tpa_info->full_page = SGE_PAGES / gro_size * gro_size;
@@@ -767,7 -733,8 +772,7 @@@ static void bnx2x_tpa_stop(struct bnx2
  
                skb_reserve(skb, pad + NET_SKB_PAD);
                skb_put(skb, len);
 -              skb->rxhash = tpa_info->rxhash;
 -              skb->l4_rxhash = tpa_info->l4_rxhash;
 +              skb_set_hash(skb, tpa_info->rxhash, tpa_info->rxhash_type);
  
                skb->protocol = eth_type_trans(skb, bp->dev);
                skb->ip_summed = CHECKSUM_UNNECESSARY;
@@@ -850,7 -817,7 +855,7 @@@ void bnx2x_csum_validate(struct sk_buf
                skb->ip_summed = CHECKSUM_UNNECESSARY;
  }
  
 -int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
 +static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
  {
        struct bnx2x *bp = fp->bp;
        u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
                enum eth_rx_cqe_type cqe_fp_type;
                u16 len, pad, queue;
                u8 *data;
 -              bool l4_rxhash;
 +              u32 rxhash;
 +              enum pkt_hash_types rxhash_type;
  
  #ifdef BNX2X_STOP_ON_ERROR
                if (unlikely(bp->panic))
@@@ -1026,8 -992,8 +1031,8 @@@ reuse_rx
                skb->protocol = eth_type_trans(skb, bp->dev);
  
                /* Set Toeplitz hash for a none-LRO skb */
 -              skb->rxhash = bnx2x_get_rxhash(bp, cqe_fp, &l4_rxhash);
 -              skb->l4_rxhash = l4_rxhash;
 +              rxhash = bnx2x_get_rxhash(bp, cqe_fp, &rxhash_type);
 +              skb_set_hash(skb, rxhash, rxhash_type);
  
                skb_checksum_none_assert(skb);
  
@@@ -1520,7 -1486,7 +1525,7 @@@ static void bnx2x_free_rx_skbs(struct b
        }
  }
  
 -void bnx2x_free_skbs_cnic(struct bnx2x *bp)
 +static void bnx2x_free_skbs_cnic(struct bnx2x *bp)
  {
        bnx2x_free_tx_skbs_cnic(bp);
        bnx2x_free_rx_skbs_cnic(bp);
@@@ -1829,26 -1795,22 +1834,22 @@@ static void bnx2x_napi_disable_cnic(str
  {
        int i;
  
-       local_bh_disable();
        for_each_rx_queue_cnic(bp, i) {
                napi_disable(&bnx2x_fp(bp, i, napi));
-               while (!bnx2x_fp_lock_napi(&bp->fp[i]))
-                       mdelay(1);
+               while (!bnx2x_fp_ll_disable(&bp->fp[i]))
+                       usleep_range(1000, 2000);
        }
-       local_bh_enable();
  }
  
  static void bnx2x_napi_disable(struct bnx2x *bp)
  {
        int i;
  
-       local_bh_disable();
        for_each_eth_queue(bp, i) {
                napi_disable(&bnx2x_fp(bp, i, napi));
-               while (!bnx2x_fp_lock_napi(&bp->fp[i]))
-                       mdelay(1);
+               while (!bnx2x_fp_ll_disable(&bp->fp[i]))
+                       usleep_range(1000, 2000);
        }
-       local_bh_enable();
  }
  
  void bnx2x_netif_start(struct bnx2x *bp)
@@@ -1871,7 -1833,8 +1872,8 @@@ void bnx2x_netif_stop(struct bnx2x *bp
                bnx2x_napi_disable_cnic(bp);
  }
  
- u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
+ u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
+                      void *accel_priv)
  {
        struct bnx2x *bp = netdev_priv(dev);
  
@@@ -2302,7 -2265,7 +2304,7 @@@ static int bnx2x_nic_load_request(struc
   * virtualized environments a pf from another VM may have already
   * initialized the device including loading FW
   */
 -int bnx2x_nic_load_analyze_req(struct bnx2x *bp, u32 load_code)
 +int bnx2x_compare_fw_ver(struct bnx2x *bp, u32 load_code, bool print_err)
  {
        /* is another pf loaded on this engine? */
        if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
  
                /* abort nic load if version mismatch */
                if (my_fw != loaded_fw) {
 -                      BNX2X_ERR("bnx2x with FW %x was already loaded which mismatches my %x FW. Aborting\n",
 -                                loaded_fw, my_fw);
 +                      if (print_err)
 +                              BNX2X_ERR("bnx2x with FW %x was already loaded which mismatches my %x FW. Aborting\n",
 +                                        loaded_fw, my_fw);
 +                      else
 +                              BNX2X_DEV_INFO("bnx2x with FW %x was already loaded which mismatches my %x FW, possibly due to MF UNDI\n",
 +                                             loaded_fw, my_fw);
                        return -EBUSY;
                }
        }
@@@ -2339,16 -2298,16 +2341,16 @@@ static int bnx2x_nic_load_no_mcp(struc
        int path = BP_PATH(bp);
  
        DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d]      %d, %d, %d\n",
 -         path, load_count[path][0], load_count[path][1],
 -         load_count[path][2]);
 -      load_count[path][0]++;
 -      load_count[path][1 + port]++;
 +         path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
 +         bnx2x_load_count[path][2]);
 +      bnx2x_load_count[path][0]++;
 +      bnx2x_load_count[path][1 + port]++;
        DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d]  %d, %d, %d\n",
 -         path, load_count[path][0], load_count[path][1],
 -         load_count[path][2]);
 -      if (load_count[path][0] == 1)
 +         path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
 +         bnx2x_load_count[path][2]);
 +      if (bnx2x_load_count[path][0] == 1)
                return FW_MSG_CODE_DRV_LOAD_COMMON;
 -      else if (load_count[path][1 + port] == 1)
 +      else if (bnx2x_load_count[path][1 + port] == 1)
                return FW_MSG_CODE_DRV_LOAD_PORT;
        else
                return FW_MSG_CODE_DRV_LOAD_FUNCTION;
@@@ -2641,7 -2600,7 +2643,7 @@@ int bnx2x_nic_load(struct bnx2x *bp, in
                                LOAD_ERROR_EXIT(bp, load_error1);
  
                        /* what did mcp say? */
 -                      rc = bnx2x_nic_load_analyze_req(bp, load_code);
 +                      rc = bnx2x_compare_fw_ver(bp, load_code, true);
                        if (rc) {
                                bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
                                LOAD_ERROR_EXIT(bp, load_error2);
@@@ -3106,7 -3065,7 +3108,7 @@@ int bnx2x_set_power_state(struct bnx2x 
  /*
   * net_device service functions
   */
 -int bnx2x_poll(struct napi_struct *napi, int budget)
 +static int bnx2x_poll(struct napi_struct *napi, int budget)
  {
        int work_done = 0;
        u8 cos;
@@@ -4233,7 -4192,7 +4235,7 @@@ static void bnx2x_free_fp_mem_at(struc
        /* end of fastpath */
  }
  
 -void bnx2x_free_fp_mem_cnic(struct bnx2x *bp)
 +static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp)
  {
        int i;
        for_each_cnic_queue(bp, i)
@@@ -4447,7 -4406,7 +4449,7 @@@ alloc_mem_err
        return 0;
  }
  
 -int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp)
 +static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp)
  {
        if (!NO_FCOE(bp))
                /* FCoE */
        return 0;
  }
  
 -int bnx2x_alloc_fp_mem(struct bnx2x *bp)
 +static int bnx2x_alloc_fp_mem(struct bnx2x *bp)
  {
        int i;
  
index 7ebbddc7290c839592db6df45ea7a303f14580c2,41f3ca5ad972b396498cbe5b7d7ad72ea9bdb1a7..17d1689aec6b83cd002bed8a62ad7b6d06cba1b6
  #include "bnx2x_sriov.h"
  
  /* This is used as a replacement for an MCP if it's not present */
 -extern int load_count[2][3]; /* per-path: 0-common, 1-port0, 2-port1 */
 -
 -extern int num_queues;
 -extern int int_mode;
 +extern int bnx2x_load_count[2][3]; /* per-path: 0-common, 1-port0, 2-port1 */
 +extern int bnx2x_num_queues;
  
  /************************ Macros ********************************/
  #define BNX2X_PCI_FREE(x, y, size) \
@@@ -415,8 -417,35 +415,8 @@@ int bnx2x_set_eth_mac(struct bnx2x *bp
   * If bp->state is OPEN, should be called with
   * netif_addr_lock_bh()
   */
 -void bnx2x_set_rx_mode(struct net_device *dev);
  void bnx2x_set_rx_mode_inner(struct bnx2x *bp);
  
 -/**
 - * bnx2x_set_storm_rx_mode - configure MAC filtering rules in a FW.
 - *
 - * @bp:               driver handle
 - *
 - * If bp->state is OPEN, should be called with
 - * netif_addr_lock_bh().
 - */
 -int bnx2x_set_storm_rx_mode(struct bnx2x *bp);
 -
 -/**
 - * bnx2x_set_q_rx_mode - configures rx_mode for a single queue.
 - *
 - * @bp:                       driver handle
 - * @cl_id:            client id
 - * @rx_mode_flags:    rx mode configuration
 - * @rx_accept_flags:  rx accept configuration
 - * @tx_accept_flags:  tx accept configuration (tx switch)
 - * @ramrod_flags:     ramrod configuration
 - */
 -int bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id,
 -                      unsigned long rx_mode_flags,
 -                      unsigned long rx_accept_flags,
 -                      unsigned long tx_accept_flags,
 -                      unsigned long ramrod_flags);
 -
  /* Parity errors related */
  void bnx2x_set_pf_load(struct bnx2x *bp);
  bool bnx2x_clear_pf_load(struct bnx2x *bp);
@@@ -495,7 -524,8 +495,8 @@@ int bnx2x_set_vf_mac(struct net_device 
  int bnx2x_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos);
  
  /* select_queue callback */
- u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb);
+ u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
+                      void *accel_priv);
  
  static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
                                        struct bnx2x_fastpath *fp,
@@@ -535,6 -565,9 +536,6 @@@ int bnx2x_reload_if_running(struct net_
  
  int bnx2x_change_mac_addr(struct net_device *dev, void *p);
  
 -/* NAPI poll Rx part */
 -int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget);
 -
  /* NAPI poll Tx part */
  int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata);
  
@@@ -545,9 -578,13 +546,9 @@@ int bnx2x_resume(struct pci_dev *pdev)
  /* Release IRQ vectors */
  void bnx2x_free_irq(struct bnx2x *bp);
  
 -void bnx2x_free_fp_mem_cnic(struct bnx2x *bp);
  void bnx2x_free_fp_mem(struct bnx2x *bp);
 -int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp);
 -int bnx2x_alloc_fp_mem(struct bnx2x *bp);
  void bnx2x_init_rx_rings(struct bnx2x *bp);
  void bnx2x_init_rx_rings_cnic(struct bnx2x *bp);
 -void bnx2x_free_skbs_cnic(struct bnx2x *bp);
  void bnx2x_free_skbs(struct bnx2x *bp);
  void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw);
  void bnx2x_netif_start(struct bnx2x *bp);
@@@ -570,6 -607,15 +571,6 @@@ int bnx2x_enable_msix(struct bnx2x *bp)
   */
  int bnx2x_enable_msi(struct bnx2x *bp);
  
 -/**
 - * bnx2x_poll - NAPI callback
 - *
 - * @napi:     napi structure
 - * @budget:
 - *
 - */
 -int bnx2x_poll(struct napi_struct *napi, int budget);
 -
  /**
   * bnx2x_low_latency_recv - LL callback
   *
@@@ -816,6 -862,30 +817,6 @@@ static inline void bnx2x_free_rx_sge(st
        sge->addr_lo = 0;
  }
  
 -static inline void bnx2x_add_all_napi_cnic(struct bnx2x *bp)
 -{
 -      int i;
 -
 -      /* Add NAPI objects */
 -      for_each_rx_queue_cnic(bp, i) {
 -              netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
 -                             bnx2x_poll, NAPI_POLL_WEIGHT);
 -              napi_hash_add(&bnx2x_fp(bp, i, napi));
 -      }
 -}
 -
 -static inline void bnx2x_add_all_napi(struct bnx2x *bp)
 -{
 -      int i;
 -
 -      /* Add NAPI objects */
 -      for_each_eth_queue(bp, i) {
 -              netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
 -                             bnx2x_poll, NAPI_POLL_WEIGHT);
 -              napi_hash_add(&bnx2x_fp(bp, i, napi));
 -      }
 -}
 -
  static inline void bnx2x_del_all_napi_cnic(struct bnx2x *bp)
  {
        int i;
@@@ -849,6 -919,14 +850,6 @@@ static inline void bnx2x_disable_msi(st
        }
  }
  
 -static inline int bnx2x_calc_num_queues(struct bnx2x *bp)
 -{
 -      return  num_queues ?
 -               min_t(int, num_queues, BNX2X_MAX_QUEUES(bp)) :
 -               min_t(int, netif_get_num_default_rss_queues(),
 -                     BNX2X_MAX_QUEUES(bp));
 -}
 -
  static inline void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
  {
        int i, j;
@@@ -1095,6 -1173,8 +1096,6 @@@ static inline u8 bnx2x_fp_qzone_id(stru
                return fp->cl_id;
  }
  
 -u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp);
 -
  static inline void bnx2x_init_txdata(struct bnx2x *bp,
                                     struct bnx2x_fp_txdata *txdata, u32 cid,
                                     int txq_index, __le16 *tx_cons_sb,
@@@ -1127,6 -1207,47 +1128,6 @@@ static inline u8 bnx2x_cnic_igu_sb_id(s
        return bp->igu_base_sb;
  }
  
 -static inline void bnx2x_init_fcoe_fp(struct bnx2x *bp)
 -{
 -      struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
 -      unsigned long q_type = 0;
 -
 -      bnx2x_fcoe(bp, rx_queue) = BNX2X_NUM_ETH_QUEUES(bp);
 -      bnx2x_fcoe(bp, cl_id) = bnx2x_cnic_eth_cl_id(bp,
 -                                                   BNX2X_FCOE_ETH_CL_ID_IDX);
 -      bnx2x_fcoe(bp, cid) = BNX2X_FCOE_ETH_CID(bp);
 -      bnx2x_fcoe(bp, fw_sb_id) = DEF_SB_ID;
 -      bnx2x_fcoe(bp, igu_sb_id) = bp->igu_dsb_id;
 -      bnx2x_fcoe(bp, rx_cons_sb) = BNX2X_FCOE_L2_RX_INDEX;
 -      bnx2x_init_txdata(bp, bnx2x_fcoe(bp, txdata_ptr[0]),
 -                        fp->cid, FCOE_TXQ_IDX(bp), BNX2X_FCOE_L2_TX_INDEX,
 -                        fp);
 -
 -      DP(NETIF_MSG_IFUP, "created fcoe tx data (fp index %d)\n", fp->index);
 -
 -      /* qZone id equals to FW (per path) client id */
 -      bnx2x_fcoe(bp, cl_qzone_id) = bnx2x_fp_qzone_id(fp);
 -      /* init shortcut */
 -      bnx2x_fcoe(bp, ustorm_rx_prods_offset) =
 -              bnx2x_rx_ustorm_prods_offset(fp);
 -
 -      /* Configure Queue State object */
 -      __set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
 -      __set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
 -
 -      /* No multi-CoS for FCoE L2 client */
 -      BUG_ON(fp->max_cos != 1);
 -
 -      bnx2x_init_queue_obj(bp, &bnx2x_sp_obj(bp, fp).q_obj, fp->cl_id,
 -                           &fp->cid, 1, BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
 -                           bnx2x_sp_mapping(bp, q_rdata), q_type);
 -
 -      DP(NETIF_MSG_IFUP,
 -         "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d fw_sb %d igu_sb %d\n",
 -         fp->index, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id,
 -         fp->igu_sb_id);
 -}
 -
  static inline int bnx2x_clean_tx_queue(struct bnx2x *bp,
                                       struct bnx2x_fp_txdata *txdata)
  {
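
The bnx2x_add_all_napi helpers removed from this header above illustrate the standard two-step NAPI setup of this kernel generation: netif_napi_add() registers the poll callback with its weight, and napi_hash_add() publishes the context for the busy-poll path. A minimal sketch of that pairing, using hypothetical "foo" names rather than real bnx2x types:

    /* Sketch only: foo_priv, rx_ring and foo_poll are illustrative. */
    static void foo_add_all_napi(struct foo_priv *priv)
    {
            int i;

            for (i = 0; i < priv->num_rx_queues; i++) {
                    netif_napi_add(priv->netdev, &priv->rx_ring[i].napi,
                                   foo_poll, NAPI_POLL_WEIGHT);
                    /* expose this context to the busy-poll machinery */
                    napi_hash_add(&priv->rx_ring[i].napi);
            }
    }
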
index 974a007c427727d0799fc74eba161d7757b333d0,ec94a20d709952d1f678f69ac7778205f75e8ffe..8f9266c64c7589902c62443f79cc65a00895785f
@@@ -9,7 -9,8 +9,7 @@@
   *   GNU General Public License for more details.
   *
   *   You should have received a copy of the GNU General Public License
 - *   along with this program; if not, write to the Free Software
 - *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
 + *   along with this program; if not, see <http://www.gnu.org/licenses/>.
   *
   *   Copyright (C) 2011 John Crispin <blogic@openwrt.org>
   */
@@@ -618,7 -619,8 +618,8 @@@ ltq_etop_set_multicast_list(struct net_
  }
  
  static u16
- ltq_etop_select_queue(struct net_device *dev, struct sk_buff *skb)
+ ltq_etop_select_queue(struct net_device *dev, struct sk_buff *skb,
+                     void *accel_priv)
  {
        /* we are currently only using the first queue */
        return 0;
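
The extra accel_priv argument taken by ltq_etop_select_queue() here is the recurring theme of this merge: the ndo_select_queue hook grew a third parameter for the L2 forwarding offload path, so every driver implementation had to be updated even where, as in this single-queue device, the value is ignored. The updated callback shape, sketched with hypothetical names:

    /* Single-queue device: both skb and the new accel_priv are unused. */
    static u16 foo_select_queue(struct net_device *dev, struct sk_buff *skb,
                                void *accel_priv)
    {
            return 0;
    }
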
index 160e86d216074193865c5fdcacf6e3389b92f9e0,a7fcd593b2dbb397bdf3d2a810ff42f71d0806b6..8e8a7eb43a2ce861249d678e1f1053be9b7b0ac5
@@@ -39,7 -39,6 +39,7 @@@
  #include <linux/if_vlan.h>
  #include <linux/vmalloc.h>
  #include <linux/tcp.h>
 +#include <linux/ip.h>
  #include <linux/moduleparam.h>
  
  #include "mlx4_en.h"
@@@ -56,7 -55,7 +56,7 @@@ MODULE_PARM_DESC(inline_thold, "thresho
  
  int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
                           struct mlx4_en_tx_ring **pring, int qpn, u32 size,
 -                         u16 stride, int node)
 +                         u16 stride, int node, int queue_index)
  {
        struct mlx4_en_dev *mdev = priv->mdev;
        struct mlx4_en_tx_ring *ring;
                ring->bf_enabled = true;
  
        ring->hwtstamp_tx_type = priv->hwtstamp_config.tx_type;
 +      ring->queue_index = queue_index;
 +
 +      if (queue_index < priv->num_tx_rings_p_up && cpu_online(queue_index))
 +              cpumask_set_cpu(queue_index, &ring->affinity_mask);
  
        *pring = ring;
        return 0;
@@@ -211,9 -206,6 +211,9 @@@ int mlx4_en_activate_tx_ring(struct mlx
  
        err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, &ring->context,
                               &ring->qp, &ring->qp_state);
 +      if (!user_prio && cpu_online(ring->queue_index))
 +              netif_set_xps_queue(priv->dev, &ring->affinity_mask,
 +                                  ring->queue_index);
  
        return err;
  }
@@@ -325,7 -317,7 +325,7 @@@ static u32 mlx4_en_free_tx_desc(struct 
                        }
                }
        }
 -      dev_kfree_skb_any(skb);
 +      dev_kfree_skb(skb);
        return tx_info->nr_txbb;
  }
  
@@@ -362,9 -354,7 +362,9 @@@ int mlx4_en_free_tx_buf(struct net_devi
        return cnt;
  }
  
 -static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
 +static int mlx4_en_process_tx_cq(struct net_device *dev,
 +                               struct mlx4_en_cq *cq,
 +                               int budget)
  {
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_cq *mcq = &cq->mcq;
        u32 bytes = 0;
        int factor = priv->cqe_factor;
        u64 timestamp = 0;
 +      int done = 0;
  
        if (!priv->port_up)
 -              return;
 +              return 0;
  
        index = cons_index & size_mask;
        cqe = &buf[(index << factor) + factor];
  
        /* Process all completed CQEs */
        while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
 -                      cons_index & size)) {
 +                      cons_index & size) && (done < budget)) {
                /*
                 * make sure we read the CQE after we read the
                 * ownership bit
                        txbbs_stamp = txbbs_skipped;
                        packets++;
                        bytes += ring->tx_info[ring_index].nr_bytes;
 -              } while (ring_index != new_index);
 +              } while ((++done < budget) && (ring_index != new_index));
  
                ++cons_index;
                index = cons_index & size_mask;
                netif_tx_wake_queue(ring->tx_queue);
                priv->port_stats.wake_queue++;
        }
 +      return done;
  }
  
  void mlx4_en_tx_irq(struct mlx4_cq *mcq)
        struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq);
        struct mlx4_en_priv *priv = netdev_priv(cq->dev);
  
 -      mlx4_en_process_tx_cq(cq->dev, cq);
 -      mlx4_en_arm_cq(priv, cq);
 +      if (priv->port_up)
 +              napi_schedule(&cq->napi);
 +      else
 +              mlx4_en_arm_cq(priv, cq);
  }
  
 +/* TX CQ polling - called by NAPI */
 +int mlx4_en_poll_tx_cq(struct napi_struct *napi, int budget)
 +{
 +      struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi);
 +      struct net_device *dev = cq->dev;
 +      struct mlx4_en_priv *priv = netdev_priv(dev);
 +      int done;
 +
 +      done = mlx4_en_process_tx_cq(dev, cq, budget);
 +
 +      /* If we used up all the quota - we're probably not done yet... */
 +      if (done < budget) {
 +              /* Done for now */
 +              napi_complete(napi);
 +              mlx4_en_arm_cq(priv, cq);
 +              return done;
 +      }
 +      return budget;
 +}
  
  static struct mlx4_en_tx_desc *mlx4_en_bounce_to_desc(struct mlx4_en_priv *priv,
                                                      struct mlx4_en_tx_ring *ring,
@@@ -561,10 -528,7 +561,10 @@@ static int get_real_size(struct sk_buf
        int real_size;
  
        if (skb_is_gso(skb)) {
 -              *lso_header_size = skb_transport_offset(skb) + tcp_hdrlen(skb);
 +              if (skb->encapsulation)
 +                      *lso_header_size = (skb_inner_transport_header(skb) - skb->data) + inner_tcp_hdrlen(skb);
 +              else
 +                      *lso_header_size = skb_transport_offset(skb) + tcp_hdrlen(skb);
                real_size = CTRL_SIZE + skb_shinfo(skb)->nr_frags * DS_SIZE +
                        ALIGN(*lso_header_size + 4, DS_SIZE);
                if (unlikely(*lso_header_size != skb_headlen(skb))) {
@@@ -628,7 -592,8 +628,8 @@@ static void build_inline_wqe(struct mlx
        }
  }
  
- u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb)
+ u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb,
+                        void *accel_priv)
  {
        struct mlx4_en_priv *priv = netdev_priv(dev);
        u16 rings_p_up = priv->num_tx_rings_p_up;
@@@ -863,14 -828,6 +864,14 @@@ netdev_tx_t mlx4_en_xmit(struct sk_buf
                tx_info->inl = 1;
        }
  
 +      if (skb->encapsulation) {
 +              struct iphdr *ipv4 = (struct iphdr *)skb_inner_network_header(skb);
 +              if (ipv4->protocol == IPPROTO_TCP || ipv4->protocol == IPPROTO_UDP)
 +                      op_own |= cpu_to_be32(MLX4_WQE_CTRL_IIP | MLX4_WQE_CTRL_ILP);
 +              else
 +                      op_own |= cpu_to_be32(MLX4_WQE_CTRL_IIP);
 +      }
 +
        ring->prod += nr_txbb;
  
        /* If we used a bounce buffer then copy descriptor back into place */
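
The mlx4_en_poll_tx_cq() routine added above follows the canonical budget-aware NAPI pattern: reap at most budget completions, and only complete NAPI and re-arm the completion interrupt once the work has run out. Reduced to a sketch with hypothetical "foo" names:

    static int foo_poll_tx(struct napi_struct *napi, int budget)
    {
            struct foo_cq *cq = container_of(napi, struct foo_cq, napi);
            int done = foo_process_tx_cq(cq, budget);  /* reap completions */

            if (done < budget) {
                    napi_complete(napi);  /* queue fully drained */
                    foo_arm_cq_irq(cq);   /* re-enable the interrupt */
                    return done;
            }
            return budget;  /* work left over: stay on the poll list */
    }
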
index fe7bdfebf353e745e3c447b820d5c72c413c06e4,d5758adceaa2f264b74d83e99f8091b7e1ead948..3af04c3f42ea96ddfa013ee874791c2db9dd6d94
@@@ -45,7 -45,6 +45,7 @@@
  #include <linux/dcbnl.h>
  #endif
  #include <linux/cpu_rmap.h>
 +#include <linux/ptp_clock_kernel.h>
  
  #include <linux/mlx4/device.h>
  #include <linux/mlx4/qp.h>
@@@ -256,8 -255,6 +256,8 @@@ struct mlx4_en_tx_ring 
        u16 poll_cnt;
        struct mlx4_en_tx_info *tx_info;
        u8 *bounce_buf;
 +      u8 queue_index;
 +      cpumask_t affinity_mask;
        u32 last_nr_txbb;
        struct mlx4_qp qp;
        struct mlx4_qp_context context;
@@@ -376,14 -373,10 +376,14 @@@ struct mlx4_en_dev 
        u32                     priv_pdn;
        spinlock_t              uar_lock;
        u8                      mac_removed[MLX4_MAX_PORTS + 1];
 +      rwlock_t                clock_lock;
 +      u32                     nominal_c_mult;
        struct cyclecounter     cycles;
        struct timecounter      clock;
        unsigned long           last_overflow_check;
        unsigned long           overflow_period;
 +      struct ptp_clock        *ptp_clock;
 +      struct ptp_clock_info   ptp_clock_info;
  };
  
  
@@@ -441,7 -434,6 +441,7 @@@ struct mlx4_en_mc_list 
        enum mlx4_en_mclist_act action;
        u8                      addr[ETH_ALEN];
        u64                     reg_id;
 +      u64                     tunnel_reg_id;
  };
  
  struct mlx4_en_frag_info {
@@@ -573,7 -565,7 +573,7 @@@ struct mlx4_en_priv 
        struct list_head filters;
        struct hlist_head filter_hash[1 << MLX4_EN_FILTER_HASH_SHIFT];
  #endif
 -
 +      u64 tunnel_reg_id;
  };
  
  enum mlx4_en_wol {
@@@ -661,7 -653,7 +661,7 @@@ static inline bool mlx4_en_cq_unlock_po
  }
  
  /* true if a socket is polling, even if it did not get the lock */
 -static inline bool mlx4_en_cq_ll_polling(struct mlx4_en_cq *cq)
 +static inline bool mlx4_en_cq_busy_polling(struct mlx4_en_cq *cq)
  {
        WARN_ON(!(cq->state & MLX4_CQ_LOCKED));
        return cq->state & CQ_USER_PEND;
@@@ -691,7 -683,7 +691,7 @@@ static inline bool mlx4_en_cq_unlock_po
        return false;
  }
  
 -static inline bool mlx4_en_cq_ll_polling(struct mlx4_en_cq *cq)
 +static inline bool mlx4_en_cq_busy_polling(struct mlx4_en_cq *cq)
  {
        return false;
  }
@@@ -722,13 -714,13 +722,14 @@@ int mlx4_en_set_cq_moder(struct mlx4_en
  int mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
  
  void mlx4_en_tx_irq(struct mlx4_cq *mcq);
- u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb);
+ u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb,
+                        void *accel_priv);
  netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev);
  
  int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
                           struct mlx4_en_tx_ring **pring,
 -                         int qpn, u32 size, u16 stride, int node);
 +                         int qpn, u32 size, u16 stride,
 +                         int node, int queue_index);
  void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
                             struct mlx4_en_tx_ring **pring);
  int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
@@@ -750,7 -742,6 +751,7 @@@ int mlx4_en_process_rx_cq(struct net_de
                          struct mlx4_en_cq *cq,
                          int budget);
  int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget);
 +int mlx4_en_poll_tx_cq(struct napi_struct *napi, int budget);
  void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
                int is_tx, int rss, int qpn, int cqn, int user_prio,
                struct mlx4_qp_context *context);
@@@ -796,7 -787,6 +797,7 @@@ void mlx4_en_fill_hwtstamps(struct mlx4
                            struct skb_shared_hwtstamps *hwts,
                            u64 timestamp);
  void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev);
 +void mlx4_en_remove_timestamp(struct mlx4_en_dev *mdev);
  int mlx4_en_timestamp_config(struct net_device *dev,
                             int tx_type,
                             int rx_filter);
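
The queue_index parameter threaded through mlx4_en_create_tx_ring() above exists so each TX ring can record a CPU affinity and publish it through XPS when the ring is activated (the netif_set_xps_queue() call in the en_tx.c hunk). The XPS call in isolation, as a hedged sketch assuming one ring per CPU:

    /* Steer transmissions from 'cpu' to TX queue 'qid'; names hypothetical. */
    static int foo_bind_txq_to_cpu(struct net_device *dev, u16 qid, int cpu)
    {
            cpumask_t mask;

            if (!cpu_online(cpu))
                    return -EINVAL;
            cpumask_clear(&mask);
            cpumask_set_cpu(cpu, &mask);
            return netif_set_xps_queue(dev, &mask, qid);
    }
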
index 25e1492ad5287a034c90e0bc17768ad4163452d7,f2a7c7166e2408a747dd03ef0fc456ba6121486c..1accd95312240efe61804bd2fb51a0b9a1c29664
@@@ -38,8 -38,8 +38,8 @@@
  
  #define _QLCNIC_LINUX_MAJOR 5
  #define _QLCNIC_LINUX_MINOR 3
 -#define _QLCNIC_LINUX_SUBVERSION 52
 -#define QLCNIC_LINUX_VERSIONID  "5.3.52"
 +#define _QLCNIC_LINUX_SUBVERSION 54
 +#define QLCNIC_LINUX_VERSIONID  "5.3.54"
  #define QLCNIC_DRV_IDC_VER  0x01
  #define QLCNIC_DRIVER_VERSION  ((_QLCNIC_LINUX_MAJOR << 16) |\
                 (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
@@@ -115,10 -115,6 +115,10 @@@ enum qlcnic_queue_type 
  #define QLCNIC_VNIC_MODE      0xFF
  #define QLCNIC_DEFAULT_MODE   0x0
  
 +/* Virtual NIC function count */
 +#define QLC_DEFAULT_VNIC_COUNT        8
 +#define QLC_84XX_VNIC_COUNT   16
 +
  /*
   * Following are the states of the Phantom. Phantom will set them and
   * Host will read to check if the fields are correct.
@@@ -378,7 -374,7 +378,7 @@@ struct qlcnic_rx_buffer 
  
  #define QLCNIC_INTR_DEFAULT                   0x04
  #define QLCNIC_CONFIG_INTR_COALESCE           3
 -#define QLCNIC_DEV_INFO_SIZE                  1
 +#define QLCNIC_DEV_INFO_SIZE                  2
  
  struct qlcnic_nic_intr_coalesce {
        u8      type;
@@@ -466,10 -462,8 +466,10 @@@ struct qlcnic_hardware_context 
        u16 max_rx_ques;
        u16 max_mtu;
        u32 msg_enable;
 -      u16 act_pci_func;
 +      u16 total_nic_func;
        u16 max_pci_func;
 +      u32 max_vnic_func;
 +      u32 total_pci_func;
  
        u32 capabilities;
        u32 extra_capability[3];
@@@ -797,10 -791,9 +797,10 @@@ struct qlcnic_cardrsp_tx_ctx 
  #define QLCNIC_MAC_VLAN_ADD   3
  #define QLCNIC_MAC_VLAN_DEL   4
  
 -struct qlcnic_mac_list_s {
 +struct qlcnic_mac_vlan_list {
        struct list_head list;
        uint8_t mac_addr[ETH_ALEN+2];
 +      u16 vlan_id;
  };
  
  /* MAC Learn */
  #define QLCNIC_FW_CAP2_HW_LRO_IPV6            BIT_3
  #define QLCNIC_FW_CAPABILITY_SET_DRV_VER      BIT_5
  #define QLCNIC_FW_CAPABILITY_2_BEACON         BIT_7
 -#define QLCNIC_FW_CAPABILITY_2_PER_PORT_ESWITCH_CFG   BIT_8
 +#define QLCNIC_FW_CAPABILITY_2_PER_PORT_ESWITCH_CFG   BIT_9
  
  /* module types */
  #define LINKEVENT_MODULE_NOT_PRESENT                  1
@@@ -970,9 -963,6 +970,9 @@@ struct qlcnic_ipaddr 
  #define QLCNIC_BEACON_EANBLE          0xC
  #define QLCNIC_BEACON_DISABLE         0xD
  
 +#define QLCNIC_BEACON_ON              2
 +#define QLCNIC_BEACON_OFF             0
 +
  #define QLCNIC_MSIX_TBL_SPACE         8192
  #define QLCNIC_PCI_REG_MSIX_TBL       0x44
  #define QLCNIC_MSIX_TBL_PGSIZE                4096
@@@ -1082,7 -1072,6 +1082,7 @@@ struct qlcnic_adapter 
        u64 dev_rst_time;
        bool drv_mac_learn;
        bool fdb_mac_learn;
 +      u8 rx_mac_learn;
        unsigned long vlans[BITS_TO_LONGS(VLAN_N_VID)];
        u8 flash_mfg_id;
        struct qlcnic_npar_info *npars;
@@@ -1271,7 -1260,7 +1271,7 @@@ struct qlcnic_pci_func_cfg 
        u16     port_num;
        u8      pci_func;
        u8      func_state;
 -      u8      def_mac_addr[6];
 +      u8      def_mac_addr[ETH_ALEN];
  };
  
  struct qlcnic_npar_func_cfg {
@@@ -1644,15 -1633,14 +1644,15 @@@ int qlcnic_set_default_offload_settings
  int qlcnic_reset_npar_config(struct qlcnic_adapter *);
  int qlcnic_set_eswitch_port_config(struct qlcnic_adapter *);
  void qlcnic_add_lb_filter(struct qlcnic_adapter *, struct sk_buff *, int, u16);
 -int qlcnic_get_beacon_state(struct qlcnic_adapter *, u8 *);
  int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter);
  int qlcnic_read_mac_addr(struct qlcnic_adapter *);
  int qlcnic_setup_netdev(struct qlcnic_adapter *, struct net_device *, int);
  void qlcnic_set_netdev_features(struct qlcnic_adapter *,
                                struct qlcnic_esw_func_cfg *);
  void qlcnic_sriov_vf_schedule_multi(struct net_device *);
 -void qlcnic_vf_add_mc_list(struct net_device *, u16);
 +int qlcnic_is_valid_nic_func(struct qlcnic_adapter *, u8);
 +int qlcnic_get_pci_func_type(struct qlcnic_adapter *, u16, u16 *, u16 *,
 +                           u16 *);
  
  /*
   * QLOGIC Board information
@@@ -1723,6 -1711,7 +1723,7 @@@ int qlcnic_83xx_init_mailbox_work(struc
  void qlcnic_83xx_detach_mailbox_work(struct qlcnic_adapter *);
  void qlcnic_83xx_reinit_mbx_work(struct qlcnic_mailbox *mbx);
  void qlcnic_83xx_free_mailbox(struct qlcnic_mailbox *mbx);
+ void qlcnic_update_stats(struct qlcnic_adapter *);
  
  /* Adapter hardware abstraction */
  struct qlcnic_hardware_ops {
                                               pci_channel_state_t);
        pci_ers_result_t (*io_slot_reset) (struct pci_dev *);
        void (*io_resume) (struct pci_dev *);
 +      void (*get_beacon_state)(struct qlcnic_adapter *);
  };
  
  extern struct qlcnic_nic_template qlcnic_vf_ops;
@@@ -1997,11 -1985,6 +1998,11 @@@ static inline void qlcnic_set_mac_filte
                adapter->ahw->hw_ops->set_mac_filter_count(adapter);
  }
  
 +static inline void qlcnic_get_beacon_state(struct qlcnic_adapter *adapter)
 +{
 +      adapter->ahw->hw_ops->get_beacon_state(adapter);
 +}
 +
  static inline void qlcnic_read_phys_port_id(struct qlcnic_adapter *adapter)
  {
        if (adapter->ahw->hw_ops->read_phys_port_id)
@@@ -2157,26 -2140,4 +2158,26 @@@ static inline bool qlcnic_sriov_vf_chec
  
        return status;
  }
 +
 +static inline bool qlcnic_83xx_pf_check(struct qlcnic_adapter *adapter)
 +{
 +      unsigned short device = adapter->pdev->device;
 +
 +      return (device == PCI_DEVICE_ID_QLOGIC_QLE834X) ? true : false;
 +}
 +
 +static inline bool qlcnic_83xx_vf_check(struct qlcnic_adapter *adapter)
 +{
 +      unsigned short device = adapter->pdev->device;
 +
 +      return (device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X) ? true : false;
 +}
 +
 +static inline u32 qlcnic_get_vnic_func_count(struct qlcnic_adapter *adapter)
 +{
 +      if (qlcnic_84xx_check(adapter))
 +              return QLC_84XX_VNIC_COUNT;
 +      else
 +              return QLC_DEFAULT_VNIC_COUNT;
 +}
  #endif                                /* __QLCNIC_H_ */
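
The new qlcnic_get_beacon_state() inline above follows this header's hardware-abstraction convention: generation-specific behaviour (82xx vs 83xx) lives behind a function pointer in qlcnic_hardware_ops, bound once at probe time, with a thin inline wrapper doing the dispatch. The pattern in miniature, with hypothetical names:

    struct foo_hw_ops {
            void (*get_beacon_state)(struct foo_adapter *adapter);
            /* ...one pointer per generation-specific operation... */
    };

    static inline void foo_get_beacon_state(struct foo_adapter *adapter)
    {
            /* dispatch to whichever implementation probe selected */
            adapter->hw_ops->get_beacon_state(adapter);
    }
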
index 45fa6eff56c93e030e4dd7e1c810c1c884ceec36,6b08194aa0d4900f8e29a4c5eca433f1d1709d9c..18ced0fb6cf05324a8f3869bfd90f1907b377ad2
@@@ -167,27 -167,35 +167,35 @@@ static const char qlcnic_gstrings_test[
  
  #define QLCNIC_TEST_LEN       ARRAY_SIZE(qlcnic_gstrings_test)
  
- static inline int qlcnic_82xx_statistics(void)
+ static inline int qlcnic_82xx_statistics(struct qlcnic_adapter *adapter)
  {
-       return ARRAY_SIZE(qlcnic_device_gstrings_stats) +
-              ARRAY_SIZE(qlcnic_83xx_mac_stats_strings);
+       return ARRAY_SIZE(qlcnic_gstrings_stats) +
+              ARRAY_SIZE(qlcnic_83xx_mac_stats_strings) +
+              QLCNIC_TX_STATS_LEN * adapter->drv_tx_rings;
  }
  
- static inline int qlcnic_83xx_statistics(void)
+ static inline int qlcnic_83xx_statistics(struct qlcnic_adapter *adapter)
  {
-       return ARRAY_SIZE(qlcnic_83xx_tx_stats_strings) +
+       return ARRAY_SIZE(qlcnic_gstrings_stats) +
+              ARRAY_SIZE(qlcnic_83xx_tx_stats_strings) +
               ARRAY_SIZE(qlcnic_83xx_mac_stats_strings) +
-              ARRAY_SIZE(qlcnic_83xx_rx_stats_strings);
+              ARRAY_SIZE(qlcnic_83xx_rx_stats_strings) +
+              QLCNIC_TX_STATS_LEN * adapter->drv_tx_rings;
  }
  
  static int qlcnic_dev_statistics_len(struct qlcnic_adapter *adapter)
  {
-       if (qlcnic_82xx_check(adapter))
-               return qlcnic_82xx_statistics();
-       else if (qlcnic_83xx_check(adapter))
-               return qlcnic_83xx_statistics();
-       else
-               return -1;
+       int len = -1;
+       if (qlcnic_82xx_check(adapter)) {
+               len = qlcnic_82xx_statistics(adapter);
+               if (adapter->flags & QLCNIC_ESWITCH_ENABLED)
+                       len += ARRAY_SIZE(qlcnic_device_gstrings_stats);
+       } else if (qlcnic_83xx_check(adapter)) {
+               len = qlcnic_83xx_statistics(adapter);
+       }
+       return len;
  }
  
  #define       QLCNIC_TX_INTR_NOT_CONFIGURED   0X78563412
@@@ -221,7 -229,7 +229,7 @@@ static const u32 ext_diag_registers[] 
        -1
  };
  
 -#define QLCNIC_MGMT_API_VERSION       2
 +#define QLCNIC_MGMT_API_VERSION       3
  #define QLCNIC_ETHTOOL_REGS_VER       4
  
  static inline int qlcnic_get_ring_regs_len(struct qlcnic_adapter *adapter)
@@@ -519,9 -527,6 +527,9 @@@ qlcnic_get_regs(struct net_device *dev
        regs_buff[0] = (0xcafe0000 | (QLCNIC_DEV_INFO_SIZE & 0xffff));
        regs_buff[1] = QLCNIC_MGMT_API_VERSION;
  
 +      if (adapter->ahw->capabilities & QLC_83XX_ESWITCH_CAPABILITY)
 +              regs_buff[2] = adapter->ahw->max_vnic_func;
 +
        if (qlcnic_82xx_check(adapter))
                i = qlcnic_82xx_get_registers(adapter, regs_buff);
        else
@@@ -923,18 -928,13 +931,13 @@@ static int qlcnic_eeprom_test(struct ne
  
  static int qlcnic_get_sset_count(struct net_device *dev, int sset)
  {
-       int len;
  
        struct qlcnic_adapter *adapter = netdev_priv(dev);
        switch (sset) {
        case ETH_SS_TEST:
                return QLCNIC_TEST_LEN;
        case ETH_SS_STATS:
-               len = qlcnic_dev_statistics_len(adapter) + QLCNIC_STATS_LEN;
-               if ((adapter->flags & QLCNIC_ESWITCH_ENABLED) ||
-                   qlcnic_83xx_check(adapter))
-                       return len;
-               return qlcnic_82xx_statistics();
+               return qlcnic_dev_statistics_len(adapter);
        default:
                return -EOPNOTSUPP;
        }
@@@ -1270,7 -1270,7 +1273,7 @@@ static u64 *qlcnic_fill_stats(u64 *data
        return data;
  }
  
- static void qlcnic_update_stats(struct qlcnic_adapter *adapter)
+ void qlcnic_update_stats(struct qlcnic_adapter *adapter)
  {
        struct qlcnic_host_tx_ring *tx_ring;
        int ring;
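
The reworked statistics-length helpers above exist to keep the ethtool contract intact: the ETH_SS_STATS count returned by get_sset_count() must equal exactly the number of u64 values that get_ethtool_stats() later writes, including the per-TX-ring blocks. The counting side of that contract, sketched with hypothetical names and macros:

    static int foo_get_sset_count(struct net_device *dev, int sset)
    {
            struct foo_priv *priv = netdev_priv(dev);

            switch (sset) {
            case ETH_SS_STATS:
                    /* fixed counters plus one block per active TX ring */
                    return FOO_GLOBAL_STATS_LEN +
                           FOO_TX_STATS_LEN * priv->num_tx_rings;
            default:
                    return -EOPNOTSUPP;
            }
    }
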
index a57dfe4ad40e71ea82584d23e7f026fe17fa8001,550791b8fbae98404f4be790520d0c4985871802..eec7b412477c1bd01d19591a2312b77e644c7735
@@@ -308,12 -308,12 +308,12 @@@ int qlcnic_read_mac_addr(struct qlcnic_
  
  static void qlcnic_delete_adapter_mac(struct qlcnic_adapter *adapter)
  {
 -      struct qlcnic_mac_list_s *cur;
 +      struct qlcnic_mac_vlan_list *cur;
        struct list_head *head;
  
        list_for_each(head, &adapter->mac_list) {
 -              cur = list_entry(head, struct qlcnic_mac_list_s, list);
 -              if (!memcmp(adapter->mac_addr, cur->mac_addr, ETH_ALEN)) {
 +              cur = list_entry(head, struct qlcnic_mac_vlan_list, list);
 +              if (ether_addr_equal_unaligned(adapter->mac_addr, cur->mac_addr)) {
                        qlcnic_sre_macaddr_change(adapter, cur->mac_addr,
                                                  0, QLCNIC_MAC_DEL);
                        list_del(&cur->list);
@@@ -337,7 -337,7 +337,7 @@@ static int qlcnic_set_mac(struct net_de
        if (!is_valid_ether_addr(addr->sa_data))
                return -EINVAL;
  
 -      if (!memcmp(adapter->mac_addr, addr->sa_data, ETH_ALEN))
 +      if (ether_addr_equal_unaligned(adapter->mac_addr, addr->sa_data))
                return 0;
  
        if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
@@@ -546,7 -546,6 +546,7 @@@ static struct qlcnic_hardware_ops qlcni
        .io_error_detected              = qlcnic_82xx_io_error_detected,
        .io_slot_reset                  = qlcnic_82xx_io_slot_reset,
        .io_resume                      = qlcnic_82xx_io_resume,
 +      .get_beacon_state               = qlcnic_82xx_get_beacon_state,
  };
  
  static int qlcnic_check_multi_tx_capability(struct qlcnic_adapter *adapter)
@@@ -647,7 -646,8 +647,7 @@@ int qlcnic_enable_msix(struct qlcnic_ad
                        } else {
                                adapter->ahw->num_msix = num_msix;
                                if (qlcnic_check_multi_tx(adapter) &&
 -                                  !adapter->ahw->diag_test &&
 -                                  (adapter->drv_tx_rings > 1))
 +                                  !adapter->ahw->diag_test)
                                        drv_sds_rings = num_msix - drv_tx_rings;
                                else
                                        drv_sds_rings = num_msix;
@@@ -800,26 -800,25 +800,26 @@@ static void qlcnic_cleanup_pci_map(stru
  
  static int qlcnic_get_act_pci_func(struct qlcnic_adapter *adapter)
  {
 +      struct qlcnic_hardware_context *ahw = adapter->ahw;
        struct qlcnic_pci_info *pci_info;
        int ret;
  
        if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
 -              switch (adapter->ahw->port_type) {
 +              switch (ahw->port_type) {
                case QLCNIC_GBE:
 -                      adapter->ahw->act_pci_func = QLCNIC_NIU_MAX_GBE_PORTS;
 +                      ahw->total_nic_func = QLCNIC_NIU_MAX_GBE_PORTS;
                        break;
                case QLCNIC_XGBE:
 -                      adapter->ahw->act_pci_func = QLCNIC_NIU_MAX_XG_PORTS;
 +                      ahw->total_nic_func = QLCNIC_NIU_MAX_XG_PORTS;
                        break;
                }
                return 0;
        }
  
 -      if (adapter->ahw->op_mode == QLCNIC_MGMT_FUNC)
 +      if (ahw->op_mode == QLCNIC_MGMT_FUNC)
                return 0;
  
 -      pci_info = kcalloc(QLCNIC_MAX_PCI_FUNC, sizeof(*pci_info), GFP_KERNEL);
 +      pci_info = kcalloc(ahw->max_vnic_func, sizeof(*pci_info), GFP_KERNEL);
        if (!pci_info)
                return -ENOMEM;
  
@@@ -847,13 -846,12 +847,13 @@@ static bool qlcnic_port_eswitch_cfg_cap
  
  int qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
  {
 +      struct qlcnic_hardware_context *ahw = adapter->ahw;
        struct qlcnic_pci_info *pci_info;
        int i, id = 0, ret = 0, j = 0;
        u16 act_pci_func;
        u8 pfn;
  
 -      pci_info = kcalloc(QLCNIC_MAX_PCI_FUNC, sizeof(*pci_info), GFP_KERNEL);
 +      pci_info = kcalloc(ahw->max_vnic_func, sizeof(*pci_info), GFP_KERNEL);
        if (!pci_info)
                return -ENOMEM;
  
        if (ret)
                goto err_pci_info;
  
 -      act_pci_func = adapter->ahw->act_pci_func;
 +      act_pci_func = ahw->total_nic_func;
  
        adapter->npars = kzalloc(sizeof(struct qlcnic_npar_info) *
                                 act_pci_func, GFP_KERNEL);
                goto err_npars;
        }
  
 -      for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
 +      for (i = 0; i < ahw->max_vnic_func; i++) {
                pfn = pci_info[i].id;
  
 -              if (pfn >= QLCNIC_MAX_PCI_FUNC) {
 +              if (pfn >= ahw->max_vnic_func) {
                        ret = QL_STATUS_INVALID_PARAM;
                        goto err_eswitch;
                }
@@@ -1348,7 -1346,7 +1348,7 @@@ int qlcnic_set_default_offload_settings
        if (adapter->need_fw_reset)
                return 0;
  
 -      for (i = 0; i < adapter->ahw->act_pci_func; i++) {
 +      for (i = 0; i < adapter->ahw->total_nic_func; i++) {
                if (!adapter->npars[i].eswitch_status)
                        continue;
  
@@@ -1411,7 -1409,7 +1411,7 @@@ int qlcnic_reset_npar_config(struct qlc
                        return 0;
  
        /* Set the NPAR config data after FW reset */
 -      for (i = 0; i < adapter->ahw->act_pci_func; i++) {
 +      for (i = 0; i < adapter->ahw->total_nic_func; i++) {
                npar = &adapter->npars[i];
                pci_func = npar->pci_func;
                if (!adapter->npars[i].eswitch_status)
@@@ -2037,7 -2035,7 +2037,7 @@@ qlcnic_reset_context(struct qlcnic_adap
  void qlcnic_82xx_set_mac_filter_count(struct qlcnic_adapter *adapter)
  {
        struct qlcnic_hardware_context *ahw = adapter->ahw;
 -      u16 act_pci_fn = ahw->act_pci_func;
 +      u16 act_pci_fn = ahw->total_nic_func;
        u16 count;
  
        ahw->max_mc_count = QLCNIC_MAX_MC_COUNT;
@@@ -2213,6 -2211,7 +2213,6 @@@ qlcnic_probe(struct pci_dev *pdev, cons
        struct qlcnic_hardware_context *ahw;
        int err, pci_using_dac = -1;
        char board_name[QLCNIC_MAX_BOARD_NAME_LEN + 19]; /* MAC + ": " + name */
 -      struct qlcnic_dcb *dcb;
  
        if (pdev->is_virtfn)
                return -ENODEV;
                goto err_out_free_wq;
  
        adapter->dev_rst_time = jiffies;
 -      adapter->ahw->revision_id = pdev->revision;
 +      ahw->revision_id = pdev->revision;
 +      ahw->max_vnic_func = qlcnic_get_vnic_func_count(adapter);
        if (qlcnic_mac_learn == FDB_MAC_LEARN)
                adapter->fdb_mac_learn = true;
        else if (qlcnic_mac_learn == DRV_MAC_LEARN)
  
                adapter->flags |= QLCNIC_NEED_FLR;
  
 -              dcb = adapter->dcb;
 -
 -              if (dcb && qlcnic_dcb_attach(dcb))
 -                      qlcnic_clear_dcb_ops(dcb);
        } else if (qlcnic_83xx_check(adapter)) {
                qlcnic_83xx_check_vf(adapter, ent);
                adapter->portnum = adapter->ahw->pci_func;
                goto err_out_free_hw;
        }
  
 +      qlcnic_dcb_enable(adapter->dcb);
 +
        if (qlcnic_read_mac_addr(adapter))
                dev_warn(&pdev->dev, "failed to read mac addr\n");
  
@@@ -2498,11 -2498,13 +2498,11 @@@ static void qlcnic_remove(struct pci_de
        qlcnic_cancel_idc_work(adapter);
        ahw = adapter->ahw;
  
 -      qlcnic_dcb_free(adapter->dcb);
 -
        unregister_netdev(netdev);
        qlcnic_sriov_cleanup(adapter);
  
        if (qlcnic_83xx_check(adapter)) {
 -              qlcnic_83xx_register_nic_idc_func(adapter, 0);
 +              qlcnic_83xx_initialize_nic(adapter, 0);
                cancel_delayed_work_sync(&adapter->idc_aen_work);
                qlcnic_83xx_free_mbx_intr(adapter);
                qlcnic_83xx_detach_mailbox_work(adapter);
                kfree(ahw->fw_info);
        }
  
 +      qlcnic_dcb_free(adapter->dcb);
 +
        qlcnic_detach(adapter);
  
        if (adapter->npars != NULL)
@@@ -2640,7 -2640,7 +2640,7 @@@ void qlcnic_alloc_lb_filters_mem(struc
        if (adapter->fhash.fmax && adapter->fhash.fhead)
                return;
  
 -      act_pci_func = adapter->ahw->act_pci_func;
 +      act_pci_func = adapter->ahw->total_nic_func;
        spin_lock_init(&adapter->mac_learn_lock);
        spin_lock_init(&adapter->rx_mac_learn_lock);
  
@@@ -2780,6 -2780,9 +2780,9 @@@ static struct net_device_stats *qlcnic_
        struct qlcnic_adapter *adapter = netdev_priv(netdev);
        struct net_device_stats *stats = &netdev->stats;
  
+       if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
+               qlcnic_update_stats(adapter);
        stats->rx_packets = adapter->stats.rx_pkts + adapter->stats.lro_pkts;
        stats->tx_packets = adapter->stats.xmitfinished;
        stats->rx_bytes = adapter->stats.rxbytes + adapter->stats.lrobytes;
@@@ -3723,6 -3726,12 +3726,6 @@@ int qlcnic_validate_rings(struct qlcnic
                return -EINVAL;
        }
  
 -      if (ring_cnt < 2) {
 -              netdev_err(netdev,
 -                         "%s rings value should not be lower than 2\n", buf);
 -              return -EINVAL;
 -      }
 -
        if (!is_power_of_2(ring_cnt)) {
                netdev_err(netdev, "%s rings value should be a power of 2\n",
                           buf);
@@@ -3780,7 -3789,8 +3783,7 @@@ int qlcnic_setup_rings(struct qlcnic_ad
        }
  
        if (qlcnic_83xx_check(adapter)) {
 -              /* register for NIC IDC AEN Events */
 -              qlcnic_83xx_register_nic_idc_func(adapter, 1);
 +              qlcnic_83xx_initialize_nic(adapter, 1);
                err = qlcnic_83xx_setup_mbx_intr(adapter);
                qlcnic_83xx_disable_mbx_poll(adapter);
                if (err) {
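
The qlcnic_validate_rings() hunk above drops the old minimum-of-two restriction but keeps the power-of-two requirement, presumably because the hardware's hash-to-ring mapping only distributes evenly for 2^n rings (an inference; the hunk itself does not say why). The surviving check, as a standalone sketch:

    #include <linux/log2.h>

    /* Hypothetical standalone form of the ring-count validation. */
    static int foo_validate_ring_count(struct net_device *netdev, u8 ring_cnt,
                                       u8 max_rings)
    {
            if (!is_power_of_2(ring_cnt)) {
                    netdev_err(netdev, "ring count must be a power of 2\n");
                    return -EINVAL;
            }
            if (ring_cnt > max_rings) {
                    netdev_err(netdev, "ring count %u exceeds maximum %u\n",
                               ring_cnt, max_rings);
                    return -EINVAL;
            }
            return 0;
    }
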
index 570495be77f3a69eff6e9f4daa72c7f0ce658ce7,0e9fb3301b1136e333cdd3d9bc62bcacc89dd4bb..023237a657207995e367ec365269d155043377c0
@@@ -187,8 -187,10 +187,8 @@@ struct tile_net_priv 
        int echannel;
        /* mPIPE instance, 0 or 1. */
        int instance;
 -#ifdef CONFIG_PTP_1588_CLOCK_TILEGX
        /* The timestamp config. */
        struct hwtstamp_config stamp_cfg;
 -#endif
  };
  
  static struct mpipe_data {
        int first_bucket;
        int num_buckets;
  
 -#ifdef CONFIG_PTP_1588_CLOCK_TILEGX
        /* PTP-specific data. */
        struct ptp_clock *ptp_clock;
        struct ptp_clock_info caps;
  
        /* Lock for ptp accessors. */
        struct mutex ptp_lock;
 -#endif
  
  } mpipe_data[NR_MPIPE_MAX] = {
        [0 ... (NR_MPIPE_MAX - 1)] {
@@@ -447,17 -451,20 +447,17 @@@ static void tile_net_provide_needed_buf
  static void tile_rx_timestamp(struct tile_net_priv *priv, struct sk_buff *skb,
                              gxio_mpipe_idesc_t *idesc)
  {
 -#ifdef CONFIG_PTP_1588_CLOCK_TILEGX
        if (unlikely(priv->stamp_cfg.rx_filter != HWTSTAMP_FILTER_NONE)) {
                struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
                memset(shhwtstamps, 0, sizeof(*shhwtstamps));
                shhwtstamps->hwtstamp = ktime_set(idesc->time_stamp_sec,
                                                  idesc->time_stamp_ns);
        }
 -#endif
  }
  
  /* Get TX timestamp, and store it in the skb. */
  static void tile_tx_timestamp(struct sk_buff *skb, int instance)
  {
 -#ifdef CONFIG_PTP_1588_CLOCK_TILEGX
        struct skb_shared_info *shtx = skb_shinfo(skb);
        if (unlikely((shtx->tx_flags & SKBTX_HW_TSTAMP) != 0)) {
                struct mpipe_data *md = &mpipe_data[instance];
                shhwtstamps.hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec);
                skb_tstamp_tx(skb, &shhwtstamps);
        }
 -#endif
  }
  
  /* Use ioctl() to enable or disable TX or RX timestamping. */
 -static int tile_hwtstamp_ioctl(struct net_device *dev, struct ifreq *rq,
 -                             int cmd)
 +static int tile_hwtstamp_set(struct net_device *dev, struct ifreq *rq)
  {
 -#ifdef CONFIG_PTP_1588_CLOCK_TILEGX
        struct hwtstamp_config config;
        struct tile_net_priv *priv = netdev_priv(dev);
  
  
        priv->stamp_cfg = config;
        return 0;
 -#else
 -      return -EOPNOTSUPP;
 -#endif
 +}
 +
 +static int tile_hwtstamp_get(struct net_device *dev, struct ifreq *rq)
 +{
 +      struct tile_net_priv *priv = netdev_priv(dev);
 +
 +      if (copy_to_user(rq->ifr_data, &priv->stamp_cfg,
 +                       sizeof(priv->stamp_cfg)))
 +              return -EFAULT;
 +
 +      return 0;
  }
  
  static inline bool filter_packet(struct net_device *dev, void *buf)
@@@ -812,6 -814,8 +812,6 @@@ static enum hrtimer_restart tile_net_ha
        return HRTIMER_NORESTART;
  }
  
 -#ifdef CONFIG_PTP_1588_CLOCK_TILEGX
 -
  /* PTP clock operations. */
  
  static int ptp_mpipe_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
@@@ -878,9 -882,12 +878,9 @@@ static struct ptp_clock_info ptp_mpipe_
        .enable         = ptp_mpipe_enable,
  };
  
 -#endif /* CONFIG_PTP_1588_CLOCK_TILEGX */
 -
  /* Sync mPIPE's timestamp up with Linux system time and register PTP clock. */
  static void register_ptp_clock(struct net_device *dev, struct mpipe_data *md)
  {
 -#ifdef CONFIG_PTP_1588_CLOCK_TILEGX
        struct timespec ts;
  
        getnstimeofday(&ts);
        if (IS_ERR(md->ptp_clock))
                netdev_err(dev, "ptp_clock_register failed %ld\n",
                           PTR_ERR(md->ptp_clock));
 -#endif
  }
  
  /* Initialize PTP fields in a new device. */
  static void init_ptp_dev(struct tile_net_priv *priv)
  {
 -#ifdef CONFIG_PTP_1588_CLOCK_TILEGX
        priv->stamp_cfg.rx_filter = HWTSTAMP_FILTER_NONE;
        priv->stamp_cfg.tx_type = HWTSTAMP_TX_OFF;
 -#endif
  }
  
  /* Helper functions for "tile_net_update()". */
@@@ -2070,7 -2080,8 +2070,8 @@@ static int tile_net_tx(struct sk_buff *
  }
  
  /* Return subqueue id on this core (one per core). */
- static u16 tile_net_select_queue(struct net_device *dev, struct sk_buff *skb)
+ static u16 tile_net_select_queue(struct net_device *dev, struct sk_buff *skb,
+                                void *accel_priv)
  {
        return smp_processor_id();
  }
@@@ -2088,9 -2099,7 +2089,9 @@@ static void tile_net_tx_timeout(struct 
  static int tile_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
  {
        if (cmd == SIOCSHWTSTAMP)
 -              return tile_hwtstamp_ioctl(dev, rq, cmd);
 +              return tile_hwtstamp_set(dev, rq);
 +      if (cmd == SIOCGHWTSTAMP)
 +              return tile_hwtstamp_get(dev, rq);
  
        return -EOPNOTSUPP;
  }
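
The tilegx change above splits the old timestamping ioctl into set and get halves so the driver can answer the then-new SIOCGHWTSTAMP query, which simply copies the saved hwtstamp_config back to user space. The resulting dispatch, sketched with hypothetical names:

    static int foo_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
    {
            switch (cmd) {
            case SIOCSHWTSTAMP:
                    return foo_hwtstamp_set(dev, rq); /* validate and store */
            case SIOCGHWTSTAMP:
                    return foo_hwtstamp_get(dev, rq); /* copy config back out */
            default:
                    return -EOPNOTSUPP;
            }
    }
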
diff --combined drivers/net/macvlan.c
index 09ababe54a5b050ba247f9e61a0e34ade148d515,bc8faaec33f5afb0bbf7efdd88cd082122c3cc7f..8433de4509c75a35cb65e7abdacb4f8968cb4008
@@@ -120,7 -120,7 +120,7 @@@ static int macvlan_broadcast_one(struc
        struct net_device *dev = vlan->dev;
  
        if (local)
 -              return vlan->forward(dev, skb);
 +              return dev_forward_skb(dev, skb);
  
        skb->dev = dev;
        if (ether_addr_equal_64bits(eth->h_dest, dev->broadcast))
        else
                skb->pkt_type = PACKET_MULTICAST;
  
 -      return vlan->receive(skb);
 +      return netif_rx(skb);
  }
  
  static u32 macvlan_hash_mix(const struct macvlan_dev *vlan)
@@@ -251,7 -251,7 +251,7 @@@ static rx_handler_result_t macvlan_hand
        skb->dev = dev;
        skb->pkt_type = PACKET_HOST;
  
 -      ret = vlan->receive(skb);
 +      ret = netif_rx(skb);
  
  out:
        macvlan_count_rx(vlan, len, ret == NET_RX_SUCCESS, 0);
@@@ -290,8 -290,8 +290,8 @@@ xmit_world
        return dev_queue_xmit(skb);
  }
  
 -netdev_tx_t macvlan_start_xmit(struct sk_buff *skb,
 -                             struct net_device *dev)
 +static netdev_tx_t macvlan_start_xmit(struct sk_buff *skb,
 +                                    struct net_device *dev)
  {
        unsigned int len = skb->len;
        int ret;
  
        if (vlan->fwd_priv) {
                skb->dev = vlan->lowerdev;
-               ret = dev_hard_start_xmit(skb, skb->dev, NULL, vlan->fwd_priv);
+               ret = dev_queue_xmit_accel(skb, vlan->fwd_priv);
        } else {
                ret = macvlan_queue_xmit(skb, dev);
        }
  
        if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
 -              struct macvlan_pcpu_stats *pcpu_stats;
 +              struct vlan_pcpu_stats *pcpu_stats;
  
                pcpu_stats = this_cpu_ptr(vlan->pcpu_stats);
                u64_stats_update_begin(&pcpu_stats->syncp);
        }
        return ret;
  }
 -EXPORT_SYMBOL_GPL(macvlan_start_xmit);
  
  static int macvlan_hard_header(struct sk_buff *skb, struct net_device *dev,
                               unsigned short type, const void *daddr,
@@@ -337,6 -338,8 +337,8 @@@ static const struct header_ops macvlan_
        .cache_update   = eth_header_cache_update,
  };
  
+ static struct rtnl_link_ops macvlan_link_ops;
  static int macvlan_open(struct net_device *dev)
  {
        struct macvlan_dev *vlan = netdev_priv(dev);
                goto hash_add;
        }
  
-       if (lowerdev->features & NETIF_F_HW_L2FW_DOFFLOAD) {
+       if (lowerdev->features & NETIF_F_HW_L2FW_DOFFLOAD &&
+           dev->rtnl_link_ops == &macvlan_link_ops) {
                vlan->fwd_priv =
                      lowerdev->netdev_ops->ndo_dfwd_add_station(lowerdev, dev);
  
                 */
                if (IS_ERR_OR_NULL(vlan->fwd_priv)) {
                        vlan->fwd_priv = NULL;
-               } else {
-                       dev->features &= ~NETIF_F_LLTX;
+               } else
                        return 0;
-               }
        }
  
        err = -EBUSY;
@@@ -545,12 -547,12 +546,12 @@@ static int macvlan_init(struct net_devi
  
        macvlan_set_lockdep_class(dev);
  
 -      vlan->pcpu_stats = alloc_percpu(struct macvlan_pcpu_stats);
 +      vlan->pcpu_stats = alloc_percpu(struct vlan_pcpu_stats);
        if (!vlan->pcpu_stats)
                return -ENOMEM;
  
        for_each_possible_cpu(i) {
 -              struct macvlan_pcpu_stats *mvlstats;
 +              struct vlan_pcpu_stats *mvlstats;
                mvlstats = per_cpu_ptr(vlan->pcpu_stats, i);
                u64_stats_init(&mvlstats->syncp);
        }
@@@ -576,7 -578,7 +577,7 @@@ static struct rtnl_link_stats64 *macvla
        struct macvlan_dev *vlan = netdev_priv(dev);
  
        if (vlan->pcpu_stats) {
 -              struct macvlan_pcpu_stats *p;
 +              struct vlan_pcpu_stats *p;
                u64 rx_packets, rx_bytes, rx_multicast, tx_packets, tx_bytes;
                u32 rx_errors = 0, tx_dropped = 0;
                unsigned int start;
@@@ -698,8 -700,7 +699,7 @@@ static netdev_features_t macvlan_fix_fe
        features = netdev_increment_features(vlan->lowerdev->features,
                                             features,
                                             mask);
-       if (!vlan->fwd_priv)
-               features |= NETIF_F_LLTX;
+       features |= NETIF_F_LLTX;
  
        return features;
  }
@@@ -813,7 -814,10 +813,7 @@@ static int macvlan_validate(struct nlat
  }
  
  int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
 -                         struct nlattr *tb[], struct nlattr *data[],
 -                         int (*receive)(struct sk_buff *skb),
 -                         int (*forward)(struct net_device *dev,
 -                                        struct sk_buff *skb))
 +                         struct nlattr *tb[], struct nlattr *data[])
  {
        struct macvlan_dev *vlan = netdev_priv(dev);
        struct macvlan_port *port;
        if (lowerdev == NULL)
                return -ENODEV;
  
 -      /* When creating macvlans on top of other macvlans - use
 +      /* When creating macvlans or macvtaps on top of other macvlans - use
         * the real device as the lowerdev.
         */
 -      if (lowerdev->rtnl_link_ops == dev->rtnl_link_ops) {
 -              struct macvlan_dev *lowervlan = netdev_priv(lowerdev);
 -              lowerdev = lowervlan->lowerdev;
 -      }
 +      if (netif_is_macvlan(lowerdev))
 +              lowerdev = macvlan_dev_real_dev(lowerdev);
  
        if (!tb[IFLA_MTU])
                dev->mtu = lowerdev->mtu;
        vlan->lowerdev = lowerdev;
        vlan->dev      = dev;
        vlan->port     = port;
 -      vlan->receive  = receive;
 -      vlan->forward  = forward;
        vlan->set_features = MACVLAN_FEATURES;
  
        vlan->mode     = MACVLAN_MODE_VEPA;
@@@ -899,7 -907,9 +899,7 @@@ EXPORT_SYMBOL_GPL(macvlan_common_newlin
  static int macvlan_newlink(struct net *src_net, struct net_device *dev,
                           struct nlattr *tb[], struct nlattr *data[])
  {
 -      return macvlan_common_newlink(src_net, dev, tb, data,
 -                                    netif_rx,
 -                                    dev_forward_skb);
 +      return macvlan_common_newlink(src_net, dev, tb, data);
  }
  
  void macvlan_dellink(struct net_device *dev, struct list_head *head)
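
The conversion above from macvlan_pcpu_stats to the shared vlan_pcpu_stats type keeps the usual lockless per-CPU counter discipline: each CPU updates only its own counters inside a u64_stats syncp section, and readers aggregate across CPUs with the fetch/retry helpers. The writer side of that discipline, with hypothetical names:

    struct foo_pcpu_stats {
            u64                     rx_packets;
            u64                     rx_bytes;
            struct u64_stats_sync   syncp;
    };

    static void foo_count_rx(struct foo_dev *foo, unsigned int len)
    {
            struct foo_pcpu_stats *stats = this_cpu_ptr(foo->pcpu_stats);

            u64_stats_update_begin(&stats->syncp);
            stats->rx_packets++;
            stats->rx_bytes += len;
            u64_stats_update_end(&stats->syncp);
    }
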
diff --combined drivers/net/tun.c
index 09f66624eaca1eea40793f4c6c5c057f837a2e20,ecec8029c5e84c557374817429c29a505f699c0d..34cca74c99ed4f36060f1de55af2ffb523b67b0b
@@@ -110,7 -110,7 +110,7 @@@ struct tap_filter 
        unsigned char   addr[FLT_EXACT_COUNT][ETH_ALEN];
  };
  
 -/* DEFAULT_MAX_NUM_RSS_QUEUES were choosed to let the rx/tx queues allocated for
 +/* DEFAULT_MAX_NUM_RSS_QUEUES were chosen to let the rx/tx queues allocated for
   * the netdevice to be fit in one page. So we can make sure the success of
   * memory allocation. TODO: increase the limit. */
  #define MAX_TAP_QUEUES DEFAULT_MAX_NUM_RSS_QUEUES
  #define TUN_FLOW_EXPIRE (3 * HZ)
  
  /* A tun_file connects an open character device to a tuntap netdevice. It
 - * also contains all socket related strctures (except sock_fprog and tap_filter)
 + * also contains all socket related structures (except sock_fprog and tap_filter)
   * to serve as one transmit queue for tuntap device. The sock_fprog and
   * tap_filter were kept in tun_struct since they were used for filtering for the
   * netdevice not for a specific queue (at least I didn't see the requirement for
@@@ -152,7 -152,6 +152,7 @@@ struct tun_flow_entry 
        struct tun_struct *tun;
  
        u32 rxhash;
 +      u32 rps_rxhash;
        int queue_index;
        unsigned long updated;
  };
@@@ -221,7 -220,6 +221,7 @@@ static struct tun_flow_entry *tun_flow_
                          rxhash, queue_index);
                e->updated = jiffies;
                e->rxhash = rxhash;
 +              e->rps_rxhash = 0;
                e->queue_index = queue_index;
                e->tun = tun;
                hlist_add_head_rcu(&e->hash_link, head);
@@@ -234,7 -232,6 +234,7 @@@ static void tun_flow_delete(struct tun_
  {
        tun_debug(KERN_INFO, tun, "delete flow: hash %u index %u\n",
                  e->rxhash, e->queue_index);
 +      sock_rps_reset_flow_hash(e->rps_rxhash);
        hlist_del_rcu(&e->hash_link);
        kfree_rcu(e, rcu);
        --tun->flow_count;
@@@ -328,7 -325,6 +328,7 @@@ static void tun_flow_update(struct tun_
                /* TODO: keep queueing to old queue until it's empty? */
                e->queue_index = queue_index;
                e->updated = jiffies;
 +              sock_rps_record_flow_hash(e->rps_rxhash);
        } else {
                spin_lock_bh(&tun->lock);
                if (!tun_flow_find(head, rxhash) &&
@@@ -345,26 -341,15 +345,27 @@@ unlock
        rcu_read_unlock();
  }
  
 +/**
 + * Save the hash received in the stack receive path and update the
 + * flow_hash table accordingly.
 + */
 +static inline void tun_flow_save_rps_rxhash(struct tun_flow_entry *e, u32 hash)
 +{
 +      if (unlikely(e->rps_rxhash != hash)) {
 +              sock_rps_reset_flow_hash(e->rps_rxhash);
 +              e->rps_rxhash = hash;
 +      }
 +}
 +
  /* We try to identify a flow through its rxhash first. The reason that
 - * we do not check rxq no. is becuase some cards(e.g 82599), chooses
 + * we do not check rxq no. is because some cards(e.g 82599), chooses
   * the rxq based on the txq where the last packet of the flow comes. As
   * the userspace application move between processors, we may get a
   * different rxq no. here. If we could not get rxhash, then we would
   * hope the rxq no. may help here.
   */
- static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb)
+ static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb,
+                           void *accel_priv)
  {
        struct tun_struct *tun = netdev_priv(dev);
        struct tun_flow_entry *e;
        rcu_read_lock();
        numqueues = ACCESS_ONCE(tun->numqueues);
  
 -      txq = skb_get_rxhash(skb);
 +      txq = skb_get_hash(skb);
        if (txq) {
                e = tun_flow_find(&tun->flows[tun_hashfn(txq)], txq);
 -              if (e)
 +              if (e) {
 +                      tun_flow_save_rps_rxhash(e, txq);
                        txq = e->queue_index;
 -              else
 +              } else
                        /* use multiply and shift instead of expensive divide */
                        txq = ((u64)txq * numqueues) >> 32;
        } else if (likely(skb_rx_queue_recorded(skb))) {
@@@ -548,7 -532,7 +549,7 @@@ static int tun_attach(struct tun_struc
  
        err = 0;
  
 -      /* Re-attach the filter to presist device */
 +      /* Re-attach the filter to persist device */
        if (!skip_filter && (tun->filter_attached == true)) {
                err = sk_attach_filter(&tun->fprog, tfile->socket.sk);
                if (!err)
@@@ -745,22 -729,6 +746,22 @@@ static netdev_tx_t tun_net_xmit(struct 
        if (txq >= tun->numqueues)
                goto drop;
  
 +      if (tun->numqueues == 1) {
 +              /* Select queue was not called for the skbuff, so we extract the
 +               * RPS hash and save it into the flow_table here.
 +               */
 +              __u32 rxhash;
 +
 +              rxhash = skb_get_hash(skb);
 +              if (rxhash) {
 +                      struct tun_flow_entry *e;
 +                      e = tun_flow_find(&tun->flows[tun_hashfn(rxhash)],
 +                                      rxhash);
 +                      if (e)
 +                              tun_flow_save_rps_rxhash(e, rxhash);
 +              }
 +      }
 +
        tun_debug(KERN_INFO, tun, "tun_net_xmit %d\n", skb->len);
  
        BUG_ON(!tfile);
@@@ -852,9 -820,9 +853,9 @@@ static void tun_poll_controller(struct 
         * Tun only receives frames when:
         * 1) the char device endpoint gets data from user space
         * 2) the tun socket gets a sendmsg call from user space
 -       * Since both of those are syncronous operations, we are guaranteed
 +       * Since both of those are synchronous operations, we are guaranteed
         * never to have pending data when we poll for it
 -       * so theres nothing to do here but return.
 +       * so there is nothing to do here but return.
         * We need this though so netpoll recognizes us as an interface that
         * supports polling, which enables bridge devices in virt setups to
         * still use netconsole
@@@ -1179,7 -1147,7 +1180,7 @@@ static ssize_t tun_get_user(struct tun_
        skb_reset_network_header(skb);
        skb_probe_transport_header(skb, 0);
  
 -      rxhash = skb_get_rxhash(skb);
 +      rxhash = skb_get_hash(skb);
        netif_rx_ni(skb);
  
        tun->dev->stats.rx_packets++;
@@@ -1324,7 -1292,8 +1325,7 @@@ done
  }
  
  static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
 -                         struct kiocb *iocb, const struct iovec *iv,
 -                         ssize_t len, int noblock)
 +                         const struct iovec *iv, ssize_t len, int noblock)
  {
        DECLARE_WAITQUEUE(wait, current);
        struct sk_buff *skb;
@@@ -1387,7 -1356,7 +1388,7 @@@ static ssize_t tun_chr_aio_read(struct 
                goto out;
        }
  
 -      ret = tun_do_read(tun, tfile, iocb, iv, len,
 +      ret = tun_do_read(tun, tfile, iv, len,
                          file->f_flags & O_NONBLOCK);
        ret = min_t(ssize_t, ret, len);
        if (ret > 0)
@@@ -1488,7 -1457,7 +1489,7 @@@ static int tun_recvmsg(struct kiocb *io
                                         SOL_PACKET, TUN_TX_TIMESTAMP);
                goto out;
        }
 -      ret = tun_do_read(tun, tfile, iocb, m->msg_iov, total_len,
 +      ret = tun_do_read(tun, tfile, m->msg_iov, total_len,
                          flags & MSG_DONTWAIT);
        if (ret > total_len) {
                m->msg_flags |= MSG_TRUNC;
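
tun_select_queue() above maps the flow hash onto the queue range with ((u64)txq * numqueues) >> 32 rather than a modulo: the 64-bit product of a 32-bit hash and numqueues, shifted right by 32, always lands in [0, numqueues), so the expensive divide becomes a multiply and a shift. As a standalone helper:

    /* Map a 32-bit hash uniformly onto [0, numqueues) without dividing.
     * (u64)hash * numqueues < 2^32 * numqueues, so after >> 32 the
     * result is strictly less than numqueues.
     */
    static inline u32 hash_to_queue(u32 hash, u32 numqueues)
    {
            return ((u64)hash * numqueues) >> 32;
    }
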
diff --combined drivers/net/usb/usbnet.c
index 56c175ebae3ca9b55a40e019e61b5a59840d35de,aba04f56176008aa12d0eaf72590d239f0affce8..4671da755e7b87f60758259021bcc5ab7f7c6cd1
@@@ -14,7 -14,8 +14,7 @@@
   * GNU General Public License for more details.
   *
   * You should have received a copy of the GNU General Public License
 - * along with this program; if not, write to the Free Software
 - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 + * along with this program; if not, see <http://www.gnu.org/licenses/>.
   */
  
  /*
@@@ -1244,7 -1245,7 +1244,7 @@@ static int build_dma_sg(const struct sk
                return -ENOMEM;
  
        urb->num_sgs = num_sgs;
-       sg_init_table(urb->sg, urb->num_sgs);
+       sg_init_table(urb->sg, urb->num_sgs + 1);
  
        sg_set_buf(&urb->sg[s++], skb->data, skb_headlen(skb));
        total_len += skb_headlen(skb);
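
The one-line usbnet fix above matters because sg_init_table() both zeroes the entries and marks the last one as the end of the list; the driver sizes the table with one slot beyond num_sgs (reserved for a trailing zero-length packet in the allocation just outside this hunk), so initialising only num_sgs entries left that spare slot unterminated. Generic usage of the helper:

    struct scatterlist sg[4];

    sg_init_table(sg, ARRAY_SIZE(sg)); /* zero all entries, mark sg[3] as end */
    sg_set_buf(&sg[0], buf, len);      /* then point entries at buffers */
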
index e58b8af56c045f0095d6d41405b3bc3f2ce83dab,e6272546395a982d6d3e6e23239eaddc1bc3bf60..3040924f5f3cf187bb12fcc78969548588031346
@@@ -5,7 -5,7 +5,7 @@@
   *
   * GPL LICENSE SUMMARY
   *
 - * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
 + * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
   *
   * This program is free software; you can redistribute it and/or modify
   * it under the terms of version 2 of the GNU General Public License as
@@@ -30,7 -30,7 +30,7 @@@
   *
   * BSD LICENSE
   *
 - * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
 + * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
   * All rights reserved.
   *
   * Redistribution and use in source and binary forms, with or without
@@@ -297,9 -297,6 +297,9 @@@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_c
        {IWL_PCI_DEVICE(0x08B2, 0x4370, iwl7260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x08B2, 0x4360, iwl7260_2n_cfg)},
        {IWL_PCI_DEVICE(0x08B1, 0x5070, iwl7260_2ac_cfg)},
 +      {IWL_PCI_DEVICE(0x08B1, 0x5072, iwl7260_2ac_cfg)},
 +      {IWL_PCI_DEVICE(0x08B1, 0x5170, iwl7260_2ac_cfg)},
 +      {IWL_PCI_DEVICE(0x08B1, 0x5770, iwl7260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x08B1, 0x4020, iwl7260_2n_cfg)},
        {IWL_PCI_DEVICE(0x08B1, 0x402A, iwl7260_2n_cfg)},
        {IWL_PCI_DEVICE(0x08B2, 0x4220, iwl7260_2n_cfg)},
        {IWL_PCI_DEVICE(0x08B4, 0x8270, iwl3160_2ac_cfg)},
        {IWL_PCI_DEVICE(0x08B3, 0x8470, iwl3160_2ac_cfg)},
        {IWL_PCI_DEVICE(0x08B3, 0x8570, iwl3160_2ac_cfg)},
 +      {IWL_PCI_DEVICE(0x08B3, 0x1070, iwl3160_2ac_cfg)},
 +      {IWL_PCI_DEVICE(0x08B3, 0x1170, iwl3160_2ac_cfg)},
  
  /* 7265 Series */
        {IWL_PCI_DEVICE(0x095A, 0x5010, iwl7265_2ac_cfg)},
        {IWL_PCI_DEVICE(0x095B, 0x5310, iwl7265_2ac_cfg)},
        {IWL_PCI_DEVICE(0x095B, 0x5302, iwl7265_2ac_cfg)},
        {IWL_PCI_DEVICE(0x095B, 0x5210, iwl7265_2ac_cfg)},
-       {IWL_PCI_DEVICE(0x095B, 0x5012, iwl7265_2ac_cfg)},
-       {IWL_PCI_DEVICE(0x095B, 0x500A, iwl7265_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x095A, 0x5012, iwl7265_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x095A, 0x500A, iwl7265_2ac_cfg)},
        {IWL_PCI_DEVICE(0x095A, 0x5410, iwl7265_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x095A, 0x5400, iwl7265_2ac_cfg)},
        {IWL_PCI_DEVICE(0x095A, 0x1010, iwl7265_2ac_cfg)},
        {IWL_PCI_DEVICE(0x095A, 0x5000, iwl7265_2n_cfg)},
        {IWL_PCI_DEVICE(0x095B, 0x5200, iwl7265_2n_cfg)},
        {IWL_PCI_DEVICE(0x095A, 0x5002, iwl7265_n_cfg)},
        {IWL_PCI_DEVICE(0x095B, 0x5202, iwl7265_n_cfg)},
        {IWL_PCI_DEVICE(0x095A, 0x9010, iwl7265_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x095A, 0x9110, iwl7265_2ac_cfg)},
        {IWL_PCI_DEVICE(0x095A, 0x9210, iwl7265_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x095A, 0x9510, iwl7265_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x095A, 0x9310, iwl7265_2ac_cfg)},
        {IWL_PCI_DEVICE(0x095A, 0x9410, iwl7265_2ac_cfg)},
        {IWL_PCI_DEVICE(0x095A, 0x5020, iwl7265_2n_cfg)},
        {IWL_PCI_DEVICE(0x095A, 0x502A, iwl7265_2n_cfg)},
        {IWL_PCI_DEVICE(0x095A, 0x5420, iwl7265_2n_cfg)},
        {IWL_PCI_DEVICE(0x095A, 0x5090, iwl7265_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x095A, 0x5190, iwl7265_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x095A, 0x5590, iwl7265_2ac_cfg)},
        {IWL_PCI_DEVICE(0x095B, 0x5290, iwl7265_2ac_cfg)},
        {IWL_PCI_DEVICE(0x095A, 0x5490, iwl7265_2ac_cfg)},
  #endif /* CONFIG_IWLMVM */
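
The iwlwifi hunk above is pure device-table maintenance: each IWL_PCI_DEVICE(device, subdevice, cfg) row binds one PCI (device ID, subsystem ID) pair to a configuration struct, and the 0x095B to 0x095A edits move entries to the correct device ID. The generic form of such a match table, with purely illustrative IDs:

    /* Hypothetical PCI match table; IDs and names are illustrative only. */
    static const struct pci_device_id foo_ids[] = {
            { PCI_DEVICE_SUB(0x8086, 0x095A, PCI_ANY_ID, 0x5012),
              .driver_data = (kernel_ulong_t)&foo_cfg },
            { } /* terminating entry */
    };
    MODULE_DEVICE_TABLE(pci, foo_ids);
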
index 9c0cc8ded0216de43d09300dbed19cfa9fe2e3b3,a1b32ee9594a6b9b899caa03cea53f3d9a0ddf90..fa41a773b79bab34f40e889122e8e9cc26127209
@@@ -159,7 -159,7 +159,7 @@@ static const struct ieee80211_regdomai
        .reg_rules = {
                REG_RULE(2412-10, 2462+10, 40, 0, 20, 0),
                REG_RULE(5725-10, 5850+10, 40, 0, 30,
 -                      NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_IBSS),
 +                       NL80211_RRF_NO_IR),
        }
  };
  
@@@ -353,6 -353,7 +353,6 @@@ struct mac80211_hwsim_data 
        } ps;
        bool ps_poll_pending;
        struct dentry *debugfs;
 -      struct dentry *debugfs_ps;
  
        struct sk_buff_head pending;    /* packets pending */
        /*
         * radio can be in more then one group.
         */
        u64 group;
 -      struct dentry *debugfs_group;
  
        int power_level;
  
@@@ -1491,7 -1493,7 +1491,7 @@@ static void hw_scan_work(struct work_st
                    req->channels[hwsim->scan_chan_idx]->center_freq);
  
        hwsim->tmp_chan = req->channels[hwsim->scan_chan_idx];
 -      if (hwsim->tmp_chan->flags & IEEE80211_CHAN_PASSIVE_SCAN ||
 +      if (hwsim->tmp_chan->flags & IEEE80211_CHAN_NO_IR ||
            !req->n_ssids) {
                dwell = 120;
        } else {
@@@ -1740,7 -1742,9 +1740,7 @@@ static void mac80211_hwsim_free(void
        spin_unlock_bh(&hwsim_radio_lock);
  
        list_for_each_entry_safe(data, tmpdata, &tmplist, list) {
 -              debugfs_remove(data->debugfs_group);
 -              debugfs_remove(data->debugfs_ps);
 -              debugfs_remove(data->debugfs);
 +              debugfs_remove_recursive(data->debugfs);
                ieee80211_unregister_hw(data->hw);
                device_release_driver(data->dev);
                device_unregister(data->dev);
@@@ -1897,17 -1901,6 +1897,17 @@@ static int hwsim_fops_ps_write(void *da
  DEFINE_SIMPLE_ATTRIBUTE(hwsim_fops_ps, hwsim_fops_ps_read, hwsim_fops_ps_write,
                        "%llu\n");
  
 +static int hwsim_write_simulate_radar(void *dat, u64 val)
 +{
 +      struct mac80211_hwsim_data *data = dat;
 +
 +      ieee80211_radar_detected(data->hw);
 +
 +      return 0;
 +}
 +
 +DEFINE_SIMPLE_ATTRIBUTE(hwsim_simulate_radar, NULL,
 +                      hwsim_write_simulate_radar, "%llu\n");
  
  static int hwsim_fops_group_read(void *dat, u64 *val)
  {
@@@ -2018,7 -2011,7 +2018,7 @@@ static int hwsim_tx_info_frame_received
           (hwsim_flags & HWSIM_TX_STAT_ACK)) {
                if (skb->len >= 16) {
                        hdr = (struct ieee80211_hdr *) skb->data;
-                       mac80211_hwsim_monitor_ack(txi->rate_driver_data[0],
+                       mac80211_hwsim_monitor_ack(data2->channel,
                                                   hdr->addr2);
                }
                txi->flags |= IEEE80211_TX_STAT_ACK;
@@@ -2208,28 -2201,11 +2208,28 @@@ static const struct ieee80211_iface_lim
        { .max = 1, .types = BIT(NL80211_IFTYPE_P2P_DEVICE) },
  };
  
 -static struct ieee80211_iface_combination hwsim_if_comb = {
 -      .limits = hwsim_if_limits,
 -      .n_limits = ARRAY_SIZE(hwsim_if_limits),
 -      .max_interfaces = 2048,
 -      .num_different_channels = 1,
 +static const struct ieee80211_iface_limit hwsim_if_dfs_limits[] = {
 +      { .max = 8, .types = BIT(NL80211_IFTYPE_AP) },
 +};
 +
 +static struct ieee80211_iface_combination hwsim_if_comb[] = {
 +      {
 +              .limits = hwsim_if_limits,
 +              .n_limits = ARRAY_SIZE(hwsim_if_limits),
 +              .max_interfaces = 2048,
 +              .num_different_channels = 1,
 +      },
 +      {
 +              .limits = hwsim_if_dfs_limits,
 +              .n_limits = ARRAY_SIZE(hwsim_if_dfs_limits),
 +              .max_interfaces = 8,
 +              .num_different_channels = 1,
 +              .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
 +                                     BIT(NL80211_CHAN_WIDTH_20) |
 +                                     BIT(NL80211_CHAN_WIDTH_40) |
 +                                     BIT(NL80211_CHAN_WIDTH_80) |
 +                                     BIT(NL80211_CHAN_WIDTH_160),
 +      }
  };
  
  static int __init init_mac80211_hwsim(void)
                return -EINVAL;
  
        if (channels > 1) {
 -              hwsim_if_comb.num_different_channels = channels;
 +              hwsim_if_comb[0].num_different_channels = channels;
                mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
                mac80211_hwsim_ops.cancel_hw_scan =
                        mac80211_hwsim_cancel_hw_scan;
                hw->wiphy->n_addresses = 2;
                hw->wiphy->addresses = data->addresses;
  
 -              hw->wiphy->iface_combinations = &hwsim_if_comb;
 -              hw->wiphy->n_iface_combinations = 1;
 +              hw->wiphy->iface_combinations = hwsim_if_comb;
 +              hw->wiphy->n_iface_combinations = ARRAY_SIZE(hwsim_if_comb);
  
                if (channels > 1) {
                        hw->wiphy->max_scan_ssids = 255;
                        hw->wiphy->max_scan_ie_len = IEEE80211_MAX_DATA_LEN;
                        hw->wiphy->max_remain_on_channel_duration = 1000;
 +                      /* For channels > 1 DFS is not allowed */
 +                      hw->wiphy->n_iface_combinations = 1;
                }
  
                INIT_DELAYED_WORK(&data->roc_done, hw_roc_done);
                            IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS |
                            IEEE80211_HW_AMPDU_AGGREGATION |
                            IEEE80211_HW_WANT_MONITOR_VIF |
 -                          IEEE80211_HW_QUEUE_CONTROL;
 +                          IEEE80211_HW_QUEUE_CONTROL |
 +                          IEEE80211_HW_SUPPORTS_HT_CCK_RATES;
                if (rctbl)
                        hw->flags |= IEEE80211_HW_SUPPORTS_RC_TABLE;
  
                        sband->vht_cap.cap =
                                IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 |
                                IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ |
 +                              IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ |
                                IEEE80211_VHT_CAP_RXLDPC |
                                IEEE80211_VHT_CAP_SHORT_GI_80 |
                                IEEE80211_VHT_CAP_SHORT_GI_160 |
                        break;
                case HWSIM_REGTEST_WORLD_ROAM:
                        if (i == 0) {
 -                              hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY;
 +                              hw->wiphy->regulatory_flags |=
 +                                      REGULATORY_CUSTOM_REG;
                                wiphy_apply_custom_regulatory(hw->wiphy,
                                        &hwsim_world_regdom_custom_01);
                        }
                        break;
                case HWSIM_REGTEST_CUSTOM_WORLD:
 -                      hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY;
 +                      hw->wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG;
                        wiphy_apply_custom_regulatory(hw->wiphy,
                                &hwsim_world_regdom_custom_01);
                        break;
                case HWSIM_REGTEST_CUSTOM_WORLD_2:
                        if (i == 0) {
 -                              hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY;
 +                              hw->wiphy->regulatory_flags |=
 +                                      REGULATORY_CUSTOM_REG;
                                wiphy_apply_custom_regulatory(hw->wiphy,
                                        &hwsim_world_regdom_custom_01);
                        } else if (i == 1) {
 -                              hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY;
 +                              hw->wiphy->regulatory_flags |=
 +                                      REGULATORY_CUSTOM_REG;
                                wiphy_apply_custom_regulatory(hw->wiphy,
                                        &hwsim_world_regdom_custom_02);
                        }
                        break;
                case HWSIM_REGTEST_STRICT_ALL:
 -                      hw->wiphy->flags |= WIPHY_FLAG_STRICT_REGULATORY;
 +                      hw->wiphy->regulatory_flags |= REGULATORY_STRICT_REG;
                        break;
                case HWSIM_REGTEST_STRICT_FOLLOW:
                case HWSIM_REGTEST_STRICT_AND_DRIVER_REG:
                        if (i == 0)
 -                              hw->wiphy->flags |= WIPHY_FLAG_STRICT_REGULATORY;
 +                              hw->wiphy->regulatory_flags |=
 +                                      REGULATORY_STRICT_REG;
                        break;
                case HWSIM_REGTEST_ALL:
                        if (i == 0) {
 -                              hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY;
 +                              hw->wiphy->regulatory_flags |=
 +                                      REGULATORY_CUSTOM_REG;
                                wiphy_apply_custom_regulatory(hw->wiphy,
                                        &hwsim_world_regdom_custom_01);
                        } else if (i == 1) {
 -                              hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY;
 +                              hw->wiphy->regulatory_flags |=
 +                                      REGULATORY_CUSTOM_REG;
                                wiphy_apply_custom_regulatory(hw->wiphy,
                                        &hwsim_world_regdom_custom_02);
                        } else if (i == 4)
 -                              hw->wiphy->flags |= WIPHY_FLAG_STRICT_REGULATORY;
 +                              hw->wiphy->regulatory_flags |=
 +                                      REGULATORY_STRICT_REG;
                        break;
                default:
                        break;
  
                data->debugfs = debugfs_create_dir("hwsim",
                                                   hw->wiphy->debugfsdir);
 -              data->debugfs_ps = debugfs_create_file("ps", 0666,
 -                                                     data->debugfs, data,
 -                                                     &hwsim_fops_ps);
 -              data->debugfs_group = debugfs_create_file("group", 0666,
 -                                                      data->debugfs, data,
 -                                                      &hwsim_fops_group);
 +              debugfs_create_file("ps", 0666, data->debugfs, data,
 +                                  &hwsim_fops_ps);
 +              debugfs_create_file("group", 0666, data->debugfs, data,
 +                                  &hwsim_fops_group);
 +              if (channels == 1)
 +                      debugfs_create_file("dfs_simulate_radar", 0222,
 +                                          data->debugfs,
 +                                          data, &hwsim_simulate_radar);
  
                tasklet_hrtimer_init(&data->beacon_timer,
                                     mac80211_hwsim_beacon,
 -                                   CLOCK_REALTIME, HRTIMER_MODE_ABS);
 +                                   CLOCK_MONOTONIC_RAW, HRTIMER_MODE_ABS);
  
                list_add_tail(&data->list, &hwsim_radios);
        }
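
The last hunk above moves the hwsim beacon timer from CLOCK_REALTIME, which jumps whenever wall time is stepped, to CLOCK_MONOTONIC_RAW, which only advances. A small sketch of the difference through the standard clock_gettime() interface (CLOCK_MONOTONIC_RAW is Linux-specific):

    #include <stdio.h>
    #include <time.h>

    int main(void)
    {
        struct timespec rt, mono;

        /* CLOCK_REALTIME can be stepped by settimeofday()/NTP */
        clock_gettime(CLOCK_REALTIME, &rt);
        /* CLOCK_MONOTONIC_RAW only moves forward, unaffected by adjustments */
        clock_gettime(CLOCK_MONOTONIC_RAW, &mono);

        printf("realtime:  %lld.%09ld\n", (long long)rt.tv_sec, rt.tv_nsec);
        printf("monotonic: %lld.%09ld\n", (long long)mono.tv_sec, mono.tv_nsec);
        return 0;
    }
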
index 2d6f5e1721cfc5428823896a20f9963e9d843ce0,8bb8988c435cf04b0280aef69d4e9d01acc24fee..4d79761b9c87e3121dee2c61151d54a5d5002cbe
@@@ -648,7 -648,6 +648,7 @@@ mwifiex_hard_start_xmit(struct sk_buff 
        tx_info = MWIFIEX_SKB_TXCB(skb);
        tx_info->bss_num = priv->bss_num;
        tx_info->bss_type = priv->bss_type;
 +      tx_info->pkt_len = skb->len;
  
        /* Record the current time the packet was queued; used to
         * determine the amount of time the packet was queued in
@@@ -747,9 -746,10 +747,10 @@@ static struct net_device_stats *mwifiex
  }
  
  static u16
- mwifiex_netdev_select_wmm_queue(struct net_device *dev, struct sk_buff *skb)
+ mwifiex_netdev_select_wmm_queue(struct net_device *dev, struct sk_buff *skb,
+                               void *accel_priv)
  {
 -      skb->priority = cfg80211_classify8021d(skb);
 +      skb->priority = cfg80211_classify8021d(skb, NULL);
        return mwifiex_1d_to_wmm_queue[skb->priority];
  }
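
mwifiex maps the 802.1d priority computed by cfg80211_classify8021d() onto one of four WMM hardware queues through a lookup table. A sketch of that priority-to-queue mapping; the table values here are illustrative, not quoted from the driver:

    #include <stdio.h>

    /* illustrative 802.1d priority (0..7) -> WMM queue mapping */
    static const unsigned char prio_to_queue[8] = { 1, 0, 0, 1, 2, 2, 3, 3 };

    int main(void)
    {
        for (int prio = 0; prio < 8; prio++)
            printf("802.1d priority %d -> WMM queue %u\n",
                   prio, prio_to_queue[prio]);
        return 0;
    }
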
  
@@@ -992,8 -992,12 +993,8 @@@ int mwifiex_remove_card(struct mwifiex_
                rtnl_unlock();
        }
  
 -      priv = adapter->priv[0];
 -      if (!priv || !priv->wdev)
 -              goto exit_remove;
 -
 -      wiphy_unregister(priv->wdev->wiphy);
 -      wiphy_free(priv->wdev->wiphy);
 +      wiphy_unregister(adapter->wiphy);
 +      wiphy_free(adapter->wiphy);
  
        mwifiex_terminate_workqueue(adapter);
  
index a2a70cc70e7b0643b69a9b180907f190c25133b1,ce2a1f5f9a1e0226d933d28606823239e4467526..5c88ab19b3eba03d15ab3335e4bc7ba54140c38e
@@@ -769,7 -769,8 +769,8 @@@ struct netdev_phys_port_id 
   *        (can also return NETDEV_TX_LOCKED iff NETIF_F_LLTX)
   *    Required can not be NULL.
   *
-  * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb);
+  * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb,
+  *                         void *accel_priv);
   *    Called to decide which queue to use when device supports multiple
   *    transmit queues.
   *
@@@ -990,7 -991,8 +991,8 @@@ struct net_device_ops 
        netdev_tx_t             (*ndo_start_xmit) (struct sk_buff *skb,
                                                   struct net_device *dev);
        u16                     (*ndo_select_queue)(struct net_device *dev,
-                                                   struct sk_buff *skb);
+                                                   struct sk_buff *skb,
+                                                   void *accel_priv);
        void                    (*ndo_change_rx_flags)(struct net_device *dev,
                                                       int flags);
        void                    (*ndo_set_rx_mode)(struct net_device *dev);
@@@ -1282,9 -1284,6 +1284,9 @@@ struct net_device 
  #endif
  #if IS_ENABLED(CONFIG_NET_DSA)
        struct dsa_switch_tree  *dsa_ptr;       /* dsa specific data */
 +#endif
 +#if IS_ENABLED(CONFIG_TIPC)
 +      struct tipc_bearer __rcu *tipc_ptr;     /* TIPC specific data */
  #endif
        void                    *atalk_ptr;     /* AppleTalk link       */
        struct in_device __rcu  *ip_ptr;        /* IPv4 specific data   */
        union {
                void                            *ml_priv;
                struct pcpu_lstats __percpu     *lstats; /* loopback stats */
 -              struct pcpu_tstats __percpu     *tstats; /* tunnel stats */
 +              struct pcpu_sw_netstats __percpu        *tstats;
                struct pcpu_dstats __percpu     *dstats; /* dummy stats */
                struct pcpu_vstats __percpu     *vstats; /* veth stats */
        };
        /* max exchange id for FCoE LRO by ddp */
        unsigned int            fcoe_ddp_xid;
  #endif
 -#if IS_ENABLED(CONFIG_NETPRIO_CGROUP)
 +#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
        struct netprio_map __rcu *priomap;
  #endif
        /* phy device may attach itself for hardware timestamping */
@@@ -1532,7 -1531,8 +1534,8 @@@ static inline void netdev_for_each_tx_q
  }
  
  struct netdev_queue *netdev_pick_tx(struct net_device *dev,
-                                   struct sk_buff *skb);
+                                   struct sk_buff *skb,
+                                   void *accel_priv);
  u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb);
  
  /*
@@@ -1632,10 -1632,7 +1635,10 @@@ struct napi_gro_cb 
        int data_offset;
  
        /* This is non-zero if the packet cannot be merged with the new skb. */
 -      int flush;
 +      u16     flush;
 +
 +      /* Save the IP ID here and check when we get to the transport layer */
 +      u16     flush_id;
  
        /* Number of segments aggregated. */
        u16     count;
        /* Used in ipv6_gro_receive() */
        int     proto;
  
 +      /* used to support CHECKSUM_COMPLETE for tunneling protocols */
 +      __wsum  csum;
 +
        /* used in skb_gro_receive() slow path */
        struct sk_buff *last;
  };
@@@ -1682,7 -1676,7 +1685,7 @@@ struct offload_callbacks 
        int                     (*gso_send_check)(struct sk_buff *skb);
        struct sk_buff          **(*gro_receive)(struct sk_buff **head,
                                               struct sk_buff *skb);
 -      int                     (*gro_complete)(struct sk_buff *skb);
 +      int                     (*gro_complete)(struct sk_buff *skb, int nhoff);
  };
  
  struct packet_offload {
        struct list_head         list;
  };
  
 +/* often modified stats are per cpu, other are shared (netdev->stats) */
 +struct pcpu_sw_netstats {
 +      u64     rx_packets;
 +      u64     rx_bytes;
 +      u64     tx_packets;
 +      u64     tx_bytes;
 +      struct u64_stats_sync   syncp;
 +};
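
The new pcpu_sw_netstats groups the frequently written rx/tx counters per CPU, so writers never share a cache line and readers fold the per-CPU copies on demand. A userspace sketch of that split, using a plain array in place of per-CPU allocation and omitting the u64_stats_sync seqcount:

    #include <stdio.h>

    #define NR_CPUS 4

    struct sw_netstats { unsigned long long rx_packets, rx_bytes; };

    static struct sw_netstats stats[NR_CPUS];   /* one private copy per CPU */

    static void rx_on_cpu(int cpu, unsigned long long bytes)
    {
        stats[cpu].rx_packets++;      /* no locking: each CPU touches its own slot */
        stats[cpu].rx_bytes += bytes;
    }

    static struct sw_netstats fold(void)
    {
        struct sw_netstats sum = { 0, 0 };
        for (int i = 0; i < NR_CPUS; i++) {
            sum.rx_packets += stats[i].rx_packets;
            sum.rx_bytes   += stats[i].rx_bytes;
        }
        return sum;
    }

    int main(void)
    {
        rx_on_cpu(0, 1500);
        rx_on_cpu(2, 60);
        struct sw_netstats s = fold();
        printf("%llu packets, %llu bytes\n", s.rx_packets, s.rx_bytes);
        return 0;
    }
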
 +
  #include <linux/notifier.h>
  
  /* netdevice notifier chain. Please remember to update the rtnetlink
@@@ -1756,6 -1741,8 +1759,6 @@@ netdev_notifier_info_to_dev(const struc
        return info->dev;
  }
  
 -int call_netdevice_notifiers_info(unsigned long val, struct net_device *dev,
 -                                struct netdev_notifier_info *info);
  int call_netdevice_notifiers(unsigned long val, struct net_device *dev);
  
  
@@@ -1822,6 -1809,7 +1825,6 @@@ void dev_remove_pack(struct packet_typ
  void __dev_remove_pack(struct packet_type *pt);
  void dev_add_offload(struct packet_offload *po);
  void dev_remove_offload(struct packet_offload *po);
 -void __dev_remove_offload(struct packet_offload *po);
  
  struct net_device *dev_get_by_flags_rcu(struct net *net, unsigned short flags,
                                        unsigned short mask);
@@@ -1834,6 -1822,7 +1837,7 @@@ int dev_close(struct net_device *dev)
  void dev_disable_lro(struct net_device *dev);
  int dev_loopback_xmit(struct sk_buff *newskb);
  int dev_queue_xmit(struct sk_buff *skb);
+ int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv);
  int register_netdevice(struct net_device *dev);
  void unregister_netdevice_queue(struct net_device *dev, struct list_head *head);
  void unregister_netdevice_many(struct list_head *head);
@@@ -1906,14 -1895,6 +1910,14 @@@ static inline void *skb_gro_network_hea
               skb_network_offset(skb);
  }
  
 +static inline void skb_gro_postpull_rcsum(struct sk_buff *skb,
 +                                      const void *start, unsigned int len)
 +{
 +      if (skb->ip_summed == CHECKSUM_COMPLETE)
 +              NAPI_GRO_CB(skb)->csum = csum_sub(NAPI_GRO_CB(skb)->csum,
 +                                                csum_partial(start, len, 0));
 +}
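
skb_gro_postpull_rcsum() keeps a CHECKSUM_COMPLETE value honest: when GRO pulls bytes off the front of the packet, the one's-complement sum over those bytes is subtracted so the stored sum still covers exactly the remaining data. The arithmetic in standalone form, with hypothetical helpers csum() and csum_sub16():

    #include <stdint.h>
    #include <stddef.h>
    #include <stdio.h>

    /* 16-bit one's-complement sum over a byte range */
    static uint16_t csum(const uint8_t *p, size_t len)
    {
        uint32_t sum = 0;
        for (size_t i = 0; i < len; i++)
            sum += (i & 1) ? p[i] : (uint32_t)p[i] << 8;
        while (sum >> 16)
            sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)sum;
    }

    /* subtract a partial sum, as csum_sub() does: add the complement */
    static uint16_t csum_sub16(uint16_t a, uint16_t b)
    {
        uint32_t sum = a + (uint16_t)~b;
        while (sum >> 16)
            sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)sum;
    }

    int main(void)
    {
        uint8_t pkt[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
        uint16_t whole = csum(pkt, 8);
        /* "pull" 4 header bytes: adjust the sum instead of recomputing it */
        uint16_t fixed = csum_sub16(whole, csum(pkt, 4));
        printf("fixed=%04x recomputed=%04x\n", fixed, csum(pkt + 4, 4));
        return 0;
    }
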
 +
  static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
                                  unsigned short type,
                                  const void *daddr, const void *saddr,
@@@ -2400,52 -2381,17 +2404,52 @@@ static inline int netif_copy_real_num_q
  #define DEFAULT_MAX_NUM_RSS_QUEUES    (8)
  int netif_get_num_default_rss_queues(void);
  
 -/* Use this variant when it is known for sure that it
 - * is executing from hardware interrupt context or with hardware interrupts
 - * disabled.
 - */
 -void dev_kfree_skb_irq(struct sk_buff *skb);
 +enum skb_free_reason {
 +      SKB_REASON_CONSUMED,
 +      SKB_REASON_DROPPED,
 +};
 +
 +void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason);
 +void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason);
  
 -/* Use this variant in places where it could be invoked
 - * from either hardware interrupt or other context, with hardware interrupts
 - * either disabled or enabled.
 +/*
 + * It is not allowed to call kfree_skb() or consume_skb() from hardware
 + * interrupt context or with hardware interrupts being disabled.
 + * (in_irq() || irqs_disabled())
 + *
 + * We provide four helpers that can be used in the following contexts:
 + *
 + * dev_kfree_skb_irq(skb) when caller drops a packet from irq context,
 + *  replacing kfree_skb(skb)
 + *
 + * dev_consume_skb_irq(skb) when caller consumes a packet from irq context.
 + *  Typically used in place of consume_skb(skb) in TX completion path
 + *
 + * dev_kfree_skb_any(skb) when caller doesn't know its current irq context,
 + *  replacing kfree_skb(skb)
 + *
 + * dev_consume_skb_any(skb) when caller doesn't know its current irq context,
 + *  and consumes a packet. Used in place of consume_skb(skb)
   */
 -void dev_kfree_skb_any(struct sk_buff *skb);
 +static inline void dev_kfree_skb_irq(struct sk_buff *skb)
 +{
 +      __dev_kfree_skb_irq(skb, SKB_REASON_DROPPED);
 +}
 +
 +static inline void dev_consume_skb_irq(struct sk_buff *skb)
 +{
 +      __dev_kfree_skb_irq(skb, SKB_REASON_CONSUMED);
 +}
 +
 +static inline void dev_kfree_skb_any(struct sk_buff *skb)
 +{
 +      __dev_kfree_skb_any(skb, SKB_REASON_DROPPED);
 +}
 +
 +static inline void dev_consume_skb_any(struct sk_buff *skb)
 +{
 +      __dev_kfree_skb_any(skb, SKB_REASON_CONSUMED);
 +}
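
The four helpers above now share two workers parameterized by an skb_free_reason, so the drop-versus-consume distinction reaches the tracing layer without duplicating the free path. The wrapper-over-worker shape in a plain-C sketch (names hypothetical):

    #include <stdio.h>

    enum free_reason { REASON_CONSUMED, REASON_DROPPED };

    /* single worker; callers express intent through the reason argument */
    static void __free_obj(void *obj, enum free_reason reason)
    {
        if (reason == REASON_DROPPED)
            printf("trace: dropped %p\n", obj);   /* stands in for trace_kfree_skb() */
        else
            printf("trace: consumed %p\n", obj);  /* stands in for trace_consume_skb() */
        /* the actual free would go here */
    }

    static inline void free_obj_dropped(void *obj)  { __free_obj(obj, REASON_DROPPED); }
    static inline void free_obj_consumed(void *obj) { __free_obj(obj, REASON_CONSUMED); }

    int main(void)
    {
        int x;
        free_obj_dropped(&x);
        free_obj_consumed(&x);
        return 0;
    }
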
  
  int netif_rx(struct sk_buff *skb);
  int netif_rx_ni(struct sk_buff *skb);
@@@ -2454,8 -2400,6 +2458,8 @@@ gro_result_t napi_gro_receive(struct na
  void napi_gro_flush(struct napi_struct *napi, bool flush_old);
  struct sk_buff *napi_get_frags(struct napi_struct *napi);
  gro_result_t napi_gro_frags(struct napi_struct *napi);
 +struct packet_offload *gro_find_receive_by_type(__be16 type);
 +struct packet_offload *gro_find_complete_by_type(__be16 type);
  
  static inline void napi_free_frags(struct napi_struct *napi)
  {
@@@ -2486,7 -2430,7 +2490,7 @@@ int dev_change_carrier(struct net_devic
  int dev_get_phys_port_id(struct net_device *dev,
                         struct netdev_phys_port_id *ppid);
  int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
-                       struct netdev_queue *txq, void *accel_priv);
+                       struct netdev_queue *txq);
  int dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
  
  extern int            netdev_budget;
@@@ -2841,10 -2785,17 +2845,10 @@@ int register_netdev(struct net_device *
  void unregister_netdev(struct net_device *dev);
  
  /* General hardware address lists handling functions */
 -int __hw_addr_add_multiple(struct netdev_hw_addr_list *to_list,
 -                         struct netdev_hw_addr_list *from_list,
 -                         int addr_len, unsigned char addr_type);
 -void __hw_addr_del_multiple(struct netdev_hw_addr_list *to_list,
 -                          struct netdev_hw_addr_list *from_list,
 -                          int addr_len, unsigned char addr_type);
  int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
                   struct netdev_hw_addr_list *from_list, int addr_len);
  void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
                      struct netdev_hw_addr_list *from_list, int addr_len);
 -void __hw_addr_flush(struct netdev_hw_addr_list *list);
  void __hw_addr_init(struct netdev_hw_addr_list *list);
  
  /* Functions used for device addresses handling */
@@@ -2852,6 -2803,10 +2856,6 @@@ int dev_addr_add(struct net_device *dev
                 unsigned char addr_type);
  int dev_addr_del(struct net_device *dev, const unsigned char *addr,
                 unsigned char addr_type);
 -int dev_addr_add_multiple(struct net_device *to_dev,
 -                        struct net_device *from_dev, unsigned char addr_type);
 -int dev_addr_del_multiple(struct net_device *to_dev,
 -                        struct net_device *from_dev, unsigned char addr_type);
  void dev_addr_flush(struct net_device *dev);
  int dev_addr_init(struct net_device *dev);
  
@@@ -2898,6 -2853,7 +2902,6 @@@ extern int              weight_p
  extern int            bpf_jit_enable;
  
  bool netdev_has_upper_dev(struct net_device *dev, struct net_device *upper_dev);
 -bool netdev_has_any_upper_dev(struct net_device *dev);
  struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev,
                                                     struct list_head **iter);
  
@@@ -2926,7 -2882,6 +2930,7 @@@ void *netdev_lower_get_next_private_rcu
             priv = netdev_lower_get_next_private_rcu(dev, &(iter)))
  
  void *netdev_adjacent_get_private(struct list_head *adj_list);
 +void *netdev_lower_get_first_private_rcu(struct net_device *dev);
  struct net_device *netdev_master_upper_dev_get(struct net_device *dev);
  struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev);
  int netdev_upper_dev_link(struct net_device *dev, struct net_device *upper_dev);
@@@ -2937,6 -2892,8 +2941,6 @@@ int netdev_master_upper_dev_link_privat
                                         void *private);
  void netdev_upper_dev_unlink(struct net_device *dev,
                             struct net_device *upper_dev);
 -void *netdev_lower_dev_get_private_rcu(struct net_device *dev,
 -                                     struct net_device *lower_dev);
  void *netdev_lower_dev_get_private(struct net_device *dev,
                                   struct net_device *lower_dev);
  int skb_checksum_help(struct sk_buff *skb);
diff --combined net/core/dev.c
index 87312dcf0aa848412f200e00f5bab26433b8b080,0ce469e5ec8057c674901b404db13061fe8b7392..2bee80591f9ab5a1845a3b8c8c2ba46f9dea5ea1
@@@ -480,7 -480,7 +480,7 @@@ EXPORT_SYMBOL(dev_add_offload)
   *    and must not be freed until after all the CPU's have gone
   *    through a quiescent state.
   */
 -void __dev_remove_offload(struct packet_offload *po)
 +static void __dev_remove_offload(struct packet_offload *po)
  {
        struct list_head *head = &offload_base;
        struct packet_offload *po1;
  out:
        spin_unlock(&offload_lock);
  }
 -EXPORT_SYMBOL(__dev_remove_offload);
  
  /**
   *    dev_remove_offload       - remove packet offload handler
@@@ -1565,14 -1566,14 +1565,14 @@@ EXPORT_SYMBOL(unregister_netdevice_noti
   *    are as for raw_notifier_call_chain().
   */
  
 -int call_netdevice_notifiers_info(unsigned long val, struct net_device *dev,
 -                                struct netdev_notifier_info *info)
 +static int call_netdevice_notifiers_info(unsigned long val,
 +                                       struct net_device *dev,
 +                                       struct netdev_notifier_info *info)
  {
        ASSERT_RTNL();
        netdev_notifier_info_init(info, dev);
        return raw_notifier_call_chain(&netdev_chain, val, info);
  }
 -EXPORT_SYMBOL(call_netdevice_notifiers_info);
  
  /**
   *    call_netdevice_notifiers - call all network notifier blocks
@@@ -2144,42 -2145,30 +2144,42 @@@ void __netif_schedule(struct Qdisc *q
  }
  EXPORT_SYMBOL(__netif_schedule);
  
 -void dev_kfree_skb_irq(struct sk_buff *skb)
 +struct dev_kfree_skb_cb {
 +      enum skb_free_reason reason;
 +};
 +
 +static struct dev_kfree_skb_cb *get_kfree_skb_cb(const struct sk_buff *skb)
 +{
 +      return (struct dev_kfree_skb_cb *)skb->cb;
 +}
 +
 +void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason)
  {
 -      if (atomic_dec_and_test(&skb->users)) {
 -              struct softnet_data *sd;
 -              unsigned long flags;
 +      unsigned long flags;
  
 -              local_irq_save(flags);
 -              sd = &__get_cpu_var(softnet_data);
 -              skb->next = sd->completion_queue;
 -              sd->completion_queue = skb;
 -              raise_softirq_irqoff(NET_TX_SOFTIRQ);
 -              local_irq_restore(flags);
 +      if (likely(atomic_read(&skb->users) == 1)) {
 +              smp_rmb();
 +              atomic_set(&skb->users, 0);
 +      } else if (likely(!atomic_dec_and_test(&skb->users))) {
 +              return;
        }
 +      get_kfree_skb_cb(skb)->reason = reason;
 +      local_irq_save(flags);
 +      skb->next = __this_cpu_read(softnet_data.completion_queue);
 +      __this_cpu_write(softnet_data.completion_queue, skb);
 +      raise_softirq_irqoff(NET_TX_SOFTIRQ);
 +      local_irq_restore(flags);
  }
 -EXPORT_SYMBOL(dev_kfree_skb_irq);
 +EXPORT_SYMBOL(__dev_kfree_skb_irq);
  
 -void dev_kfree_skb_any(struct sk_buff *skb)
 +void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason)
  {
        if (in_irq() || irqs_disabled())
 -              dev_kfree_skb_irq(skb);
 +              __dev_kfree_skb_irq(skb, reason);
        else
                dev_kfree_skb(skb);
  }
 -EXPORT_SYMBOL(dev_kfree_skb_any);
 +EXPORT_SYMBOL(__dev_kfree_skb_any);
  
  
  /**
@@@ -2453,8 -2442,13 +2453,8 @@@ static void dev_gso_skb_destructor(stru
  {
        struct dev_gso_cb *cb;
  
 -      do {
 -              struct sk_buff *nskb = skb->next;
 -
 -              skb->next = nskb->next;
 -              nskb->next = NULL;
 -              kfree_skb(nskb);
 -      } while (skb->next);
 +      kfree_skb_list(skb->next);
 +      skb->next = NULL;
  
        cb = DEV_GSO_CB(skb);
        if (cb->destructor)
@@@ -2529,8 -2523,23 +2529,8 @@@ netdev_features_t netif_skb_features(st
  }
  EXPORT_SYMBOL(netif_skb_features);
  
 -/*
 - * Returns true if either:
 - *    1. skb has frag_list and the device doesn't support FRAGLIST, or
 - *    2. skb is fragmented and the device does not support SG.
 - */
 -static inline int skb_needs_linearize(struct sk_buff *skb,
 -                                    netdev_features_t features)
 -{
 -      return skb_is_nonlinear(skb) &&
 -                      ((skb_has_frag_list(skb) &&
 -                              !(features & NETIF_F_FRAGLIST)) ||
 -                      (skb_shinfo(skb)->nr_frags &&
 -                              !(features & NETIF_F_SG)));
 -}
 -
  int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
-                       struct netdev_queue *txq, void *accel_priv)
+                       struct netdev_queue *txq)
  {
        const struct net_device_ops *ops = dev->netdev_ops;
        int rc = NETDEV_TX_OK;
                        dev_queue_xmit_nit(skb, dev);
  
                skb_len = skb->len;
-               if (accel_priv)
-                       rc = ops->ndo_dfwd_start_xmit(skb, dev, accel_priv);
-               else
                        rc = ops->ndo_start_xmit(skb, dev);
  
                trace_net_dev_xmit(skb, rc, dev, skb_len);
-               if (rc == NETDEV_TX_OK && txq)
+               if (rc == NETDEV_TX_OK)
                        txq_trans_update(txq);
                return rc;
        }
@@@ -2618,10 -2624,7 +2615,7 @@@ gso
                        dev_queue_xmit_nit(nskb, dev);
  
                skb_len = nskb->len;
-               if (accel_priv)
-                       rc = ops->ndo_dfwd_start_xmit(nskb, dev, accel_priv);
-               else
-                       rc = ops->ndo_start_xmit(nskb, dev);
+               rc = ops->ndo_start_xmit(nskb, dev);
                trace_net_dev_xmit(nskb, rc, dev, skb_len);
                if (unlikely(rc != NETDEV_TX_OK)) {
                        if (rc & ~NETDEV_TX_MASK)
@@@ -2741,7 -2744,7 +2735,7 @@@ static inline int __dev_xmit_skb(struc
        return rc;
  }
  
 -#if IS_ENABLED(CONFIG_NETPRIO_CGROUP)
 +#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
  static void skb_update_prio(struct sk_buff *skb)
  {
        struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap);
@@@ -2802,7 -2805,7 +2796,7 @@@ EXPORT_SYMBOL(dev_loopback_xmit)
   *      the BH enable code must have IRQs enabled so that it will not deadlock.
   *          --BLG
   */
- int dev_queue_xmit(struct sk_buff *skb)
+ int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
  {
        struct net_device *dev = skb->dev;
        struct netdev_queue *txq;
  
        skb_update_prio(skb);
  
-       txq = netdev_pick_tx(dev, skb);
+       txq = netdev_pick_tx(dev, skb, accel_priv);
        q = rcu_dereference_bh(txq->qdisc);
  
  #ifdef CONFIG_NET_CLS_ACT
  
                        if (!netif_xmit_stopped(txq)) {
                                __this_cpu_inc(xmit_recursion);
-                               rc = dev_hard_start_xmit(skb, dev, txq, NULL);
+                               rc = dev_hard_start_xmit(skb, dev, txq);
                                __this_cpu_dec(xmit_recursion);
                                if (dev_xmit_complete(rc)) {
                                        HARD_TX_UNLOCK(dev, txq);
        rcu_read_unlock_bh();
        return rc;
  }
+ int dev_queue_xmit(struct sk_buff *skb)
+ {
+       return __dev_queue_xmit(skb, NULL);
+ }
  EXPORT_SYMBOL(dev_queue_xmit);
  
+ int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv)
+ {
+       return __dev_queue_xmit(skb, accel_priv);
+ }
+ EXPORT_SYMBOL(dev_queue_xmit_accel);
  
  /*=======================================================================
                        Receiver routines
@@@ -3000,7 -3014,7 +3005,7 @@@ static int get_rps_cpu(struct net_devic
        }
  
        skb_reset_network_header(skb);
 -      if (!skb_get_rxhash(skb))
 +      if (!skb_get_hash(skb))
                goto done;
  
        flow_table = rcu_dereference(rxqueue->rps_flow_table);
@@@ -3145,7 -3159,7 +3150,7 @@@ static bool skb_flow_limit(struct sk_bu
        rcu_read_lock();
        fl = rcu_dereference(sd->flow_limit);
        if (fl) {
 -              new_flow = skb_get_rxhash(skb) & (fl->num_buckets - 1);
 +              new_flow = skb_get_hash(skb) & (fl->num_buckets - 1);
                old_flow = fl->history[fl->history_head];
                fl->history[fl->history_head] = new_flow;
  
@@@ -3297,10 -3311,7 +3302,10 @@@ static void net_tx_action(struct softir
                        clist = clist->next;
  
                        WARN_ON(atomic_read(&skb->users));
 -                      trace_kfree_skb(skb, net_tx_action);
 +                      if (likely(get_kfree_skb_cb(skb)->reason == SKB_REASON_CONSUMED))
 +                              trace_consume_skb(skb);
 +                      else
 +                              trace_kfree_skb(skb, net_tx_action);
                        __kfree_skb(skb);
                }
        }
@@@ -3746,7 -3757,7 +3751,7 @@@ static int napi_gro_complete(struct sk_
                if (ptype->type != type || !ptype->callbacks.gro_complete)
                        continue;
  
 -              err = ptype->callbacks.gro_complete(skb);
 +              err = ptype->callbacks.gro_complete(skb, 0);
                break;
        }
        rcu_read_unlock();
@@@ -3812,23 -3823,6 +3817,23 @@@ static void gro_list_prepare(struct nap
        }
  }
  
 +static void skb_gro_reset_offset(struct sk_buff *skb)
 +{
 +      const struct skb_shared_info *pinfo = skb_shinfo(skb);
 +      const skb_frag_t *frag0 = &pinfo->frags[0];
 +
 +      NAPI_GRO_CB(skb)->data_offset = 0;
 +      NAPI_GRO_CB(skb)->frag0 = NULL;
 +      NAPI_GRO_CB(skb)->frag0_len = 0;
 +
 +      if (skb_mac_header(skb) == skb_tail_pointer(skb) &&
 +          pinfo->nr_frags &&
 +          !PageHighMem(skb_frag_page(frag0))) {
 +              NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
 +              NAPI_GRO_CB(skb)->frag0_len = skb_frag_size(frag0);
 +      }
 +}
 +
  static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
  {
        struct sk_buff **pp = NULL;
        if (skb_is_gso(skb) || skb_has_frag_list(skb))
                goto normal;
  
 +      skb_gro_reset_offset(skb);
        gro_list_prepare(napi, skb);
 +      NAPI_GRO_CB(skb)->csum = skb->csum; /* Needed for CHECKSUM_COMPLETE */
  
        rcu_read_lock();
        list_for_each_entry_rcu(ptype, head, list) {
        if (same_flow)
                goto ok;
  
 -      if (NAPI_GRO_CB(skb)->flush || napi->gro_count >= MAX_GRO_SKBS)
 +      if (NAPI_GRO_CB(skb)->flush)
                goto normal;
  
 -      napi->gro_count++;
 +      if (unlikely(napi->gro_count >= MAX_GRO_SKBS)) {
 +              struct sk_buff *nskb = napi->gro_list;
 +
 +              /* locate the end of the list to select the 'oldest' flow */
 +              while (nskb->next) {
 +                      pp = &nskb->next;
 +                      nskb = *pp;
 +              }
 +              *pp = NULL;
 +              nskb->next = NULL;
 +              napi_gro_complete(nskb);
 +      } else {
 +              napi->gro_count++;
 +      }
        NAPI_GRO_CB(skb)->count = 1;
        NAPI_GRO_CB(skb)->age = jiffies;
        skb_shinfo(skb)->gso_size = skb_gro_len(skb);
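
Rather than refusing new flows once MAX_GRO_SKBS is reached, dev_gro_receive() now flushes the flow at the tail of gro_list; entries are pushed at the head, so the tail is the oldest. A sketch of that tail eviction on a singly linked list:

    #include <stdio.h>

    struct node { int flow; struct node *next; };

    /* unlink and return the last (oldest) entry; new entries go in at the head */
    static struct node *evict_oldest(struct node **head)
    {
        struct node **pp = head, *n = *head;

        if (!n)
            return NULL;
        while (n->next) {       /* walk to the tail, keeping a link to patch */
            pp = &n->next;
            n = *pp;
        }
        *pp = NULL;
        return n;
    }

    int main(void)
    {
        struct node c = { 1, NULL }, b = { 2, &c }, a = { 3, &b };
        struct node *head = &a;              /* 3 -> 2 -> 1, flow 1 is oldest */
        struct node *old = evict_oldest(&head);
        printf("evicted flow %d\n", old->flow);
        return 0;
    }
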
@@@ -3936,31 -3915,6 +3941,31 @@@ normal
        goto pull;
  }
  
 +struct packet_offload *gro_find_receive_by_type(__be16 type)
 +{
 +      struct list_head *offload_head = &offload_base;
 +      struct packet_offload *ptype;
 +
 +      list_for_each_entry_rcu(ptype, offload_head, list) {
 +              if (ptype->type != type || !ptype->callbacks.gro_receive)
 +                      continue;
 +              return ptype;
 +      }
 +      return NULL;
 +}
 +
 +struct packet_offload *gro_find_complete_by_type(__be16 type)
 +{
 +      struct list_head *offload_head = &offload_base;
 +      struct packet_offload *ptype;
 +
 +      list_for_each_entry_rcu(ptype, offload_head, list) {
 +              if (ptype->type != type || !ptype->callbacks.gro_complete)
 +                      continue;
 +              return ptype;
 +      }
 +      return NULL;
 +}
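
gro_find_receive_by_type() and gro_find_complete_by_type() let tunnel GRO handlers locate the inner protocol's callbacks by EtherType instead of open-coding the offload-list walk. A sketch of such a type-keyed handler registry (0x0800 is ETH_P_IP; the handler body is a dummy):

    #include <stdio.h>

    typedef int (*complete_fn)(void *pkt, int nhoff);

    static int ipv4_complete(void *pkt, int nhoff) { (void)pkt; return nhoff; }

    struct offload { unsigned short type; complete_fn complete; };

    /* registry keyed by EtherType */
    static const struct offload offloads[] = { { 0x0800, ipv4_complete } };

    static complete_fn find_complete_by_type(unsigned short type)
    {
        for (unsigned i = 0; i < sizeof(offloads) / sizeof(offloads[0]); i++)
            if (offloads[i].type == type && offloads[i].complete)
                return offloads[i].complete;
        return NULL;
    }

    int main(void)
    {
        printf("%s\n", find_complete_by_type(0x0800) ? "handler found"
                                                     : "no handler");
        return 0;
    }
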
  
  static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
  {
        return ret;
  }
  
 -static void skb_gro_reset_offset(struct sk_buff *skb)
 -{
 -      const struct skb_shared_info *pinfo = skb_shinfo(skb);
 -      const skb_frag_t *frag0 = &pinfo->frags[0];
 -
 -      NAPI_GRO_CB(skb)->data_offset = 0;
 -      NAPI_GRO_CB(skb)->frag0 = NULL;
 -      NAPI_GRO_CB(skb)->frag0_len = 0;
 -
 -      if (skb_mac_header(skb) == skb_tail_pointer(skb) &&
 -          pinfo->nr_frags &&
 -          !PageHighMem(skb_frag_page(frag0))) {
 -              NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
 -              NAPI_GRO_CB(skb)->frag0_len = skb_frag_size(frag0);
 -      }
 -}
 -
  gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
  {
 -      skb_gro_reset_offset(skb);
 -
        return napi_skb_finish(dev_gro_receive(napi, skb), skb);
  }
  EXPORT_SYMBOL(napi_gro_receive);
@@@ -4013,7 -3986,8 +4018,7 @@@ struct sk_buff *napi_get_frags(struct n
  
        if (!skb) {
                skb = netdev_alloc_skb_ip_align(napi->dev, GRO_MAX_HEAD);
 -              if (skb)
 -                      napi->skb = skb;
 +              napi->skb = skb;
        }
        return skb;
  }
@@@ -4024,7 -3998,12 +4029,7 @@@ static gro_result_t napi_frags_finish(s
  {
        switch (ret) {
        case GRO_NORMAL:
 -      case GRO_HELD:
 -              skb->protocol = eth_type_trans(skb, skb->dev);
 -
 -              if (ret == GRO_HELD)
 -                      skb_gro_pull(skb, -ETH_HLEN);
 -              else if (netif_receive_skb(skb))
 +              if (netif_receive_skb(skb))
                        ret = GRO_DROP;
                break;
  
                napi_reuse_skb(napi, skb);
                break;
  
 +      case GRO_HELD:
        case GRO_MERGED:
                break;
        }
  static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
  {
        struct sk_buff *skb = napi->skb;
 -      struct ethhdr *eth;
 -      unsigned int hlen;
 -      unsigned int off;
  
        napi->skb = NULL;
  
 -      skb_reset_mac_header(skb);
 -      skb_gro_reset_offset(skb);
 -
 -      off = skb_gro_offset(skb);
 -      hlen = off + sizeof(*eth);
 -      eth = skb_gro_header_fast(skb, off);
 -      if (skb_gro_header_hard(skb, hlen)) {
 -              eth = skb_gro_header_slow(skb, hlen, off);
 -              if (unlikely(!eth)) {
 -                      napi_reuse_skb(napi, skb);
 -                      skb = NULL;
 -                      goto out;
 -              }
 +      if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr)))) {
 +              napi_reuse_skb(napi, skb);
 +              return NULL;
        }
 +      skb->protocol = eth_type_trans(skb, skb->dev);
  
 -      skb_gro_pull(skb, sizeof(*eth));
 -
 -      /*
 -       * This works because the only protocols we care about don't require
 -       * special handling.  We'll fix it up properly at the end.
 -       */
 -      skb->protocol = eth->h_proto;
 -
 -out:
        return skb;
  }
  
@@@ -4068,7 -4067,7 +4073,7 @@@ gro_result_t napi_gro_frags(struct napi
  EXPORT_SYMBOL(napi_gro_frags);
  
  /*
 - * net_rps_action sends any pending IPI's for rps.
 + * net_rps_action_and_irq_enable sends any pending IPI's for rps.
   * Note: called with local irq disabled, but exits with local irq enabled.
   */
  static void net_rps_action_and_irq_enable(struct softnet_data *sd)
@@@ -4273,10 -4272,17 +4278,10 @@@ EXPORT_SYMBOL(netif_napi_add)
  
  void netif_napi_del(struct napi_struct *napi)
  {
 -      struct sk_buff *skb, *next;
 -
        list_del_init(&napi->dev_list);
        napi_free_frags(napi);
  
 -      for (skb = napi->gro_list; skb; skb = next) {
 -              next = skb->next;
 -              skb->next = NULL;
 -              kfree_skb(skb);
 -      }
 -
 +      kfree_skb_list(napi->gro_list);
        napi->gro_list = NULL;
        napi->gro_count = 0;
  }
@@@ -4393,6 -4399,19 +4398,6 @@@ struct netdev_adjacent 
        struct rcu_head rcu;
  };
  
 -static struct netdev_adjacent *__netdev_find_adj_rcu(struct net_device *dev,
 -                                                   struct net_device *adj_dev,
 -                                                   struct list_head *adj_list)
 -{
 -      struct netdev_adjacent *adj;
 -
 -      list_for_each_entry_rcu(adj, adj_list, list) {
 -              if (adj->dev == adj_dev)
 -                      return adj;
 -      }
 -      return NULL;
 -}
 -
  static struct netdev_adjacent *__netdev_find_adj(struct net_device *dev,
                                                 struct net_device *adj_dev,
                                                 struct list_head *adj_list)
@@@ -4431,12 -4450,13 +4436,12 @@@ EXPORT_SYMBOL(netdev_has_upper_dev)
   * Find out if a device is linked to an upper device and return true in case
   * it is. The caller must hold the RTNL lock.
   */
 -bool netdev_has_any_upper_dev(struct net_device *dev)
 +static bool netdev_has_any_upper_dev(struct net_device *dev)
  {
        ASSERT_RTNL();
  
        return !list_empty(&dev->all_adj_list.upper);
  }
 -EXPORT_SYMBOL(netdev_has_any_upper_dev);
  
  /**
   * netdev_master_upper_dev_get - Get master upper device
@@@ -4555,27 -4575,6 +4560,27 @@@ void *netdev_lower_get_next_private_rcu
  }
  EXPORT_SYMBOL(netdev_lower_get_next_private_rcu);
  
 +/**
 + * netdev_lower_get_first_private_rcu - Get the first ->private from the
 + *                                   lower neighbour list, RCU
 + *                                   variant
 + * @dev: device
 + *
 + * Gets the first netdev_adjacent->private from the dev's lower neighbour
 + * list. The caller must hold RCU read lock.
 + */
 +void *netdev_lower_get_first_private_rcu(struct net_device *dev)
 +{
 +      struct netdev_adjacent *lower;
 +
 +      lower = list_first_or_null_rcu(&dev->adj_list.lower,
 +                      struct netdev_adjacent, list);
 +      if (lower)
 +              return lower->private;
 +      return NULL;
 +}
 +EXPORT_SYMBOL(netdev_lower_get_first_private_rcu);
 +
  /**
   * netdev_master_upper_dev_get_rcu - Get master upper device
   * @dev: device
@@@ -4668,9 -4667,9 +4673,9 @@@ free_adj
        return ret;
  }
  
 -void __netdev_adjacent_dev_remove(struct net_device *dev,
 -                                struct net_device *adj_dev,
 -                                struct list_head *dev_list)
 +static void __netdev_adjacent_dev_remove(struct net_device *dev,
 +                                       struct net_device *adj_dev,
 +                                       struct list_head *dev_list)
  {
        struct netdev_adjacent *adj;
        char linkname[IFNAMSIZ+7];
        kfree_rcu(adj, rcu);
  }
  
 -int __netdev_adjacent_dev_link_lists(struct net_device *dev,
 -                                   struct net_device *upper_dev,
 -                                   struct list_head *up_list,
 -                                   struct list_head *down_list,
 -                                   void *private, bool master)
 +static int __netdev_adjacent_dev_link_lists(struct net_device *dev,
 +                                          struct net_device *upper_dev,
 +                                          struct list_head *up_list,
 +                                          struct list_head *down_list,
 +                                          void *private, bool master)
  {
        int ret;
  
        return 0;
  }
  
 -int __netdev_adjacent_dev_link(struct net_device *dev,
 -                             struct net_device *upper_dev)
 +static int __netdev_adjacent_dev_link(struct net_device *dev,
 +                                    struct net_device *upper_dev)
  {
        return __netdev_adjacent_dev_link_lists(dev, upper_dev,
                                                &dev->all_adj_list.upper,
                                                NULL, false);
  }
  
 -void __netdev_adjacent_dev_unlink_lists(struct net_device *dev,
 -                                      struct net_device *upper_dev,
 -                                      struct list_head *up_list,
 -                                      struct list_head *down_list)
 +static void __netdev_adjacent_dev_unlink_lists(struct net_device *dev,
 +                                             struct net_device *upper_dev,
 +                                             struct list_head *up_list,
 +                                             struct list_head *down_list)
  {
        __netdev_adjacent_dev_remove(dev, upper_dev, up_list);
        __netdev_adjacent_dev_remove(upper_dev, dev, down_list);
  }
  
 -void __netdev_adjacent_dev_unlink(struct net_device *dev,
 -                                struct net_device *upper_dev)
 +static void __netdev_adjacent_dev_unlink(struct net_device *dev,
 +                                       struct net_device *upper_dev)
  {
        __netdev_adjacent_dev_unlink_lists(dev, upper_dev,
                                           &dev->all_adj_list.upper,
                                           &upper_dev->all_adj_list.lower);
  }
  
 -int __netdev_adjacent_dev_link_neighbour(struct net_device *dev,
 -                                       struct net_device *upper_dev,
 -                                       void *private, bool master)
 +static int __netdev_adjacent_dev_link_neighbour(struct net_device *dev,
 +                                              struct net_device *upper_dev,
 +                                              void *private, bool master)
  {
        int ret = __netdev_adjacent_dev_link(dev, upper_dev);
  
        return 0;
  }
  
 -void __netdev_adjacent_dev_unlink_neighbour(struct net_device *dev,
 -                                          struct net_device *upper_dev)
 +static void __netdev_adjacent_dev_unlink_neighbour(struct net_device *dev,
 +                                                 struct net_device *upper_dev)
  {
        __netdev_adjacent_dev_unlink(dev, upper_dev);
        __netdev_adjacent_dev_unlink_lists(dev, upper_dev,
@@@ -4968,6 -4967,21 +4973,6 @@@ void netdev_upper_dev_unlink(struct net
  }
  EXPORT_SYMBOL(netdev_upper_dev_unlink);
  
 -void *netdev_lower_dev_get_private_rcu(struct net_device *dev,
 -                                     struct net_device *lower_dev)
 -{
 -      struct netdev_adjacent *lower;
 -
 -      if (!lower_dev)
 -              return NULL;
 -      lower = __netdev_find_adj_rcu(dev, lower_dev, &dev->adj_list.lower);
 -      if (!lower)
 -              return NULL;
 -
 -      return lower->private;
 -}
 -EXPORT_SYMBOL(netdev_lower_dev_get_private_rcu);
 -
  void *netdev_lower_dev_get_private(struct net_device *dev,
                                   struct net_device *lower_dev)
  {
@@@ -5300,17 -5314,6 +5305,17 @@@ int dev_change_flags(struct net_device 
  }
  EXPORT_SYMBOL(dev_change_flags);
  
 +static int __dev_set_mtu(struct net_device *dev, int new_mtu)
 +{
 +      const struct net_device_ops *ops = dev->netdev_ops;
 +
 +      if (ops->ndo_change_mtu)
 +              return ops->ndo_change_mtu(dev, new_mtu);
 +
 +      dev->mtu = new_mtu;
 +      return 0;
 +}
 +
  /**
   *    dev_set_mtu - Change maximum transfer unit
   *    @dev: device
   */
  int dev_set_mtu(struct net_device *dev, int new_mtu)
  {
 -      const struct net_device_ops *ops = dev->netdev_ops;
 -      int err;
 +      int err, orig_mtu;
  
        if (new_mtu == dev->mtu)
                return 0;
        if (!netif_device_present(dev))
                return -ENODEV;
  
 -      err = 0;
 -      if (ops->ndo_change_mtu)
 -              err = ops->ndo_change_mtu(dev, new_mtu);
 -      else
 -              dev->mtu = new_mtu;
 +      orig_mtu = dev->mtu;
 +      err = __dev_set_mtu(dev, new_mtu);
  
 -      if (!err)
 -              call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
 +      if (!err) {
 +              err = call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
 +              err = notifier_to_errno(err);
 +              if (err) {
 +                      /* setting mtu back and notifying everyone again,
 +                       * so that they have a chance to revert changes.
 +                       */
 +                      __dev_set_mtu(dev, orig_mtu);
 +                      call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
 +              }
 +      }
        return err;
  }
  EXPORT_SYMBOL(dev_set_mtu);
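
dev_set_mtu() now treats a veto from the NETDEV_CHANGEMTU notifier chain as fatal: the MTU is restored and the chain is notified again so listeners can revert their own state. The apply/notify/rollback pattern in a sketch (the 9000-byte limit is invented for the example):

    #include <stdio.h>

    static int mtu = 1500;

    /* stand-in for the notifier chain; rejects jumbo frames in this sketch */
    static int notify_mtu_change(int new_mtu)
    {
        return new_mtu > 9000 ? -1 : 0;
    }

    static int set_mtu(int new_mtu)
    {
        int orig = mtu;

        mtu = new_mtu;                        /* apply first... */
        if (notify_mtu_change(new_mtu)) {     /* ...then let listeners veto */
            mtu = orig;                       /* roll back */
            notify_mtu_change(orig);          /* tell everyone it reverted */
            return -1;
        }
        return 0;
    }

    int main(void)
    {
        printf("set 9216: %s, mtu=%d\n", set_mtu(9216) ? "rejected" : "ok", mtu);
        printf("set 1280: %s, mtu=%d\n", set_mtu(1280) ? "rejected" : "ok", mtu);
        return 0;
    }
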
@@@ -5838,8 -5836,13 +5843,8 @@@ int register_netdevice(struct net_devic
        dev->features |= NETIF_F_SOFT_FEATURES;
        dev->wanted_features = dev->features & dev->hw_features;
  
 -      /* Turn on no cache copy if HW is doing checksum */
        if (!(dev->flags & IFF_LOOPBACK)) {
                dev->hw_features |= NETIF_F_NOCACHE_COPY;
 -              if (dev->features & NETIF_F_ALL_CSUM) {
 -                      dev->wanted_features |= NETIF_F_NOCACHE_COPY;
 -                      dev->features |= NETIF_F_NOCACHE_COPY;
 -              }
        }
  
        /* Make NETIF_F_HIGHDMA inheritable to VLAN devices.
index b324bfa3485cfa992160ab43a050ebe15b0d7e59,2fc5beaf578349cd543621a460bf2bb4eaa9d221..87577d447554336b33067ab6e2373c6bdd25b93d
@@@ -202,12 -202,12 +202,12 @@@ static __always_inline u32 __flow_hash_
  }
  
  /*
 - * __skb_get_rxhash: calculate a flow hash based on src/dst addresses
 + * __skb_get_hash: calculate a flow hash based on src/dst addresses
   * and src/dst port numbers.  Sets rxhash in skb to non-zero hash value
   * on success, zero indicates no valid hash.  Also, sets l4_rxhash in skb
   * if hash is a canonical 4-tuple hash over transport ports.
   */
 -void __skb_get_rxhash(struct sk_buff *skb)
 +void __skb_get_hash(struct sk_buff *skb)
  {
        struct flow_keys keys;
        u32 hash;
  
        skb->rxhash = hash;
  }
 -EXPORT_SYMBOL(__skb_get_rxhash);
 +EXPORT_SYMBOL(__skb_get_hash);
  
  /*
   * Returns a Tx hash based on the given packet descriptor a Tx queues' number
@@@ -395,17 -395,21 +395,21 @@@ u16 __netdev_pick_tx(struct net_device 
  EXPORT_SYMBOL(__netdev_pick_tx);
  
  struct netdev_queue *netdev_pick_tx(struct net_device *dev,
-                                   struct sk_buff *skb)
+                                   struct sk_buff *skb,
+                                   void *accel_priv)
  {
        int queue_index = 0;
  
        if (dev->real_num_tx_queues != 1) {
                const struct net_device_ops *ops = dev->netdev_ops;
                if (ops->ndo_select_queue)
-                       queue_index = ops->ndo_select_queue(dev, skb);
+                       queue_index = ops->ndo_select_queue(dev, skb,
+                                                           accel_priv);
                else
                        queue_index = __netdev_pick_tx(dev, skb);
-               queue_index = dev_cap_txqueue(dev, queue_index);
+               if (!accel_priv)
+                       queue_index = dev_cap_txqueue(dev, queue_index);
        }
  
        skb_set_queue_mapping(skb, queue_index);
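
With accel_priv threaded through, a forwarding upper device can steer packets via the lower device's ndo_select_queue() using its own context, and netdev_pick_tx() skips the queue-capping step on that path. A sketch of callback-based selection with a fallback and a conditional cap (all names hypothetical):

    #include <stdio.h>

    struct dev {
        int num_queues;
        int (*select_queue)(struct dev *d, int prio, void *accel_priv);
    };

    static int default_pick(struct dev *d, int prio)
    {
        return prio % d->num_queues;            /* hash-style fallback */
    }

    static int pick_tx(struct dev *d, int prio, void *accel_priv)
    {
        int q;

        if (d->select_queue)
            q = d->select_queue(d, prio, accel_priv);
        else
            q = default_pick(d, prio);

        if (!accel_priv && q >= d->num_queues)  /* cap only on the normal path */
            q = 0;
        return q;
    }

    int main(void)
    {
        struct dev d = { 4, NULL };
        printf("queue %d\n", pick_tx(&d, 6, NULL));
        return 0;
    }
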
diff --combined net/ipv6/addrconf.c
index a9fa6c1feed5db4c4ee3946d163ddc92ae0288de,abe46a4228ce96655d1aacec77052a89e70cfdd4..b0cd122e5fb426b00466fd21af94b4d9ad72fc4f
@@@ -442,8 -442,6 +442,8 @@@ static int inet6_netconf_msgsize_devcon
        if (type == -1 || type == NETCONFA_MC_FORWARDING)
                size += nla_total_size(4);
  #endif
 +      if (type == -1 || type == NETCONFA_PROXY_NEIGH)
 +              size += nla_total_size(4);
  
        return size;
  }
@@@ -477,10 -475,6 +477,10 @@@ static int inet6_netconf_fill_devconf(s
                        devconf->mc_forwarding) < 0)
                goto nla_put_failure;
  #endif
 +      if ((type == -1 || type == NETCONFA_PROXY_NEIGH) &&
 +          nla_put_s32(skb, NETCONFA_PROXY_NEIGH, devconf->proxy_ndp) < 0)
 +              goto nla_put_failure;
 +
        return nlmsg_end(skb, nlh);
  
  nla_put_failure:
@@@ -515,7 -509,6 +515,7 @@@ errout
  static const struct nla_policy devconf_ipv6_policy[NETCONFA_MAX+1] = {
        [NETCONFA_IFINDEX]      = { .len = sizeof(int) },
        [NETCONFA_FORWARDING]   = { .len = sizeof(int) },
 +      [NETCONFA_PROXY_NEIGH]  = { .len = sizeof(int) },
  };
  
  static int inet6_netconf_get_devconf(struct sk_buff *in_skb,
@@@ -841,8 -834,6 +841,8 @@@ ipv6_add_addr(struct inet6_dev *idev, c
                goto out;
        }
  
 +      neigh_parms_data_state_setall(idev->nd_parms);
 +
        ifa->addr = *addr;
        if (peer_addr)
                ifa->peer_addr = *peer_addr;
@@@ -995,9 -986,12 +995,9 @@@ static void ipv6_del_addr(struct inet6_
         * --yoshfuji
         */
        if ((ifp->flags & IFA_F_PERMANENT) && onlink < 1) {
 -              struct in6_addr prefix;
                struct rt6_info *rt;
  
 -              ipv6_addr_prefix(&prefix, &ifp->addr, ifp->prefix_len);
 -
 -              rt = addrconf_get_prefix_route(&prefix,
 +              rt = addrconf_get_prefix_route(&ifp->addr,
                                               ifp->prefix_len,
                                               ifp->idev->dev,
                                               0, RTF_GATEWAY | RTF_DEFAULT);
@@@ -1030,7 -1024,7 +1030,7 @@@ static int ipv6_create_tempaddr(struct 
        u32 addr_flags;
        unsigned long now = jiffies;
  
 -      write_lock(&idev->lock);
 +      write_lock_bh(&idev->lock);
        if (ift) {
                spin_lock_bh(&ift->lock);
                memcpy(&addr.s6_addr[8], &ift->addr.s6_addr[8], 8);
  retry:
        in6_dev_hold(idev);
        if (idev->cnf.use_tempaddr <= 0) {
 -              write_unlock(&idev->lock);
 +              write_unlock_bh(&idev->lock);
                pr_info("%s: use_tempaddr is disabled\n", __func__);
                in6_dev_put(idev);
                ret = -1;
        if (ifp->regen_count++ >= idev->cnf.regen_max_retry) {
                idev->cnf.use_tempaddr = -1;    /*XXX*/
                spin_unlock_bh(&ifp->lock);
 -              write_unlock(&idev->lock);
 +              write_unlock_bh(&idev->lock);
                pr_warn("%s: regeneration time exceeded - disabled temporary address support\n",
                        __func__);
                in6_dev_put(idev);
  
        regen_advance = idev->cnf.regen_max_retry *
                        idev->cnf.dad_transmits *
 -                      idev->nd_parms->retrans_time / HZ;
 -      write_unlock(&idev->lock);
 +                      NEIGH_VAR(idev->nd_parms, RETRANS_TIME) / HZ;
 +      write_unlock_bh(&idev->lock);
  
        /* A temporary address is created only if this calculated Preferred
         * Lifetime is greater than REGEN_ADVANCE time units.  In particular,
                in6_dev_put(idev);
                pr_info("%s: retry temporary address regeneration\n", __func__);
                tmpaddr = &addr;
 -              write_lock(&idev->lock);
 +              write_lock_bh(&idev->lock);
                goto retry;
        }
  
@@@ -1413,7 -1407,7 +1413,7 @@@ try_nextdev
  EXPORT_SYMBOL(ipv6_dev_get_saddr);
  
  int __ipv6_get_lladdr(struct inet6_dev *idev, struct in6_addr *addr,
 -                    unsigned char banned_flags)
 +                    u32 banned_flags)
  {
        struct inet6_ifaddr *ifp;
        int err = -EADDRNOTAVAIL;
  }
  
  int ipv6_get_lladdr(struct net_device *dev, struct in6_addr *addr,
 -                  unsigned char banned_flags)
 +                  u32 banned_flags)
  {
        struct inet6_dev *idev;
        int err = -EADDRNOTAVAIL;
@@@ -1822,7 -1816,6 +1822,7 @@@ static int ipv6_generate_eui64(u8 *eui
                return addrconf_ifid_sit(eui, dev);
        case ARPHRD_IPGRE:
                return addrconf_ifid_gre(eui, dev);
 +      case ARPHRD_6LOWPAN:
        case ARPHRD_IEEE802154:
                return addrconf_ifid_eui64(eui, dev);
        case ARPHRD_IEEE1394:
@@@ -1895,8 -1888,7 +1895,8 @@@ static void ipv6_regen_rndid(unsigned l
  
        expires = jiffies +
                idev->cnf.temp_prefered_lft * HZ -
 -              idev->cnf.regen_max_retry * idev->cnf.dad_transmits * idev->nd_parms->retrans_time -
 +              idev->cnf.regen_max_retry * idev->cnf.dad_transmits *
 +              NEIGH_VAR(idev->nd_parms, RETRANS_TIME) -
                idev->cnf.max_desync_factor * HZ;
        if (time_before(expires, jiffies)) {
                pr_warn("%s: too short regeneration interval; timer disabled for %s\n",
@@@ -2024,73 -2016,6 +2024,73 @@@ static struct inet6_dev *addrconf_add_d
        return idev;
  }
  
 +static void manage_tempaddrs(struct inet6_dev *idev,
 +                           struct inet6_ifaddr *ifp,
 +                           __u32 valid_lft, __u32 prefered_lft,
 +                           bool create, unsigned long now)
 +{
 +      u32 flags;
 +      struct inet6_ifaddr *ift;
 +
 +      read_lock_bh(&idev->lock);
 +      /* update all temporary addresses in the list */
 +      list_for_each_entry(ift, &idev->tempaddr_list, tmp_list) {
 +              int age, max_valid, max_prefered;
 +
 +              if (ifp != ift->ifpub)
 +                      continue;
 +
 +              /* RFC 4941 section 3.3:
 +               * If a received option will extend the lifetime of a public
 +               * address, the lifetimes of temporary addresses should
 +               * be extended, subject to the overall constraint that no
 +               * temporary addresses should ever remain "valid" or "preferred"
 +               * for a time longer than (TEMP_VALID_LIFETIME) or
 +               * (TEMP_PREFERRED_LIFETIME - DESYNC_FACTOR), respectively.
 +               */
 +              age = (now - ift->cstamp) / HZ;
 +              max_valid = idev->cnf.temp_valid_lft - age;
 +              if (max_valid < 0)
 +                      max_valid = 0;
 +
 +              max_prefered = idev->cnf.temp_prefered_lft -
 +                             idev->cnf.max_desync_factor - age;
 +              if (max_prefered < 0)
 +                      max_prefered = 0;
 +
 +              if (valid_lft > max_valid)
 +                      valid_lft = max_valid;
 +
 +              if (prefered_lft > max_prefered)
 +                      prefered_lft = max_prefered;
 +
 +              spin_lock(&ift->lock);
 +              flags = ift->flags;
 +              ift->valid_lft = valid_lft;
 +              ift->prefered_lft = prefered_lft;
 +              ift->tstamp = now;
 +              if (prefered_lft > 0)
 +                      ift->flags &= ~IFA_F_DEPRECATED;
 +
 +              spin_unlock(&ift->lock);
 +              if (!(flags&IFA_F_TENTATIVE))
 +                      ipv6_ifa_notify(0, ift);
 +      }
 +
 +      if ((create || list_empty(&idev->tempaddr_list)) &&
 +          idev->cnf.use_tempaddr > 0) {
 +              /* When a new public address is created as described
 +               * in [ADDRCONF], also create a new temporary address.
 +               * Also create a temporary address if it's enabled but
 +               * no temporary address currently exists.
 +               */
 +              read_unlock_bh(&idev->lock);
 +              ipv6_create_tempaddr(ifp, NULL);
 +      } else {
 +              read_unlock_bh(&idev->lock);
 +      }
 +}
 +
  void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len, bool sllao)
  {
        struct prefix_info *pinfo;
@@@ -2245,7 -2170,6 +2245,7 @@@ ok
                                return;
                        }
  
 +                      ifp->flags |= IFA_F_MANAGETEMPADDR;
                        update_lft = 0;
                        create = 1;
                        ifp->cstamp = jiffies;
                }
  
                if (ifp) {
 -                      int flags;
 +                      u32 flags;
                        unsigned long now;
 -                      struct inet6_ifaddr *ift;
                        u32 stored_lft;
  
                        /* update lifetime (RFC2462 5.5.3 e) */
                        } else
                                spin_unlock(&ifp->lock);
  
 -                      read_lock_bh(&in6_dev->lock);
 -                      /* update all temporary addresses in the list */
 -                      list_for_each_entry(ift, &in6_dev->tempaddr_list,
 -                                          tmp_list) {
 -                              int age, max_valid, max_prefered;
 -
 -                              if (ifp != ift->ifpub)
 -                                      continue;
 -
 -                              /*
 -                               * RFC 4941 section 3.3:
 -                               * If a received option will extend the lifetime
 -                               * of a public address, the lifetimes of
 -                               * temporary addresses should be extended,
 -                               * subject to the overall constraint that no
 -                               * temporary addresses should ever remain
 -                               * "valid" or "preferred" for a time longer than
 -                               * (TEMP_VALID_LIFETIME) or
 -                               * (TEMP_PREFERRED_LIFETIME - DESYNC_FACTOR),
 -                               * respectively.
 -                               */
 -                              age = (now - ift->cstamp) / HZ;
 -                              max_valid = in6_dev->cnf.temp_valid_lft - age;
 -                              if (max_valid < 0)
 -                                      max_valid = 0;
 -
 -                              max_prefered = in6_dev->cnf.temp_prefered_lft -
 -                                             in6_dev->cnf.max_desync_factor -
 -                                             age;
 -                              if (max_prefered < 0)
 -                                      max_prefered = 0;
 -
 -                              if (valid_lft > max_valid)
 -                                      valid_lft = max_valid;
 -
 -                              if (prefered_lft > max_prefered)
 -                                      prefered_lft = max_prefered;
 -
 -                              spin_lock(&ift->lock);
 -                              flags = ift->flags;
 -                              ift->valid_lft = valid_lft;
 -                              ift->prefered_lft = prefered_lft;
 -                              ift->tstamp = now;
 -                              if (prefered_lft > 0)
 -                                      ift->flags &= ~IFA_F_DEPRECATED;
 -
 -                              spin_unlock(&ift->lock);
 -                              if (!(flags&IFA_F_TENTATIVE))
 -                                      ipv6_ifa_notify(0, ift);
 -                      }
 -
 -                      if ((create || list_empty(&in6_dev->tempaddr_list)) && in6_dev->cnf.use_tempaddr > 0) {
 -                              /*
 -                               * When a new public address is created as
 -                               * described in [ADDRCONF], also create a new
 -                               * temporary address. Also create a temporary
 -                               * address if it's enabled but no temporary
 -                               * address currently exists.
 -                               */
 -                              read_unlock_bh(&in6_dev->lock);
 -                              ipv6_create_tempaddr(ifp, NULL);
 -                      } else {
 -                              read_unlock_bh(&in6_dev->lock);
 -                      }
 +                      manage_tempaddrs(in6_dev, ifp, valid_lft, prefered_lft,
 +                                       create, now);
  
                        in6_ifa_put(ifp);
                        addrconf_verify(0);
@@@ -2376,11 -2363,10 +2376,11 @@@ err_exit
  /*
   *    Manual configuration of address on an interface
   */
 -static int inet6_addr_add(struct net *net, int ifindex, const struct in6_addr *pfx,
 +static int inet6_addr_add(struct net *net, int ifindex,
 +                        const struct in6_addr *pfx,
                          const struct in6_addr *peer_pfx,
 -                        unsigned int plen, __u8 ifa_flags, __u32 prefered_lft,
 -                        __u32 valid_lft)
 +                        unsigned int plen, __u32 ifa_flags,
 +                        __u32 prefered_lft, __u32 valid_lft)
  {
        struct inet6_ifaddr *ifp;
        struct inet6_dev *idev;
        if (!valid_lft || prefered_lft > valid_lft)
                return -EINVAL;
  
 +      if (ifa_flags & IFA_F_MANAGETEMPADDR && plen != 64)
 +              return -EINVAL;
 +
        dev = __dev_get_by_index(net, ifindex);
        if (!dev)
                return -ENODEV;
                 * manually configured addresses
                 */
                addrconf_dad_start(ifp);
 +              if (ifa_flags & IFA_F_MANAGETEMPADDR)
 +                      manage_tempaddrs(idev, ifp, valid_lft, prefered_lft,
 +                                       true, jiffies);
                in6_ifa_put(ifp);
                addrconf_verify(0);
                return 0;
@@@ -2529,7 -2509,8 +2529,8 @@@ static void add_addr(struct inet6_dev *
        struct inet6_ifaddr *ifp;
  
        ifp = ipv6_add_addr(idev, addr, NULL, plen,
-                           scope, IFA_F_PERMANENT, 0, 0);
+                           scope, IFA_F_PERMANENT,
+                           INFINITY_LIFE_TIME, INFINITY_LIFE_TIME);
        if (!IS_ERR(ifp)) {
                spin_lock_bh(&ifp->lock);
                ifp->flags &= ~IFA_F_TENTATIVE;
@@@ -2657,7 -2638,8 +2658,8 @@@ static void addrconf_add_linklocal(stru
  #endif
  
  
-       ifp = ipv6_add_addr(idev, addr, NULL, 64, IFA_LINK, addr_flags, 0, 0);
+       ifp = ipv6_add_addr(idev, addr, NULL, 64, IFA_LINK, addr_flags,
+                           INFINITY_LIFE_TIME, INFINITY_LIFE_TIME);
        if (!IS_ERR(ifp)) {
                addrconf_prefix_route(&ifp->addr, ifp->prefix_len, idev->dev, 0, 0);
                addrconf_dad_start(ifp);
@@@ -2678,8 -2660,7 +2680,8 @@@ static void addrconf_dev_config(struct 
            (dev->type != ARPHRD_INFINIBAND) &&
            (dev->type != ARPHRD_IEEE802154) &&
            (dev->type != ARPHRD_IEEE1394) &&
 -          (dev->type != ARPHRD_TUNNEL6)) {
 +          (dev->type != ARPHRD_TUNNEL6) &&
 +          (dev->type != ARPHRD_6LOWPAN)) {
                /* Alas, we support only Ethernet autoconfiguration. */
                return;
        }
@@@ -3197,8 -3178,7 +3199,8 @@@ static void addrconf_dad_timer(unsigne
        }
  
        ifp->dad_probes--;
 -      addrconf_mod_dad_timer(ifp, ifp->idev->nd_parms->retrans_time);
 +      addrconf_mod_dad_timer(ifp,
 +                             NEIGH_VAR(ifp->idev->nd_parms, RETRANS_TIME));
        spin_unlock(&ifp->lock);
        write_unlock(&idev->lock);
  
@@@ -3378,7 -3358,7 +3380,7 @@@ static int if6_seq_show(struct seq_fil
                   ifp->idev->dev->ifindex,
                   ifp->prefix_len,
                   ifp->scope,
 -                 ifp->flags,
 +                 (u8) ifp->flags,
                   ifp->idev->dev->name);
        return 0;
  }
@@@ -3525,7 -3505,7 +3527,7 @@@ restart
                                   !(ifp->flags&IFA_F_TENTATIVE)) {
                                unsigned long regen_advance = ifp->idev->cnf.regen_max_retry *
                                        ifp->idev->cnf.dad_transmits *
 -                                      ifp->idev->nd_parms->retrans_time / HZ;
 +                                      NEIGH_VAR(ifp->idev->nd_parms, RETRANS_TIME) / HZ;
  
                                if (age >= ifp->prefered_lft - regen_advance) {
                                        struct inet6_ifaddr *ifpub = ifp->ifpub;
@@@ -3600,7 -3580,6 +3602,7 @@@ static const struct nla_policy ifa_ipv6
        [IFA_ADDRESS]           = { .len = sizeof(struct in6_addr) },
        [IFA_LOCAL]             = { .len = sizeof(struct in6_addr) },
        [IFA_CACHEINFO]         = { .len = sizeof(struct ifa_cacheinfo) },
 +      [IFA_FLAGS]             = { .len = sizeof(u32) },
  };
  
  static int
@@@ -3624,21 -3603,16 +3626,21 @@@ inet6_rtm_deladdr(struct sk_buff *skb, 
        return inet6_addr_del(net, ifm->ifa_index, pfx, ifm->ifa_prefixlen);
  }
  
 -static int inet6_addr_modify(struct inet6_ifaddr *ifp, u8 ifa_flags,
 +static int inet6_addr_modify(struct inet6_ifaddr *ifp, u32 ifa_flags,
                             u32 prefered_lft, u32 valid_lft)
  {
        u32 flags;
        clock_t expires;
        unsigned long timeout;
 +      bool was_managetempaddr;
  
        if (!valid_lft || (prefered_lft > valid_lft))
                return -EINVAL;
  
 +      if (ifa_flags & IFA_F_MANAGETEMPADDR &&
 +          (ifp->flags & IFA_F_TEMPORARY || ifp->prefix_len != 64))
 +              return -EINVAL;
 +
        timeout = addrconf_timeout_fixup(valid_lft, HZ);
        if (addrconf_finite_timeout(timeout)) {
                expires = jiffies_to_clock_t(timeout * HZ);
        }
  
        spin_lock_bh(&ifp->lock);
 -      ifp->flags = (ifp->flags & ~(IFA_F_DEPRECATED | IFA_F_PERMANENT | IFA_F_NODAD | IFA_F_HOMEADDRESS)) | ifa_flags;
 +      was_managetempaddr = ifp->flags & IFA_F_MANAGETEMPADDR;
 +      ifp->flags &= ~(IFA_F_DEPRECATED | IFA_F_PERMANENT | IFA_F_NODAD |
 +                      IFA_F_HOMEADDRESS | IFA_F_MANAGETEMPADDR);
 +      ifp->flags |= ifa_flags;
        ifp->tstamp = jiffies;
        ifp->valid_lft = valid_lft;
        ifp->prefered_lft = prefered_lft;
  
        addrconf_prefix_route(&ifp->addr, ifp->prefix_len, ifp->idev->dev,
                              expires, flags);
 +
 +      if (was_managetempaddr || ifp->flags & IFA_F_MANAGETEMPADDR) {
 +              if (was_managetempaddr && !(ifp->flags & IFA_F_MANAGETEMPADDR))
 +                      valid_lft = prefered_lft = 0;
 +              manage_tempaddrs(ifp->idev, ifp, valid_lft, prefered_lft,
 +                               !was_managetempaddr, jiffies);
 +      }
 +
        addrconf_verify(0);
  
        return 0;
@@@ -3695,7 -3658,7 +3697,7 @@@ inet6_rtm_newaddr(struct sk_buff *skb, 
        struct inet6_ifaddr *ifa;
        struct net_device *dev;
        u32 valid_lft = INFINITY_LIFE_TIME, preferred_lft = INFINITY_LIFE_TIME;
 -      u8 ifa_flags;
 +      u32 ifa_flags;
        int err;
  
        err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_ipv6_policy);
        if (dev == NULL)
                return -ENODEV;
  
 +      ifa_flags = tb[IFA_FLAGS] ? nla_get_u32(tb[IFA_FLAGS]) : ifm->ifa_flags;
 +
        /* We ignore other flags so far. */
 -      ifa_flags = ifm->ifa_flags & (IFA_F_NODAD | IFA_F_HOMEADDRESS);
 +      ifa_flags &= IFA_F_NODAD | IFA_F_HOMEADDRESS | IFA_F_MANAGETEMPADDR;
  
        ifa = ipv6_get_ifaddr(net, pfx, dev, 1);
        if (ifa == NULL) {
        return err;
  }
  
 -static void put_ifaddrmsg(struct nlmsghdr *nlh, u8 prefixlen, u8 flags,
 +static void put_ifaddrmsg(struct nlmsghdr *nlh, u8 prefixlen, u32 flags,
                          u8 scope, int ifindex)
  {
        struct ifaddrmsg *ifm;
@@@ -3792,8 -3753,7 +3794,8 @@@ static inline int inet6_ifaddr_msgsize(
        return NLMSG_ALIGN(sizeof(struct ifaddrmsg))
               + nla_total_size(16) /* IFA_LOCAL */
               + nla_total_size(16) /* IFA_ADDRESS */
 -             + nla_total_size(sizeof(struct ifa_cacheinfo));
 +             + nla_total_size(sizeof(struct ifa_cacheinfo))
 +             + nla_total_size(4)  /* IFA_FLAGS */;
  }
  
  static int inet6_fill_ifaddr(struct sk_buff *skb, struct inet6_ifaddr *ifa,
        if (put_cacheinfo(skb, ifa->cstamp, ifa->tstamp, preferred, valid) < 0)
                goto error;
  
 +      if (nla_put_u32(skb, IFA_FLAGS, ifa->flags) < 0)
 +              goto error;
 +
        return nlmsg_end(skb, nlh);
  
  error:
@@@ -4248,7 -4205,7 +4250,7 @@@ static int inet6_fill_ifla6_attrs(struc
        ci.max_reasm_len = IPV6_MAXPLEN;
        ci.tstamp = cstamp_delta(idev->tstamp);
        ci.reachable_time = jiffies_to_msecs(idev->nd_parms->reachable_time);
 -      ci.retrans_time = jiffies_to_msecs(idev->nd_parms->retrans_time);
 +      ci.retrans_time = jiffies_to_msecs(NEIGH_VAR(idev->nd_parms, RETRANS_TIME));
        if (nla_put(skb, IFLA_INET6_CACHEINFO, sizeof(ci), &ci))
                goto nla_put_failure;
        nla = nla_reserve(skb, IFLA_INET6_CONF, DEVCONF_MAX * sizeof(s32));
@@@ -4741,46 -4698,6 +4743,46 @@@ int addrconf_sysctl_disable(struct ctl_
        return ret;
  }
  
 +static
 +int addrconf_sysctl_proxy_ndp(struct ctl_table *ctl, int write,
 +                            void __user *buffer, size_t *lenp, loff_t *ppos)
 +{
 +      int *valp = ctl->data;
 +      int ret;
 +      int old, new;
 +
 +      old = *valp;
 +      ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
 +      new = *valp;
 +
 +      if (write && old != new) {
 +              struct net *net = ctl->extra2;
 +
 +              if (!rtnl_trylock())
 +                      return restart_syscall();
 +
 +              if (valp == &net->ipv6.devconf_dflt->proxy_ndp)
 +                      inet6_netconf_notify_devconf(net, NETCONFA_PROXY_NEIGH,
 +                                                   NETCONFA_IFINDEX_DEFAULT,
 +                                                   net->ipv6.devconf_dflt);
 +              else if (valp == &net->ipv6.devconf_all->proxy_ndp)
 +                      inet6_netconf_notify_devconf(net, NETCONFA_PROXY_NEIGH,
 +                                                   NETCONFA_IFINDEX_ALL,
 +                                                   net->ipv6.devconf_all);
 +              else {
 +                      struct inet6_dev *idev = ctl->extra1;
 +
 +                      inet6_netconf_notify_devconf(net, NETCONFA_PROXY_NEIGH,
 +                                                   idev->dev->ifindex,
 +                                                   &idev->cnf);
 +              }
 +              rtnl_unlock();
 +      }
 +
 +      return ret;
 +}
 +
 +
  static struct addrconf_sysctl_table
  {
        struct ctl_table_header *sysctl_header;
                        .data           = &ipv6_devconf.proxy_ndp,
                        .maxlen         = sizeof(int),
                        .mode           = 0644,
 -                      .proc_handler   = proc_dointvec,
 +                      .proc_handler   = addrconf_sysctl_proxy_ndp,
                },
                {
                        .procname       = "accept_source_route",
@@@ -5083,7 -5000,7 +5085,7 @@@ static void __addrconf_sysctl_unregiste
  
  static void addrconf_sysctl_register(struct inet6_dev *idev)
  {
 -      neigh_sysctl_register(idev->dev, idev->nd_parms, "ipv6",
 +      neigh_sysctl_register(idev->dev, idev->nd_parms,
                              &ndisc_ifinfo_sysctl_change);
        __addrconf_sysctl_register(dev_net(idev->dev), idev->dev->name,
                                        idev, &idev->cnf);
@@@ -5216,7 -5133,9 +5218,7 @@@ int __init addrconf_init(void
  
        addrconf_verify(0);
  
 -      err = rtnl_af_register(&inet6_ops);
 -      if (err < 0)
 -              goto errout_af;
 +      rtnl_af_register(&inet6_ops);
  
        err = __rtnl_register(PF_INET6, RTM_GETLINK, NULL, inet6_dump_ifinfo,
                              NULL);
        return 0;
  errout:
        rtnl_af_unregister(&inet6_ops);
 -errout_af:
        unregister_netdevice_notifier(&ipv6_dev_notf);
  errlo:
        unregister_pernet_subsys(&addrconf_ops);
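
Note on the temporary-address handling above: manage_tempaddrs() centralizes the RFC 4941 section 3.3 clamping that both addrconf_prefix_rcv() and the new IFA_F_MANAGETEMPADDR paths rely on. The clamp itself is plain arithmetic and can be sanity-checked in userspace. A minimal sketch, assuming illustrative configuration values (the constants and clamp_lifetimes() are hypothetical stand-ins for the idev->cnf fields, not kernel API):

#include <stdio.h>

/* Hypothetical stand-ins for idev->cnf.* (seconds; illustrative only). */
#define TEMP_VALID_LFT     604800	/* temp_valid_lft */
#define TEMP_PREFERED_LFT   86400	/* temp_prefered_lft */
#define MAX_DESYNC_FACTOR     600	/* max_desync_factor */

/* Clamp a received (valid, preferred) lifetime pair for a temporary
 * address of the given age, mirroring manage_tempaddrs(): lifetimes may
 * be extended, but never past TEMP_VALID_LIFETIME or
 * (TEMP_PREFERRED_LIFETIME - DESYNC_FACTOR).
 */
static void clamp_lifetimes(unsigned int age, unsigned int *valid_lft,
			    unsigned int *prefered_lft)
{
	int max_valid = TEMP_VALID_LFT - age;
	int max_prefered = TEMP_PREFERED_LFT - MAX_DESYNC_FACTOR - age;

	if (max_valid < 0)
		max_valid = 0;
	if (max_prefered < 0)
		max_prefered = 0;

	if (*valid_lft > (unsigned int)max_valid)
		*valid_lft = max_valid;
	if (*prefered_lft > (unsigned int)max_prefered)
		*prefered_lft = max_prefered;
}

int main(void)
{
	unsigned int valid = 2592000, prefered = 604800; /* from an RA */

	clamp_lifetimes(3600, &valid, &prefered);
	printf("valid=%u prefered=%u\n", valid, prefered); /* 601200 82200 */
	return 0;
}
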
diff --combined net/ipv6/ip6_vti.c
index b50acd5e75d25349001ea0eb475ba5bce7e628bb,7b42d5ef868deaa193094a85a00b502aeaba0bdd..2d19272b8ceea6ade3b935904a7e7903d20a2a2a
@@@ -24,6 -24,7 +24,6 @@@
  #include <linux/if.h>
  #include <linux/in.h>
  #include <linux/ip.h>
 -#include <linux/if_tunnel.h>
  #include <linux/net.h>
  #include <linux/in6.h>
  #include <linux/netdevice.h>
@@@ -291,7 -292,7 +291,7 @@@ static int vti6_rcv(struct sk_buff *skb
  
        if ((t = vti6_tnl_lookup(dev_net(skb->dev), &ipv6h->saddr,
                                 &ipv6h->daddr)) != NULL) {
 -              struct pcpu_tstats *tstats;
 +              struct pcpu_sw_netstats *tstats;
  
                if (t->parms.proto != IPPROTO_IPV6 && t->parms.proto != 0) {
                        rcu_read_unlock();
@@@ -731,12 -732,18 +731,18 @@@ static void vti6_dev_setup(struct net_d
  static inline int vti6_dev_init_gen(struct net_device *dev)
  {
        struct ip6_tnl *t = netdev_priv(dev);
+       int i;
  
        t->dev = dev;
        t->net = dev_net(dev);
 -      dev->tstats = alloc_percpu(struct pcpu_tstats);
 +      dev->tstats = alloc_percpu(struct pcpu_sw_netstats);
        if (!dev->tstats)
                return -ENOMEM;
 -              struct pcpu_tstats *stats;
+       for_each_possible_cpu(i) {
++              struct pcpu_sw_netstats *stats;
+               stats = per_cpu_ptr(dev->tstats, i);
+               u64_stats_init(&stats->syncp);
+       }
        return 0;
  }
  
diff --combined net/mac80211/iface.c
index b2c83c0f06d027b80da1c4fe93e4028b0a379413,a0757913046eab8eee0c99104d8900db4e338c3e..3dfd20a453aba250fff726d1733f0dac6c763b3c
@@@ -401,8 -401,6 +401,8 @@@ int ieee80211_add_virtual_monitor(struc
        snprintf(sdata->name, IFNAMSIZ, "%s-monitor",
                 wiphy_name(local->hw.wiphy));
  
 +      sdata->encrypt_headroom = IEEE80211_ENCRYPT_HEADROOM;
 +
        ieee80211_set_default_queues(sdata);
  
        ret = drv_add_interface(local, sdata);
                return ret;
        }
  
 +      mutex_lock(&local->mtx);
        ret = ieee80211_vif_use_channel(sdata, &local->monitor_chandef,
                                        IEEE80211_CHANCTX_EXCLUSIVE);
 +      mutex_unlock(&local->mtx);
        if (ret) {
                drv_remove_interface(local, sdata);
                kfree(sdata);
@@@ -458,9 -454,7 +458,9 @@@ void ieee80211_del_virtual_monitor(stru
  
        synchronize_net();
  
 +      mutex_lock(&local->mtx);
        ieee80211_vif_release_channel(sdata);
 +      mutex_unlock(&local->mtx);
  
        drv_remove_interface(local, sdata);
  
@@@ -755,7 -749,6 +755,7 @@@ static void ieee80211_do_stop(struct ie
        u32 hw_reconf_flags = 0;
        int i, flushed;
        struct ps_data *ps;
 +      struct cfg80211_chan_def chandef;
  
        clear_bit(SDATA_STATE_RUNNING, &sdata->state);
  
         * This is relevant only in WDS mode, in all other modes we've
         * already removed all stations when disconnecting or similar,
         * so warn otherwise.
 -       *
 -       * We call sta_info_flush_cleanup() later, to combine RCU waits.
         */
 -      flushed = sta_info_flush_defer(sdata);
 +      flushed = sta_info_flush(sdata);
        WARN_ON_ONCE((sdata->vif.type != NL80211_IFTYPE_WDS && flushed > 0) ||
                     (sdata->vif.type == NL80211_IFTYPE_WDS && flushed != 1));
  
        cancel_delayed_work_sync(&sdata->dfs_cac_timer_work);
  
        if (sdata->wdev.cac_started) {
 +              chandef = sdata->vif.bss_conf.chandef;
                WARN_ON(local->suspended);
 -              mutex_lock(&local->iflist_mtx);
 +              mutex_lock(&local->mtx);
                ieee80211_vif_release_channel(sdata);
 -              mutex_unlock(&local->iflist_mtx);
 -              cfg80211_cac_event(sdata->dev, NL80211_RADAR_CAC_ABORTED,
 +              mutex_unlock(&local->mtx);
 +              cfg80211_cac_event(sdata->dev, &chandef,
 +                                 NL80211_RADAR_CAC_ABORTED,
                                   GFP_KERNEL);
        }
  
                cancel_work_sync(&sdata->work);
                /*
                 * When we get here, the interface is marked down.
 +               * Free the remaining keys, if there are any
 +               * (shouldn't be, except maybe in WDS mode?)
                 *
 -               * sta_info_flush_cleanup() requires rcu_barrier()
 -               * first to wait for the station call_rcu() calls
 -               * to complete, and we also need synchronize_rcu()
 -               * to wait for the RX path in case it is using the
 -               * interface and enqueuing frames at this very time on
 +               * Force the key freeing to always synchronize_net()
 +               * to wait for the RX path in case it is using this
 +              * interface enqueuing frames at this very time on
                 * another CPU.
                 */
 -              synchronize_rcu();
 -              rcu_barrier();
 -              sta_info_flush_cleanup(sdata);
 -
 -              /*
 -               * Free all remaining keys, there shouldn't be any,
 -               * except maybe in WDS mode?
 -               */
 -              ieee80211_free_keys(sdata);
 +              ieee80211_free_keys(sdata, true);
  
                /* fall through */
        case NL80211_IFTYPE_AP:
@@@ -1012,6 -1013,17 +1012,6 @@@ static void ieee80211_set_multicast_lis
                        atomic_dec(&local->iff_promiscs);
                sdata->flags ^= IEEE80211_SDATA_PROMISC;
        }
 -
 -      /*
 -       * TODO: If somebody needs this on AP interfaces,
 -       *       it can be enabled easily but multicast
 -       *       addresses from VLANs need to be synced.
 -       */
 -      if (sdata->vif.type != NL80211_IFTYPE_MONITOR &&
 -          sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
 -          sdata->vif.type != NL80211_IFTYPE_AP)
 -              drv_set_multicast_list(local, sdata, &dev->mc);
 -
        spin_lock_bh(&local->filter_lock);
        __hw_addr_sync(&local->mc_list, &dev->mc, dev->addr_len);
        spin_unlock_bh(&local->filter_lock);
   */
  static void ieee80211_teardown_sdata(struct ieee80211_sub_if_data *sdata)
  {
 -      int flushed;
        int i;
  
        /* free extra data */
 -      ieee80211_free_keys(sdata);
 +      ieee80211_free_keys(sdata, false);
  
        ieee80211_debugfs_remove_netdev(sdata);
  
  
        if (ieee80211_vif_is_mesh(&sdata->vif))
                mesh_rmc_free(sdata);
 -
 -      flushed = sta_info_flush(sdata);
 -      WARN_ON(flushed);
  }
  
  static void ieee80211_uninit(struct net_device *dev)
  }
  
  static u16 ieee80211_netdev_select_queue(struct net_device *dev,
-                                        struct sk_buff *skb)
+                                        struct sk_buff *skb,
+                                        void *accel_priv)
  {
        return ieee80211_select_queue(IEEE80211_DEV_TO_SUB_IF(dev), skb);
  }
@@@ -1062,7 -1079,8 +1063,8 @@@ static const struct net_device_ops ieee
  };
  
  static u16 ieee80211_monitor_select_queue(struct net_device *dev,
-                                         struct sk_buff *skb)
+                                         struct sk_buff *skb,
+                                         void *accel_priv)
  {
        struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
        struct ieee80211_local *local = sdata->local;
@@@ -1254,7 -1272,6 +1256,7 @@@ static void ieee80211_setup_sdata(struc
  
        sdata->control_port_protocol = cpu_to_be16(ETH_P_PAE);
        sdata->control_port_no_encrypt = false;
 +      sdata->encrypt_headroom = IEEE80211_ENCRYPT_HEADROOM;
  
        sdata->noack_map = 0;
  
@@@ -1480,8 -1497,8 +1482,8 @@@ static void ieee80211_assign_perm_addr(
                        bool used = false;
  
                        list_for_each_entry(sdata, &local->interfaces, list) {
 -                              if (memcmp(local->hw.wiphy->addresses[i].addr,
 -                                         sdata->vif.addr, ETH_ALEN) == 0) {
 +                              if (ether_addr_equal(local->hw.wiphy->addresses[i].addr,
 +                                                   sdata->vif.addr)) {
                                        used = true;
                                        break;
                                }
                        val += inc;
  
                        list_for_each_entry(sdata, &local->interfaces, list) {
 -                              if (memcmp(tmp_addr, sdata->vif.addr,
 -                                                      ETH_ALEN) == 0) {
 +                              if (ether_addr_equal(tmp_addr, sdata->vif.addr)) {
                                        used = true;
                                        break;
                                }
        mutex_unlock(&local->iflist_mtx);
  }
  
 -static void ieee80211_cleanup_sdata_stas_wk(struct work_struct *wk)
 -{
 -      struct ieee80211_sub_if_data *sdata;
 -
 -      sdata = container_of(wk, struct ieee80211_sub_if_data, cleanup_stations_wk);
 -
 -      ieee80211_cleanup_sdata_stas(sdata);
 -}
 -
  int ieee80211_if_add(struct ieee80211_local *local, const char *name,
                     struct wireless_dev **new_wdev, enum nl80211_iftype type,
                     struct vif_params *params)
  
        INIT_LIST_HEAD(&sdata->key_list);
  
 -      spin_lock_init(&sdata->cleanup_stations_lock);
 -      INIT_LIST_HEAD(&sdata->cleanup_stations);
 -      INIT_WORK(&sdata->cleanup_stations_wk, ieee80211_cleanup_sdata_stas_wk);
        INIT_DELAYED_WORK(&sdata->dfs_cac_timer_work,
                          ieee80211_dfs_cac_timer_work);
        INIT_DELAYED_WORK(&sdata->dec_tailroom_needed_wk,
        sdata->ap_power_level = IEEE80211_UNSET_POWER_LEVEL;
        sdata->user_power_level = local->user_power_level;
  
 +      sdata->encrypt_headroom = IEEE80211_ENCRYPT_HEADROOM;
 +
        /* setup type-dependent data */
        ieee80211_setup_sdata(sdata, type);
  
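
The extra void *accel_priv argument on both queue-selection callbacks follows a contemporaneous change to the ndo_select_queue prototype; the pointer matters only to hardware-accelerated transmit paths, and mac80211 ignores it. A hedged sketch of a callback written against the three-argument prototype (example_select_queue is a hypothetical driver function, not part of this patch):

#include <linux/netdevice.h>

static u16 example_select_queue(struct net_device *dev, struct sk_buff *skb,
				void *accel_priv)
{
	/* accel_priv is set only on accelerated TX paths (e.g. macvlan
	 * offload); a plain callback can disregard it.
	 */
	return skb_get_queue_mapping(skb) % dev->real_num_tx_queues;
}
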
diff --combined net/mac80211/tx.c
index 377cf974d97d15b41773b0ea7aed9ae7ebc1edba,ca7fa7f0613dba22cdc0e82d8a96e1de9ff011dc..ef3555e16cf99c7d6eada6af45232c1be6a78cdf
@@@ -19,7 -19,6 +19,7 @@@
  #include <linux/bitmap.h>
  #include <linux/rcupdate.h>
  #include <linux/export.h>
 +#include <linux/time.h>
  #include <net/net_namespace.h>
  #include <net/ieee80211_radiotap.h>
  #include <net/cfg80211.h>
@@@ -464,7 -463,6 +464,6 @@@ ieee80211_tx_h_unicast_ps_buf(struct ie
  {
        struct sta_info *sta = tx->sta;
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
-       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
        struct ieee80211_local *local = tx->local;
  
        if (unlikely(!sta))
                     !(info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER))) {
                int ac = skb_get_queue_mapping(tx->skb);
  
-               /* only deauth, disassoc and action are bufferable MMPDUs */
-               if (ieee80211_is_mgmt(hdr->frame_control) &&
-                   !ieee80211_is_deauth(hdr->frame_control) &&
-                   !ieee80211_is_disassoc(hdr->frame_control) &&
-                   !ieee80211_is_action(hdr->frame_control)) {
-                       info->flags |= IEEE80211_TX_CTL_NO_PS_BUFFER;
-                       return TX_CONTINUE;
-               }
                ps_dbg(sta->sdata, "STA %pM aid %d: PS buffer for AC %d\n",
                       sta->sta.addr, sta->sta.aid, ac);
                if (tx->local->total_ps_buffered >= TOTAL_MAX_TX_BUFFER)
  static ieee80211_tx_result debug_noinline
  ieee80211_tx_h_ps_buf(struct ieee80211_tx_data *tx)
  {
+       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
+       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
        if (unlikely(tx->flags & IEEE80211_TX_PS_BUFFERED))
                return TX_CONTINUE;
  
+       /* only deauth, disassoc and action are bufferable MMPDUs */
+       if (ieee80211_is_mgmt(hdr->frame_control) &&
+           !ieee80211_is_deauth(hdr->frame_control) &&
+           !ieee80211_is_disassoc(hdr->frame_control) &&
+           !ieee80211_is_action(hdr->frame_control)) {
+               if (tx->flags & IEEE80211_TX_UNICAST)
+                       info->flags |= IEEE80211_TX_CTL_NO_PS_BUFFER;
+               return TX_CONTINUE;
+       }
        if (tx->flags & IEEE80211_TX_UNICAST)
                return ieee80211_tx_h_unicast_ps_buf(tx);
        else
@@@ -558,8 -560,7 +561,8 @@@ ieee80211_tx_h_select_key(struct ieee80
  
        if (unlikely(info->flags & IEEE80211_TX_INTFL_DONT_ENCRYPT))
                tx->key = NULL;
 -      else if (tx->sta && (key = rcu_dereference(tx->sta->ptk)))
 +      else if (tx->sta &&
 +               (key = rcu_dereference(tx->sta->ptk[tx->sta->ptk_idx])))
                tx->key = key;
        else if (ieee80211_is_mgmt(hdr->frame_control) &&
                 is_multicast_ether_addr(hdr->addr1) &&
@@@ -842,16 -843,15 +845,16 @@@ static int ieee80211_fragment(struct ie
                rem -= fraglen;
                tmp = dev_alloc_skb(local->tx_headroom +
                                    frag_threshold +
 -                                  IEEE80211_ENCRYPT_HEADROOM +
 +                                  tx->sdata->encrypt_headroom +
                                    IEEE80211_ENCRYPT_TAILROOM);
                if (!tmp)
                        return -ENOMEM;
  
                __skb_queue_tail(&tx->skbs, tmp);
  
 -              skb_reserve(tmp, local->tx_headroom +
 -                               IEEE80211_ENCRYPT_HEADROOM);
 +              skb_reserve(tmp,
 +                          local->tx_headroom + tx->sdata->encrypt_headroom);
 +
                /* copy control information */
                memcpy(tmp->cb, skb->cb, sizeof(tmp->cb));
  
@@@ -1488,7 -1488,7 +1491,7 @@@ void ieee80211_xmit(struct ieee80211_su
  
        headroom = local->tx_headroom;
        if (may_encrypt)
 -              headroom += IEEE80211_ENCRYPT_HEADROOM;
 +              headroom += sdata->encrypt_headroom;
        headroom -= skb_headroom(skb);
        headroom = max_t(int, 0, headroom);
  
@@@ -1727,7 -1727,8 +1730,7 @@@ netdev_tx_t ieee80211_monitor_start_xmi
         * radar detection by itself. We can do that later by adding a
         * monitor flag to interfaces used for AP support.
         */
 -      if ((chan->flags & (IEEE80211_CHAN_NO_IBSS | IEEE80211_CHAN_RADAR |
 -                          IEEE80211_CHAN_PASSIVE_SCAN)))
 +      if ((chan->flags & (IEEE80211_CHAN_NO_IR | IEEE80211_CHAN_RADAR)))
                goto fail_rcu;
  
        ieee80211_xmit(sdata, skb, chan->band);
@@@ -1742,26 -1743,6 +1745,26 @@@ fail
        return NETDEV_TX_OK; /* meaning, we dealt with the skb */
  }
  
 +/*
 + * Measure Tx frame arrival time for Tx latency statistics calculation.
 + * A single Tx frame's latency is measured from the moment it enters the
 + * kernel until we receive the Tx complete confirmation and the skb is
 + * freed.
 + */
 +static void ieee80211_tx_latency_start_msrmnt(struct ieee80211_local *local,
 +                                            struct sk_buff *skb)
 +{
 +      struct timespec skb_arv;
 +      struct ieee80211_tx_latency_bin_ranges *tx_latency;
 +
 +      tx_latency = rcu_dereference(local->tx_latency);
 +      if (!tx_latency)
 +              return;
 +
 +      ktime_get_ts(&skb_arv);
 +      skb->tstamp = ktime_set(skb_arv.tv_sec, skb_arv.tv_nsec);
 +}
 +
  /**
   * ieee80211_subif_start_xmit - netif start_xmit function for Ethernet-type
   * subinterfaces (wlan#, WDS, and VLAN interfaces)
@@@ -1812,9 -1793,6 +1815,9 @@@ netdev_tx_t ieee80211_subif_start_xmit(
  
        rcu_read_lock();
  
 +      /* Measure frame arrival for Tx latency statistics calculation */
 +      ieee80211_tx_latency_start_msrmnt(local, skb);
 +
        switch (sdata->vif.type) {
        case NL80211_IFTYPE_AP_VLAN:
                sta = rcu_dereference(sdata->u.vlan.sta);
         */
  
        if (head_need > 0 || skb_cloned(skb)) {
 -              head_need += IEEE80211_ENCRYPT_HEADROOM;
 +              head_need += sdata->encrypt_headroom;
                head_need += local->tx_headroom;
                head_need = max_t(int, 0, head_need);
                if (ieee80211_skb_resize(sdata, skb, head_need, true)) {
        if (ieee80211_is_data_qos(fc)) {
                __le16 *qos_control;
  
 -              qos_control = (__le16*) skb_push(skb, 2);
 +              qos_control = (__le16 *) skb_push(skb, 2);
                memcpy(skb_push(skb, hdrlen - 2), &hdr, hdrlen - 2);
                /*
                 * Maybe we could actually set some fields here, for now just
@@@ -2323,7 -2301,7 +2326,7 @@@ static void __ieee80211_beacon_add_tim(
        if (atomic_read(&ps->num_sta_ps) > 0)
                /* in the hope that this is faster than
                 * checking byte-for-byte */
 -              have_bits = !bitmap_empty((unsigned long*)ps->tim,
 +              have_bits = !bitmap_empty((unsigned long *)ps->tim,
                                          IEEE80211_MAX_AID+1);
  
        if (ps->dtim_count == 0)
@@@ -2549,8 -2527,7 +2552,8 @@@ struct sk_buff *ieee80211_beacon_get_ti
                         */
                        skb = dev_alloc_skb(local->tx_headroom +
                                            beacon->head_len +
 -                                          beacon->tail_len + 256);
 +                                          beacon->tail_len + 256 +
 +                                          local->hw.extra_beacon_tailroom);
                        if (!skb)
                                goto out;
  
                        ieee80211_update_csa(sdata, presp);
  
  
 -              skb = dev_alloc_skb(local->tx_headroom + presp->head_len);
 +              skb = dev_alloc_skb(local->tx_headroom + presp->head_len +
 +                                  local->hw.extra_beacon_tailroom);
                if (!skb)
                        goto out;
                skb_reserve(skb, local->tx_headroom);
                        ieee80211_update_csa(sdata, bcn);
  
                if (ifmsh->sync_ops)
 -                      ifmsh->sync_ops->adjust_tbtt(
 -                                              sdata);
 +                      ifmsh->sync_ops->adjust_tbtt(sdata, bcn);
  
                skb = dev_alloc_skb(local->tx_headroom +
                                    bcn->head_len +
                                    256 + /* TIM IE */
 -                                  bcn->tail_len);
 +                                  bcn->tail_len +
 +                                  local->hw.extra_beacon_tailroom);
                if (!skb)
                        goto out;
                skb_reserve(skb, local->tx_headroom);
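
Throughout this file the fixed IEEE80211_ENCRYPT_HEADROOM reserve becomes the per-interface sdata->encrypt_headroom, but each allocation site derives the needed reserve the same way. A condensed, self-contained sketch of that arithmetic (the helper name is made up for illustration):

/* Headroom still required so the crypto code can push its header:
 * driver TX headroom plus per-interface encryption headroom, minus
 * whatever room the skb already has, clamped at zero.
 */
static int example_needed_headroom(int tx_headroom, int encrypt_headroom,
				   int current_headroom)
{
	int need = tx_headroom + encrypt_headroom - current_headroom;

	return need > 0 ? need : 0;
}
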
diff --combined net/nfc/core.c
index 02ab34132157066d9a0def95276b7abe3f8d7f71,83b9927e7d19f3b14c8db4b199385122843d6764..b675fa4a6f19078ff4f3ce3d42a15227334cf247
@@@ -16,7 -16,9 +16,7 @@@
   * GNU General Public License for more details.
   *
   * You should have received a copy of the GNU General Public License
 - * along with this program; if not, write to the
 - * Free Software Foundation, Inc.,
 - * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 + * along with this program; if not, see <http://www.gnu.org/licenses/>.
   */
  
  #define pr_fmt(fmt) KBUILD_MODNAME ": %s: " fmt, __func__
@@@ -382,7 -384,7 +382,7 @@@ int nfc_dep_link_is_up(struct nfc_dev *
  {
        dev->dep_link_up = true;
  
-       if (!dev->active_target) {
+       if (!dev->active_target && rf_mode == NFC_RF_INITIATOR) {
                struct nfc_target *target;
  
                target = nfc_find_target(dev, target_idx);
diff --combined net/sched/sch_generic.c
index 32bb942d2faad9a2922f3fcdec18770548f3d64c,7fc899a943a8fa8368415bc0c6c8a939bd042963..e82e43b69c335bccbd58e6ba4ef10b56acd8f5a6
@@@ -126,7 -126,7 +126,7 @@@ int sch_direct_xmit(struct sk_buff *skb
  
        HARD_TX_LOCK(dev, txq, smp_processor_id());
        if (!netif_xmit_frozen_or_stopped(txq))
-               ret = dev_hard_start_xmit(skb, dev, txq, NULL);
+               ret = dev_hard_start_xmit(skb, dev, txq);
  
        HARD_TX_UNLOCK(dev, txq);
  
@@@ -338,13 -338,13 +338,13 @@@ EXPORT_SYMBOL(netif_carrier_off)
     cheaper.
   */
  
 -static int noop_enqueue(struct sk_buff *skb, struct Qdisc * qdisc)
 +static int noop_enqueue(struct sk_buff *skb, struct Qdisc *qdisc)
  {
        kfree_skb(skb);
        return NET_XMIT_CN;
  }
  
 -static struct sk_buff *noop_dequeue(struct Qdisc * qdisc)
 +static struct sk_buff *noop_dequeue(struct Qdisc *qdisc)
  {
        return NULL;
  }
@@@ -718,8 -718,8 +718,8 @@@ static void attach_default_qdiscs(struc
        } else {
                qdisc = qdisc_create_dflt(txq, &mq_qdisc_ops, TC_H_ROOT);
                if (qdisc) {
 -                      qdisc->ops->attach(qdisc);
                        dev->qdisc = qdisc;
 +                      qdisc->ops->attach(qdisc);
                }
        }
  }
diff --combined net/tipc/link.c
index 471973ff134f890e03437dec4ce65b436c136da6,13b9877458201fa4d5bb14a36892825272229050..d4b5de41b682188f1cf3bcffb08a44f38a9f84ae
@@@ -1,7 -1,7 +1,7 @@@
  /*
   * net/tipc/link.c: TIPC link code
   *
 - * Copyright (c) 1996-2007, 2012, Ericsson AB
 + * Copyright (c) 1996-2007, 2012-2014, Ericsson AB
   * Copyright (c) 2004-2007, 2010-2013, Wind River Systems
   * All rights reserved.
   *
@@@ -78,8 -78,8 +78,8 @@@ static const char *link_unk_evt = "Unkn
  static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr,
                                       struct sk_buff *buf);
  static void link_recv_proto_msg(struct tipc_link *l_ptr, struct sk_buff *buf);
 -static int  link_recv_changeover_msg(struct tipc_link **l_ptr,
 -                                   struct sk_buff **buf);
 +static int  tipc_link_tunnel_rcv(struct tipc_link **l_ptr,
 +                               struct sk_buff **buf);
  static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tolerance);
  static int  link_send_sections_long(struct tipc_port *sender,
                                    struct iovec const *msg_sect,
@@@ -87,6 -87,7 +87,6 @@@
  static void link_state_event(struct tipc_link *l_ptr, u32 event);
  static void link_reset_statistics(struct tipc_link *l_ptr);
  static void link_print(struct tipc_link *l_ptr, const char *str);
 -static void link_start(struct tipc_link *l_ptr);
  static int link_send_long_buf(struct tipc_link *l_ptr, struct sk_buff *buf);
  static void tipc_link_send_sync(struct tipc_link *l);
  static void tipc_link_recv_sync(struct tipc_node *n, struct sk_buff *buf);
@@@ -277,11 -278,9 +277,11 @@@ struct tipc_link *tipc_link_create(stru
  
        tipc_node_attach_link(n_ptr, l_ptr);
  
 -      k_init_timer(&l_ptr->timer, (Handler)link_timeout, (unsigned long)l_ptr);
 +      k_init_timer(&l_ptr->timer, (Handler)link_timeout,
 +                   (unsigned long)l_ptr);
        list_add_tail(&l_ptr->link_list, &b_ptr->links);
 -      tipc_k_signal((Handler)link_start, (unsigned long)l_ptr);
 +
 +      link_state_event(l_ptr, STARTING_EVT);
  
        return l_ptr;
  }
@@@ -306,13 -305,19 +306,13 @@@ void tipc_link_delete(struct tipc_link 
        tipc_node_lock(l_ptr->owner);
        tipc_link_reset(l_ptr);
        tipc_node_detach_link(l_ptr->owner, l_ptr);
 -      tipc_link_stop(l_ptr);
 +      tipc_link_purge_queues(l_ptr);
        list_del_init(&l_ptr->link_list);
        tipc_node_unlock(l_ptr->owner);
        k_term_timer(&l_ptr->timer);
        kfree(l_ptr);
  }
  
 -static void link_start(struct tipc_link *l_ptr)
 -{
 -      tipc_node_lock(l_ptr->owner);
 -      link_state_event(l_ptr, STARTING_EVT);
 -      tipc_node_unlock(l_ptr->owner);
 -}
  
  /**
   * link_schedule_port - schedule port for deferred sending
@@@ -381,7 -386,14 +381,7 @@@ exit
   */
  static void link_release_outqueue(struct tipc_link *l_ptr)
  {
 -      struct sk_buff *buf = l_ptr->first_out;
 -      struct sk_buff *next;
 -
 -      while (buf) {
 -              next = buf->next;
 -              kfree_skb(buf);
 -              buf = next;
 -      }
 +      kfree_skb_list(l_ptr->first_out);
        l_ptr->first_out = NULL;
        l_ptr->out_queue_size = 0;
  }
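
The loop removed above (and several more like it later in this file) is replaced by kfree_skb_list(), which walks an skb ->next chain and frees every buffer in one call. A minimal kernel-style sketch of the idiom (example_release_queue is a hypothetical stand-in for helpers such as link_release_outqueue(); not buildable standalone):

#include <linux/skbuff.h>

static void example_release_queue(struct sk_buff **head)
{
	kfree_skb_list(*head);	/* frees the whole ->next-linked chain */
	*head = NULL;		/* the chain must not be touched again */
}
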
@@@ -398,20 -410,37 +398,20 @@@ void tipc_link_reset_fragments(struct t
  }
  
  /**
 - * tipc_link_stop - purge all inbound and outbound messages associated with link
 + * tipc_link_purge_queues - purge all pkt queues associated with link
   * @l_ptr: pointer to link
   */
 -void tipc_link_stop(struct tipc_link *l_ptr)
 +void tipc_link_purge_queues(struct tipc_link *l_ptr)
  {
 -      struct sk_buff *buf;
 -      struct sk_buff *next;
 -
 -      buf = l_ptr->oldest_deferred_in;
 -      while (buf) {
 -              next = buf->next;
 -              kfree_skb(buf);
 -              buf = next;
 -      }
 -
 -      buf = l_ptr->first_out;
 -      while (buf) {
 -              next = buf->next;
 -              kfree_skb(buf);
 -              buf = next;
 -      }
 -
 +      kfree_skb_list(l_ptr->oldest_deferred_in);
 +      kfree_skb_list(l_ptr->first_out);
        tipc_link_reset_fragments(l_ptr);
 -
        kfree_skb(l_ptr->proto_msg_queue);
        l_ptr->proto_msg_queue = NULL;
  }
  
  void tipc_link_reset(struct tipc_link *l_ptr)
  {
 -      struct sk_buff *buf;
        u32 prev_state = l_ptr->state;
        u32 checkpoint = l_ptr->next_in_no;
        int was_active_link = tipc_link_is_active(l_ptr);
        tipc_node_link_down(l_ptr->owner, l_ptr);
        tipc_bearer_remove_dest(l_ptr->b_ptr, l_ptr->addr);
  
 -      if (was_active_link && tipc_node_active_links(l_ptr->owner) &&
 -          l_ptr->owner->permit_changeover) {
 +      if (was_active_link && tipc_node_active_links(l_ptr->owner)) {
                l_ptr->reset_checkpoint = checkpoint;
                l_ptr->exp_msg_count = START_CHANGEOVER;
        }
        link_release_outqueue(l_ptr);
        kfree_skb(l_ptr->proto_msg_queue);
        l_ptr->proto_msg_queue = NULL;
 -      buf = l_ptr->oldest_deferred_in;
 -      while (buf) {
 -              struct sk_buff *next = buf->next;
 -              kfree_skb(buf);
 -              buf = next;
 -      }
 +      kfree_skb_list(l_ptr->oldest_deferred_in);
        if (!list_empty(&l_ptr->waiting_ports))
                tipc_link_wakeup_ports(l_ptr, 1);
  
@@@ -482,11 -517,10 +482,11 @@@ static void link_state_event(struct tip
        if (!l_ptr->started && (event != STARTING_EVT))
                return;         /* Not yet. */
  
 -      if (link_blocked(l_ptr)) {
 +      /* Check whether changeover is going on */
 +      if (l_ptr->exp_msg_count) {
                if (event == TIMEOUT_EVT)
                        link_set_timer(l_ptr, cont_intv);
 -              return;   /* Changeover going on */
 +              return;
        }
  
        switch (l_ptr->state) {
@@@ -756,7 -790,8 +756,7 @@@ int tipc_link_send_buf(struct tipc_lin
                return link_send_long_buf(l_ptr, buf);
  
        /* Packet can be queued or sent. */
 -      if (likely(!tipc_bearer_blocked(l_ptr->b_ptr) &&
 -                 !link_congested(l_ptr))) {
 +      if (likely(!link_congested(l_ptr))) {
                link_add_to_outqueue(l_ptr, buf, msg);
  
                tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr);
@@@ -922,13 -957,14 +922,13 @@@ static int link_send_buf_fast(struct ti
  
        if (likely(!link_congested(l_ptr))) {
                if (likely(msg_size(msg) <= l_ptr->max_pkt)) {
 -                      if (likely(!tipc_bearer_blocked(l_ptr->b_ptr))) {
 -                              link_add_to_outqueue(l_ptr, buf, msg);
 -                              tipc_bearer_send(l_ptr->b_ptr, buf,
 -                                               &l_ptr->media_addr);
 -                              l_ptr->unacked_window = 0;
 -                              return res;
 -                      }
 -              } else
 +                      link_add_to_outqueue(l_ptr, buf, msg);
 +                      tipc_bearer_send(l_ptr->b_ptr, buf,
 +                                       &l_ptr->media_addr);
 +                      l_ptr->unacked_window = 0;
 +                      return res;
 +              }
 +              else
                        *used_max_pkt = l_ptr->max_pkt;
        }
        return tipc_link_send_buf(l_ptr, buf);  /* All other cases */
@@@ -977,7 -1013,8 +977,7 @@@ exit
                        }
  
                        /* Exit if link (or bearer) is congested */
 -                      if (link_congested(l_ptr) ||
 -                          tipc_bearer_blocked(l_ptr->b_ptr)) {
 +                      if (link_congested(l_ptr)) {
                                res = link_schedule_port(l_ptr,
                                                         sender->ref, res);
                                goto exit;
@@@ -1090,7 -1127,10 +1090,7 @@@ again
                if (copy_from_user(buf->data + fragm_crs, sect_crs, sz)) {
                        res = -EFAULT;
  error:
 -                      for (; buf_chain; buf_chain = buf) {
 -                              buf = buf_chain->next;
 -                              kfree_skb(buf_chain);
 -                      }
 +                      kfree_skb_list(buf_chain);
                        return res;
                }
                sect_crs += sz;
                if (l_ptr->max_pkt < max_pkt) {
                        sender->max_pkt = l_ptr->max_pkt;
                        tipc_node_unlock(node);
 -                      for (; buf_chain; buf_chain = buf) {
 -                              buf = buf_chain->next;
 -                              kfree_skb(buf_chain);
 -                      }
 +                      kfree_skb_list(buf_chain);
                        goto again;
                }
        } else {
  reject:
 -              for (; buf_chain; buf_chain = buf) {
 -                      buf = buf_chain->next;
 -                      kfree_skb(buf_chain);
 -              }
 +              kfree_skb_list(buf_chain);
                return tipc_port_reject_sections(sender, hdr, msg_sect,
                                                 len, TIPC_ERR_NO_NODE);
        }
  /*
   * tipc_link_push_packet: Push one unsent packet to the media
   */
 -u32 tipc_link_push_packet(struct tipc_link *l_ptr)
 +static u32 tipc_link_push_packet(struct tipc_link *l_ptr)
  {
        struct sk_buff *buf = l_ptr->first_out;
        u32 r_q_size = l_ptr->retransm_queue_size;
@@@ -1235,6 -1281,9 +1235,6 @@@ void tipc_link_push_queue(struct tipc_l
  {
        u32 res;
  
 -      if (tipc_bearer_blocked(l_ptr->b_ptr))
 -              return;
 -
        do {
                res = tipc_link_push_packet(l_ptr);
        } while (!res);
@@@ -1321,15 -1370,26 +1321,15 @@@ void tipc_link_retransmit(struct tipc_l
  
        msg = buf_msg(buf);
  
 -      if (tipc_bearer_blocked(l_ptr->b_ptr)) {
 -              if (l_ptr->retransm_queue_size == 0) {
 -                      l_ptr->retransm_queue_head = msg_seqno(msg);
 -                      l_ptr->retransm_queue_size = retransmits;
 -              } else {
 -                      pr_err("Unexpected retransmit on link %s (qsize=%d)\n",
 -                             l_ptr->name, l_ptr->retransm_queue_size);
 +      /* Detect repeated retransmit failures */
 +      if (l_ptr->last_retransmitted == msg_seqno(msg)) {
 +              if (++l_ptr->stale_count > 100) {
 +                      link_retransmit_failure(l_ptr, buf);
 +                      return;
                }
 -              return;
        } else {
 -              /* Detect repeated retransmit failures on unblocked bearer */
 -              if (l_ptr->last_retransmitted == msg_seqno(msg)) {
 -                      if (++l_ptr->stale_count > 100) {
 -                              link_retransmit_failure(l_ptr, buf);
 -                              return;
 -                      }
 -              } else {
 -                      l_ptr->last_retransmitted = msg_seqno(msg);
 -                      l_ptr->stale_count = 1;
 -              }
 +              l_ptr->last_retransmitted = msg_seqno(msg);
 +              l_ptr->stale_count = 1;
        }
  
        while (retransmits && (buf != l_ptr->next_out) && buf) {
@@@ -1416,14 -1476,14 +1416,14 @@@ static int link_recv_buf_validate(struc
  }
  
  /**
 - * tipc_recv_msg - process TIPC messages arriving from off-node
 + * tipc_rcv - process TIPC packets/messages arriving from off-node
   * @head: pointer to message buffer chain
   * @tb_ptr: pointer to bearer message arrived on
   *
   * Invoked with no locks held.  Bearer pointer must point to a valid bearer
   * structure (i.e. cannot be NULL), but bearer can be inactive.
   */
 -void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *b_ptr)
 +void tipc_rcv(struct sk_buff *head, struct tipc_bearer *b_ptr)
  {
        read_lock_bh(&tipc_net_lock);
        while (head) {
                int type;
  
                head = head->next;
+               buf->next = NULL;
  
                /* Ensure bearer is still enabled */
                if (unlikely(!b_ptr->active))
@@@ -1597,7 -1658,7 +1598,7 @@@ deliver
                        continue;
                case CHANGEOVER_PROTOCOL:
                        type = msg_type(msg);
 -                      if (link_recv_changeover_msg(&l_ptr, &buf)) {
 +                      if (tipc_link_tunnel_rcv(&l_ptr, &buf)) {
                                msg = buf_msg(buf);
                                seq_no = msg_seqno(msg);
                                if (type == ORIGINAL_MSG)
@@@ -1726,8 -1787,7 +1727,8 @@@ void tipc_link_send_proto_msg(struct ti
                l_ptr->proto_msg_queue = NULL;
        }
  
 -      if (link_blocked(l_ptr))
 +      /* Don't send protocol message during link changeover */
 +      if (l_ptr->exp_msg_count)
                return;
  
        /* Abort non-RESET send if communication with node is prohibited */
        skb_copy_to_linear_data(buf, msg, sizeof(l_ptr->proto_msg));
        buf->priority = TC_PRIO_CONTROL;
  
 -      /* Defer message if bearer is already blocked */
 -      if (tipc_bearer_blocked(l_ptr->b_ptr)) {
 -              l_ptr->proto_msg_queue = buf;
 -              return;
 -      }
 -
        tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr);
        l_ptr->unacked_window = 0;
        kfree_skb(buf);
@@@ -1820,8 -1886,7 +1821,8 @@@ static void link_recv_proto_msg(struct 
        u32 msg_tol;
        struct tipc_msg *msg = buf_msg(buf);
  
 -      if (link_blocked(l_ptr))
 +      /* Discard protocol message during link changeover */
 +      if (l_ptr->exp_msg_count)
                goto exit;
  
        /* record unnumbered packet arrival (force mismatch on next timeout) */
                if (tipc_own_addr > msg_prevnode(msg))
                        l_ptr->b_ptr->net_plane = msg_net_plane(msg);
  
 -      l_ptr->owner->permit_changeover = msg_redundant_link(msg);
 -
        switch (msg_type(msg)) {
  
        case RESET_MSG:
@@@ -1946,13 -2013,13 +1947,13 @@@ exit
  }
  
  
 -/*
 - * tipc_link_tunnel(): Send one message via a link belonging to
 - * another bearer. Owner node is locked.
 +/* tipc_link_tunnel_xmit(): Tunnel one packet via a link belonging to
 + * a different bearer. Owner node is locked.
   */
 -static void tipc_link_tunnel(struct tipc_link *l_ptr,
 -                           struct tipc_msg *tunnel_hdr, struct tipc_msg *msg,
 -                           u32 selector)
 +static void tipc_link_tunnel_xmit(struct tipc_link *l_ptr,
 +                                struct tipc_msg *tunnel_hdr,
 +                                struct tipc_msg *msg,
 +                                u32 selector)
  {
        struct tipc_link *tunnel;
        struct sk_buff *buf;
  }
  
  
 -
 -/*
 - * changeover(): Send whole message queue via the remaining link
 - *               Owner node is locked.
 +/* tipc_link_failover_send_queue(): A link has gone down, but a second
 + * link is still active. We can do failover. Tunnel the failing link's
 + * whole send queue via the remaining link. This way, we don't lose
 + * any packets, and sequence order is preserved for subsequent traffic
 + * sent over the remaining link. Owner node is locked.
   */
 -void tipc_link_changeover(struct tipc_link *l_ptr)
 +void tipc_link_failover_send_queue(struct tipc_link *l_ptr)
  {
        u32 msgcount = l_ptr->out_queue_size;
        struct sk_buff *crs = l_ptr->first_out;
        if (!tunnel)
                return;
  
 -      if (!l_ptr->owner->permit_changeover) {
 -              pr_warn("%speer did not permit changeover\n", link_co_err);
 -              return;
 -      }
 -
        tipc_msg_init(&tunnel_hdr, CHANGEOVER_PROTOCOL,
                 ORIGINAL_MSG, INT_H_SIZE, l_ptr->addr);
        msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id);
                        msgcount = msg_msgcnt(msg);
                        while (msgcount--) {
                                msg_set_seqno(m, msg_seqno(msg));
 -                              tipc_link_tunnel(l_ptr, &tunnel_hdr, m,
 -                                               msg_link_selector(m));
 +                              tipc_link_tunnel_xmit(l_ptr, &tunnel_hdr, m,
 +                                                    msg_link_selector(m));
                                pos += align(msg_size(m));
                                m = (struct tipc_msg *)pos;
                        }
                } else {
 -                      tipc_link_tunnel(l_ptr, &tunnel_hdr, msg,
 -                                       msg_link_selector(msg));
 +                      tipc_link_tunnel_xmit(l_ptr, &tunnel_hdr, msg,
 +                                            msg_link_selector(msg));
                }
                crs = crs->next;
        }
  }
  
 -void tipc_link_send_duplicate(struct tipc_link *l_ptr, struct tipc_link *tunnel)
 +/* tipc_link_dup_send_queue(): A second link has become active. Tunnel a
 + * duplicate of the first link's send queue via the new link. This way, we
 + * are guaranteed that currently queued packets from a socket are delivered
 + * before future traffic from the same socket, even if the latter uses the
 + * new link. The last arriving copy of each duplicate packet is dropped at
 + * the receiving end by the regular protocol check, so packet cardinality
 + * and sequence order are preserved per sender/receiver socket pair.
 + * Owner node is locked.
 + */
 +void tipc_link_dup_send_queue(struct tipc_link *l_ptr,
 +                            struct tipc_link *tunnel)
  {
        struct sk_buff *iter;
        struct tipc_msg tunnel_hdr;
@@@ -2104,14 -2165,12 +2105,14 @@@ static struct sk_buff *buf_extract(stru
        return eb;
  }
  
 -/*
 - *  link_recv_changeover_msg(): Receive tunneled packet sent
 - *  via other link. Node is locked. Return extracted buffer.
 +/*  tipc_link_tunnel_rcv(): Receive a tunneled packet, sent
 + *  via another link as a result of a failover (ORIGINAL_MSG) or
 + *  a new active link (DUPLICATE_MSG). Failover packets are
 + *  returned to the active link for delivery upwards.
 + *  Owner node is locked.
   */
 -static int link_recv_changeover_msg(struct tipc_link **l_ptr,
 -                                  struct sk_buff **buf)
 +static int tipc_link_tunnel_rcv(struct tipc_link **l_ptr,
 +                              struct sk_buff **buf)
  {
        struct sk_buff *tunnel_buf = *buf;
        struct tipc_link *dest_link;
@@@ -2248,7 -2307,11 +2249,7 @@@ static int link_send_long_buf(struct ti
                fragm = tipc_buf_acquire(fragm_sz + INT_H_SIZE);
                if (fragm == NULL) {
                        kfree_skb(buf);
 -                      while (buf_chain) {
 -                              buf = buf_chain;
 -                              buf_chain = buf_chain->next;
 -                              kfree_skb(buf);
 -                      }
 +                      kfree_skb_list(buf_chain);
                        return -ENOMEM;
                }
                msg_set_size(&fragm_hdr, fragm_sz + INT_H_SIZE);