Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
author David S. Miller <davem@davemloft.net>
Tue, 30 Apr 2013 07:50:54 +0000 (03:50 -0400)
committer David S. Miller <davem@davemloft.net>
Tue, 30 Apr 2013 07:55:20 +0000 (03:55 -0400)
Conflicts:
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
drivers/net/ethernet/emulex/benet/be.h
include/net/tcp.h
net/mac802154/mac802154.h

Most of the conflicts were minor overlapping changes.

The be2net driver brought in some fixes that added __vlan_put_tag
calls, which in net-next take an additional VLAN protocol argument.
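
A minimal sketch of the signature change behind those conflicts (old
net calling convention vs. new net-next convention; the real
prototypes live in include/linux/if_vlan.h, and vlan_tci here is just
an illustrative variable):

	/* net (old): the 802.1Q protocol is implicit */
	skb = __vlan_put_tag(skb, vlan_tci);

	/* net-next (new): the VLAN protocol is passed explicitly,
	 * matching the htons(ETH_P_8021Q) conversions throughout
	 * this merge.
	 */
	skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);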

Signed-off-by: David S. Miller <davem@davemloft.net>
20 files changed:
drivers/net/bonding/bond_main.c
drivers/net/caif/caif_spi.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
drivers/net/ethernet/emulex/benet/be.h
drivers/net/ethernet/emulex/benet/be_cmds.c
drivers/net/ethernet/emulex/benet/be_cmds.h
drivers/net/ethernet/emulex/benet/be_ethtool.c
drivers/net/ethernet/emulex/benet/be_main.c
drivers/net/ethernet/freescale/gianfar_ptp.c
drivers/net/ethernet/realtek/r8169.c
drivers/net/tun.c
net/batman-adv/routing.c
net/core/datagram.c
net/ipv4/tcp_ipv4.c
net/mac802154/mac802154.h
net/netfilter/ipvs/ip_vs_pe_sip.c
net/nfc/llcp_sock.c
net/unix/af_unix.c

diff --cc drivers/net/bonding/bond_main.c
index 532153db1f9c7026235b47bd963b8e06391c5dfd,7db40de1b41f441690589a4d0793e092c9d3c898..d0aade04e49aff739294de120d44defaba46fbf2
@@@ -428,15 -428,14 +428,15 @@@ int bond_dev_queue_xmit(struct bonding 
   * @bond_dev: bonding net device that got called
   * @vid: vlan id being added
   */
 -static int bond_vlan_rx_add_vid(struct net_device *bond_dev, uint16_t vid)
 +static int bond_vlan_rx_add_vid(struct net_device *bond_dev,
 +                              __be16 proto, u16 vid)
  {
        struct bonding *bond = netdev_priv(bond_dev);
        struct slave *slave, *stop_at;
        int i, res;
  
        bond_for_each_slave(bond, slave, i) {
 -              res = vlan_vid_add(slave->dev, vid);
 +              res = vlan_vid_add(slave->dev, proto, vid);
                if (res)
                        goto unwind;
        }
@@@ -454,7 -453,7 +454,7 @@@ unwind
        /* unwind from head to the slave that failed */
        stop_at = slave;
        bond_for_each_slave_from_to(bond, slave, i, bond->first_slave, stop_at)
 -              vlan_vid_del(slave->dev, vid);
 +              vlan_vid_del(slave->dev, proto, vid);
  
        return res;
  }
   * @bond_dev: bonding net device that got called
   * @vid: vlan id being removed
   */
 -static int bond_vlan_rx_kill_vid(struct net_device *bond_dev, uint16_t vid)
 +static int bond_vlan_rx_kill_vid(struct net_device *bond_dev,
 +                               __be16 proto, u16 vid)
  {
        struct bonding *bond = netdev_priv(bond_dev);
        struct slave *slave;
        int i, res;
  
        bond_for_each_slave(bond, slave, i)
 -              vlan_vid_del(slave->dev, vid);
 +              vlan_vid_del(slave->dev, proto, vid);
  
        res = bond_del_vlan(bond, vid);
        if (res) {
@@@ -490,8 -488,7 +490,8 @@@ static void bond_add_vlans_on_slave(str
        int res;
  
        list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
 -              res = vlan_vid_add(slave_dev, vlan->vlan_id);
 +              res = vlan_vid_add(slave_dev, htons(ETH_P_8021Q),
 +                                 vlan->vlan_id);
                if (res)
                        pr_warning("%s: Failed to add vlan id %d to device %s\n",
                                   bond->dev->name, vlan->vlan_id,
@@@ -507,7 -504,7 +507,7 @@@ static void bond_del_vlans_from_slave(s
        list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
                if (!vlan->vlan_id)
                        continue;
 -              vlan_vid_del(slave_dev, vlan->vlan_id);
 +              vlan_vid_del(slave_dev, htons(ETH_P_8021Q), vlan->vlan_id);
        }
  }
  
@@@ -782,7 -779,7 +782,7 @@@ static void bond_resend_igmp_join_reque
  
        /* rejoin all groups on vlan devices */
        list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
 -              vlan_dev = __vlan_find_dev_deep(bond_dev,
 +              vlan_dev = __vlan_find_dev_deep(bond_dev, htons(ETH_P_8021Q),
                                                vlan->vlan_id);
                if (vlan_dev)
                        __bond_resend_igmp_join_requests(vlan_dev);
@@@ -799,8 -796,9 +799,8 @@@ static void bond_resend_igmp_join_reque
  {
        struct bonding *bond = container_of(work, struct bonding,
                                            mcast_work.work);
 -      rcu_read_lock();
 +
        bond_resend_igmp_join_requests(bond);
 -      rcu_read_unlock();
  }
  
  /*
@@@ -1917,14 -1915,16 +1917,16 @@@ err_detach
        bond_detach_slave(bond, new_slave);
        if (bond->primary_slave == new_slave)
                bond->primary_slave = NULL;
-       write_unlock_bh(&bond->lock);
        if (bond->curr_active_slave == new_slave) {
+               bond_change_active_slave(bond, NULL);
+               write_unlock_bh(&bond->lock);
                read_lock(&bond->lock);
                write_lock_bh(&bond->curr_slave_lock);
-               bond_change_active_slave(bond, NULL);
                bond_select_active_slave(bond);
                write_unlock_bh(&bond->curr_slave_lock);
                read_unlock(&bond->lock);
+       } else {
+               write_unlock_bh(&bond->lock);
        }
        slave_disable_netpoll(new_slave);
  
@@@ -2534,8 -2534,7 +2536,8 @@@ static int bond_has_this_ip(struct bond
  
        list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
                rcu_read_lock();
 -              vlan_dev = __vlan_find_dev_deep(bond->dev, vlan->vlan_id);
 +              vlan_dev = __vlan_find_dev_deep(bond->dev, htons(ETH_P_8021Q),
 +                                              vlan->vlan_id);
                rcu_read_unlock();
                if (vlan_dev && ip == bond_confirm_addr(vlan_dev, 0, ip))
                        return 1;
@@@ -2564,7 -2563,7 +2566,7 @@@ static void bond_arp_send(struct net_de
                return;
        }
        if (vlan_id) {
 -              skb = vlan_put_tag(skb, vlan_id);
 +              skb = vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
                if (!skb) {
                        pr_err("failed to insert VLAN tag\n");
                        return;
@@@ -2626,7 -2625,6 +2628,7 @@@ static void bond_arp_send_all(struct bo
                list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
                        rcu_read_lock();
                        vlan_dev = __vlan_find_dev_deep(bond->dev,
 +                                                      htons(ETH_P_8021Q),
                                                        vlan->vlan_id);
                        rcu_read_unlock();
                        if (vlan_dev == rt->dst.dev) {
@@@ -4262,37 -4260,6 +4264,37 @@@ void bond_set_mode_ops(struct bonding *
        }
  }
  
 +static int bond_ethtool_get_settings(struct net_device *bond_dev,
 +                                   struct ethtool_cmd *ecmd)
 +{
 +      struct bonding *bond = netdev_priv(bond_dev);
 +      struct slave *slave;
 +      int i;
 +      unsigned long speed = 0;
 +
 +      ecmd->duplex = DUPLEX_UNKNOWN;
 +      ecmd->port = PORT_OTHER;
 +
 +      /* Since SLAVE_IS_OK returns false for all inactive or down slaves, we
 +       * do not need to check mode.  Though link speed might not represent
 +       * the true receive or transmit bandwidth (not all modes are symmetric)
 +       * this is an accurate maximum.
 +       */
 +      read_lock(&bond->lock);
 +      bond_for_each_slave(bond, slave, i) {
 +              if (SLAVE_IS_OK(slave)) {
 +                      if (slave->speed != SPEED_UNKNOWN)
 +                              speed += slave->speed;
 +                      if (ecmd->duplex == DUPLEX_UNKNOWN &&
 +                          slave->duplex != DUPLEX_UNKNOWN)
 +                              ecmd->duplex = slave->duplex;
 +              }
 +      }
 +      ethtool_cmd_speed_set(ecmd, speed ? : SPEED_UNKNOWN);
 +      read_unlock(&bond->lock);
 +      return 0;
 +}
 +
  static void bond_ethtool_get_drvinfo(struct net_device *bond_dev,
                                     struct ethtool_drvinfo *drvinfo)
  {
  
  static const struct ethtool_ops bond_ethtool_ops = {
        .get_drvinfo            = bond_ethtool_get_drvinfo,
 +      .get_settings           = bond_ethtool_get_settings,
        .get_link               = ethtool_op_get_link,
  };
  
@@@ -4395,9 -4361,9 +4397,9 @@@ static void bond_setup(struct net_devic
         */
  
        bond_dev->hw_features = BOND_VLAN_FEATURES |
 -                              NETIF_F_HW_VLAN_TX |
 -                              NETIF_F_HW_VLAN_RX |
 -                              NETIF_F_HW_VLAN_FILTER;
 +                              NETIF_F_HW_VLAN_CTAG_TX |
 +                              NETIF_F_HW_VLAN_CTAG_RX |
 +                              NETIF_F_HW_VLAN_CTAG_FILTER;
  
        bond_dev->hw_features &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_HW_CSUM);
        bond_dev->features |= bond_dev->hw_features;
diff --cc drivers/net/caif/caif_spi.c
index 2fb279a63c507d8f15e9e09b7c01bf86bc7b7980,ae7e756c6c655a21b15cbfffc8fbfda6983c9b7f..155db68e13bae83ce006d7835bdbc93d38cdd878
@@@ -1,6 -1,7 +1,6 @@@
  /*
   * Copyright (C) ST-Ericsson AB 2010
 - * Contact: Sjur Brendeland / sjur.brandeland@stericsson.com
 - * Author:  Daniel Martensson / Daniel.Martensson@stericsson.com
 + * Author:  Daniel Martensson
   * License terms: GNU General Public License (GPL) version 2.
   */
  
@@@ -28,7 -29,7 +28,7 @@@
  #endif /* CONFIG_CAIF_SPI_SYNC */
  
  MODULE_LICENSE("GPL");
 -MODULE_AUTHOR("Daniel Martensson<daniel.martensson@stericsson.com>");
 +MODULE_AUTHOR("Daniel Martensson");
  MODULE_DESCRIPTION("CAIF SPI driver");
  
  /* Returns the number of padding bytes for alignment. */
@@@ -863,6 -864,7 +863,7 @@@ static int __init cfspi_init_module(voi
        driver_remove_file(&cfspi_spi_driver.driver,
                           &driver_attr_up_head_align);
   err_create_up_head_align:
+       platform_driver_unregister(&cfspi_spi_driver);
   err_dev_register:
        return result;
  }
diff --cc drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 466b512cda4fdc0c9324c0f726f9e208d2dc548b,51a6030138e368455e3536564ad9011430505d8b..b8fbe266ab68f1619a18b7e49e9d6df19a5f0625
@@@ -451,8 -451,7 +451,8 @@@ static void bnx2x_tpa_start(struct bnx2
   * Compute number of aggregated segments, and gso_type.
   */
  static void bnx2x_set_gro_params(struct sk_buff *skb, u16 parsing_flags,
 -                               u16 len_on_bd, unsigned int pkt_len)
 +                               u16 len_on_bd, unsigned int pkt_len,
 +                               u16 num_of_coalesced_segs)
  {
        /* TPA aggregation won't have either IP options or TCP options
         * other than timestamp or IPv6 extension headers.
        /* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
         * to skb_shinfo(skb)->gso_segs
         */
 -      NAPI_GRO_CB(skb)->count = DIV_ROUND_UP(pkt_len - hdrs_len,
 -                                             skb_shinfo(skb)->gso_size);
 +      NAPI_GRO_CB(skb)->count = num_of_coalesced_segs;
  }
  
  static int bnx2x_alloc_rx_sge(struct bnx2x *bp,
@@@ -537,8 -537,7 +537,8 @@@ static int bnx2x_fill_frag_skb(struct b
        /* This is needed in order to enable forwarding support */
        if (frag_size)
                bnx2x_set_gro_params(skb, tpa_info->parsing_flags, len_on_bd,
 -                                   le16_to_cpu(cqe->pkt_len));
 +                                   le16_to_cpu(cqe->pkt_len),
 +                                   le16_to_cpu(cqe->num_of_coalesced_segs));
  
  #ifdef BNX2X_STOP_ON_ERROR
        if (pages > min_t(u32, 8, MAX_SKB_FRAGS) * SGE_PAGES) {
@@@ -642,14 -641,6 +642,14 @@@ static void bnx2x_gro_ipv6_csum(struct 
        th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
                                  &iph->saddr, &iph->daddr, 0);
  }
 +
 +static void bnx2x_gro_csum(struct bnx2x *bp, struct sk_buff *skb,
 +                          void (*gro_func)(struct bnx2x*, struct sk_buff*))
 +{
 +      skb_set_network_header(skb, 0);
 +      gro_func(bp, skb);
 +      tcp_gro_complete(skb);
 +}
  #endif
  
  static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp,
  {
  #ifdef CONFIG_INET
        if (skb_shinfo(skb)->gso_size) {
 -              skb_set_network_header(skb, 0);
                switch (be16_to_cpu(skb->protocol)) {
                case ETH_P_IP:
 -                      bnx2x_gro_ip_csum(bp, skb);
 +                      bnx2x_gro_csum(bp, skb, bnx2x_gro_ip_csum);
                        break;
                case ETH_P_IPV6:
 -                      bnx2x_gro_ipv6_csum(bp, skb);
 +                      bnx2x_gro_csum(bp, skb, bnx2x_gro_ipv6_csum);
                        break;
                default:
 -                      BNX2X_ERR("FW GRO supports only IPv4/IPv6, not 0x%04x\n",
 +                      BNX2X_ERR("Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n",
                                  be16_to_cpu(skb->protocol));
                }
 -              tcp_gro_complete(skb);
        }
  #endif
        napi_gro_receive(&fp->napi, skb);
@@@ -725,7 -718,7 +725,7 @@@ static void bnx2x_tpa_stop(struct bnx2
                if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
                                         skb, cqe, cqe_idx)) {
                        if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
 -                              __vlan_hwaccel_put_tag(skb, tpa_info->vlan_tag);
 +                              __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tpa_info->vlan_tag);
                        bnx2x_gro_receive(bp, fp, skb);
                } else {
                        DP(NETIF_MSG_RX_STATUS,
@@@ -1000,7 -993,7 +1000,7 @@@ reuse_rx
  
                if (le16_to_cpu(cqe_fp->pars_flags.flags) &
                    PARSING_FLAGS_VLAN)
 -                      __vlan_hwaccel_put_tag(skb,
 +                      __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
                                               le16_to_cpu(cqe_fp->vlan_tag));
                napi_gro_receive(&fp->napi, skb);
  
@@@ -1044,6 -1037,7 +1044,7 @@@ static irqreturn_t bnx2x_msix_fp_int(in
        DP(NETIF_MSG_INTR,
           "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n",
           fp->index, fp->fw_sb_id, fp->igu_sb_id);
        bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
  
  #ifdef BNX2X_STOP_ON_ERROR
@@@ -1725,7 -1719,7 +1726,7 @@@ static int bnx2x_req_irq(struct bnx2x *
        return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev);
  }
  
- static int bnx2x_setup_irqs(struct bnx2x *bp)
+ int bnx2x_setup_irqs(struct bnx2x *bp)
  {
        int rc = 0;
        if (bp->flags & USING_MSIX_FLAG &&
@@@ -2016,7 -2010,7 +2017,7 @@@ static int bnx2x_init_hw(struct bnx2x *
   * Cleans the objects that have internal lists without sending
   * ramrods. Should be run when interrupts are disabled.
   */
 -static void bnx2x_squeeze_objects(struct bnx2x *bp)
 +void bnx2x_squeeze_objects(struct bnx2x *bp)
  {
        int rc;
        unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
@@@ -2581,6 -2575,8 +2582,8 @@@ int bnx2x_nic_load(struct bnx2x *bp, in
                }
        }
  
+       bnx2x_pre_irq_nic_init(bp);
        /* Connect to IRQs */
        rc = bnx2x_setup_irqs(bp);
        if (rc) {
                LOAD_ERROR_EXIT(bp, load_error2);
        }
  
-       /* Setup NIC internals and enable interrupts */
-       bnx2x_nic_init(bp, load_code);
        /* Init per-function objects */
        if (IS_PF(bp)) {
+               /* Setup NIC internals and enable interrupts */
+               bnx2x_post_irq_nic_init(bp, load_code);
                bnx2x_init_bp_objs(bp);
                bnx2x_iov_nic_init(bp);
  
        if (IS_PF(bp))
                rc = bnx2x_set_eth_mac(bp, true);
        else /* vf */
 -              rc = bnx2x_vfpf_set_mac(bp);
 +              rc = bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr, bp->fp->index,
 +                                         true);
        if (rc) {
                BNX2X_ERR("Setting Ethernet MAC failed\n");
                LOAD_ERROR_EXIT(bp, load_error3);
@@@ -2785,7 -2780,7 +2788,7 @@@ load_error0
  #endif /* ! BNX2X_STOP_ON_ERROR */
  }
  
 -static int bnx2x_drain_tx_queues(struct bnx2x *bp)
 +int bnx2x_drain_tx_queues(struct bnx2x *bp)
  {
        u8 rc = 0, cos, i;
  
@@@ -2934,9 -2929,9 +2937,9 @@@ int bnx2x_nic_unload(struct bnx2x *bp, 
                bnx2x_free_fp_mem_cnic(bp);
  
        if (IS_PF(bp)) {
 -              bnx2x_free_mem(bp);
                if (CNIC_LOADED(bp))
                        bnx2x_free_mem_cnic(bp);
 +              bnx2x_free_mem(bp);
        }
        bp->state = BNX2X_STATE_CLOSED;
        bp->cnic_loaded = false;
@@@ -3097,11 -3092,11 +3100,11 @@@ int bnx2x_poll(struct napi_struct *napi
   * to ease the pain of our fellow microcode engineers
   * we use one mapping for both BDs
   */
 -static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
 -                                 struct bnx2x_fp_txdata *txdata,
 -                                 struct sw_tx_bd *tx_buf,
 -                                 struct eth_tx_start_bd **tx_bd, u16 hlen,
 -                                 u16 bd_prod, int nbd)
 +static u16 bnx2x_tx_split(struct bnx2x *bp,
 +                        struct bnx2x_fp_txdata *txdata,
 +                        struct sw_tx_bd *tx_buf,
 +                        struct eth_tx_start_bd **tx_bd, u16 hlen,
 +                        u16 bd_prod)
  {
        struct eth_tx_start_bd *h_tx_bd = *tx_bd;
        struct eth_tx_bd *d_tx_bd;
        int old_len = le16_to_cpu(h_tx_bd->nbytes);
  
        /* first fix first BD */
 -      h_tx_bd->nbd = cpu_to_le16(nbd);
        h_tx_bd->nbytes = cpu_to_le16(hlen);
  
 -      DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x) nbd %d\n",
 -         h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo, h_tx_bd->nbd);
 +      DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x)\n",
 +         h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo);
  
        /* now get a new data BD
         * (after the pbd) and fill it */
  
  #define bswab32(b32) ((__force __le32) swab32((__force __u32) (b32)))
  #define bswab16(b16) ((__force __le16) swab16((__force __u16) (b16)))
 -static inline __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
 +static __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
  {
        __sum16 tsum = (__force __sum16) csum;
  
        return bswab16(tsum);
  }
  
 -static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
 +static u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
  {
        u32 rc;
 +      __u8 prot = 0;
 +      __be16 protocol;
  
        if (skb->ip_summed != CHECKSUM_PARTIAL)
 -              rc = XMIT_PLAIN;
 +              return XMIT_PLAIN;
  
 -      else {
 -              if (vlan_get_protocol(skb) == htons(ETH_P_IPV6)) {
 -                      rc = XMIT_CSUM_V6;
 -                      if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
 -                              rc |= XMIT_CSUM_TCP;
 +      protocol = vlan_get_protocol(skb);
 +      if (protocol == htons(ETH_P_IPV6)) {
 +              rc = XMIT_CSUM_V6;
 +              prot = ipv6_hdr(skb)->nexthdr;
 +      } else {
 +              rc = XMIT_CSUM_V4;
 +              prot = ip_hdr(skb)->protocol;
 +      }
  
 +      if (!CHIP_IS_E1x(bp) && skb->encapsulation) {
 +              if (inner_ip_hdr(skb)->version == 6) {
 +                      rc |= XMIT_CSUM_ENC_V6;
 +                      if (inner_ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
 +                              rc |= XMIT_CSUM_TCP;
                } else {
 -                      rc = XMIT_CSUM_V4;
 -                      if (ip_hdr(skb)->protocol == IPPROTO_TCP)
 +                      rc |= XMIT_CSUM_ENC_V4;
 +                      if (inner_ip_hdr(skb)->protocol == IPPROTO_TCP)
                                rc |= XMIT_CSUM_TCP;
                }
        }
 +      if (prot == IPPROTO_TCP)
 +              rc |= XMIT_CSUM_TCP;
  
 -      if (skb_is_gso_v6(skb))
 -              rc |= XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6;
 -      else if (skb_is_gso(skb))
 -              rc |= XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP;
 +      if (skb_is_gso_v6(skb)) {
 +              rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6);
 +              if (rc & XMIT_CSUM_ENC)
 +                      rc |= XMIT_GSO_ENC_V6;
 +      } else if (skb_is_gso(skb)) {
 +              rc |= (XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP);
 +              if (rc & XMIT_CSUM_ENC)
 +                      rc |= XMIT_GSO_ENC_V4;
 +      }
  
        return rc;
  }
@@@ -3281,23 -3260,14 +3284,23 @@@ exit_lbl
  }
  #endif
  
 -static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
 -                                      u32 xmit_type)
 +static void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
 +                               u32 xmit_type)
  {
 +      struct ipv6hdr *ipv6;
 +
        *parsing_data |= (skb_shinfo(skb)->gso_size <<
                              ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
                              ETH_TX_PARSE_BD_E2_LSO_MSS;
 -      if ((xmit_type & XMIT_GSO_V6) &&
 -          (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
 +
 +      if (xmit_type & XMIT_GSO_ENC_V6)
 +              ipv6 = inner_ipv6_hdr(skb);
 +      else if (xmit_type & XMIT_GSO_V6)
 +              ipv6 = ipv6_hdr(skb);
 +      else
 +              ipv6 = NULL;
 +
 +      if (ipv6 && ipv6->nexthdr == NEXTHDR_IPV6)
                *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
  }
  
   * @pbd:      parse BD
   * @xmit_type:        xmit flags
   */
 -static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
 -                                   struct eth_tx_parse_bd_e1x *pbd,
 -                                   u32 xmit_type)
 +static void bnx2x_set_pbd_gso(struct sk_buff *skb,
 +                            struct eth_tx_parse_bd_e1x *pbd,
 +                            u32 xmit_type)
  {
        pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
        pbd->tcp_send_seq = bswab32(tcp_hdr(skb)->seq);
 -      pbd->tcp_flags = pbd_tcp_flags(skb);
 +      pbd->tcp_flags = pbd_tcp_flags(tcp_hdr(skb));
  
        if (xmit_type & XMIT_GSO_V4) {
                pbd->ip_id = bswab16(ip_hdr(skb)->id);
                cpu_to_le16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN);
  }
  
 +/**
 + * bnx2x_set_pbd_csum_enc - update PBD with checksum and return header length
 + *
 + * @bp:                       driver handle
 + * @skb:              packet skb
 + * @parsing_data:     data to be updated
 + * @xmit_type:                xmit flags
 + *
 + * 57712/578xx related, when skb has encapsulation
 + */
 +static u8 bnx2x_set_pbd_csum_enc(struct bnx2x *bp, struct sk_buff *skb,
 +                               u32 *parsing_data, u32 xmit_type)
 +{
 +      *parsing_data |=
 +              ((((u8 *)skb_inner_transport_header(skb) - skb->data) >> 1) <<
 +              ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
 +              ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
 +
 +      if (xmit_type & XMIT_CSUM_TCP) {
 +              *parsing_data |= ((inner_tcp_hdrlen(skb) / 4) <<
 +                      ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
 +                      ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
 +
 +              return skb_inner_transport_header(skb) +
 +                      inner_tcp_hdrlen(skb) - skb->data;
 +      }
 +
 +      /* We support checksum offload for TCP and UDP only.
 +       * No need to pass the UDP header length - it's a constant.
 +       */
 +      return skb_inner_transport_header(skb) +
 +              sizeof(struct udphdr) - skb->data;
 +}
 +
  /**
   * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
   *
   * @parsing_data:     data to be updated
   * @xmit_type:                xmit flags
   *
 - * 57712 related
 + * 57712/578xx related
   */
 -static inline  u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
 -                                      u32 *parsing_data, u32 xmit_type)
 +static u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
 +                              u32 *parsing_data, u32 xmit_type)
  {
        *parsing_data |=
                ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
 -              ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) &
 -              ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W;
 +              ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
 +              ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
  
        if (xmit_type & XMIT_CSUM_TCP) {
                *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
        return skb_transport_header(skb) + sizeof(struct udphdr) - skb->data;
  }
  
 -static inline void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
 -      struct eth_tx_start_bd *tx_start_bd, u32 xmit_type)
 +/* set FW indication according to inner or outer protocols if tunneled */
 +static void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
 +                             struct eth_tx_start_bd *tx_start_bd,
 +                             u32 xmit_type)
  {
        tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
  
 -      if (xmit_type & XMIT_CSUM_V4)
 -              tx_start_bd->bd_flags.as_bitfield |=
 -                                      ETH_TX_BD_FLAGS_IP_CSUM;
 -      else
 -              tx_start_bd->bd_flags.as_bitfield |=
 -                                      ETH_TX_BD_FLAGS_IPV6;
 +      if (xmit_type & (XMIT_CSUM_ENC_V6 | XMIT_CSUM_V6))
 +              tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
  
        if (!(xmit_type & XMIT_CSUM_TCP))
                tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
   * @pbd:      parse BD to be updated
   * @xmit_type:        xmit flags
   */
 -static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
 -      struct eth_tx_parse_bd_e1x *pbd,
 -      u32 xmit_type)
 +static u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
 +                           struct eth_tx_parse_bd_e1x *pbd,
 +                           u32 xmit_type)
  {
        u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
  
        return hlen;
  }
  
 +static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb,
 +                                    struct eth_tx_parse_bd_e2 *pbd_e2,
 +                                    struct eth_tx_parse_2nd_bd *pbd2,
 +                                    u16 *global_data,
 +                                    u32 xmit_type)
 +{
 +      u16 hlen_w = 0;
 +      u8 outerip_off, outerip_len = 0;
 +      /* from outer IP to transport */
 +      hlen_w = (skb_inner_transport_header(skb) -
 +                skb_network_header(skb)) >> 1;
 +
 +      /* transport len */
 +      if (xmit_type & XMIT_CSUM_TCP)
 +              hlen_w += inner_tcp_hdrlen(skb) >> 1;
 +      else
 +              hlen_w += sizeof(struct udphdr) >> 1;
 +
 +      pbd2->fw_ip_hdr_to_payload_w = hlen_w;
 +
 +      if (xmit_type & XMIT_CSUM_ENC_V4) {
 +              struct iphdr *iph = ip_hdr(skb);
 +              pbd2->fw_ip_csum_wo_len_flags_frag =
 +                      bswab16(csum_fold((~iph->check) -
 +                                        iph->tot_len - iph->frag_off));
 +      } else {
 +              pbd2->fw_ip_hdr_to_payload_w =
 +                      hlen_w - ((sizeof(struct ipv6hdr)) >> 1);
 +      }
 +
 +      pbd2->tcp_send_seq = bswab32(inner_tcp_hdr(skb)->seq);
 +
 +      pbd2->tcp_flags = pbd_tcp_flags(inner_tcp_hdr(skb));
 +
 +      if (xmit_type & XMIT_GSO_V4) {
 +              pbd2->hw_ip_id = bswab16(inner_ip_hdr(skb)->id);
 +
 +              pbd_e2->data.tunnel_data.pseudo_csum =
 +                      bswab16(~csum_tcpudp_magic(
 +                                      inner_ip_hdr(skb)->saddr,
 +                                      inner_ip_hdr(skb)->daddr,
 +                                      0, IPPROTO_TCP, 0));
 +
 +              outerip_len = ip_hdr(skb)->ihl << 1;
 +      } else {
 +              pbd_e2->data.tunnel_data.pseudo_csum =
 +                      bswab16(~csum_ipv6_magic(
 +                                      &inner_ipv6_hdr(skb)->saddr,
 +                                      &inner_ipv6_hdr(skb)->daddr,
 +                                      0, IPPROTO_TCP, 0));
 +      }
 +
 +      outerip_off = (skb_network_header(skb) - skb->data) >> 1;
 +
 +      *global_data |=
 +              outerip_off |
 +              (!!(xmit_type & XMIT_CSUM_V6) <<
 +                      ETH_TX_PARSE_2ND_BD_IP_HDR_TYPE_OUTER_SHIFT) |
 +              (outerip_len <<
 +                      ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT) |
 +              ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
 +                      ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN_SHIFT);
 +
 +      if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
 +              SET_FLAG(*global_data, ETH_TX_PARSE_2ND_BD_TUNNEL_UDP_EXIST, 1);
 +              pbd2->tunnel_udp_hdr_start_w = skb_transport_offset(skb) >> 1;
 +      }
 +}
 +
  /* called with netif_tx_lock
   * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
   * netif_wake_queue()
@@@ -3552,7 -3421,6 +3555,7 @@@ netdev_tx_t bnx2x_start_xmit(struct sk_
        struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
        struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
        struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
 +      struct eth_tx_parse_2nd_bd *pbd2 = NULL;
        u32 pbd_e2_parsing_data = 0;
        u16 pkt_prod, bd_prod;
        int nbd, txq_index;
                        mac_type = MULTICAST_ADDRESS;
        }
  
 -#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
 +#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT)
        /* First, check if we need to linearize the skb (due to FW
           restrictions). No need to check fragmentation if page size > 8K
           (there will be no violation to FW restrictions) */
        first_bd = tx_start_bd;
  
        tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
 -      SET_FLAG(tx_start_bd->general_data,
 -               ETH_TX_START_BD_PARSE_NBDS,
 -               0);
  
 -      /* header nbd */
 -      SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_HDR_NBDS, 1);
 +      /* header nbd: indirectly zero other flags! */
 +      tx_start_bd->general_data = 1 << ETH_TX_START_BD_HDR_NBDS_SHIFT;
  
        /* remember the first BD of the packet */
        tx_buf->first_bd = txdata->tx_bd_prod;
                /* when transmitting in a vf, start bd must hold the ethertype
                 * for fw to enforce it
                 */
 -#ifndef BNX2X_STOP_ON_ERROR
 -              if (IS_VF(bp)) {
 -#endif
 +              if (IS_VF(bp))
                        tx_start_bd->vlan_or_ethertype =
                                cpu_to_le16(ntohs(eth->h_proto));
 -#ifndef BNX2X_STOP_ON_ERROR
 -              } else {
 +              else
                        /* used by FW for packet accounting */
                        tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
 -              }
 -#endif
        }
  
 +      nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
 +
        /* turn on parsing and get a BD */
        bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
  
        if (!CHIP_IS_E1x(bp)) {
                pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
                memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
 -              /* Set PBD in checksum offload case */
 -              if (xmit_type & XMIT_CSUM)
 +
 +              if (xmit_type & XMIT_CSUM_ENC) {
 +                      u16 global_data = 0;
 +
 +                      /* Set PBD in enc checksum offload case */
 +                      hlen = bnx2x_set_pbd_csum_enc(bp, skb,
 +                                                    &pbd_e2_parsing_data,
 +                                                    xmit_type);
 +
 +                      /* turn on 2nd parsing and get a BD */
 +                      bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
 +
 +                      pbd2 = &txdata->tx_desc_ring[bd_prod].parse_2nd_bd;
 +
 +                      memset(pbd2, 0, sizeof(*pbd2));
 +
 +                      pbd_e2->data.tunnel_data.ip_hdr_start_inner_w =
 +                              (skb_inner_network_header(skb) -
 +                               skb->data) >> 1;
 +
 +                      if (xmit_type & XMIT_GSO_ENC)
 +                              bnx2x_update_pbds_gso_enc(skb, pbd_e2, pbd2,
 +                                                        &global_data,
 +                                                        xmit_type);
 +
 +                      pbd2->global_data = cpu_to_le16(global_data);
 +
 +                      /* add addition parse BD indication to start BD */
 +                      SET_FLAG(tx_start_bd->general_data,
 +                               ETH_TX_START_BD_PARSE_NBDS, 1);
 +                      /* set encapsulation flag in start BD */
 +                      SET_FLAG(tx_start_bd->general_data,
 +                               ETH_TX_START_BD_TUNNEL_EXIST, 1);
 +                      nbd++;
 +              } else if (xmit_type & XMIT_CSUM) {
 +                      /* Set PBD in checksum offload case w/o encapsulation */
                        hlen = bnx2x_set_pbd_csum_e2(bp, skb,
                                                     &pbd_e2_parsing_data,
                                                     xmit_type);
 +              }
  
 -              if (IS_MF_SI(bp) || IS_VF(bp)) {
 -                      /* fill in the MAC addresses in the PBD - for local
 -                       * switching
 -                       */
 -                      bnx2x_set_fw_mac_addr(&pbd_e2->src_mac_addr_hi,
 -                                            &pbd_e2->src_mac_addr_mid,
 -                                            &pbd_e2->src_mac_addr_lo,
 +              /* Add the macs to the parsing BD this is a vf */
 +              if (IS_VF(bp)) {
 +                      /* override GRE parameters in BD */
 +                      bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
 +                                            &pbd_e2->data.mac_addr.src_mid,
 +                                            &pbd_e2->data.mac_addr.src_lo,
                                              eth->h_source);
 -                      bnx2x_set_fw_mac_addr(&pbd_e2->dst_mac_addr_hi,
 -                                            &pbd_e2->dst_mac_addr_mid,
 -                                            &pbd_e2->dst_mac_addr_lo,
 +
 +                      bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi,
 +                                            &pbd_e2->data.mac_addr.dst_mid,
 +                                            &pbd_e2->data.mac_addr.dst_lo,
                                              eth->h_dest);
                }
  
        /* Setup the data pointer of the first BD of the packet */
        tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
        tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
 -      nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
        tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
        pkt_size = tx_start_bd->nbytes;
  
        DP(NETIF_MSG_TX_QUEUED,
 -         "first bd @%p  addr (%x:%x)  nbd %d  nbytes %d  flags %x  vlan %x\n",
 +         "first bd @%p  addr (%x:%x)  nbytes %d  flags %x  vlan %x\n",
           tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
 -         le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
 +         le16_to_cpu(tx_start_bd->nbytes),
           tx_start_bd->bd_flags.as_bitfield,
           le16_to_cpu(tx_start_bd->vlan_or_ethertype));
  
  
                tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
  
 -              if (unlikely(skb_headlen(skb) > hlen))
 +              if (unlikely(skb_headlen(skb) > hlen)) {
 +                      nbd++;
                        bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
                                                 &tx_start_bd, hlen,
 -                                               bd_prod, ++nbd);
 +                                               bd_prod);
 +              }
                if (!CHIP_IS_E1x(bp))
                        bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
                                             xmit_type);
        if (pbd_e2)
                DP(NETIF_MSG_TX_QUEUED,
                   "PBD (E2) @%p  dst %x %x %x src %x %x %x parsing_data %x\n",
 -                 pbd_e2, pbd_e2->dst_mac_addr_hi, pbd_e2->dst_mac_addr_mid,
 -                 pbd_e2->dst_mac_addr_lo, pbd_e2->src_mac_addr_hi,
 -                 pbd_e2->src_mac_addr_mid, pbd_e2->src_mac_addr_lo,
 +                 pbd_e2,
 +                 pbd_e2->data.mac_addr.dst_hi,
 +                 pbd_e2->data.mac_addr.dst_mid,
 +                 pbd_e2->data.mac_addr.dst_lo,
 +                 pbd_e2->data.mac_addr.src_hi,
 +                 pbd_e2->data.mac_addr.src_mid,
 +                 pbd_e2->data.mac_addr.src_lo,
                   pbd_e2->parsing_data);
        DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);
  
diff --cc drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index 54e1b149acb3f5f1777547d88aeff7470a0d8668,c3a65d04c8c9a43689b6ffcfa25a892b3f5a0b62..151675d66b0d4d1f5b80289bb8697c43c69fb301
@@@ -50,13 -50,13 +50,13 @@@ extern int int_mode
                } \
        } while (0)
  
 -#define BNX2X_PCI_ALLOC(x, y, size) \
 -      do { \
 -              x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
 -              if (x == NULL) \
 -                      goto alloc_mem_err; \
 -              memset((void *)x, 0, size); \
 -      } while (0)
 +#define BNX2X_PCI_ALLOC(x, y, size)                           \
 +do {                                                          \
 +      x = dma_alloc_coherent(&bp->pdev->dev, size, y,         \
 +                             GFP_KERNEL | __GFP_ZERO);        \
 +      if (x == NULL)                                          \
 +              goto alloc_mem_err;                             \
 +} while (0)
  
  #define BNX2X_ALLOC(x, size) \
        do { \
@@@ -295,16 -295,29 +295,29 @@@ void bnx2x_int_disable_sync(struct bnx2
  void bnx2x_nic_init_cnic(struct bnx2x *bp);
  
  /**
-  * bnx2x_nic_init - init driver internals.
+  * bnx2x_pre_irq_nic_init - init driver internals.
   *
   * @bp:               driver handle
   *
   * Initializes:
-  *  - rings
+  *  - fastpath object
+  *  - fastpath rings
+  *  etc.
+  */
+ void bnx2x_pre_irq_nic_init(struct bnx2x *bp);
+ /**
+  * bnx2x_post_irq_nic_init - init driver internals.
+  *
+  * @bp:               driver handle
+  * @load_code:        COMMON, PORT or FUNCTION
+  *
+  * Initializes:
   *  - status blocks
+  *  - slowpath rings
   *  - etc.
   */
- void bnx2x_nic_init(struct bnx2x *bp, u32 load_code);
+ void bnx2x_post_irq_nic_init(struct bnx2x *bp, u32 load_code);
  /**
   * bnx2x_alloc_mem_cnic - allocate driver's memory for cnic.
   *
@@@ -496,10 -509,7 +509,10 @@@ netdev_tx_t bnx2x_start_xmit(struct sk_
  /* setup_tc callback */
  int bnx2x_setup_tc(struct net_device *dev, u8 num_tc);
  
 +int bnx2x_get_vf_config(struct net_device *dev, int vf,
 +                      struct ifla_vf_info *ivi);
  int bnx2x_set_vf_mac(struct net_device *dev, int queue, u8 *mac);
 +int bnx2x_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos);
  
  /* select_queue callback */
  u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb);
@@@ -837,7 -847,7 +850,7 @@@ static inline void bnx2x_add_all_napi_c
        /* Add NAPI objects */
        for_each_rx_queue_cnic(bp, i)
                netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
 -                             bnx2x_poll, BNX2X_NAPI_WEIGHT);
 +                             bnx2x_poll, NAPI_POLL_WEIGHT);
  }
  
  static inline void bnx2x_add_all_napi(struct bnx2x *bp)
        /* Add NAPI objects */
        for_each_eth_queue(bp, i)
                netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
 -                             bnx2x_poll, BNX2X_NAPI_WEIGHT);
 +                             bnx2x_poll, NAPI_POLL_WEIGHT);
  }
  
  static inline void bnx2x_del_all_napi_cnic(struct bnx2x *bp)
@@@ -973,9 -983,6 +986,9 @@@ static inline int bnx2x_func_start(stru
        else /* CHIP_IS_E1X */
                start_params->network_cos_mode = FW_WRR;
  
 +      start_params->gre_tunnel_mode = IPGRE_TUNNEL;
 +      start_params->gre_tunnel_rss = GRE_INNER_HEADERS_RSS;
 +
        return bnx2x_func_state_change(bp, &func_params);
  }
  
@@@ -1402,8 -1409,4 +1415,8 @@@ static inline bool bnx2x_is_valid_ether
   *
   */
  void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len);
 +
 +int bnx2x_drain_tx_queues(struct bnx2x *bp);
 +void bnx2x_squeeze_objects(struct bnx2x *bp);
 +
  #endif /* BNX2X_CMN_H */
diff --cc drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 91a0434ce1bdb5d640af56dc46b89a97aa240f6a,a8f1ee31de774cfbd85715daf5ee776a56f0c4a5..a024eec94be1eb166694d4d220c7f7d943d92676
@@@ -75,6 -75,8 +75,6 @@@
  #define FW_FILE_NAME_E1H      "bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw"
  #define FW_FILE_NAME_E2               "bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw"
  
 -#define MAC_LEADING_ZERO_CNT (ALIGN(ETH_ALEN, sizeof(u32)) - ETH_ALEN)
 -
  /* Time in jiffies before concluding the transmitter is hung */
  #define TX_TIMEOUT            (5*HZ)
  
@@@ -2953,16 -2955,14 +2953,16 @@@ static unsigned long bnx2x_get_common_f
        __set_bit(BNX2X_Q_FLG_ACTIVE, &flags);
  
        /* tx only connections collect statistics (on the same index as the
 -       *  parent connection). The statistics are zeroed when the parent
 -       *  connection is initialized.
 +       * parent connection). The statistics are zeroed when the parent
 +       * connection is initialized.
         */
  
        __set_bit(BNX2X_Q_FLG_STATS, &flags);
        if (zero_stats)
                __set_bit(BNX2X_Q_FLG_ZERO_STATS, &flags);
  
 +      __set_bit(BNX2X_Q_FLG_PCSUM_ON_PKT, &flags);
 +      __set_bit(BNX2X_Q_FLG_TUN_INC_INNER_IP_ID, &flags);
  
  #ifdef BNX2X_STOP_ON_ERROR
        __set_bit(BNX2X_Q_FLG_TX_SEC, &flags);
@@@ -3227,29 -3227,16 +3227,29 @@@ static void bnx2x_drv_info_ether_stat(s
  {
        struct eth_stats_info *ether_stat =
                &bp->slowpath->drv_info_to_mcp.ether_stat;
 +      struct bnx2x_vlan_mac_obj *mac_obj =
 +              &bp->sp_objs->mac_obj;
 +      int i;
  
        strlcpy(ether_stat->version, DRV_MODULE_VERSION,
                ETH_STAT_INFO_VERSION_LEN);
  
 -      bp->sp_objs[0].mac_obj.get_n_elements(bp, &bp->sp_objs[0].mac_obj,
 -                                      DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED,
 -                                      ether_stat->mac_local);
 -
 +      /* get DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED macs, placing them in the
 +       * mac_local field in ether_stat struct. The base address is offset by 2
 +       * bytes to account for the field being 8 bytes but a mac address is
 +       * only 6 bytes. Likewise, the stride for the get_n_elements function is
 +       * 2 bytes to compensate from the 6 bytes of a mac to the 8 bytes
 +       * allocated by the ether_stat struct, so the macs will land in their
 +       * proper positions.
 +       */
 +      for (i = 0; i < DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED; i++)
 +              memset(ether_stat->mac_local + i, 0,
 +                     sizeof(ether_stat->mac_local[0]));
 +      mac_obj->get_n_elements(bp, &bp->sp_objs[0].mac_obj,
 +                              DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED,
 +                              ether_stat->mac_local + MAC_PAD, MAC_PAD,
 +                              ETH_ALEN);
        ether_stat->mtu_size = bp->dev->mtu;
 -
        if (bp->dev->features & NETIF_F_RXCSUM)
                ether_stat->feature_flags |= FEATURE_ETH_CHKSUM_OFFLOAD_MASK;
        if (bp->dev->features & NETIF_F_TSO)
@@@ -3271,7 -3258,8 +3271,7 @@@ static void bnx2x_drv_info_fcoe_stat(st
        if (!CNIC_LOADED(bp))
                return;
  
 -      memcpy(fcoe_stat->mac_local + MAC_LEADING_ZERO_CNT,
 -             bp->fip_mac, ETH_ALEN);
 +      memcpy(fcoe_stat->mac_local + MAC_PAD, bp->fip_mac, ETH_ALEN);
  
        fcoe_stat->qos_priority =
                app->traffic_type_priority[LLFC_TRAFFIC_TYPE_FCOE];
@@@ -3373,8 -3361,8 +3373,8 @@@ static void bnx2x_drv_info_iscsi_stat(s
        if (!CNIC_LOADED(bp))
                return;
  
 -      memcpy(iscsi_stat->mac_local + MAC_LEADING_ZERO_CNT,
 -             bp->cnic_eth_dev.iscsi_mac, ETH_ALEN);
 +      memcpy(iscsi_stat->mac_local + MAC_PAD, bp->cnic_eth_dev.iscsi_mac,
 +             ETH_ALEN);
  
        iscsi_stat->qos_priority =
                app->traffic_type_priority[LLFC_TRAFFIC_TYPE_ISCSI];
@@@ -6030,10 -6018,11 +6030,11 @@@ void bnx2x_nic_init_cnic(struct bnx2x *
        mmiowb();
  }
  
- void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
+ void bnx2x_pre_irq_nic_init(struct bnx2x *bp)
  {
        int i;
  
+       /* Setup NIC internals and enable interrupts */
        for_each_eth_queue(bp, i)
                bnx2x_init_eth_fp(bp, i);
  
        rmb();
        bnx2x_init_rx_rings(bp);
        bnx2x_init_tx_rings(bp);
-       /* Initialize MOD_ABS interrupts */
-       bnx2x_init_mod_abs_int(bp, &bp->link_vars, bp->common.chip_id,
-                              bp->common.shmem_base, bp->common.shmem2_base,
-                              BP_PORT(bp));
 +      if (IS_VF(bp)) {
 +              bnx2x_memset_stats(bp);
 +              return;
 +      }
 +
+       if (IS_PF(bp)) {
+               /* Initialize MOD_ABS interrupts */
+               bnx2x_init_mod_abs_int(bp, &bp->link_vars, bp->common.chip_id,
+                                      bp->common.shmem_base,
+                                      bp->common.shmem2_base, BP_PORT(bp));
+               /* initialize the default status block and sp ring */
+               bnx2x_init_def_sb(bp);
+               bnx2x_update_dsb_idx(bp);
+               bnx2x_init_sp_ring(bp);
+       }
+ }
  
-       bnx2x_init_def_sb(bp);
-       bnx2x_update_dsb_idx(bp);
-       bnx2x_init_sp_ring(bp);
+ void bnx2x_post_irq_nic_init(struct bnx2x *bp, u32 load_code)
+ {
        bnx2x_init_eq_ring(bp);
        bnx2x_init_internal(bp, load_code);
        bnx2x_pf_init(bp);
                                   AEU_INPUTS_ATTN_BITS_SPIO5);
  }
  
- /* end of nic init */
- /*
-  * gzip service functions
-  */
+ /* gzip service functions */
  static int bnx2x_gunzip_init(struct bnx2x *bp)
  {
        bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
@@@ -7786,7 -7773,7 +7790,7 @@@ int bnx2x_alloc_mem_cnic(struct bnx2x *
                                sizeof(struct
                                       host_hc_status_block_e1x));
  
 -      if (CONFIGURE_NIC_MODE(bp))
 +      if (CONFIGURE_NIC_MODE(bp) && !bp->t2)
                /* allocate searcher T2 table, as it wasn't allocated before */
                BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ);
  
@@@ -7809,7 -7796,7 +7813,7 @@@ int bnx2x_alloc_mem(struct bnx2x *bp
  {
        int i, allocated, context_size;
  
 -      if (!CONFIGURE_NIC_MODE(bp))
 +      if (!CONFIGURE_NIC_MODE(bp) && !bp->t2)
                /* allocate searcher T2 table */
                BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ);
  
@@@ -7930,6 -7917,8 +7934,6 @@@ int bnx2x_del_all_macs(struct bnx2x *bp
  
  int bnx2x_set_eth_mac(struct bnx2x *bp, bool set)
  {
 -      unsigned long ramrod_flags = 0;
 -
        if (is_zero_ether_addr(bp->dev->dev_addr) &&
            (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) {
                DP(NETIF_MSG_IFUP | NETIF_MSG_IFDOWN,
                return 0;
        }
  
 -      DP(NETIF_MSG_IFUP, "Adding Eth MAC\n");
 +      if (IS_PF(bp)) {
 +              unsigned long ramrod_flags = 0;
  
 -      __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
 -      /* Eth MAC is set on RSS leading client (fp[0]) */
 -      return bnx2x_set_mac_one(bp, bp->dev->dev_addr, &bp->sp_objs->mac_obj,
 -                               set, BNX2X_ETH_MAC, &ramrod_flags);
 +              DP(NETIF_MSG_IFUP, "Adding Eth MAC\n");
 +              __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
 +              return bnx2x_set_mac_one(bp, bp->dev->dev_addr,
 +                                       &bp->sp_objs->mac_obj, set,
 +                                       BNX2X_ETH_MAC, &ramrod_flags);
 +      } else { /* vf */
 +              return bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr,
 +                                           bp->fp->index, true);
 +      }
  }
  
  int bnx2x_setup_leading(struct bnx2x *bp)
@@@ -9542,10 -9525,6 +9546,10 @@@ sp_rtnl_not_reset
                bnx2x_vfpf_storm_rx_mode(bp);
        }
  
 +      if (test_and_clear_bit(BNX2X_SP_RTNL_HYPERVISOR_VLAN,
 +                             &bp->sp_rtnl_state))
 +              bnx2x_pf_set_vfs_vlan(bp);
 +
        /* work which needs rtnl lock not-taken (as it takes the lock itself and
         * can be called from other contexts as well)
         */
  
        /* enable SR-IOV if applicable */
        if (IS_SRIOV(bp) && test_and_clear_bit(BNX2X_SP_RTNL_ENABLE_SRIOV,
 -                                             &bp->sp_rtnl_state))
 +                                             &bp->sp_rtnl_state)) {
 +              bnx2x_disable_sriov(bp);
                bnx2x_enable_sriov(bp);
 +      }
  }
  
  static void bnx2x_period_task(struct work_struct *work)
@@@ -9724,31 -9701,6 +9728,31 @@@ static struct bnx2x_prev_path_list 
        return NULL;
  }
  
 +static int bnx2x_prev_path_mark_eeh(struct bnx2x *bp)
 +{
 +      struct bnx2x_prev_path_list *tmp_list;
 +      int rc;
 +
 +      rc = down_interruptible(&bnx2x_prev_sem);
 +      if (rc) {
 +              BNX2X_ERR("Received %d when tried to take lock\n", rc);
 +              return rc;
 +      }
 +
 +      tmp_list = bnx2x_prev_path_get_entry(bp);
 +      if (tmp_list) {
 +              tmp_list->aer = 1;
 +              rc = 0;
 +      } else {
 +              BNX2X_ERR("path %d: Entry does not exist for eeh; Flow occurs before initial insmod is over ?\n",
 +                        BP_PATH(bp));
 +      }
 +
 +      up(&bnx2x_prev_sem);
 +
 +      return rc;
 +}
 +
  static bool bnx2x_prev_is_path_marked(struct bnx2x *bp)
  {
        struct bnx2x_prev_path_list *tmp_list;
        if (down_trylock(&bnx2x_prev_sem))
                return false;
  
 -      list_for_each_entry(tmp_list, &bnx2x_prev_list, list) {
 -              if (PCI_SLOT(bp->pdev->devfn) == tmp_list->slot &&
 -                  bp->pdev->bus->number == tmp_list->bus &&
 -                  BP_PATH(bp) == tmp_list->path) {
 +      tmp_list = bnx2x_prev_path_get_entry(bp);
 +      if (tmp_list) {
 +              if (tmp_list->aer) {
 +                      DP(NETIF_MSG_HW, "Path %d was marked by AER\n",
 +                         BP_PATH(bp));
 +              } else {
                        rc = true;
                        BNX2X_DEV_INFO("Path %d was already cleaned from previous drivers\n",
                                       BP_PATH(bp));
 -                      break;
                }
        }
  
@@@ -9779,28 -9730,6 +9783,28 @@@ static int bnx2x_prev_mark_path(struct 
        struct bnx2x_prev_path_list *tmp_list;
        int rc;
  
 +      rc = down_interruptible(&bnx2x_prev_sem);
 +      if (rc) {
 +              BNX2X_ERR("Received %d when tried to take lock\n", rc);
 +              return rc;
 +      }
 +
 +      /* Check whether the entry for this path already exists */
 +      tmp_list = bnx2x_prev_path_get_entry(bp);
 +      if (tmp_list) {
 +              if (!tmp_list->aer) {
 +                      BNX2X_ERR("Re-Marking the path.\n");
 +              } else {
 +                      DP(NETIF_MSG_HW, "Removing AER indication from path %d\n",
 +                         BP_PATH(bp));
 +                      tmp_list->aer = 0;
 +              }
 +              up(&bnx2x_prev_sem);
 +              return 0;
 +      }
 +      up(&bnx2x_prev_sem);
 +
 +      /* Create an entry for this path and add it */
        tmp_list = kmalloc(sizeof(struct bnx2x_prev_path_list), GFP_KERNEL);
        if (!tmp_list) {
                BNX2X_ERR("Failed to allocate 'bnx2x_prev_path_list'\n");
        tmp_list->bus = bp->pdev->bus->number;
        tmp_list->slot = PCI_SLOT(bp->pdev->devfn);
        tmp_list->path = BP_PATH(bp);
 +      tmp_list->aer = 0;
        tmp_list->undi = after_undi ? (1 << BP_PORT(bp)) : 0;
  
        rc = down_interruptible(&bnx2x_prev_sem);
                BNX2X_ERR("Received %d when tried to take lock\n", rc);
                kfree(tmp_list);
        } else {
 -              BNX2X_DEV_INFO("Marked path [%d] - finished previous unload\n",
 -                              BP_PATH(bp));
 +              DP(NETIF_MSG_HW, "Marked path [%d] - finished previous unload\n",
 +                 BP_PATH(bp));
                list_add(&tmp_list->list, &bnx2x_prev_list);
                up(&bnx2x_prev_sem);
        }
@@@ -10062,7 -9990,6 +10066,7 @@@ static int bnx2x_prev_unload(struct bnx
        }
  
        do {
 +              int aer = 0;
                /* Lock MCP using an unload request */
                fw = bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS, 0);
                if (!fw) {
                        break;
                }
  
 -              if (fw == FW_MSG_CODE_DRV_UNLOAD_COMMON) {
 +              rc = down_interruptible(&bnx2x_prev_sem);
 +              if (rc) {
 +                      BNX2X_ERR("Cannot check for AER; Received %d when tried to take lock\n",
 +                                rc);
 +              } else {
 +                      /* If Path is marked by EEH, ignore unload status */
 +                      aer = !!(bnx2x_prev_path_get_entry(bp) &&
 +                               bnx2x_prev_path_get_entry(bp)->aer);
 +                      up(&bnx2x_prev_sem);
 +              }
 +
 +              if (fw == FW_MSG_CODE_DRV_UNLOAD_COMMON || aer) {
                        rc = bnx2x_prev_unload_common(bp);
                        break;
                }
@@@ -10122,12 -10038,8 +10126,12 @@@ static void bnx2x_get_common_hwinfo(str
        id = ((val & 0xffff) << 16);
        val = REG_RD(bp, MISC_REG_CHIP_REV);
        id |= ((val & 0xf) << 12);
 -      val = REG_RD(bp, MISC_REG_CHIP_METAL);
 -      id |= ((val & 0xff) << 4);
 +
 +      /* Metal is read from PCI regs, but we can't access >=0x400 from
 +       * the configuration space (so we need to reg_rd)
 +       */
 +      val = REG_RD(bp, PCICFG_OFFSET + PCI_ID_VAL3);
 +      id |= (((val >> 24) & 0xf) << 4);
        val = REG_RD(bp, MISC_REG_BOND_ID);
        id |= (val & 0xf);
        bp->common.chip_id = id;
@@@ -10791,12 -10703,6 +10795,12 @@@ static void bnx2x_get_fcoe_info(struct 
                (max_fcoe_conn & BNX2X_MAX_FCOE_INIT_CONN_MASK) >>
                BNX2X_MAX_FCOE_INIT_CONN_SHIFT;
  
 +      /* Calculate the number of maximum allowed FCoE tasks */
 +      bp->cnic_eth_dev.max_fcoe_exchanges = MAX_NUM_FCOE_TASKS_PER_ENGINE;
 +      if (IS_MF(bp) || CHIP_MODE_IS_4_PORT(bp))
 +              bp->cnic_eth_dev.max_fcoe_exchanges /=
 +                                              MAX_FCOE_FUNCS_PER_ENGINE;
 +
        /* Read the WWN: */
        if (!IS_MF(bp)) {
                /* Port info */
@@@ -10910,12 -10816,14 +10914,12 @@@ static void bnx2x_get_cnic_mac_hwinfo(s
                        }
                }
  
 -              if (IS_MF_STORAGE_SD(bp))
 -                      /* Zero primary MAC configuration */
 -                      memset(bp->dev->dev_addr, 0, ETH_ALEN);
 -
 -              if (IS_MF_FCOE_AFEX(bp) || IS_MF_FCOE_SD(bp))
 -                      /* use FIP MAC as primary MAC */
 +              /* If this is a storage-only interface, use SAN mac as
 +               * primary MAC. Notice that for SD this is already the case,
 +               * as the SAN mac was copied from the primary MAC.
 +               */
 +              if (IS_MF_FCOE_AFEX(bp))
                        memcpy(bp->dev->dev_addr, fip_mac, ETH_ALEN);
 -
        } else {
                val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].
                                iscsi_mac_upper);
@@@ -11152,9 -11060,6 +11156,9 @@@ static int bnx2x_get_hwinfo(struct bnx2
                                } else
                                        BNX2X_DEV_INFO("illegal OV for SD\n");
                                break;
 +                      case SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF:
 +                              bp->mf_config[vn] = 0;
 +                              break;
                        default:
                                /* Unknown configuration: reset mf_config */
                                bp->mf_config[vn] = 0;
@@@ -11501,6 -11406,26 +11505,6 @@@ static int bnx2x_init_bp(struct bnx2x *
   * net_device service functions
   */
  
 -static int bnx2x_open_epilog(struct bnx2x *bp)
 -{
 -      /* Enable sriov via delayed work. This must be done via delayed work
 -       * because it causes the probe of the vf devices to be run, which invoke
 -       * register_netdevice which must have rtnl lock taken. As we are holding
 -       * the lock right now, that could only work if the probe would not take
 -       * the lock. However, as the probe of the vf may be called from other
 -       * contexts as well (such as passthrough to vm failes) it can't assume
 -       * the lock is being held for it. Using delayed work here allows the
 -       * probe code to simply take the lock (i.e. wait for it to be released
 -       * if it is being held).
 -       */
 -      smp_mb__before_clear_bit();
 -      set_bit(BNX2X_SP_RTNL_ENABLE_SRIOV, &bp->sp_rtnl_state);
 -      smp_mb__after_clear_bit();
 -      schedule_delayed_work(&bp->sp_rtnl_task, 0);
 -
 -      return 0;
 -}
 -
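
The removed comment above describes deferring SR-IOV enablement because the caller already holds rtnl and the deferred routine must be able to take it itself. A user-space sketch of the same shape, with a pthread mutex standing in for rtnl and a thread standing in for the delayed work (build with -lpthread):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t rtnl = PTHREAD_MUTEX_INITIALIZER;

static void *deferred_work(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&rtnl);	/* blocks until the opener releases it */
	printf("deferred: lock acquired, safe to probe\n");
	pthread_mutex_unlock(&rtnl);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_mutex_lock(&rtnl);	/* "rtnl held" during open */
	pthread_create(&t, NULL, deferred_work, NULL); /* ~schedule_delayed_work */
	printf("open: work scheduled, releasing lock\n");
	pthread_mutex_unlock(&rtnl);

	pthread_join(t, NULL);
	return 0;
}
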
  /* called with rtnl_lock */
  static int bnx2x_open(struct net_device *dev)
  {
@@@ -11870,8 -11795,6 +11874,8 @@@ static const struct net_device_ops bnx2
        .ndo_setup_tc           = bnx2x_setup_tc,
  #ifdef CONFIG_BNX2X_SRIOV
        .ndo_set_vf_mac         = bnx2x_set_vf_mac,
 +      .ndo_set_vf_vlan        = bnx2x_set_vf_vlan,
 +      .ndo_get_vf_config      = bnx2x_get_vf_config,
  #endif
  #ifdef NETDEV_FCOE_WWNN
        .ndo_fcoe_get_wwn       = bnx2x_fcoe_get_wwn,
@@@ -12034,26 -11957,19 +12038,26 @@@ static int bnx2x_init_dev(struct bnx2x 
        dev->watchdog_timeo = TX_TIMEOUT;
  
        dev->netdev_ops = &bnx2x_netdev_ops;
 -      bnx2x_set_ethtool_ops(dev);
 +      bnx2x_set_ethtool_ops(bp, dev);
  
        dev->priv_flags |= IFF_UNICAST_FLT;
  
        dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
                NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 |
                NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO |
 -              NETIF_F_RXHASH | NETIF_F_HW_VLAN_TX;
 +              NETIF_F_RXHASH | NETIF_F_HW_VLAN_CTAG_TX;
 +      if (!CHIP_IS_E1x(bp)) {
 +              dev->hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL;
 +              dev->hw_enc_features =
 +                      NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
 +                      NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 |
 +                      NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL;
 +      }
  
        dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
                NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_HIGHDMA;
  
 -      dev->features |= dev->hw_features | NETIF_F_HW_VLAN_RX;
 +      dev->features |= dev->hw_features | NETIF_F_HW_VLAN_CTAG_RX;
        if (bp->flags & USING_DAC_FLAG)
                dev->features |= NETIF_F_HIGHDMA;
  
@@@ -12535,7 -12451,7 +12539,7 @@@ static int bnx2x_init_one(struct pci_de
         * l2 connections.
         */
        if (IS_VF(bp)) {
 -              bnx2x_vf_map_doorbells(bp);
 +              bp->doorbells = bnx2x_vf_doorbells(bp);
                rc = bnx2x_vf_pci_alloc(bp);
                if (rc)
                        goto init_one_exit;
                        goto init_one_exit;
        }
  
 -      /* Enable SRIOV if capability found in configuration space.
 -       * Once the generic SR-IOV framework makes it in from the
 -       * pci tree this will be revised, to allow dynamic control
 -       * over the number of VFs. Right now, change the num of vfs
 -       * param below to enable SR-IOV.
 -       */
 -      rc = bnx2x_iov_init_one(bp, int_mode, 0/*num vfs*/);
 +      /* Enable SRIOV if capability found in configuration space */
 +      rc = bnx2x_iov_init_one(bp, int_mode, BNX2X_MAX_NUM_OF_VFS);
        if (rc)
                goto init_one_exit;
  
        if (CHIP_IS_E1x(bp))
                bp->flags |= NO_FCOE_FLAG;
  
 -      /* disable FCOE for 57840 device, until FW supports it */
 -      switch (ent->driver_data) {
 -      case BCM57840_O:
 -      case BCM57840_4_10:
 -      case BCM57840_2_20:
 -      case BCM57840_MFO:
 -      case BCM57840_MF:
 -              bp->flags |= NO_FCOE_FLAG;
 -      }
 -
        /* Set bp->num_queues for MSI-X mode*/
        bnx2x_set_num_queues(bp);
  
@@@ -12709,7 -12640,9 +12713,7 @@@ static void bnx2x_remove_one(struct pci
  
  static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
  {
 -      int i;
 -
 -      bp->state = BNX2X_STATE_ERROR;
 +      bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
  
        bp->rx_mode = BNX2X_RX_MODE_NONE;
  
  
        /* Stop Tx */
        bnx2x_tx_disable(bp);
 -
 -      bnx2x_netif_stop(bp, 0);
        /* Delete all NAPI objects */
        bnx2x_del_all_napi(bp);
        if (CNIC_LOADED(bp))
                bnx2x_del_all_napi_cnic(bp);
 +      netdev_reset_tc(bp->dev);
  
        del_timer_sync(&bp->timer);
 +      cancel_delayed_work(&bp->sp_task);
 +      cancel_delayed_work(&bp->period_task);
  
 -      bnx2x_stats_handle(bp, STATS_EVENT_STOP);
 -
 -      /* Release IRQs */
 -      bnx2x_free_irq(bp);
 -
 -      /* Free SKBs, SGEs, TPA pool and driver internals */
 -      bnx2x_free_skbs(bp);
 -
 -      for_each_rx_queue(bp, i)
 -              bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
 -
 -      bnx2x_free_mem(bp);
 +      spin_lock_bh(&bp->stats_lock);
 +      bp->stats_state = STATS_STATE_DISABLED;
 +      spin_unlock_bh(&bp->stats_lock);
  
 -      bp->state = BNX2X_STATE_CLOSED;
 +      bnx2x_save_statistics(bp);
  
        netif_carrier_off(bp->dev);
  
@@@ -12768,8 -12709,6 +12772,8 @@@ static pci_ers_result_t bnx2x_io_error_
  
        rtnl_lock();
  
 +      BNX2X_ERR("IO error detected\n");
 +
        netif_device_detach(dev);
  
        if (state == pci_channel_io_perm_failure) {
        if (netif_running(dev))
                bnx2x_eeh_nic_unload(bp);
  
 +      bnx2x_prev_path_mark_eeh(bp);
 +
        pci_disable_device(pdev);
  
        rtnl_unlock();
@@@ -12800,10 -12737,9 +12804,10 @@@ static pci_ers_result_t bnx2x_io_slot_r
  {
        struct net_device *dev = pci_get_drvdata(pdev);
        struct bnx2x *bp = netdev_priv(dev);
 +      int i;
  
        rtnl_lock();
 -
 +      BNX2X_ERR("IO slot reset initializing...\n");
        if (pci_enable_device(pdev)) {
                dev_err(&pdev->dev,
                        "Cannot re-enable PCI device after reset\n");
  
        pci_set_master(pdev);
        pci_restore_state(pdev);
 +      pci_save_state(pdev);
  
        if (netif_running(dev))
                bnx2x_set_power_state(bp, PCI_D0);
  
 +      if (netif_running(dev)) {
 +              BNX2X_ERR("IO slot reset --> driver unload\n");
 +              if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
 +                      u32 v;
 +
 +                      v = SHMEM2_RD(bp,
 +                                    drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
 +                      SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
 +                                v & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
 +              }
 +              bnx2x_drain_tx_queues(bp);
 +              bnx2x_send_unload_req(bp, UNLOAD_RECOVERY);
 +              bnx2x_netif_stop(bp, 1);
 +              bnx2x_free_irq(bp);
 +
 +              /* Report UNLOAD_DONE to MCP */
 +              bnx2x_send_unload_done(bp, true);
 +
 +              bp->sp_state = 0;
 +              bp->port.pmf = 0;
 +
 +              bnx2x_prev_unload(bp);
 +
 +              /* We should have reset the engine, so it's fair to
 +               * assume the FW will no longer write to the bnx2x driver.
 +               */
 +              bnx2x_squeeze_objects(bp);
 +              bnx2x_free_skbs(bp);
 +              for_each_rx_queue(bp, i)
 +                      bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
 +              bnx2x_free_fp_mem(bp);
 +              bnx2x_free_mem(bp);
 +
 +              bp->state = BNX2X_STATE_CLOSED;
 +      }
 +
        rtnl_unlock();
  
        return PCI_ERS_RESULT_RECOVERED;
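
The drv_capabilities_flag update in this hunk is a plain read-modify-write: read the shared-memory word, clear the LOADED_L2 capability bit, write it back. A standalone sketch (the bit position here is an assumption):

#include <stdio.h>
#include <stdint.h>

#define LOADED_L2 (1u << 0)	/* stand-in for DRV_FLAGS_CAPABILITIES_LOADED_L2 */

int main(void)
{
	uint32_t shmem_flag = LOADED_L2 | (1u << 3);	/* illustrative value */

	uint32_t v = shmem_flag;	/* SHMEM2_RD(...)              */
	shmem_flag = v & ~LOADED_L2;	/* SHMEM2_WR(..., v & ~flag)   */

	printf("before 0x%x, after 0x%x\n", v, shmem_flag);	/* 0x9, 0x8 */
	return 0;
}
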
@@@ -12880,9 -12779,6 +12884,9 @@@ static void bnx2x_io_resume(struct pci_
  
        bnx2x_eeh_recover(bp);
  
 +      bp->fw_seq = SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
 +                                                      DRV_MSG_SEQ_NUMBER_MASK;
 +
        if (netif_running(dev))
                bnx2x_nic_load(bp, LOAD_NORMAL);
  
@@@ -12905,9 -12801,6 +12909,9 @@@ static struct pci_driver bnx2x_pci_driv
        .suspend     = bnx2x_suspend,
        .resume      = bnx2x_resume,
        .err_handler = &bnx2x_err_handler,
 +#ifdef CONFIG_BNX2X_SRIOV
 +      .sriov_configure = bnx2x_sriov_configure,
 +#endif
  };
  
  static int __init bnx2x_init(void)
index 9045903dcda3f6c0983ccb75455acaa90514d417,941aa1f5cf9b05b12648f5aed8cb46c59f3eec1e..234ce6f07544822152075bd4401492237647a9cd
@@@ -1,5 -1,5 +1,5 @@@
  /*
 - * Copyright (C) 2005 - 2011 Emulex
 + * Copyright (C) 2005 - 2013 Emulex
   * All rights reserved.
   *
   * This program is free software; you can redistribute it and/or
@@@ -214,7 -214,6 +214,7 @@@ struct be_tx_stats 
  };
  
  struct be_tx_obj {
 +      u32 db_offset;
        struct be_queue_info q;
        struct be_queue_info cq;
        /* Remember the skbs that were transmitted */
@@@ -293,7 -292,7 +293,7 @@@ struct be_drv_stats 
        u32 rx_in_range_errors;
        u32 rx_out_range_errors;
        u32 rx_frame_too_long;
 -      u32 rx_address_mismatch_drops;
 +      u32 rx_address_filtered;
        u32 rx_dropped_too_small;
        u32 rx_dropped_too_short;
        u32 rx_dropped_header_too_small;
@@@ -329,6 -328,7 +329,7 @@@ enum vf_state 
  #define BE_FLAGS_WORKER_SCHEDULED             (1 << 3)
  #define BE_UC_PMAC_COUNT              30
  #define BE_VF_UC_PMAC_COUNT           2
+ #define BE_FLAGS_QNQ_ASYNC_EVT_RCVD           (1 << 11)
  
  struct phy_info {
        u8 transceiver;
@@@ -435,7 -435,7 +436,8 @@@ struct be_adapter 
        u8 wol_cap;
        bool wol;
        u32 uc_macs;            /* Count of secondary UC MAC programmed */
 +      u16 asic_rev;
+       u16 qnq_vid;
        u32 msg_enable;
        int be_get_temp_freq;
        u16 max_mcast_mac;
        u16 max_event_queues;
        u32 if_cap_flags;
        u8 pf_number;
 +      u64 rss_flags;
  };
  
  #define be_physfn(adapter)            (!adapter->virtfn)
@@@ -651,6 -650,11 +653,11 @@@ static inline bool be_is_wol_excluded(s
        }
  }
  
+ static inline int qnq_async_evt_rcvd(struct be_adapter *adapter)
+ {
+       return adapter->flags & BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
+ }
+ 
  extern void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm,
                u16 num_popped);
  extern void be_link_status_update(struct be_adapter *adapter, u8 link_status);
index 9080c2775e9fd77fec2be7e9a6d1c174a8b19896,24c80d1d70bab59dd21c7e71d6513a174b681dd2..25d3290b8cac3bfc7409529509614ba1560561dd
@@@ -1,5 -1,5 +1,5 @@@
  /*
 - * Copyright (C) 2005 - 2011 Emulex
 + * Copyright (C) 2005 - 2013 Emulex
   * All rights reserved.
   *
   * This program is free software; you can redistribute it and/or
@@@ -263,6 -263,27 +263,27 @@@ static void be_async_grp5_evt_process(s
        }
  }
  
+ static void be_async_dbg_evt_process(struct be_adapter *adapter,
+               u32 trailer, struct be_mcc_compl *cmp)
+ {
+       u8 event_type = 0;
+       struct be_async_event_qnq *evt = (struct be_async_event_qnq *) cmp;
+       event_type = (trailer >> ASYNC_TRAILER_EVENT_TYPE_SHIFT) &
+               ASYNC_TRAILER_EVENT_TYPE_MASK;
+       switch (event_type) {
+       case ASYNC_DEBUG_EVENT_TYPE_QNQ:
+               if (evt->valid)
+                       adapter->qnq_vid = le16_to_cpu(evt->vlan_tag);
+               adapter->flags |= BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
+       break;
+       default:
+               dev_warn(&adapter->pdev->dev, "Unknown debug event\n");
+       break;
+       }
+ }
+ 
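
be_async_dbg_evt_process() above pulls the event type out of the completion trailer with a shift and mask. A sketch with illustrative shift/mask values (the real ASYNC_TRAILER_* constants are defined elsewhere in the driver; type 1 is ASYNC_DEBUG_EVENT_TYPE_QNQ per the be_cmds.h hunk below):

#include <stdio.h>
#include <stdint.h>

#define EVT_TYPE_SHIFT 16	/* assumed position of the type field */
#define EVT_TYPE_MASK  0xFF	/* assumed width of the type field    */

int main(void)
{
	uint32_t trailer = 0x00010000;	/* type field = 1 (QnQ debug event) */
	uint8_t type = (trailer >> EVT_TYPE_SHIFT) & EVT_TYPE_MASK;
	printf("event_type = %u\n", type);
	return 0;
}
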
  static inline bool is_link_state_evt(u32 trailer)
  {
        return ((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
@@@ -277,6 -298,13 +298,13 @@@ static inline bool is_grp5_evt(u32 trai
                                ASYNC_EVENT_CODE_GRP_5);
  }
  
+ static inline bool is_dbg_evt(u32 trailer)
+ {
+       return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
+               ASYNC_TRAILER_EVENT_CODE_MASK) ==
+                               ASYNC_EVENT_CODE_QNQ);
+ }
+ 
  static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter)
  {
        struct be_queue_info *mcc_cq = &adapter->mcc_obj.cq;
@@@ -325,6 -353,9 +353,9 @@@ int be_process_mcc(struct be_adapter *a
                        else if (is_grp5_evt(compl->flags))
                                be_async_grp5_evt_process(adapter,
                                compl->flags, compl);
+                       else if (is_dbg_evt(compl->flags))
+                               be_async_dbg_evt_process(adapter,
+                               compl->flags, compl);
                } else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
                                status = be_mcc_compl_process(adapter, compl);
                                atomic_dec(&mcc_obj->q.used);
@@@ -687,8 -718,10 +718,8 @@@ static struct be_mcc_wrb *wrb_from_mccq
        if (!mccq->created)
                return NULL;
  
 -      if (atomic_read(&mccq->used) >= mccq->len) {
 -              dev_err(&adapter->pdev->dev, "Out of MCCQ wrbs\n");
 +      if (atomic_read(&mccq->used) >= mccq->len)
                return NULL;
 -      }
  
        wrb = queue_head_node(mccq);
        queue_head_inc(mccq);
@@@ -1020,6 -1053,7 +1051,7 @@@ int be_cmd_mccq_ext_create(struct be_ad
  
        /* Subscribe to Link State and Group 5 Events(bits 1 and 5 set) */
        req->async_event_bitmap[0] = cpu_to_le32(0x00000022);
+       req->async_event_bitmap[0] |= cpu_to_le32(1 << ASYNC_EVENT_CODE_QNQ);
        be_dws_cpu_to_le(ctxt, sizeof(req->context));
  
        be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
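
The bitmap arithmetic above, worked out: 0x22 sets bits 1 and 5 (link state and group-5 events, per the comment), and ORing in 1 << ASYNC_EVENT_CODE_QNQ (0x6, per the be_cmds.h hunk below) adds bit 6:

#include <stdio.h>

#define ASYNC_EVENT_CODE_QNQ 0x6	/* from the be_cmds.h hunk below */

int main(void)
{
	unsigned bitmap = 0x00000022;		/* bits 1 and 5: link, grp5 */
	bitmap |= 1u << ASYNC_EVENT_CODE_QNQ;	/* bit 6: QnQ debug events  */
	printf("async_event_bitmap[0] = 0x%08x\n", bitmap);	/* 0x00000062 */
	return 0;
}
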
@@@ -1093,14 -1127,15 +1125,14 @@@ int be_cmd_mccq_create(struct be_adapte
        return status;
  }
  
 -int be_cmd_txq_create(struct be_adapter *adapter,
 -                      struct be_queue_info *txq,
 -                      struct be_queue_info *cq)
 +int be_cmd_txq_create(struct be_adapter *adapter, struct be_tx_obj *txo)
  {
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_eth_tx_create *req;
 +      struct be_queue_info *txq = &txo->q;
 +      struct be_queue_info *cq = &txo->cq;
        struct be_dma_mem *q_mem = &txq->dma_mem;
 -      void *ctxt;
 -      int status;
 +      int status, ver = 0;
  
        spin_lock_bh(&adapter->mcc_lock);
  
        }
  
        req = embedded_payload(wrb);
 -      ctxt = &req->context;
  
        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
                OPCODE_ETH_TX_CREATE, sizeof(*req), wrb, NULL);
  
        if (lancer_chip(adapter)) {
                req->hdr.version = 1;
 -              AMAP_SET_BITS(struct amap_tx_context, if_id, ctxt,
 -                                      adapter->if_handle);
 +              req->if_id = cpu_to_le16(adapter->if_handle);
 +      } else if (BEx_chip(adapter)) {
 +              if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC)
 +                      req->hdr.version = 2;
 +      } else { /* For SH */
 +              req->hdr.version = 2;
        }
  
        req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
        req->ulp_num = BE_ULP1_NUM;
        req->type = BE_ETH_TX_RING_TYPE_STANDARD;
 -
 -      AMAP_SET_BITS(struct amap_tx_context, tx_ring_size, ctxt,
 -              be_encoded_q_len(txq->len));
 -      AMAP_SET_BITS(struct amap_tx_context, ctx_valid, ctxt, 1);
 -      AMAP_SET_BITS(struct amap_tx_context, cq_id_send, ctxt, cq->id);
 -
 -      be_dws_cpu_to_le(ctxt, sizeof(req->context));
 -
 +      req->cq_id = cpu_to_le16(cq->id);
 +      req->queue_size = be_encoded_q_len(txq->len);
        be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
  
 +      ver = req->hdr.version;
 +
        status = be_mcc_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_eth_tx_create *resp = embedded_payload(wrb);
                txq->id = le16_to_cpu(resp->cid);
 +              if (ver == 2)
 +                      txo->db_offset = le32_to_cpu(resp->db_offset);
 +              else
 +                      txo->db_offset = DB_TXULP1_OFFSET;
                txq->created = true;
        }
  
@@@ -1834,7 -1866,7 +1866,7 @@@ err
  
  /* Uses mbox */
  int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num,
 -              u32 *mode, u32 *caps)
 +                      u32 *mode, u32 *caps, u16 *asic_rev)
  {
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_query_fw_cfg *req;
                *port_num = le32_to_cpu(resp->phys_port);
                *mode = le32_to_cpu(resp->function_mode);
                *caps = le32_to_cpu(resp->function_caps);
 +              *asic_rev = le32_to_cpu(resp->asic_revision) & 0xFF;
        }
  
        mutex_unlock(&adapter->mbox_lock);
@@@ -1898,8 -1929,7 +1930,8 @@@ int be_cmd_reset_function(struct be_ada
        return status;
  }
  
 -int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable, u16 table_size)
 +int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable,
 +                      u32 rss_hash_opts, u16 table_size)
  {
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_rss_config *req;
                OPCODE_ETH_RSS_CONFIG, sizeof(*req), wrb, NULL);
  
        req->if_id = cpu_to_le32(adapter->if_handle);
 -      req->enable_rss = cpu_to_le16(RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
 -                                    RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6);
 +      req->enable_rss = cpu_to_le16(rss_hash_opts);
 +      req->cpu_table_size_log2 = cpu_to_le16(fls(table_size) - 1);
  
 -      if (lancer_chip(adapter) || skyhawk_chip(adapter)) {
 +      if (lancer_chip(adapter) || skyhawk_chip(adapter))
                req->hdr.version = 1;
 -              req->enable_rss |= cpu_to_le16(RSS_ENABLE_UDP_IPV4 |
 -                                             RSS_ENABLE_UDP_IPV6);
 -      }
  
 -      req->cpu_table_size_log2 = cpu_to_le16(fls(table_size) - 1);
        memcpy(req->cpu_table, rsstable, table_size);
        memcpy(req->hash, myhash, sizeof(myhash));
        be_dws_cpu_to_le(req->hash, sizeof(req->hash));
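
cpu_table_size_log2 above encodes the table size as a power of two via fls(table_size) - 1. A standalone check with a portable stand-in for the kernel's fls() (the driver passes a 128-entry table, so the field comes out as 7):

#include <stdio.h>

/* stand-in for kernel fls(): 1-based index of highest set bit, 0 for 0 */
static int fls_stub(unsigned x)
{
	int i = 0;
	while (x) { i++; x >>= 1; }
	return i;
}

int main(void)
{
	unsigned table_size = 128;
	printf("cpu_table_size_log2 = %d\n", fls_stub(table_size) - 1);	/* 7 */
	return 0;
}
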
@@@ -2341,6 -2375,7 +2373,6 @@@ int be_cmd_get_seeprom_data(struct be_a
  {
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_seeprom_read *req;
 -      struct be_sge *sge;
        int status;
  
        spin_lock_bh(&adapter->mcc_lock);
                goto err;
        }
        req = nonemb_cmd->va;
 -      sge = nonembedded_sgl(wrb);
  
        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                        OPCODE_COMMON_SEEPROM_READ, sizeof(*req), wrb,
@@@ -2457,6 -2493,9 +2489,9 @@@ int be_cmd_get_cntl_attributes(struct b
        struct mgmt_controller_attrib *attribs;
        struct be_dma_mem attribs_cmd;
  
+       if (mutex_lock_interruptible(&adapter->mbox_lock))
+               return -1;
        memset(&attribs_cmd, 0, sizeof(struct be_dma_mem));
        attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs);
        attribs_cmd.va = pci_alloc_consistent(adapter->pdev, attribs_cmd.size,
        if (!attribs_cmd.va) {
                dev_err(&adapter->pdev->dev,
                                "Memory allocation failure\n");
-               return -ENOMEM;
+               status = -ENOMEM;
+               goto err;
        }
  
-       if (mutex_lock_interruptible(&adapter->mbox_lock))
-               return -1;
        wrb = wrb_from_mbox(adapter);
        if (!wrb) {
                status = -EBUSY;
  
  err:
        mutex_unlock(&adapter->mbox_lock);
-       pci_free_consistent(adapter->pdev, attribs_cmd.size, attribs_cmd.va,
-                                       attribs_cmd.dma);
+       if (attribs_cmd.va)
+               pci_free_consistent(adapter->pdev, attribs_cmd.size,
+                                   attribs_cmd.va, attribs_cmd.dma);
        return status;
  }
  
@@@ -2663,8 -2701,10 +2697,8 @@@ int be_cmd_set_mac_list(struct be_adapt
        cmd.size = sizeof(struct be_cmd_req_set_mac_list);
        cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size,
                        &cmd.dma, GFP_KERNEL);
 -      if (!cmd.va) {
 -              dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
 +      if (!cmd.va)
                return -ENOMEM;
 -      }
  
        spin_lock_bh(&adapter->mcc_lock);
  
@@@ -2788,6 -2828,9 +2822,9 @@@ int be_cmd_get_acpi_wol_cap(struct be_a
                            CMD_SUBSYSTEM_ETH))
                return -EPERM;
  
+       if (mutex_lock_interruptible(&adapter->mbox_lock))
+               return -1;
        memset(&cmd, 0, sizeof(struct be_dma_mem));
        cmd.size = sizeof(struct be_cmd_resp_acpi_wol_magic_config_v1);
        cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
        if (!cmd.va) {
                dev_err(&adapter->pdev->dev,
                                "Memory allocation failure\n");
-               return -ENOMEM;
+               status = -ENOMEM;
+               goto err;
        }
  
-       if (mutex_lock_interruptible(&adapter->mbox_lock))
-               return -1;
        wrb = wrb_from_mbox(adapter);
        if (!wrb) {
                status = -EBUSY;
        }
  err:
        mutex_unlock(&adapter->mbox_lock);
-       pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
+       if (cmd.va)
+               pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
        return status;
  
  }
@@@ -2936,15 -2978,14 +2972,15 @@@ static struct be_nic_resource_desc *be_
        int i;
  
        for (i = 0; i < desc_count; i++) {
 -              desc->desc_len = RESOURCE_DESC_SIZE;
 +              desc->desc_len = desc->desc_len ? : RESOURCE_DESC_SIZE;
                if (((void *)desc + desc->desc_len) >
                    (void *)(buf + max_buf_size)) {
                        desc = NULL;
                        break;
                }
  
 -              if (desc->desc_type == NIC_RESOURCE_DESC_TYPE_ID)
 +              if (desc->desc_type == NIC_RESOURCE_DESC_TYPE_V0 ||
 +                  desc->desc_type == NIC_RESOURCE_DESC_TYPE_V1)
                        break;
  
                desc = (void *)desc + desc->desc_len;
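
The loop above walks variable-length resource descriptors: default desc_len when it reads back as zero, bounds-check against the end of the buffer, stop on a matching type, otherwise advance by desc_len. A simplified, self-contained version of that walk (the two-byte header layout is an assumption, not the real NIC descriptor format):

#include <stdio.h>
#include <stdint.h>

struct desc_hdr { uint8_t type; uint8_t len; };

static struct desc_hdr *find_desc(uint8_t *buf, size_t buf_size,
				  int count, uint8_t want)
{
	struct desc_hdr *d = (struct desc_hdr *)buf;

	for (int i = 0; i < count; i++) {
		if ((uint8_t *)d + d->len > buf + buf_size)
			return NULL;			/* ran past the buffer */
		if (d->type == want)
			return d;
		d = (struct desc_hdr *)((uint8_t *)d + d->len);	/* variable stride */
	}
	return NULL;
}

int main(void)
{
	uint8_t buf[16] = { 0x10, 4, 0, 0,			/* type 0x10, 4 bytes */
			    0x41, 8, 0, 0, 0, 0, 0, 0 };	/* type 0x41, 8 bytes */
	struct desc_hdr *d = find_desc(buf, sizeof(buf), 2, 0x41);
	printf("found type 0x%x at offset %ld\n",
	       d ? d->type : 0, d ? (long)((uint8_t *)d - buf) : -1L);
	return 0;
}
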
@@@ -2964,16 -3005,18 +3000,18 @@@ int be_cmd_get_func_config(struct be_ad
        int status;
        struct be_dma_mem cmd;
  
+       if (mutex_lock_interruptible(&adapter->mbox_lock))
+               return -1;
        memset(&cmd, 0, sizeof(struct be_dma_mem));
        cmd.size = sizeof(struct be_cmd_resp_get_func_config);
        cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
                                      &cmd.dma);
        if (!cmd.va) {
                dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
-               return -ENOMEM;
+               status = -ENOMEM;
+               goto err;
        }
-       if (mutex_lock_interruptible(&adapter->mbox_lock))
-               return -1;
  
        wrb = wrb_from_mbox(adapter);
        if (!wrb) {
                               OPCODE_COMMON_GET_FUNC_CONFIG,
                               cmd.size, wrb, &cmd);
  
 +      if (skyhawk_chip(adapter))
 +              req->hdr.version = 1;
 +
        status = be_mbox_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_get_func_config *resp = cmd.va;
        }
  err:
        mutex_unlock(&adapter->mbox_lock);
-       pci_free_consistent(adapter->pdev, cmd.size,
-                           cmd.va, cmd.dma);
+       if (cmd.va)
+               pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
        return status;
  }
  
 - /* Uses sync mcc */
 -int be_cmd_get_profile_config(struct be_adapter *adapter, u32 *cap_flags,
 -                            u8 domain)
 +/* Uses mbox */
 +int be_cmd_get_profile_config_mbox(struct be_adapter *adapter,
 +                                 u8 domain, struct be_dma_mem *cmd)
  {
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_get_profile_config *req;
        int status;
 -      struct be_dma_mem cmd;
  
 -      memset(&cmd, 0, sizeof(struct be_dma_mem));
 -      cmd.size = sizeof(struct be_cmd_resp_get_profile_config);
 -      cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
 -                                    &cmd.dma);
 -      if (!cmd.va) {
 -              dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
 -              return -ENOMEM;
 -      }
 +      if (mutex_lock_interruptible(&adapter->mbox_lock))
 +              return -1;
 +      wrb = wrb_from_mbox(adapter);
 +
 +      req = cmd->va;
 +      be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
 +                             OPCODE_COMMON_GET_PROFILE_CONFIG,
 +                             cmd->size, wrb, cmd);
 +
 +      req->type = ACTIVE_PROFILE_TYPE;
 +      req->hdr.domain = domain;
 +      if (!lancer_chip(adapter))
 +              req->hdr.version = 1;
 +
 +      status = be_mbox_notify_wait(adapter);
 +
 +      mutex_unlock(&adapter->mbox_lock);
 +      return status;
 +}
 +
 +/* Uses sync mcc */
 +int be_cmd_get_profile_config_mccq(struct be_adapter *adapter,
 +                                 u8 domain, struct be_dma_mem *cmd)
 +{
 +      struct be_mcc_wrb *wrb;
 +      struct be_cmd_req_get_profile_config *req;
 +      int status;
  
        spin_lock_bh(&adapter->mcc_lock);
  
                goto err;
        }
  
 -      req = cmd.va;
 -
 +      req = cmd->va;
        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                               OPCODE_COMMON_GET_PROFILE_CONFIG,
 -                             cmd.size, wrb, &cmd);
 +                             cmd->size, wrb, cmd);
  
        req->type = ACTIVE_PROFILE_TYPE;
        req->hdr.domain = domain;
 +      if (!lancer_chip(adapter))
 +              req->hdr.version = 1;
  
        status = be_mcc_notify_wait(adapter);
 +
 +err:
 +      spin_unlock_bh(&adapter->mcc_lock);
 +      return status;
 +}
 +
 +/* Uses sync mcc if MCCQ is already created, otherwise mbox */
 +int be_cmd_get_profile_config(struct be_adapter *adapter, u32 *cap_flags,
 +                            u16 *txq_count, u8 domain)
 +{
 +      struct be_queue_info *mccq = &adapter->mcc_obj.q;
 +      struct be_dma_mem cmd;
 +      int status;
 +
 +      memset(&cmd, 0, sizeof(struct be_dma_mem));
 +      if (!lancer_chip(adapter))
 +              cmd.size = sizeof(struct be_cmd_resp_get_profile_config_v1);
 +      else
 +              cmd.size = sizeof(struct be_cmd_resp_get_profile_config);
 +      cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
 +                                    &cmd.dma);
 +      if (!cmd.va) {
 +              dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
 +              return -ENOMEM;
 +      }
 +
 +      if (!mccq->created)
 +              status = be_cmd_get_profile_config_mbox(adapter, domain, &cmd);
 +      else
 +              status = be_cmd_get_profile_config_mccq(adapter, domain, &cmd);
        if (!status) {
                struct be_cmd_resp_get_profile_config *resp = cmd.va;
                u32 desc_count = le32_to_cpu(resp->desc_count);
                        status = -EINVAL;
                        goto err;
                }
 -              *cap_flags = le32_to_cpu(desc->cap_flags);
 +              if (cap_flags)
 +                      *cap_flags = le32_to_cpu(desc->cap_flags);
 +              if (txq_count)
 +                      *txq_count = le32_to_cpu(desc->txq_count);
        }
  err:
 -      spin_unlock_bh(&adapter->mcc_lock);
 -      pci_free_consistent(adapter->pdev, cmd.size,
 -                          cmd.va, cmd.dma);
 +      if (cmd.va)
 +              pci_free_consistent(adapter->pdev, cmd.size,
 +                                  cmd.va, cmd.dma);
        return status;
  }
  
@@@ -3155,7 -3143,7 +3193,7 @@@ int be_cmd_set_profile_config(struct be
        req->hdr.domain = domain;
        req->desc_count = cpu_to_le32(1);
  
 -      req->nic_desc.desc_type = NIC_RESOURCE_DESC_TYPE_ID;
 +      req->nic_desc.desc_type = NIC_RESOURCE_DESC_TYPE_V0;
        req->nic_desc.desc_len = RESOURCE_DESC_SIZE;
        req->nic_desc.flags = (1 << QUN) | (1 << IMM) | (1 << NOSV);
        req->nic_desc.pf_num = adapter->pf_number;
        return status;
  }
  
 +int be_cmd_intr_set(struct be_adapter *adapter, bool intr_enable)
 +{
 +      struct be_mcc_wrb *wrb;
 +      struct be_cmd_req_intr_set *req;
 +      int status;
 +
 +      if (mutex_lock_interruptible(&adapter->mbox_lock))
 +              return -1;
 +
 +      wrb = wrb_from_mbox(adapter);
 +
 +      req = embedded_payload(wrb);
 +
 +      be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
 +                             OPCODE_COMMON_SET_INTERRUPT_ENABLE, sizeof(*req),
 +                             wrb, NULL);
 +
 +      req->intr_enabled = intr_enable;
 +
 +      status = be_mbox_notify_wait(adapter);
 +
 +      mutex_unlock(&adapter->mbox_lock);
 +      return status;
 +}
 +
  int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload,
                        int wrb_payload_size, u16 *cmd_status, u16 *ext_status)
  {
index 1b01e9b32794edb5d3f3df76cdab40818daf6160,07fd9277d3cb354d580ddc034cb6b4ca94b3464e..a855668e0cc5dc2d9468c2c83907877e5795a5b6
@@@ -1,5 -1,5 +1,5 @@@
  /*
 - * Copyright (C) 2005 - 2011 Emulex
 + * Copyright (C) 2005 - 2013 Emulex
   * All rights reserved.
   *
   * This program is free software; you can redistribute it and/or
@@@ -84,6 -84,9 +84,9 @@@ struct be_mcc_compl 
  #define ASYNC_EVENT_QOS_SPEED         0x1
  #define ASYNC_EVENT_COS_PRIORITY      0x2
  #define ASYNC_EVENT_PVID_STATE                0x3
+ #define ASYNC_EVENT_CODE_QNQ          0x6
+ #define ASYNC_DEBUG_EVENT_TYPE_QNQ    1
+ 
  struct be_async_event_trailer {
        u32 code;
  };
@@@ -144,6 -147,16 +147,16 @@@ struct be_async_event_grp5_pvid_state 
        struct be_async_event_trailer trailer;
  } __packed;
  
+ /* async event indicating outer VLAN tag in QnQ */
+ struct be_async_event_qnq {
+       u8 valid;       /* Indicates if outer VLAN is valid */
+       u8 rsvd0;
+       u16 vlan_tag;
+       u32 event_tag;
+       u8 rsvd1[4];
+       struct be_async_event_trailer trailer;
+ } __packed;
+ 
  struct be_mcc_mailbox {
        struct be_mcc_wrb wrb;
        struct be_mcc_compl compl;
  #define OPCODE_COMMON_GET_BEACON_STATE                        70
  #define OPCODE_COMMON_READ_TRANSRECV_DATA             73
  #define OPCODE_COMMON_GET_PORT_NAME                   77
 +#define OPCODE_COMMON_SET_INTERRUPT_ENABLE            89
  #define OPCODE_COMMON_GET_PHY_DETAILS                 102
  #define OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP         103
  #define OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES  121
@@@ -474,27 -486,46 +487,27 @@@ struct be_cmd_resp_mcc_create 
  #define BE_ETH_TX_RING_TYPE_STANDARD          2
  #define BE_ULP1_NUM                           1
  
 -/* Pseudo amap definition in which each bit of the actual structure is defined
 - * as a byte: used to calculate offset/shift/mask of each field */
 -struct amap_tx_context {
 -      u8 if_id[16];           /* dword 0 */
 -      u8 tx_ring_size[4];     /* dword 0 */
 -      u8 rsvd1[26];           /* dword 0 */
 -      u8 pci_func_id[8];      /* dword 1 */
 -      u8 rsvd2[9];            /* dword 1 */
 -      u8 ctx_valid;           /* dword 1 */
 -      u8 cq_id_send[16];      /* dword 2 */
 -      u8 rsvd3[16];           /* dword 2 */
 -      u8 rsvd4[32];           /* dword 3 */
 -      u8 rsvd5[32];           /* dword 4 */
 -      u8 rsvd6[32];           /* dword 5 */
 -      u8 rsvd7[32];           /* dword 6 */
 -      u8 rsvd8[32];           /* dword 7 */
 -      u8 rsvd9[32];           /* dword 8 */
 -      u8 rsvd10[32];          /* dword 9 */
 -      u8 rsvd11[32];          /* dword 10 */
 -      u8 rsvd12[32];          /* dword 11 */
 -      u8 rsvd13[32];          /* dword 12 */
 -      u8 rsvd14[32];          /* dword 13 */
 -      u8 rsvd15[32];          /* dword 14 */
 -      u8 rsvd16[32];          /* dword 15 */
 -} __packed;
 -
  struct be_cmd_req_eth_tx_create {
        struct be_cmd_req_hdr hdr;
        u8 num_pages;
        u8 ulp_num;
 -      u8 type;
 -      u8 bound_port;
 -      u8 context[sizeof(struct amap_tx_context) / 8];
 +      u16 type;
 +      u16 if_id;
 +      u8 queue_size;
 +      u8 rsvd0;
 +      u32 rsvd1;
 +      u16 cq_id;
 +      u16 rsvd2;
 +      u32 rsvd3[13];
        struct phys_addr pages[8];
  } __packed;
  
  struct be_cmd_resp_eth_tx_create {
        struct be_cmd_resp_hdr hdr;
        u16 cid;
 -      u16 rsvd0;
 +      u16 rid;
 +      u32 db_offset;
 +      u32 rsvd0[4];
  } __packed;
  
  /******************** Create RxQ ***************************/
@@@ -590,8 -621,8 +603,8 @@@ struct be_port_rxf_stats_v0 
        u32 rx_in_range_errors; /* dword 10*/
        u32 rx_out_range_errors;        /* dword 11*/
        u32 rx_frame_too_long;  /* dword 12*/
 -      u32 rx_address_mismatch_drops;  /* dword 13*/
 -      u32 rx_vlan_mismatch_drops;     /* dword 14*/
 +      u32 rx_address_filtered;        /* dword 13*/
 +      u32 rx_vlan_filtered;   /* dword 14*/
        u32 rx_dropped_too_small;       /* dword 15*/
        u32 rx_dropped_too_short;       /* dword 16*/
        u32 rx_dropped_header_too_small;        /* dword 17*/
@@@ -797,8 -828,8 +810,8 @@@ struct lancer_pport_stats 
        u32 rx_control_frames_unknown_opcode_hi;
        u32 rx_in_range_errors;
        u32 rx_out_of_range_errors;
 -      u32 rx_address_mismatch_drops;
 -      u32 rx_vlan_mismatch_drops;
 +      u32 rx_address_filtered;
 +      u32 rx_vlan_filtered;
        u32 rx_dropped_too_small;
        u32 rx_dropped_too_short;
        u32 rx_dropped_header_too_small;
@@@ -1048,6 -1079,7 +1061,6 @@@ struct be_cmd_resp_modify_eq_delay 
  } __packed;
  
  /******************** Get FW Config *******************/
 -#define BE_FUNCTION_CAPS_RSS                  0x2
  /* The HW can come up in either of the following multi-channel modes
   * based on the skew/IPL.
   */
@@@ -1090,9 -1122,6 +1103,9 @@@ struct be_cmd_resp_query_fw_cfg 
  #define RSS_ENABLE_UDP_IPV4                   0x10
  #define RSS_ENABLE_UDP_IPV6                   0x20
  
 +#define L3_RSS_FLAGS                          (RXH_IP_DST | RXH_IP_SRC)
 +#define L4_RSS_FLAGS                          (RXH_L4_B_0_1 | RXH_L4_B_2_3)
 +
  struct be_cmd_req_rss_config {
        struct be_cmd_req_hdr hdr;
        u32 if_id;
@@@ -1576,7 -1605,7 +1589,7 @@@ struct be_port_rxf_stats_v1 
        u32 rx_in_range_errors;
        u32 rx_out_range_errors;
        u32 rx_frame_too_long;
 -      u32 rx_address_mismatch_drops;
 +      u32 rx_address_filtered;
        u32 rx_dropped_too_small;
        u32 rx_dropped_too_short;
        u32 rx_dropped_header_too_small;
@@@ -1690,11 -1719,9 +1703,11 @@@ struct be_cmd_req_set_ext_fat_caps 
        struct be_fat_conf_params set_params;
  };
  
 -#define RESOURCE_DESC_SIZE                    72
 -#define NIC_RESOURCE_DESC_TYPE_ID             0x41
 +#define RESOURCE_DESC_SIZE                    88
 +#define NIC_RESOURCE_DESC_TYPE_V0             0x41
 +#define NIC_RESOURCE_DESC_TYPE_V1             0x51
  #define MAX_RESOURCE_DESC                     4
 +#define MAX_RESOURCE_DESC_V1                  32
  
  /* QOS unit number */
  #define QUN                                   4
@@@ -1741,7 -1768,7 +1754,7 @@@ struct be_cmd_req_get_func_config 
  };
  
  struct be_cmd_resp_get_func_config {
 -      struct be_cmd_req_hdr hdr;
 +      struct be_cmd_resp_hdr hdr;
        u32 desc_count;
        u8 func_param[MAX_RESOURCE_DESC * RESOURCE_DESC_SIZE];
  };
@@@ -1760,12 -1787,6 +1773,12 @@@ struct be_cmd_resp_get_profile_config 
        u8 func_param[MAX_RESOURCE_DESC * RESOURCE_DESC_SIZE];
  };
  
 +struct be_cmd_resp_get_profile_config_v1 {
 +      struct be_cmd_req_hdr hdr;
 +      u32 desc_count;
 +      u8 func_param[MAX_RESOURCE_DESC_V1 * RESOURCE_DESC_SIZE];
 +};
 +
  struct be_cmd_req_set_profile_config {
        struct be_cmd_req_hdr hdr;
        u32 rsvd;
@@@ -1783,12 -1804,6 +1796,12 @@@ struct be_cmd_enable_disable_vf 
        u8 rsvd[3];
  };
  
 +struct be_cmd_req_intr_set {
 +      struct be_cmd_req_hdr hdr;
 +      u8 intr_enabled;
 +      u8 rsvd[3];
 +};
 +
  static inline bool check_privilege(struct be_adapter *adapter, u32 flags)
  {
        return flags & adapter->cmd_privileges ? true : false;
@@@ -1832,7 -1847,8 +1845,7 @@@ extern int be_cmd_mccq_create(struct be
                        struct be_queue_info *mccq,
                        struct be_queue_info *cq);
  extern int be_cmd_txq_create(struct be_adapter *adapter,
 -                      struct be_queue_info *txq,
 -                      struct be_queue_info *cq);
 +                      struct be_tx_obj *txo);
  extern int be_cmd_rxq_create(struct be_adapter *adapter,
                        struct be_queue_info *rxq, u16 cq_id,
                        u16 frag_size, u32 if_id, u32 rss, u8 *rss_id);
@@@ -1859,11 -1875,11 +1872,11 @@@ extern int be_cmd_set_flow_control(stru
                        u32 tx_fc, u32 rx_fc);
  extern int be_cmd_get_flow_control(struct be_adapter *adapter,
                        u32 *tx_fc, u32 *rx_fc);
 -extern int be_cmd_query_fw_cfg(struct be_adapter *adapter,
 -                      u32 *port_num, u32 *function_mode, u32 *function_caps);
 +extern int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num,
 +                      u32 *function_mode, u32 *function_caps, u16 *asic_rev);
  extern int be_cmd_reset_function(struct be_adapter *adapter);
  extern int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable,
 -                      u16 table_size);
 +                           u32 rss_hash_opts, u16 table_size);
  extern int be_process_mcc(struct be_adapter *adapter);
  extern int be_cmd_set_beacon_state(struct be_adapter *adapter,
                        u8 port_num, u8 beacon, u8 status, u8 state);
@@@ -1928,11 -1944,10 +1941,11 @@@ extern int lancer_test_and_set_rdy_stat
  extern int be_cmd_query_port_name(struct be_adapter *adapter, u8 *port_name);
  extern int be_cmd_get_func_config(struct be_adapter *adapter);
  extern int be_cmd_get_profile_config(struct be_adapter *adapter, u32 *cap_flags,
 -                                   u8 domain);
 +                                   u16 *txq_count, u8 domain);
  
  extern int be_cmd_set_profile_config(struct be_adapter *adapter, u32 bps,
                                     u8 domain);
  extern int be_cmd_get_if_id(struct be_adapter *adapter,
                            struct be_vf_cfg *vf_cfg, int vf_num);
  extern int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain);
 +extern int be_cmd_intr_set(struct be_adapter *adapter, bool intr_enable);
index ec3050b3133e3946ea3d9062d8fdb611a4b13e79,1b7233c11af3e0eebf47f4e6a11c786998b16d97..5733cde88e2cccea3ea26fcf1a1a76ade95322ec
@@@ -1,5 -1,5 +1,5 @@@
  /*
 - * Copyright (C) 2005 - 2011 Emulex
 + * Copyright (C) 2005 - 2013 Emulex
   * All rights reserved.
   *
   * This program is free software; you can redistribute it and/or
@@@ -54,7 -54,7 +54,7 @@@ static const struct be_ethtool_stat et_
        /* Received packets dropped when they don't pass the unicast or
         * multicast address filtering.
         */
 -      {DRVSTAT_INFO(rx_address_mismatch_drops)},
 +      {DRVSTAT_INFO(rx_address_filtered)},
        /* Received packets dropped when IP packet length field is less than
         * the IP header length field.
         */
@@@ -680,7 -680,8 +680,8 @@@ be_get_wol(struct net_device *netdev, s
  
        if (be_is_wol_supported(adapter)) {
                wol->supported |= WAKE_MAGIC;
-               wol->wolopts |= WAKE_MAGIC;
+               if (adapter->wol)
+                       wol->wolopts |= WAKE_MAGIC;
        } else
                wol->wolopts = 0;
        memset(&wol->sopass, 0, sizeof(wol->sopass));
@@@ -719,8 -720,10 +720,8 @@@ be_test_ddr_dma(struct be_adapter *adap
        ddrdma_cmd.size = sizeof(struct be_cmd_req_ddrdma_test);
        ddrdma_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, ddrdma_cmd.size,
                                           &ddrdma_cmd.dma, GFP_KERNEL);
 -      if (!ddrdma_cmd.va) {
 -              dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
 +      if (!ddrdma_cmd.va)
                return -ENOMEM;
 -      }
  
        for (i = 0; i < 2; i++) {
                ret = be_cmd_ddr_dma_test(adapter, pattern[i],
@@@ -755,12 -758,6 +756,12 @@@ be_self_test(struct net_device *netdev
        int status;
        u8 link_status = 0;
  
 +      if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) {
 +              dev_err(&adapter->pdev->dev, "Self test not supported\n");
 +              test->flags |= ETH_TEST_FL_FAILED;
 +              return;
 +      }
 +
        memset(data, 0, sizeof(u64) * ETHTOOL_TESTS_NUM);
  
        if (test->flags & ETH_TEST_FL_OFFLINE) {
@@@ -849,8 -846,11 +850,8 @@@ be_read_eeprom(struct net_device *netde
        eeprom_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, eeprom_cmd.size,
                                           &eeprom_cmd.dma, GFP_KERNEL);
  
 -      if (!eeprom_cmd.va) {
 -              dev_err(&adapter->pdev->dev,
 -                      "Memory allocation failure. Could not read eeprom\n");
 +      if (!eeprom_cmd.va)
                return -ENOMEM;
 -      }
  
        status = be_cmd_get_seeprom_data(adapter, &eeprom_cmd);
  
@@@ -940,159 -940,6 +941,159 @@@ static void be_set_msg_level(struct net
        return;
  }
  
 +static u64 be_get_rss_hash_opts(struct be_adapter *adapter, u64 flow_type)
 +{
 +      u64 data = 0;
 +
 +      switch (flow_type) {
 +      case TCP_V4_FLOW:
 +              if (adapter->rss_flags & RSS_ENABLE_IPV4)
 +                      data |= RXH_IP_DST | RXH_IP_SRC;
 +              if (adapter->rss_flags & RSS_ENABLE_TCP_IPV4)
 +                      data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
 +              break;
 +      case UDP_V4_FLOW:
 +              if (adapter->rss_flags & RSS_ENABLE_IPV4)
 +                      data |= RXH_IP_DST | RXH_IP_SRC;
 +              if (adapter->rss_flags & RSS_ENABLE_UDP_IPV4)
 +                      data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
 +              break;
 +      case TCP_V6_FLOW:
 +              if (adapter->rss_flags & RSS_ENABLE_IPV6)
 +                      data |= RXH_IP_DST | RXH_IP_SRC;
 +              if (adapter->rss_flags & RSS_ENABLE_TCP_IPV6)
 +                      data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
 +              break;
 +      case UDP_V6_FLOW:
 +              if (adapter->rss_flags & RSS_ENABLE_IPV6)
 +                      data |= RXH_IP_DST | RXH_IP_SRC;
 +              if (adapter->rss_flags & RSS_ENABLE_UDP_IPV6)
 +                      data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
 +              break;
 +      }
 +
 +      return data;
 +}
 +
 +static int be_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
 +                    u32 *rule_locs)
 +{
 +      struct be_adapter *adapter = netdev_priv(netdev);
 +
 +      if (!be_multi_rxq(adapter)) {
 +              dev_info(&adapter->pdev->dev,
 +                       "ethtool::get_rxnfc: RX flow hashing is disabled\n");
 +              return -EINVAL;
 +      }
 +
 +      switch (cmd->cmd) {
 +      case ETHTOOL_GRXFH:
 +              cmd->data = be_get_rss_hash_opts(adapter, cmd->flow_type);
 +              break;
 +      case ETHTOOL_GRXRINGS:
 +              cmd->data = adapter->num_rx_qs - 1;
 +              break;
 +      default:
 +              return -EINVAL;
 +      }
 +
 +      return 0;
 +}
 +
 +static int be_set_rss_hash_opts(struct be_adapter *adapter,
 +                              struct ethtool_rxnfc *cmd)
 +{
 +      struct be_rx_obj *rxo;
 +      int status = 0, i, j;
 +      u8 rsstable[128];
 +      u32 rss_flags = adapter->rss_flags;
 +
 +      if (cmd->data != L3_RSS_FLAGS &&
 +          cmd->data != (L3_RSS_FLAGS | L4_RSS_FLAGS))
 +              return -EINVAL;
 +
 +      switch (cmd->flow_type) {
 +      case TCP_V4_FLOW:
 +              if (cmd->data == L3_RSS_FLAGS)
 +                      rss_flags &= ~RSS_ENABLE_TCP_IPV4;
 +              else if (cmd->data == (L3_RSS_FLAGS | L4_RSS_FLAGS))
 +                      rss_flags |= RSS_ENABLE_IPV4 |
 +                                      RSS_ENABLE_TCP_IPV4;
 +              break;
 +      case TCP_V6_FLOW:
 +              if (cmd->data == L3_RSS_FLAGS)
 +                      rss_flags &= ~RSS_ENABLE_TCP_IPV6;
 +              else if (cmd->data == (L3_RSS_FLAGS | L4_RSS_FLAGS))
 +                      rss_flags |= RSS_ENABLE_IPV6 |
 +                                      RSS_ENABLE_TCP_IPV6;
 +              break;
 +      case UDP_V4_FLOW:
 +              if ((cmd->data == (L3_RSS_FLAGS | L4_RSS_FLAGS)) &&
 +                  BEx_chip(adapter))
 +                      return -EINVAL;
 +
 +              if (cmd->data == L3_RSS_FLAGS)
 +                      rss_flags &= ~RSS_ENABLE_UDP_IPV4;
 +              else if (cmd->data == (L3_RSS_FLAGS | L4_RSS_FLAGS))
 +                      rss_flags |= RSS_ENABLE_IPV4 |
 +                                      RSS_ENABLE_UDP_IPV4;
 +              break;
 +      case UDP_V6_FLOW:
 +              if ((cmd->data == (L3_RSS_FLAGS | L4_RSS_FLAGS)) &&
 +                  BEx_chip(adapter))
 +                      return -EINVAL;
 +
 +              if (cmd->data == L3_RSS_FLAGS)
 +                      rss_flags &= ~RSS_ENABLE_UDP_IPV6;
 +              else if (cmd->data == (L3_RSS_FLAGS | L4_RSS_FLAGS))
 +                      rss_flags |= RSS_ENABLE_IPV6 |
 +                                      RSS_ENABLE_UDP_IPV6;
 +              break;
 +      default:
 +              return -EINVAL;
 +      }
 +
 +      if (rss_flags == adapter->rss_flags)
 +              return status;
 +
 +      if (be_multi_rxq(adapter)) {
 +              for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
 +                      for_all_rss_queues(adapter, rxo, i) {
 +                              if ((j + i) >= 128)
 +                                      break;
 +                              rsstable[j + i] = rxo->rss_id;
 +                      }
 +              }
 +      }
 +      status = be_cmd_rss_config(adapter, rsstable, rss_flags, 128);
 +      if (!status)
 +              adapter->rss_flags = rss_flags;
 +
 +      return status;
 +}
 +
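
The 128-entry RSS table fill from be_set_rss_hash_opts() above, extracted into a runnable sketch: with num_rx_qs - 1 RSS rings, the ring IDs simply repeat round-robin across all 128 hash buckets (the queue IDs here are made up):

#include <stdio.h>

int main(void)
{
	/* assume 5 RX queues: queue 0 is the default, 1..4 do RSS */
	int num_rx_qs = 5;
	unsigned char rss_id[4] = { 11, 12, 13, 14 };	/* illustrative IDs */
	unsigned char rsstable[128];

	for (int j = 0; j < 128; j += num_rx_qs - 1)
		for (int i = 0; i < num_rx_qs - 1; i++) {
			if (j + i >= 128)
				break;
			rsstable[j + i] = rss_id[i];
		}

	printf("%d %d %d %d %d ...\n", rsstable[0], rsstable[1],
	       rsstable[2], rsstable[3], rsstable[4]);	/* 11 12 13 14 11 ... */
	return 0;
}
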
 +static int be_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
 +{
 +      struct be_adapter *adapter = netdev_priv(netdev);
 +      int status = 0;
 +
 +      if (!be_multi_rxq(adapter)) {
 +              dev_err(&adapter->pdev->dev,
 +                      "ethtool::set_rxnfc: RX flow hashing is disabled\n");
 +              return -EINVAL;
 +      }
 +
 +      switch (cmd->cmd) {
 +      case ETHTOOL_SRXFH:
 +              status = be_set_rss_hash_opts(adapter, cmd);
 +              break;
 +      default:
 +              return -EINVAL;
 +      }
 +
 +      return status;
 +}
 +
  const struct ethtool_ops be_ethtool_ops = {
        .get_settings = be_get_settings,
        .get_drvinfo = be_get_drvinfo,
        .get_regs = be_get_regs,
        .flash_device = be_do_flash,
        .self_test = be_self_test,
 +      .get_rxnfc = be_get_rxnfc,
 +      .set_rxnfc = be_set_rxnfc,
  };
index 1c734915933f4c77c8b76eedaa800bfde2bd6e6b,1232e9164bf1b83f9e7fe641e5af0e6f4e370421..4babc8a4a54396b9dfbb64dd0292f5ade612021c
@@@ -1,5 -1,5 +1,5 @@@
  /*
 - * Copyright (C) 2005 - 2011 Emulex
 + * Copyright (C) 2005 - 2013 Emulex
   * All rights reserved.
   *
   * This program is free software; you can redistribute it and/or
@@@ -146,16 -146,20 +146,16 @@@ static int be_queue_alloc(struct be_ada
        q->entry_size = entry_size;
        mem->size = len * entry_size;
        mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
 -                                   GFP_KERNEL);
 +                                   GFP_KERNEL | __GFP_ZERO);
        if (!mem->va)
                return -ENOMEM;
 -      memset(mem->va, 0, mem->size);
        return 0;
  }
  
 -static void be_intr_set(struct be_adapter *adapter, bool enable)
 +static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
  {
        u32 reg, enabled;
  
 -      if (adapter->eeh_error)
 -              return;
 -
        pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
                                &reg);
        enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
                        PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
  }
  
 +static void be_intr_set(struct be_adapter *adapter, bool enable)
 +{
 +      int status = 0;
 +
 +      /* On lancer, interrupts can't be controlled via this register */
 +      if (lancer_chip(adapter))
 +              return;
 +
 +      if (adapter->eeh_error)
 +              return;
 +
 +      status = be_cmd_intr_set(adapter, enable);
 +      if (status)
 +              be_reg_intr_set(adapter, enable);
 +}
 +
  static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
  {
        u32 val = 0;
        iowrite32(val, adapter->db + DB_RQ_OFFSET);
  }
  
 -static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
 +static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
 +                        u16 posted)
  {
        u32 val = 0;
 -      val |= qid & DB_TXULP_RING_ID_MASK;
 +      val |= txo->q.id & DB_TXULP_RING_ID_MASK;
        val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
  
        wmb();
 -      iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
 +      iowrite32(val, adapter->db + txo->db_offset);
  }
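
be_txq_notify() above packs the ring id and the number of posted entries into a single doorbell word. A sketch with assumed mask/shift values (the real DB_TXULP_* constants live in the driver's hardware header):

#include <stdio.h>
#include <stdint.h>

#define RING_ID_MASK     0x3FF		/* assumed */
#define NUM_POSTED_MASK  0x3FFF		/* assumed */
#define NUM_POSTED_SHIFT 16		/* assumed */

int main(void)
{
	uint16_t qid = 5, posted = 2;
	uint32_t val = (qid & RING_ID_MASK) |
		       ((uint32_t)(posted & NUM_POSTED_MASK) << NUM_POSTED_SHIFT);
	printf("doorbell word = 0x%08x\n", val);	/* 0x00020005 */
	return 0;
}
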
  
  static void be_eq_notify(struct be_adapter *adapter, u16 qid,
@@@ -353,9 -340,9 +353,9 @@@ static void populate_be_v0_stats(struc
        drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
        drvs->rx_dropped_header_too_small =
                port_stats->rx_dropped_header_too_small;
 -      drvs->rx_address_mismatch_drops =
 -                                      port_stats->rx_address_mismatch_drops +
 -                                      port_stats->rx_vlan_mismatch_drops;
 +      drvs->rx_address_filtered =
 +                                      port_stats->rx_address_filtered +
 +                                      port_stats->rx_vlan_filtered;
        drvs->rx_alignment_symbol_errors =
                port_stats->rx_alignment_symbol_errors;
  
@@@ -404,7 -391,7 +404,7 @@@ static void populate_be_v1_stats(struc
                port_stats->rx_dropped_header_too_small;
        drvs->rx_input_fifo_overflow_drop =
                port_stats->rx_input_fifo_overflow_drop;
 -      drvs->rx_address_mismatch_drops = port_stats->rx_address_mismatch_drops;
 +      drvs->rx_address_filtered = port_stats->rx_address_filtered;
        drvs->rx_alignment_symbol_errors =
                port_stats->rx_alignment_symbol_errors;
        drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
@@@ -445,9 -432,9 +445,9 @@@ static void populate_lancer_stats(struc
        drvs->rx_dropped_header_too_small =
                                pport_stats->rx_dropped_header_too_small;
        drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
 -      drvs->rx_address_mismatch_drops =
 -                                      pport_stats->rx_address_mismatch_drops +
 -                                      pport_stats->rx_vlan_mismatch_drops;
 +      drvs->rx_address_filtered =
 +                                      pport_stats->rx_address_filtered +
 +                                      pport_stats->rx_vlan_filtered;
        drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
        drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
        drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
@@@ -639,13 -626,8 +639,8 @@@ static inline u16 be_get_tx_vlan_tag(st
        return vlan_tag;
  }
  
- static int be_vlan_tag_chk(struct be_adapter *adapter, struct sk_buff *skb)
- {
-       return vlan_tx_tag_present(skb) || adapter->pvid;
- }
  static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
-               struct sk_buff *skb, u32 wrb_cnt, u32 len)
+               struct sk_buff *skb, u32 wrb_cnt, u32 len, bool skip_hw_vlan)
  {
        u16 vlan_tag;
  
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
        }
  
+       /* To skip HW VLAN tagging: evt = 1, compl = 0 */
+       AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, !skip_hw_vlan);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
-       AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
  }
@@@ -696,7 -679,8 +692,8 @@@ static void unmap_tx_frag(struct devic
  }
  
  static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
-               struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
+               struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
+               bool skip_hw_vlan)
  {
        dma_addr_t busaddr;
        int i, copied = 0;
                queue_head_inc(txq);
        }
  
-       wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
+       wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied, skip_hw_vlan);
        be_dws_cpu_to_le(hdr, sizeof(*hdr));
  
        return copied;
@@@ -762,7 -746,8 +759,8 @@@ dma_err
  }
  
  static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
-                                            struct sk_buff *skb)
+                                            struct sk_buff *skb,
+                                            bool *skip_hw_vlan)
  {
        u16 vlan_tag = 0;
  
  
        if (vlan_tx_tag_present(skb)) {
                vlan_tag = be_get_tx_vlan_tag(adapter, skb);
 -              skb = __vlan_put_tag(skb, vlan_tag);
 +              skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
                if (skb)
                        skb->vlan_tci = 0;
        }
  
 -              skb = __vlan_put_tag(skb, vlan_tag);
+       if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
+               if (!vlan_tag)
+                       vlan_tag = adapter->pvid;
+               if (skip_hw_vlan)
+                       *skip_hw_vlan = true;
+       }
+       if (vlan_tag) {
 -              skb = __vlan_put_tag(skb, vlan_tag);
++              skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
+               if (unlikely(!skb))
+                       return skb;
+               skb->vlan_tci = 0;
+       }
+       /* Insert the outer VLAN, if any */
+       if (adapter->qnq_vid) {
+               vlan_tag = adapter->qnq_vid;
++              skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
+               if (unlikely(!skb))
+                       return skb;
+               if (skip_hw_vlan)
+                       *skip_hw_vlan = true;
+       }
        return skb;
  }
  
+ static bool be_ipv6_exthdr_check(struct sk_buff *skb)
+ {
+       struct ethhdr *eh = (struct ethhdr *)skb->data;
+       u16 offset = ETH_HLEN;
+       if (eh->h_proto == htons(ETH_P_IPV6)) {
+               struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);
+               offset += sizeof(struct ipv6hdr);
+               if (ip6h->nexthdr != NEXTHDR_TCP &&
+                   ip6h->nexthdr != NEXTHDR_UDP) {
+                       struct ipv6_opt_hdr *ehdr =
+                               (struct ipv6_opt_hdr *) (skb->data + offset);
+                       /* offending pkt: 2nd byte following IPv6 hdr is 0xff */
+                       if (ehdr->hdrlen == 0xff)
+                               return true;
+               }
+       }
+       return false;
+ }
+ 
+ static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
+ {
+       return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
+ }
+ 
+ static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
+ {
+       return BE3_chip(adapter) &&
+               be_ipv6_exthdr_check(skb);
+ }
+ 
  static netdev_tx_t be_xmit(struct sk_buff *skb,
                        struct net_device *netdev)
  {
        u32 wrb_cnt = 0, copied = 0;
        u32 start = txq->head, eth_hdr_len;
        bool dummy_wrb, stopped = false;
+       bool skip_hw_vlan = false;
+       struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
  
        eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
                VLAN_ETH_HLEN : ETH_HLEN;
  
-       /* HW has a bug which considers padding bytes as legal
-        * and modifies the IPv4 hdr's 'tot_len' field
+       /* For padded packets, BE HW modifies tot_len field in IP header
+        * incorrectly when VLAN tag is inserted by HW.
         */
-       if (skb->len <= 60 && be_vlan_tag_chk(adapter, skb) &&
-                       is_ipv4_pkt(skb)) {
+       if (skb->len <= 60 && vlan_tx_tag_present(skb) && is_ipv4_pkt(skb)) {
                ip = (struct iphdr *)ip_hdr(skb);
                pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
        }
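
The replacement comment describes the same workaround more precisely: software padding is trimmed off short IPv4 frames so the hardware VLAN-tagging path never sees pad bytes it would mis-account in tot_len. The length computation alone, as a userspace sketch mirroring pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len)):

#include <stdint.h>
#include <stddef.h>

/* Length an IPv4 frame should be trimmed to: the link header plus the
 * IP header's tot_len field (bytes 2-3 of the IP header, big-endian),
 * which excludes any software padding appended after the datagram. */
static size_t ipv4_trimmed_len(const uint8_t *frame, size_t eth_hdr_len)
{
	const uint8_t *ip = frame + eth_hdr_len;

	return eth_hdr_len + (size_t)((ip[2] << 8) | ip[3]);
}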
  
+       /* If vlan tag is already inlined in the packet, skip HW VLAN
+        * tagging in UMC mode
+        */
+       if ((adapter->function_mode & UMC_ENABLED) &&
+           veh->h_vlan_proto == htons(ETH_P_8021Q))
+                       skip_hw_vlan = true;
        /* HW has a bug wherein it will calculate CSUM for VLAN
         * pkts even though it is disabled.
         * Manually insert VLAN in pkt.
         */
        if (skb->ip_summed != CHECKSUM_PARTIAL &&
-                       be_vlan_tag_chk(adapter, skb)) {
-               skb = be_insert_vlan_in_pkt(adapter, skb);
+                       vlan_tx_tag_present(skb)) {
+               skb = be_insert_vlan_in_pkt(adapter, skb, &skip_hw_vlan);
+               if (unlikely(!skb))
+                       goto tx_drop;
+       }
+       /* HW may lockup when VLAN HW tagging is requested on
+        * certain ipv6 packets. Drop such pkts if the HW workaround to
+        * skip HW tagging is not enabled by FW.
+        */
+       if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
+                    (adapter->pvid || adapter->qnq_vid) &&
+                    !qnq_async_evt_rcvd(adapter)))
+               goto tx_drop;
+       /* Manual VLAN tag insertion to prevent:
+        * ASIC lockup when the ASIC inserts VLAN tag into
+        * certain ipv6 packets. Insert VLAN tags in driver,
+        * and set event, completion, vlan bits accordingly
+        * in the Tx WRB.
+        */
+       if (be_ipv6_tx_stall_chk(adapter, skb) &&
+           be_vlan_tag_tx_chk(adapter, skb)) {
+               skb = be_insert_vlan_in_pkt(adapter, skb, &skip_hw_vlan);
                if (unlikely(!skb))
                        goto tx_drop;
        }
  
        wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
  
-       copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
+       copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb,
+                             skip_hw_vlan);
        if (copied) {
                int gso_segs = skb_shinfo(skb)->gso_segs;
  
                        stopped = true;
                }
  
 -              be_txq_notify(adapter, txq->id, wrb_cnt);
 +              be_txq_notify(adapter, txo, wrb_cnt);
  
                be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
        } else {
@@@ -904,7 -978,7 +991,7 @@@ set_vlan_promisc
        return status;
  }
  
 -static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
 +static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
  {
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;
@@@ -930,7 -1004,7 +1017,7 @@@ ret
        return status;
  }
  
 -static int be_vlan_rem_vid(struct net_device *netdev, u16 vid)
 +static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
  {
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;
@@@ -1385,7 -1459,7 +1472,7 @@@ static void be_rx_compl_process(struct 
  
  
        if (rxcp->vlanf)
 -              __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);
 +              __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
  
        netif_receive_skb(skb);
  }
@@@ -1441,7 -1515,7 +1528,7 @@@ void be_rx_compl_process_gro(struct be_
                skb->rxhash = rxcp->rss_hash;
  
        if (rxcp->vlanf)
 -              __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);
 +              __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
  
        napi_gro_frags(napi);
  }
@@@ -1971,7 -2045,7 +2058,7 @@@ static int be_tx_qs_create(struct be_ad
                if (status)
                        return status;
  
 -              status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);
 +              status = be_cmd_txq_create(adapter, txo);
                if (status)
                        return status;
        }
@@@ -2449,6 -2523,9 +2536,6 @@@ static int be_close(struct net_device *
  
        be_roce_dev_close(adapter);
  
 -      if (!lancer_chip(adapter))
 -              be_intr_set(adapter, false);
 -
        for_all_evt_queues(adapter, eqo, i)
                napi_disable(&eqo->napi);
  
@@@ -2510,19 -2587,9 +2597,19 @@@ static int be_rx_qs_create(struct be_ad
                                rsstable[j + i] = rxo->rss_id;
                        }
                }
 -              rc = be_cmd_rss_config(adapter, rsstable, 128);
 -              if (rc)
 +              adapter->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
 +                                      RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
 +
 +              if (!BEx_chip(adapter))
 +                      adapter->rss_flags |= RSS_ENABLE_UDP_IPV4 |
 +                                              RSS_ENABLE_UDP_IPV6;
 +
 +              rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags,
 +                                     128);
 +              if (rc) {
 +                      adapter->rss_flags = 0;
                        return rc;
 +              }
        }
  
        /* First time posting */
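
The reworked branch programs RSS hash-type flags alongside a 128-entry indirection table. Conceptually the table just spreads the 128 hash buckets round-robin over the RX queues' RSS ids, roughly as in this hypothetical sketch:

#include <stdint.h>
#include <stddef.h>

#define RSS_TABLE_SIZE 128

/* Spread the 128 hash buckets round-robin across the RX queues' RSS
 * ids; nr_queues must be non-zero. */
static void fill_rss_table(uint8_t table[RSS_TABLE_SIZE],
			   const uint8_t *rss_ids, size_t nr_queues)
{
	size_t i;

	for (i = 0; i < RSS_TABLE_SIZE; i++)
		table[i] = rss_ids[i % nr_queues];
}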
@@@ -2546,6 -2613,9 +2633,6 @@@ static int be_open(struct net_device *n
  
        be_irq_register(adapter);
  
 -      if (!lancer_chip(adapter))
 -              be_intr_set(adapter, true);
 -
        for_all_rx_queues(adapter, rxo, i)
                be_cq_notify(adapter, rxo->cq.id, true, 0);
  
@@@ -2580,9 -2650,10 +2667,9 @@@ static int be_setup_wol(struct be_adapt
  
        cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
        cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
 -                                  GFP_KERNEL);
 +                                  GFP_KERNEL | __GFP_ZERO);
        if (cmd.va == NULL)
                return -1;
 -      memset(cmd.va, 0, cmd.size);
  
        if (enable) {
                status = pci_write_config_dword(adapter->pdev,
@@@ -2730,8 -2801,7 +2817,8 @@@ static int be_vfs_if_create(struct be_a
  
        for_all_vfs(adapter, vf_cfg, vf) {
                if (!BE3_chip(adapter))
 -                      be_cmd_get_profile_config(adapter, &cap_flags, vf + 1);
 +                      be_cmd_get_profile_config(adapter, &cap_flags,
 +                                                NULL, vf + 1);
  
                /* If a FW profile exists, then cap_flags are updated */
                en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
@@@ -2895,14 -2965,11 +2982,14 @@@ static void be_get_resources(struct be_
        u16 dev_num_vfs;
        int pos, status;
        bool profile_present = false;
 +      u16 txq_count = 0;
  
        if (!BEx_chip(adapter)) {
                status = be_cmd_get_func_config(adapter);
                if (!status)
                        profile_present = true;
 +      } else if (BE3_chip(adapter) && be_physfn(adapter)) {
 +              be_cmd_get_profile_config(adapter, NULL, &txq_count, 0);
        }
  
        if (profile_present) {
                        adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
  
                adapter->max_mcast_mac = BE_MAX_MC;
 -              adapter->max_tx_queues = MAX_TX_QS;
 +              adapter->max_tx_queues = txq_count ? txq_count : MAX_TX_QS;
 +              adapter->max_tx_queues = min_t(u16, adapter->max_tx_queues,
 +                                             MAX_TX_QS);
                adapter->max_rss_queues = (adapter->be3_native) ?
                                           BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
                adapter->max_event_queues = BE3_MAX_RSS_QS;
@@@ -2976,8 -3041,7 +3063,8 @@@ static int be_get_config(struct be_adap
  
        status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
                                     &adapter->function_mode,
 -                                   &adapter->function_caps);
 +                                   &adapter->function_caps,
 +                                   &adapter->asic_rev);
        if (status)
                goto err;
  
@@@ -3238,7 -3302,7 +3325,7 @@@ static int be_flash(struct be_adapter *
        return 0;
  }
  
 -/* For BE2 and BE3 */
 +/* For BE2, BE3 and BE3-R */
  static int be_flash_BEx(struct be_adapter *adapter,
                         const struct firmware *fw,
                         struct be_dma_mem *flash_cmd,
@@@ -3481,9 -3545,11 +3568,9 @@@ static int lancer_fw_download(struct be
        flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
                                + LANCER_FW_DOWNLOAD_CHUNK;
        flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
 -                                              &flash_cmd.dma, GFP_KERNEL);
 +                                        &flash_cmd.dma, GFP_KERNEL);
        if (!flash_cmd.va) {
                status = -ENOMEM;
 -              dev_err(&adapter->pdev->dev,
 -                      "Memory allocation failure while flashing\n");
                goto lancer_fw_exit;
        }
  
@@@ -3551,22 -3617,18 +3638,22 @@@ lancer_fw_exit
  
  #define UFI_TYPE2             2
  #define UFI_TYPE3             3
 +#define UFI_TYPE3R            10
  #define UFI_TYPE4             4
  static int be_get_ufi_type(struct be_adapter *adapter,
 -                         struct flash_file_hdr_g2 *fhdr)
 +                         struct flash_file_hdr_g3 *fhdr)
  {
        if (fhdr == NULL)
                goto be_get_ufi_exit;
  
        if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
                return UFI_TYPE4;
 -      else if (BE3_chip(adapter) && fhdr->build[0] == '3')
 -              return UFI_TYPE3;
 -      else if (BE2_chip(adapter) && fhdr->build[0] == '2')
 +      else if (BE3_chip(adapter) && fhdr->build[0] == '3') {
 +              if (fhdr->asic_type_rev == 0x10)
 +                      return UFI_TYPE3R;
 +              else
 +                      return UFI_TYPE3;
 +      } else if (BE2_chip(adapter) && fhdr->build[0] == '2')
                return UFI_TYPE2;
  
  be_get_ufi_exit:
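
be_get_ufi_type() now tells BE3-R images (asic_type_rev 0x10) apart from plain BE3 ones using the build digit in the UFI header plus the new asic_type_rev field. The selection logic reduces to a pure function, sketched here with an assumed chip encoding (the UFI_TYPE* values match the defines above):

#include <stdint.h>

#define UFI_TYPE2	 2
#define UFI_TYPE3	 3
#define UFI_TYPE3R	10
#define UFI_TYPE4	 4

/* chip: 2 = BE2, 3 = BE3, 4 = Skyhawk (assumed encoding for the sketch);
 * build0 is the first byte of the UFI header's build string. */
static int ufi_type_for(int chip, char build0, uint8_t asic_type_rev)
{
	if (chip == 4 && build0 == '4')
		return UFI_TYPE4;
	if (chip == 3 && build0 == '3')
		return asic_type_rev == 0x10 ? UFI_TYPE3R : UFI_TYPE3;
	if (chip == 2 && build0 == '2')
		return UFI_TYPE2;
	return -1;	/* unknown image */
}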
  
  static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
  {
 -      struct flash_file_hdr_g2 *fhdr;
        struct flash_file_hdr_g3 *fhdr3;
        struct image_hdr *img_hdr_ptr = NULL;
        struct be_dma_mem flash_cmd;
                                          &flash_cmd.dma, GFP_KERNEL);
        if (!flash_cmd.va) {
                status = -ENOMEM;
 -              dev_err(&adapter->pdev->dev,
 -                      "Memory allocation failure while flashing\n");
                goto be_fw_exit;
        }
  
        p = fw->data;
 -      fhdr = (struct flash_file_hdr_g2 *)p;
 +      fhdr3 = (struct flash_file_hdr_g3 *)p;
  
 -      ufi_type = be_get_ufi_type(adapter, fhdr);
 +      ufi_type = be_get_ufi_type(adapter, fhdr3);
  
 -      fhdr3 = (struct flash_file_hdr_g3 *)fw->data;
        num_imgs = le32_to_cpu(fhdr3->num_imgs);
        for (i = 0; i < num_imgs; i++) {
                img_hdr_ptr = (struct image_hdr *)(fw->data +
                                (sizeof(struct flash_file_hdr_g3) +
                                 i * sizeof(struct image_hdr)));
                if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
 -                      if (ufi_type == UFI_TYPE4)
 +                      switch (ufi_type) {
 +                      case UFI_TYPE4:
                                status = be_flash_skyhawk(adapter, fw,
                                                        &flash_cmd, num_imgs);
 -                      else if (ufi_type == UFI_TYPE3)
 +                              break;
 +                      case UFI_TYPE3R:
                                status = be_flash_BEx(adapter, fw, &flash_cmd,
                                                      num_imgs);
 +                              break;
 +                      case UFI_TYPE3:
 +                              /* Do not flash this ufi on BE3-R cards */
 +                              if (adapter->asic_rev < 0x10)
 +                                      status = be_flash_BEx(adapter, fw,
 +                                                            &flash_cmd,
 +                                                            num_imgs);
 +                              else {
 +                                      status = -1;
 +                                      dev_err(&adapter->pdev->dev,
 +                                              "Can't load BE3 UFI on BE3R\n");
 +                              }
 +                      }
                }
        }
  
@@@ -3699,12 -3750,12 +3786,12 @@@ static void be_netdev_init(struct net_d
  
        netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
                NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
 -              NETIF_F_HW_VLAN_TX;
 +              NETIF_F_HW_VLAN_CTAG_TX;
        if (be_multi_rxq(adapter))
                netdev->hw_features |= NETIF_F_RXHASH;
  
        netdev->features |= netdev->hw_features |
 -              NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
 +              NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
  
        netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
                NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
@@@ -3828,13 -3879,12 +3915,13 @@@ static int be_ctrl_init(struct be_adapt
  
        rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
        rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
 -                                      &rx_filter->dma, GFP_KERNEL);
 +                                         &rx_filter->dma,
 +                                         GFP_KERNEL | __GFP_ZERO);
        if (rx_filter->va == NULL) {
                status = -ENOMEM;
                goto free_mbox;
        }
 -      memset(rx_filter->va, 0, rx_filter->size);
 +
        mutex_init(&adapter->mbox_lock);
        spin_lock_init(&adapter->mcc_lock);
        spin_lock_init(&adapter->mcc_cq_lock);
@@@ -3876,9 -3926,10 +3963,9 @@@ static int be_stats_init(struct be_adap
                cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
  
        cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
 -                                   GFP_KERNEL);
 +                                   GFP_KERNEL | __GFP_ZERO);
        if (cmd->va == NULL)
                return -1;
 -      memset(cmd->va, 0, cmd->size);
        return 0;
  }
  
@@@ -3890,7 -3941,6 +3977,7 @@@ static void be_remove(struct pci_dev *p
                return;
  
        be_roce_dev_remove(adapter);
 +      be_intr_set(adapter, false);
  
        cancel_delayed_work_sync(&adapter->func_recovery_work);
  
@@@ -4145,11 -4195,6 +4232,11 @@@ static int be_probe(struct pci_dev *pde
  
        status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
        if (!status) {
 +              status = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
 +              if (status < 0) {
 +                      dev_err(&pdev->dev, "dma_set_coherent_mask failed\n");
 +                      goto free_netdev;
 +              }
                netdev->features |= NETIF_F_HIGHDMA;
        } else {
                status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
                        goto ctrl_clean;
        }
  
 -      /* tell fw we're ready to fire cmds */
 -      status = be_cmd_fw_init(adapter);
 -      if (status)
 -              goto ctrl_clean;
 -
        if (be_reset_required(adapter)) {
                status = be_cmd_reset_function(adapter);
                if (status)
                        goto ctrl_clean;
 +
 +              /* Wait for interrupts to quiesce after an FLR */
 +              msleep(100);
        }
  
 -      /* The INTR bit may be set in the card when probed by a kdump kernel
 -       * after a crash.
 -       */
 -      if (!lancer_chip(adapter))
 -              be_intr_set(adapter, false);
 +      /* Allow interrupts for other ULPs running on NIC function */
 +      be_intr_set(adapter, true);
 +
 +      /* tell fw we're ready to fire cmds */
 +      status = be_cmd_fw_init(adapter);
 +      if (status)
 +              goto ctrl_clean;
  
        status = be_stats_init(adapter);
        if (status)
@@@ -4400,12 -4445,12 +4487,12 @@@ static void be_eeh_resume(struct pci_de
  
        pci_save_state(pdev);
  
 -      /* tell fw we're ready to fire cmds */
 -      status = be_cmd_fw_init(adapter);
 +      status = be_cmd_reset_function(adapter);
        if (status)
                goto err;
  
 -      status = be_cmd_reset_function(adapter);
 +      /* tell fw we're ready to fire cmds */
 +      status = be_cmd_fw_init(adapter);
        if (status)
                goto err;
  
index fe8e9e5cfb2b720728bf40faf39d56978e01c85b,a3f8a2551f2dc0f86a06356be9c9e2cc16d7f71e..576e4b858fce09d7bd00f1a7492daf598bbe1741
@@@ -17,9 -17,6 +17,9 @@@
   *  along with this program; if not, write to the Free Software
   *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
   */
 +
 +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 +
  #include <linux/device.h>
  #include <linux/hrtimer.h>
  #include <linux/init.h>
@@@ -130,7 -127,6 +130,6 @@@ struct gianfar_ptp_registers 
  
  #define DRIVER                "gianfar_ptp"
  #define DEFAULT_CKSEL 1
- #define N_ALARM               1 /* first alarm is used internally to reset fipers */
  #define N_EXT_TS      2
  #define REG_SIZE      sizeof(struct gianfar_ptp_registers)
  
@@@ -413,7 -409,7 +412,7 @@@ static struct ptp_clock_info ptp_gianfa
        .owner          = THIS_MODULE,
        .name           = "gianfar clock",
        .max_adj        = 512000,
-       .n_alarm        = N_ALARM,
+       .n_alarm        = 0,
        .n_ext_ts       = N_EXT_TS,
        .n_per_out      = 0,
        .pps            = 1,
index c6dac38fd9ccf0a56a8cfa45316e0b2c020659d5,15ba8c47d79fd4282766528a994de538616c2d18..79c520b64fddd00ab36436215abbfa5847aa8336
@@@ -47,9 -47,7 +47,9 @@@
  #define FIRMWARE_8402_1               "rtl_nic/rtl8402-1.fw"
  #define FIRMWARE_8411_1               "rtl_nic/rtl8411-1.fw"
  #define FIRMWARE_8106E_1      "rtl_nic/rtl8106e-1.fw"
 -#define FIRMWARE_8168G_1      "rtl_nic/rtl8168g-1.fw"
 +#define FIRMWARE_8106E_2      "rtl_nic/rtl8106e-2.fw"
 +#define FIRMWARE_8168G_2      "rtl_nic/rtl8168g-2.fw"
 +#define FIRMWARE_8168G_3      "rtl_nic/rtl8168g-3.fw"
  
  #ifdef RTL8169_DEBUG
  #define assert(expr) \
@@@ -142,8 -140,6 +142,8 @@@ enum mac_version 
        RTL_GIGA_MAC_VER_39,
        RTL_GIGA_MAC_VER_40,
        RTL_GIGA_MAC_VER_41,
 +      RTL_GIGA_MAC_VER_42,
 +      RTL_GIGA_MAC_VER_43,
        RTL_GIGA_MAC_NONE   = 0xff,
  };
  
@@@ -266,16 -262,10 +266,16 @@@ static const struct 
                _R("RTL8106e",          RTL_TD_1, FIRMWARE_8106E_1,
                                                        JUMBO_1K, true),
        [RTL_GIGA_MAC_VER_40] =
 -              _R("RTL8168g/8111g",    RTL_TD_1, FIRMWARE_8168G_1,
 +              _R("RTL8168g/8111g",    RTL_TD_1, FIRMWARE_8168G_2,
                                                        JUMBO_9K, false),
        [RTL_GIGA_MAC_VER_41] =
                _R("RTL8168g/8111g",    RTL_TD_1, NULL, JUMBO_9K, false),
 +      [RTL_GIGA_MAC_VER_42] =
 +              _R("RTL8168g/8111g",    RTL_TD_1, FIRMWARE_8168G_3,
 +                                                      JUMBO_9K, false),
 +      [RTL_GIGA_MAC_VER_43] =
 +              _R("RTL8106e",          RTL_TD_1, FIRMWARE_8106E_2,
 +                                                      JUMBO_1K, true),
  };
  #undef _R
  
@@@ -339,7 -329,6 +339,7 @@@ enum rtl_registers 
  #define       RXCFG_FIFO_SHIFT                13
                                        /* No threshold before first PCI xfer */
  #define       RX_FIFO_THRESH                  (7 << RXCFG_FIFO_SHIFT)
 +#define       RX_EARLY_OFF                    (1 << 11)
  #define       RXCFG_DMA_SHIFT                 8
                                        /* Unlimited maximum PCI burst. */
  #define       RX_DMA_BURST                    (7 << RXCFG_DMA_SHIFT)
@@@ -524,7 -513,6 +524,7 @@@ enum rtl_register_content 
        PMEnable        = (1 << 0),     /* Power Management Enable */
  
        /* Config2 register p. 25 */
 +      ClkReqEn        = (1 << 7),     /* Clock Request Enable */
        MSIEnable       = (1 << 5),     /* 8169 only. Reserved in the 8168. */
        PCI_Clock_66MHz = 0x01,
        PCI_Clock_33MHz = 0x00,
        Spi_en          = (1 << 3),
        LanWake         = (1 << 1),     /* LanWake enable/disable */
        PMEStatus       = (1 << 0),     /* PME status can be reset by PCI RST# */
 +      ASPM_en         = (1 << 0),     /* ASPM enable */
  
        /* TBICSR p.28 */
        TBIReset        = 0x80000000,
@@@ -827,9 -814,7 +827,9 @@@ MODULE_FIRMWARE(FIRMWARE_8168F_2)
  MODULE_FIRMWARE(FIRMWARE_8402_1);
  MODULE_FIRMWARE(FIRMWARE_8411_1);
  MODULE_FIRMWARE(FIRMWARE_8106E_1);
 -MODULE_FIRMWARE(FIRMWARE_8168G_1);
 +MODULE_FIRMWARE(FIRMWARE_8106E_2);
 +MODULE_FIRMWARE(FIRMWARE_8168G_2);
 +MODULE_FIRMWARE(FIRMWARE_8168G_3);
  
  static void rtl_lock_work(struct rtl8169_private *tp)
  {
@@@ -1039,6 -1024,14 +1039,6 @@@ static u16 r8168_phy_ocp_read(struct rt
                (RTL_R32(GPHY_OCP) & 0xffff) : ~0;
  }
  
 -static void rtl_w1w0_phy_ocp(struct rtl8169_private *tp, int reg, int p, int m)
 -{
 -      int val;
 -
 -      val = r8168_phy_ocp_read(tp, reg);
 -      r8168_phy_ocp_write(tp, reg, (val | p) & ~m);
 -}
 -
  static void r8168_mac_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data)
  {
        void __iomem *ioaddr = tp->mmio_addr;
@@@ -1084,21 -1077,6 +1084,21 @@@ static int r8168g_mdio_read(struct rtl8
        return r8168_phy_ocp_read(tp, tp->ocp_base + reg * 2);
  }
  
 +static void mac_mcu_write(struct rtl8169_private *tp, int reg, int value)
 +{
 +      if (reg == 0x1f) {
 +              tp->ocp_base = value << 4;
 +              return;
 +      }
 +
 +      r8168_mac_ocp_write(tp, tp->ocp_base + reg, value);
 +}
 +
 +static int mac_mcu_read(struct rtl8169_private *tp, int reg)
 +{
 +      return r8168_mac_ocp_read(tp, tp->ocp_base + reg);
 +}
 +
  DECLARE_RTL_COND(rtl_phyar_cond)
  {
        void __iomem *ioaddr = tp->mmio_addr;
@@@ -1793,17 -1771,16 +1793,17 @@@ static void __rtl8169_set_features(stru
        netdev_features_t changed = features ^ dev->features;
        void __iomem *ioaddr = tp->mmio_addr;
  
 -      if (!(changed & (NETIF_F_RXALL | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_RX)))
 +      if (!(changed & (NETIF_F_RXALL | NETIF_F_RXCSUM |
 +                       NETIF_F_HW_VLAN_CTAG_RX)))
                return;
  
 -      if (changed & (NETIF_F_RXCSUM | NETIF_F_HW_VLAN_RX)) {
 +      if (changed & (NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX)) {
                if (features & NETIF_F_RXCSUM)
                        tp->cp_cmd |= RxChkSum;
                else
                        tp->cp_cmd &= ~RxChkSum;
  
 -              if (dev->features & NETIF_F_HW_VLAN_RX)
 +              if (dev->features & NETIF_F_HW_VLAN_CTAG_RX)
                        tp->cp_cmd |= RxVlan;
                else
                        tp->cp_cmd &= ~RxVlan;
@@@ -1843,7 -1820,7 +1843,7 @@@ static void rtl8169_rx_vlan_tag(struct 
        u32 opts2 = le32_to_cpu(desc->opts2);
  
        if (opts2 & RxVlanTag)
 -              __vlan_hwaccel_put_tag(skb, swab16(opts2 & 0xffff));
 +              __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), swab16(opts2 & 0xffff));
  }
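
rtl8169_rx_vlan_tag() pulls the TCI from the low 16 bits of the descriptor's opts2 word; the chip stores it byte-swapped, hence the unconditional swab16() before handing it to the VLAN acceleration helper. In isolation the swap is just (sketch):

#include <stdint.h>

/* Unconditional 16-bit byte swap, as swab16() does. */
static uint16_t swap16(uint16_t v)
{
	return (uint16_t)((v << 8) | (v >> 8));
}

/* Example: recover the TCI from a descriptor's opts2 word. */
static uint16_t tci_from_opts2(uint32_t opts2)
{
	return swap16((uint16_t)(opts2 & 0xffff));
}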
  
  static int rtl8169_gset_tbi(struct net_device *dev, struct ethtool_cmd *cmd)
@@@ -2051,7 -2028,6 +2051,7 @@@ static void rtl8169_get_mac_version(str
                int mac_version;
        } mac_info[] = {
                /* 8168G family. */
 +              { 0x7cf00000, 0x50900000,       RTL_GIGA_MAC_VER_42 },
                { 0x7cf00000, 0x4c100000,       RTL_GIGA_MAC_VER_41 },
                { 0x7cf00000, 0x4c000000,       RTL_GIGA_MAC_VER_40 },
  
                netif_notice(tp, probe, dev,
                             "unknown MAC, using family default\n");
                tp->mac_version = default_version;
 +      } else if (tp->mac_version == RTL_GIGA_MAC_VER_42) {
 +              tp->mac_version = tp->mii.supports_gmii ?
 +                                RTL_GIGA_MAC_VER_42 :
 +                                RTL_GIGA_MAC_VER_43;
        }
  }
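
The new 8168G entry is matched like the rest of the mac_info table: the chip id derived from TxConfig is masked and compared entry by entry, first hit wins, with a family default as fallback. A minimal sketch of that lookup, assuming this masked-compare rule:

#include <stdint.h>
#include <stddef.h>

struct mac_match {
	uint32_t mask;
	uint32_t val;
	int	 version;
};

/* First entry whose masked value matches xid wins; otherwise fall back
 * to the family default, as the probe path does. */
static int match_mac_version(const struct mac_match *tbl, size_t n,
			     uint32_t xid, int fallback)
{
	size_t i;

	for (i = 0; i < n; i++)
		if ((xid & tbl[i].mask) == tbl[i].val)
			return tbl[i].version;
	return fallback;
}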
  
@@@ -2170,7 -2142,9 +2170,7 @@@ static void rtl_writephy_batch(struct r
  #define PHY_DATA_OR           0x10000000
  #define PHY_DATA_AND          0x20000000
  #define PHY_BJMPN             0x30000000
 -#define PHY_READ_EFUSE                0x40000000
 -#define PHY_READ_MAC_BYTE     0x50000000
 -#define PHY_WRITE_MAC_BYTE    0x60000000
 +#define PHY_MDIO_CHG          0x40000000
  #define PHY_CLEAR_READCOUNT   0x70000000
  #define PHY_WRITE             0x80000000
  #define PHY_READCOUNT_EQ_SKIP 0x90000000
  #define PHY_WRITE_PREVIOUS    0xc0000000
  #define PHY_SKIPN             0xd0000000
  #define PHY_DELAY_MS          0xe0000000
 -#define PHY_WRITE_ERI_WORD    0xf0000000
  
  struct fw_info {
        u32     magic;
@@@ -2255,7 -2230,7 +2255,7 @@@ static bool rtl_fw_data_ok(struct rtl81
                case PHY_READ:
                case PHY_DATA_OR:
                case PHY_DATA_AND:
 -              case PHY_READ_EFUSE:
 +              case PHY_MDIO_CHG:
                case PHY_CLEAR_READCOUNT:
                case PHY_WRITE:
                case PHY_WRITE_PREVIOUS:
                        }
                        break;
  
 -              case PHY_READ_MAC_BYTE:
 -              case PHY_WRITE_MAC_BYTE:
 -              case PHY_WRITE_ERI_WORD:
                default:
                        netif_err(tp, ifup, tp->dev,
                                  "Invalid action 0x%08x\n", action);
  static void rtl_phy_write_fw(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
  {
        struct rtl_fw_phy_action *pa = &rtl_fw->phy_action;
 +      struct mdio_ops org, *ops = &tp->mdio_ops;
        u32 predata, count;
        size_t index;
  
        predata = count = 0;
 +      org.write = ops->write;
 +      org.read = ops->read;
  
        for (index = 0; index < pa->size; ) {
                u32 action = le32_to_cpu(pa->code[index]);
                case PHY_BJMPN:
                        index -= regno;
                        break;
 -              case PHY_READ_EFUSE:
 -                      predata = rtl8168d_efuse_read(tp, regno);
 +              case PHY_MDIO_CHG:
 +                      if (data == 0) {
 +                              ops->write = org.write;
 +                              ops->read = org.read;
 +                      } else if (data == 1) {
 +                              ops->write = mac_mcu_write;
 +                              ops->read = mac_mcu_read;
 +                      }
 +
                        index++;
                        break;
                case PHY_CLEAR_READCOUNT:
                        index++;
                        break;
  
 -              case PHY_READ_MAC_BYTE:
 -              case PHY_WRITE_MAC_BYTE:
 -              case PHY_WRITE_ERI_WORD:
                default:
                        BUG();
                }
        }
 +
 +      ops->write = org.write;
 +      ops->read = org.read;
  }
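
The new PHY_MDIO_CHG action lets a firmware program switch the interpreter between the regular MDIO accessors and the MAC-MCU ones mid-stream; rtl_phy_write_fw() saves the originals up front and restores them at the end. The save/swap/restore pattern in miniature (illustrative types, not the driver's):

#include <stdio.h>

struct io_ops {
	void (*write)(int reg, int val);
	int  (*read)(int reg);
};

static void std_write(int reg, int val) { printf("std w %x=%x\n", reg, val); }
static int  std_read(int reg)		{ (void)reg; return 0; }
static void mcu_write(int reg, int val) { printf("mcu w %x=%x\n", reg, val); }
static int  mcu_read(int reg)		{ (void)reg; return 1; }

/* data selects the accessor set, mirroring the PHY_MDIO_CHG action. */
static void mdio_chg(struct io_ops *ops, int data)
{
	if (data == 0) {
		ops->write = std_write;
		ops->read  = std_read;
	} else if (data == 1) {
		ops->write = mcu_write;
		ops->read  = mcu_read;
	}
}

int main(void)
{
	struct io_ops ops = { std_write, std_read };
	struct io_ops org = ops;	/* save, like rtl_phy_write_fw() */

	mdio_chg(&ops, 1);		/* firmware asked for MAC-MCU I/O */
	ops.write(0x10, 0xabcd);
	ops = org;			/* restore when the program ends */
	ops.write(0x10, 0xabcd);
	return 0;
}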
  
  static void rtl_release_firmware(struct rtl8169_private *tp)
@@@ -3400,68 -3368,51 +3400,68 @@@ static void rtl8411_hw_phy_config(struc
  
  static void rtl8168g_1_hw_phy_config(struct rtl8169_private *tp)
  {
 -      static const u16 mac_ocp_patch[] = {
 -              0xe008, 0xe01b, 0xe01d, 0xe01f,
 -              0xe021, 0xe023, 0xe025, 0xe027,
 -              0x49d2, 0xf10d, 0x766c, 0x49e2,
 -              0xf00a, 0x1ec0, 0x8ee1, 0xc60a,
 -
 -              0x77c0, 0x4870, 0x9fc0, 0x1ea0,
 -              0xc707, 0x8ee1, 0x9d6c, 0xc603,
 -              0xbe00, 0xb416, 0x0076, 0xe86c,
 -              0xc602, 0xbe00, 0x0000, 0xc602,
 -
 -              0xbe00, 0x0000, 0xc602, 0xbe00,
 -              0x0000, 0xc602, 0xbe00, 0x0000,
 -              0xc602, 0xbe00, 0x0000, 0xc602,
 -              0xbe00, 0x0000, 0xc602, 0xbe00,
 -
 -              0x0000, 0x0000, 0x0000, 0x0000
 -      };
 -      u32 i;
 +      rtl_apply_firmware(tp);
  
 -      /* Patch code for GPHY reset */
 -      for (i = 0; i < ARRAY_SIZE(mac_ocp_patch); i++)
 -              r8168_mac_ocp_write(tp, 0xf800 + 2*i, mac_ocp_patch[i]);
 -      r8168_mac_ocp_write(tp, 0xfc26, 0x8000);
 -      r8168_mac_ocp_write(tp, 0xfc28, 0x0075);
 +      rtl_writephy(tp, 0x1f, 0x0a46);
 +      if (rtl_readphy(tp, 0x10) & 0x0100) {
 +              rtl_writephy(tp, 0x1f, 0x0bcc);
 +              rtl_w1w0_phy(tp, 0x12, 0x0000, 0x8000);
 +      } else {
 +              rtl_writephy(tp, 0x1f, 0x0bcc);
 +              rtl_w1w0_phy(tp, 0x12, 0x8000, 0x0000);
 +      }
  
 -      rtl_apply_firmware(tp);
 +      rtl_writephy(tp, 0x1f, 0x0a46);
 +      if (rtl_readphy(tp, 0x13) & 0x0100) {
 +              rtl_writephy(tp, 0x1f, 0x0c41);
 +              rtl_w1w0_phy(tp, 0x15, 0x0002, 0x0000);
 +      } else {
 +              rtl_writephy(tp, 0x1f, 0x0c41);
 +              rtl_w1w0_phy(tp, 0x15, 0x0000, 0x0002);
 +      }
  
 -      if (r8168_phy_ocp_read(tp, 0xa460) & 0x0100)
 -              rtl_w1w0_phy_ocp(tp, 0xbcc4, 0x0000, 0x8000);
 -      else
 -              rtl_w1w0_phy_ocp(tp, 0xbcc4, 0x8000, 0x0000);
 +      /* Enable PHY auto speed down */
 +      rtl_writephy(tp, 0x1f, 0x0a44);
 +      rtl_w1w0_phy(tp, 0x11, 0x000c, 0x0000);
 +
 +      rtl_writephy(tp, 0x1f, 0x0bcc);
 +      rtl_w1w0_phy(tp, 0x14, 0x0100, 0x0000);
 +      rtl_writephy(tp, 0x1f, 0x0a44);
 +      rtl_w1w0_phy(tp, 0x11, 0x00c0, 0x0000);
 +      rtl_writephy(tp, 0x1f, 0x0a43);
 +      rtl_writephy(tp, 0x13, 0x8084);
 +      rtl_w1w0_phy(tp, 0x14, 0x0000, 0x6000);
 +      rtl_w1w0_phy(tp, 0x10, 0x1003, 0x0000);
 +
 +      /* EEE auto-fallback function */
 +      rtl_writephy(tp, 0x1f, 0x0a4b);
 +      rtl_w1w0_phy(tp, 0x11, 0x0004, 0x0000);
 +
 +      /* Enable UC LPF tune function */
 +      rtl_writephy(tp, 0x1f, 0x0a43);
 +      rtl_writephy(tp, 0x13, 0x8012);
 +      rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000);
  
 -      if (r8168_phy_ocp_read(tp, 0xa466) & 0x0100)
 -              rtl_w1w0_phy_ocp(tp, 0xc41a, 0x0002, 0x0000);
 -      else
 -              rtl_w1w0_phy_ocp(tp, 0xbcc4, 0x0000, 0x0002);
 +      rtl_writephy(tp, 0x1f, 0x0c42);
 +      rtl_w1w0_phy(tp, 0x11, 0x4000, 0x2000);
  
 -      rtl_w1w0_phy_ocp(tp, 0xa442, 0x000c, 0x0000);
 -      rtl_w1w0_phy_ocp(tp, 0xa4b2, 0x0004, 0x0000);
 +      /* Improve SWR Efficiency */
 +      rtl_writephy(tp, 0x1f, 0x0bcd);
 +      rtl_writephy(tp, 0x14, 0x5065);
 +      rtl_writephy(tp, 0x14, 0xd065);
 +      rtl_writephy(tp, 0x1f, 0x0bc8);
 +      rtl_writephy(tp, 0x11, 0x5655);
 +      rtl_writephy(tp, 0x1f, 0x0bcd);
 +      rtl_writephy(tp, 0x14, 0x1065);
 +      rtl_writephy(tp, 0x14, 0x9065);
 +      rtl_writephy(tp, 0x14, 0x1065);
  
 -      r8168_phy_ocp_write(tp, 0xa436, 0x8012);
 -      rtl_w1w0_phy_ocp(tp, 0xa438, 0x8000, 0x0000);
 +      rtl_writephy(tp, 0x1f, 0x0000);
 +}
  
 -      rtl_w1w0_phy_ocp(tp, 0xc422, 0x4000, 0x2000);
 +static void rtl8168g_2_hw_phy_config(struct rtl8169_private *tp)
 +{
 +      rtl_apply_firmware(tp);
  }
  
  static void rtl8102e_hw_phy_config(struct rtl8169_private *tp)
@@@ -3649,10 -3600,6 +3649,10 @@@ static void rtl_hw_phy_config(struct ne
        case RTL_GIGA_MAC_VER_40:
                rtl8168g_1_hw_phy_config(tp);
                break;
 +      case RTL_GIGA_MAC_VER_42:
 +      case RTL_GIGA_MAC_VER_43:
 +              rtl8168g_2_hw_phy_config(tp);
 +              break;
  
        case RTL_GIGA_MAC_VER_41:
        default:
@@@ -3861,8 -3808,6 +3861,8 @@@ static void rtl_init_mdio_ops(struct rt
                break;
        case RTL_GIGA_MAC_VER_40:
        case RTL_GIGA_MAC_VER_41:
 +      case RTL_GIGA_MAC_VER_42:
 +      case RTL_GIGA_MAC_VER_43:
                ops->write      = r8168g_mdio_write;
                ops->read       = r8168g_mdio_read;
                break;
@@@ -3914,8 -3859,6 +3914,8 @@@ static void rtl_wol_suspend_quirk(struc
        case RTL_GIGA_MAC_VER_39:
        case RTL_GIGA_MAC_VER_40:
        case RTL_GIGA_MAC_VER_41:
 +      case RTL_GIGA_MAC_VER_42:
 +      case RTL_GIGA_MAC_VER_43:
                RTL_W32(RxConfig, RTL_R32(RxConfig) |
                        AcceptBroadcast | AcceptMulticast | AcceptMyPhys);
                break;
@@@ -4023,8 -3966,6 +4023,8 @@@ static void r8168_phy_power_down(struc
        switch (tp->mac_version) {
        case RTL_GIGA_MAC_VER_32:
        case RTL_GIGA_MAC_VER_33:
 +      case RTL_GIGA_MAC_VER_40:
 +      case RTL_GIGA_MAC_VER_41:
                rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE | BMCR_PDOWN);
                break;
  
@@@ -4086,11 -4027,6 +4086,11 @@@ static void r8168_pll_power_down(struc
        case RTL_GIGA_MAC_VER_33:
                RTL_W8(PMCH, RTL_R8(PMCH) & ~0x80);
                break;
 +      case RTL_GIGA_MAC_VER_40:
 +      case RTL_GIGA_MAC_VER_41:
 +              rtl_w1w0_eri(tp, 0x1a8, ERIAR_MASK_1111, 0x00000000,
 +                           0xfc000000, ERIAR_EXGMAC);
 +              break;
        }
  }
  
@@@ -4108,11 -4044,6 +4108,11 @@@ static void r8168_pll_power_up(struct r
        case RTL_GIGA_MAC_VER_33:
                RTL_W8(PMCH, RTL_R8(PMCH) | 0x80);
                break;
 +      case RTL_GIGA_MAC_VER_40:
 +      case RTL_GIGA_MAC_VER_41:
 +              rtl_w1w0_eri(tp, 0x1a8, ERIAR_MASK_1111, 0xfc000000,
 +                           0x00000000, ERIAR_EXGMAC);
 +              break;
        }
  
        r8168_phy_power_up(tp);
@@@ -4149,7 -4080,6 +4149,7 @@@ static void rtl_init_pll_power_ops(stru
        case RTL_GIGA_MAC_VER_30:
        case RTL_GIGA_MAC_VER_37:
        case RTL_GIGA_MAC_VER_39:
 +      case RTL_GIGA_MAC_VER_43:
                ops->down       = r810x_pll_power_down;
                ops->up         = r810x_pll_power_up;
                break;
        case RTL_GIGA_MAC_VER_38:
        case RTL_GIGA_MAC_VER_40:
        case RTL_GIGA_MAC_VER_41:
 +      case RTL_GIGA_MAC_VER_42:
                ops->down       = r8168_pll_power_down;
                ops->up         = r8168_pll_power_up;
                break;
@@@ -4220,12 -4149,6 +4220,12 @@@ static void rtl_init_rxcfg(struct rtl81
        case RTL_GIGA_MAC_VER_34:
                RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST);
                break;
 +      case RTL_GIGA_MAC_VER_40:
 +      case RTL_GIGA_MAC_VER_41:
 +      case RTL_GIGA_MAC_VER_42:
 +      case RTL_GIGA_MAC_VER_43:
 +              RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST | RX_EARLY_OFF);
 +              break;
        default:
                RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST);
                break;
@@@ -4382,8 -4305,6 +4382,8 @@@ static void rtl_init_jumbo_ops(struct r
         */
        case RTL_GIGA_MAC_VER_40:
        case RTL_GIGA_MAC_VER_41:
 +      case RTL_GIGA_MAC_VER_42:
 +      case RTL_GIGA_MAC_VER_43:
        default:
                ops->disable    = NULL;
                ops->enable     = NULL;
@@@ -4491,8 -4412,6 +4491,8 @@@ static void rtl8169_hw_reset(struct rtl
                   tp->mac_version == RTL_GIGA_MAC_VER_37 ||
                   tp->mac_version == RTL_GIGA_MAC_VER_40 ||
                   tp->mac_version == RTL_GIGA_MAC_VER_41 ||
 +                 tp->mac_version == RTL_GIGA_MAC_VER_42 ||
 +                 tp->mac_version == RTL_GIGA_MAC_VER_43 ||
                   tp->mac_version == RTL_GIGA_MAC_VER_38) {
                RTL_W8(ChipCmd, RTL_R8(ChipCmd) | StopReq);
                rtl_udelay_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 666);
@@@ -5208,8 -5127,6 +5208,8 @@@ static void rtl_hw_start_8168g_1(struc
        void __iomem *ioaddr = tp->mmio_addr;
        struct pci_dev *pdev = tp->pci_dev;
  
 +      RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
 +
        rtl_eri_write(tp, 0xc8, ERIAR_MASK_0101, 0x080002, ERIAR_EXGMAC);
        rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, 0x38, ERIAR_EXGMAC);
        rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x48, ERIAR_EXGMAC);
  
        rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC);
        rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
 +      rtl_eri_write(tp, 0x2f8, ERIAR_MASK_0011, 0x1d8f, ERIAR_EXGMAC);
  
        RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
        RTL_W32(MISC, RTL_R32(MISC) & ~RXDV_GATED_EN);
        /* Adjust EEE LED frequency */
        RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07);
  
 -      rtl_w1w0_eri(tp, 0x2fc, ERIAR_MASK_0001, 0x01, 0x02, ERIAR_EXGMAC);
 +      rtl_w1w0_eri(tp, 0x2fc, ERIAR_MASK_0001, 0x01, 0x06, ERIAR_EXGMAC);
 +      rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, 0x1000, ERIAR_EXGMAC);
 +}
 +
 +static void rtl_hw_start_8168g_2(struct rtl8169_private *tp)
 +{
 +      void __iomem *ioaddr = tp->mmio_addr;
 +      static const struct ephy_info e_info_8168g_2[] = {
 +              { 0x00, 0x0000, 0x0008 },
 +              { 0x0c, 0x3df0, 0x0200 },
 +              { 0x19, 0xffff, 0xfc00 },
 +              { 0x1e, 0xffff, 0x20eb }
 +      };
 +
 +      rtl_hw_start_8168g_1(tp);
 +
 +      /* disable aspm and clock request before accessing ephy */
 +      RTL_W8(Config2, RTL_R8(Config2) & ~ClkReqEn);
 +      RTL_W8(Config5, RTL_R8(Config5) & ~ASPM_en);
 +      rtl_ephy_init(tp, e_info_8168g_2, ARRAY_SIZE(e_info_8168g_2));
  }
  
  static void rtl_hw_start_8168(struct net_device *dev)
  
        rtl_set_rx_tx_desc_registers(tp, ioaddr);
  
 -      rtl_set_rx_mode(dev);
 -
 -      RTL_W32(TxConfig, (TX_DMA_BURST << TxDMAShift) |
 -              (InterFrameGap << TxInterFrameGapShift));
 +      rtl_set_rx_tx_config_registers(tp);
  
        RTL_R8(IntrMask);
  
        case RTL_GIGA_MAC_VER_41:
                rtl_hw_start_8168g_1(tp);
                break;
 +      case RTL_GIGA_MAC_VER_42:
 +              rtl_hw_start_8168g_2(tp);
 +              break;
  
        default:
                printk(KERN_ERR PFX "%s: unknown chipset (mac_version = %d).\n",
                break;
        }
  
 +      RTL_W8(Cfg9346, Cfg9346_Lock);
 +
        RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
  
 -      RTL_W8(Cfg9346, Cfg9346_Lock);
 +      rtl_set_rx_mode(dev);
  
        RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xF000);
  }
@@@ -5529,17 -5424,6 +5529,17 @@@ static void rtl_hw_start_8101(struct ne
  
        RTL_W8(Cfg9346, Cfg9346_Unlock);
  
 +      RTL_W8(MaxTxPacketSize, TxPacketMax);
 +
 +      rtl_set_rx_max_size(ioaddr, rx_buf_sz);
 +
 +      tp->cp_cmd &= ~R810X_CPCMD_QUIRK_MASK;
 +      RTL_W16(CPlusCmd, tp->cp_cmd);
 +
 +      rtl_set_rx_tx_desc_registers(tp, ioaddr);
 +
 +      rtl_set_rx_tx_config_registers(tp);
 +
        switch (tp->mac_version) {
        case RTL_GIGA_MAC_VER_07:
                rtl_hw_start_8102e_1(tp);
        case RTL_GIGA_MAC_VER_39:
                rtl_hw_start_8106(tp);
                break;
 +      case RTL_GIGA_MAC_VER_43:
 +              rtl_hw_start_8168g_2(tp);
 +              break;
        }
  
        RTL_W8(Cfg9346, Cfg9346_Lock);
  
 -      RTL_W8(MaxTxPacketSize, TxPacketMax);
 -
 -      rtl_set_rx_max_size(ioaddr, rx_buf_sz);
 -
 -      tp->cp_cmd &= ~R810X_CPCMD_QUIRK_MASK;
 -      RTL_W16(CPlusCmd, tp->cp_cmd);
 -
        RTL_W16(IntrMitigate, 0x0000);
  
 -      rtl_set_rx_tx_desc_registers(tp, ioaddr);
 -
        RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
 -      rtl_set_rx_tx_config_registers(tp);
 -
 -      RTL_R8(IntrMask);
  
        rtl_set_rx_mode(dev);
  
 +      RTL_R8(IntrMask);
 +
        RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xf000);
  }
  
@@@ -5896,6 -5787,14 +5896,14 @@@ static netdev_tx_t rtl8169_start_xmit(s
                goto err_stop_0;
        }
  
+       /* 8168evl does not automatically pad to minimum length. */
+       if (unlikely(tp->mac_version == RTL_GIGA_MAC_VER_34 &&
+                    skb->len < ETH_ZLEN)) {
+               if (skb_padto(skb, ETH_ZLEN))
+                       goto err_update_stats;
+               skb_put(skb, ETH_ZLEN - skb->len);
+       }
        if (unlikely(le32_to_cpu(txd->opts1) & DescOwn))
                goto err_stop_0;
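
RTL_GIGA_MAC_VER_34 (8168evl) will not pad runt frames on its own, so the fix pads to ETH_ZLEN (60 bytes, the minimum frame length before the FCS) in software before queueing. The equivalent buffer-level operation, as a sketch:

#include <stdint.h>
#include <string.h>
#include <stddef.h>

#define MIN_ETH_LEN 60	/* ETH_ZLEN: minimum frame length, FCS excluded */

/* Zero-pad a short frame up to the Ethernet minimum; 'buf' must have
 * room for MIN_ETH_LEN bytes. Returns the (possibly grown) length. */
static size_t pad_to_min_eth(uint8_t *buf, size_t len)
{
	if (len < MIN_ETH_LEN) {
		memset(buf + len, 0, MIN_ETH_LEN - len);
		len = MIN_ETH_LEN;
	}
	return len;
}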
  
@@@ -5967,6 -5866,7 +5975,7 @@@ err_dma_1
        rtl8169_unmap_tx_skb(d, tp->tx_skb + entry, txd);
  err_dma_0:
        dev_kfree_skb(skb);
+ err_update_stats:
        dev->stats.tx_dropped++;
        return NETDEV_TX_OK;
  
@@@ -6853,8 -6753,6 +6862,8 @@@ static void rtl_hw_initialize(struct rt
        switch (tp->mac_version) {
        case RTL_GIGA_MAC_VER_40:
        case RTL_GIGA_MAC_VER_41:
 +      case RTL_GIGA_MAC_VER_42:
 +      case RTL_GIGA_MAC_VER_43:
                rtl_hw_init_8168g(tp);
                break;
  
@@@ -7037,17 -6935,16 +7046,17 @@@ rtl_init_one(struct pci_dev *pdev, cons
        /* don't enable SG, IP_CSUM and TSO by default - it might not work
         * properly for all devices */
        dev->features |= NETIF_F_RXCSUM |
 -              NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
 +              NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
  
        dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
 -              NETIF_F_RXCSUM | NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
 +              NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_TX |
 +              NETIF_F_HW_VLAN_CTAG_RX;
        dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
                NETIF_F_HIGHDMA;
  
        if (tp->mac_version == RTL_GIGA_MAC_VER_05)
                /* 8110SCd requires hardware Rx VLAN - disallow toggling */
 -              dev->hw_features &= ~NETIF_F_HW_VLAN_RX;
 +              dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX;
  
        dev->hw_features |= NETIF_F_RXALL;
        dev->hw_features |= NETIF_F_RXFCS;
diff --combined drivers/net/tun.c
index 66109a2ad886270c0b726e51710d74fbf57aa8f1,dcd0c19a431e1dc7c982b38bd1deff0aa9b38280..f042b0373e5ddec6a8a85703843b89a43cbeb1f7
@@@ -409,12 -409,14 +409,12 @@@ static void __tun_detach(struct tun_fil
  {
        struct tun_file *ntfile;
        struct tun_struct *tun;
 -      struct net_device *dev;
  
        tun = rtnl_dereference(tfile->tun);
  
        if (tun && !tfile->detached) {
                u16 index = tfile->queue_index;
                BUG_ON(index >= tun->numqueues);
 -              dev = tun->dev;
  
                rcu_assign_pointer(tun->tfiles[index],
                                   tun->tfiles[tun->numqueues - 1]);
@@@ -1203,8 -1205,6 +1203,8 @@@ static ssize_t tun_get_user(struct tun_
        }
  
        skb_reset_network_header(skb);
 +      skb_probe_transport_header(skb, 0);
 +
        rxhash = skb_get_rxhash(skb);
        netif_rx_ni(skb);
  
@@@ -1471,14 -1471,17 +1471,17 @@@ static int tun_recvmsg(struct kiocb *io
        if (!tun)
                return -EBADFD;
  
-       if (flags & ~(MSG_DONTWAIT|MSG_TRUNC))
-               return -EINVAL;
+       if (flags & ~(MSG_DONTWAIT|MSG_TRUNC)) {
+               ret = -EINVAL;
+               goto out;
+       }
        ret = tun_do_read(tun, tfile, iocb, m->msg_iov, total_len,
                          flags & MSG_DONTWAIT);
        if (ret > total_len) {
                m->msg_flags |= MSG_TRUNC;
                ret = flags & MSG_TRUNC ? ret : total_len;
        }
+ out:
        tun_put(tun);
        return ret;
  }
@@@ -1593,8 -1596,12 +1596,12 @@@ static int tun_set_iff(struct net *net
                        return err;
  
                if (tun->flags & TUN_TAP_MQ &&
-                   (tun->numqueues + tun->numdisabled > 1))
-                       return -EBUSY;
+                   (tun->numqueues + tun->numdisabled > 1)) {
+                       /* One or more queues have already been attached, no need
+                        * to initialize the device again.
+                        */
+                       return 0;
+               }
        }
        else {
                char *name;
                dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST |
                        TUN_USER_FEATURES;
                dev->features = dev->hw_features;
 +              dev->vlan_features = dev->features;
  
                INIT_LIST_HEAD(&tun->disabled);
                err = tun_attach(tun, file);
diff --combined net/batman-adv/routing.c
index 2f1f88923df8ca0208222b559bcffbf23870603d,7de033667ebcfdba792b9f59c96d69378ed758be..b27a4d792d1537fe2f53a492b6f3b19992e5dbc0
@@@ -29,7 -29,6 +29,7 @@@
  #include "unicast.h"
  #include "bridge_loop_avoidance.h"
  #include "distributed-arp-table.h"
 +#include "network-coding.h"
  
  static int batadv_route_unicast_packet(struct sk_buff *skb,
                                       struct batadv_hard_iface *recv_if);
@@@ -549,17 -548,6 +549,17 @@@ batadv_find_ifalter_router(struct batad
        return router;
  }
  
 +/**
 + * batadv_check_unicast_packet - Check for malformed unicast packets
 + * @bat_priv: the bat priv with all the soft interface information
 + * @skb: packet to check
 + * @hdr_size: size of header to pull
 + *
 + * Check for short header and bad addresses in the given packet. Returns a negative
 + * value when a check fails and 0 otherwise. The negative value depends on the
 + * reason: -ENODATA for bad header, -EBADR for broadcast destination or source,
 + * and -EREMOTE for non-local (other host) destination.
 + */
  static int batadv_check_unicast_packet(struct batadv_priv *bat_priv,
                                       struct sk_buff *skb, int hdr_size)
  {
  
        /* drop packet if it does not have the necessary minimum size */
        if (unlikely(!pskb_may_pull(skb, hdr_size)))
 -              return -1;
 +              return -ENODATA;
  
        ethhdr = (struct ethhdr *)skb_mac_header(skb);
  
        /* packet with unicast indication but broadcast recipient */
        if (is_broadcast_ether_addr(ethhdr->h_dest))
 -              return -1;
 +              return -EBADR;
  
        /* packet with broadcast sender address */
        if (is_broadcast_ether_addr(ethhdr->h_source))
 -              return -1;
 +              return -EBADR;
  
        /* not for me */
        if (!batadv_is_my_mac(bat_priv, ethhdr->h_dest))
 -              return -1;
 +              return -EREMOTE;
  
        return 0;
  }
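
With distinct error codes a caller can tell "too short" and "bad addresses" apart from "valid but addressed elsewhere", which the receive path below exploits by sniffing -EREMOTE packets for network coding. A compressed userspace rendition of the checks (Linux errno values, illustrative frame layout):

#include <errno.h>
#include <stdint.h>
#include <string.h>

#define MAC_LEN 6

static int is_bcast(const uint8_t *a)
{
	static const uint8_t bc[MAC_LEN] = {
		0xff, 0xff, 0xff, 0xff, 0xff, 0xff
	};

	return memcmp(a, bc, MAC_LEN) == 0;
}

/* Negative errno on failure, 0 for a well-formed unicast frame whose
 * destination is my_mac (dst at offset 0, src at offset 6). */
static int check_unicast(const uint8_t *frame, size_t len,
			 size_t hdr_size, const uint8_t *my_mac)
{
	if (len < hdr_size)
		return -ENODATA;	/* short header */
	if (is_bcast(frame) || is_bcast(frame + MAC_LEN))
		return -EBADR;		/* broadcast dst or src */
	if (memcmp(frame, my_mac, MAC_LEN) != 0)
		return -EREMOTE;	/* valid, but not for me */
	return 0;
}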
@@@ -864,17 -852,14 +864,17 @@@ static int batadv_route_unicast_packet(
        /* decrement ttl */
        unicast_packet->header.ttl--;
  
 -      /* Update stats counter */
 -      batadv_inc_counter(bat_priv, BATADV_CNT_FORWARD);
 -      batadv_add_counter(bat_priv, BATADV_CNT_FORWARD_BYTES,
 -                         skb->len + ETH_HLEN);
 -
 -      /* route it */
 -      if (batadv_send_skb_to_orig(skb, orig_node, recv_if))
 +      /* network code packet if possible */
 +      if (batadv_nc_skb_forward(skb, neigh_node, ethhdr)) {
                ret = NET_RX_SUCCESS;
 +      } else if (batadv_send_skb_to_orig(skb, orig_node, recv_if)) {
 +              ret = NET_RX_SUCCESS;
 +
 +              /* Update stats counter */
 +              batadv_inc_counter(bat_priv, BATADV_CNT_FORWARD);
 +              batadv_add_counter(bat_priv, BATADV_CNT_FORWARD_BYTES,
 +                                 skb->len + ETH_HLEN);
 +      }
  
  out:
        if (neigh_node)
@@@ -939,7 -924,7 +939,7 @@@ out
  }
  
  static int batadv_check_unicast_ttvn(struct batadv_priv *bat_priv,
-                                    struct sk_buff *skb) {
+                                    struct sk_buff *skb, int hdr_len) {
        uint8_t curr_ttvn, old_ttvn;
        struct batadv_orig_node *orig_node;
        struct ethhdr *ethhdr;
        int is_old_ttvn;
  
        /* check if there is enough data before accessing it */
-       if (pskb_may_pull(skb, sizeof(*unicast_packet) + ETH_HLEN) < 0)
+       if (pskb_may_pull(skb, hdr_len + ETH_HLEN) < 0)
                return 0;
  
        /* create a copy of the skb (in case of re-routing) to modify it. */
                return 0;
  
        unicast_packet = (struct batadv_unicast_packet *)skb->data;
-       ethhdr = (struct ethhdr *)(skb->data + sizeof(*unicast_packet));
+       ethhdr = (struct ethhdr *)(skb->data + hdr_len);
  
        /* check if the destination client was served by this node and it is now
         * roaming. In this case, it means that the node has got a ROAM_ADV
@@@ -1050,7 -1035,7 +1050,7 @@@ int batadv_recv_unicast_packet(struct s
        struct batadv_unicast_4addr_packet *unicast_4addr_packet;
        uint8_t *orig_addr;
        struct batadv_orig_node *orig_node = NULL;
 -      int hdr_size = sizeof(*unicast_packet);
 +      int check, hdr_size = sizeof(*unicast_packet);
        bool is4addr;
  
        unicast_packet = (struct batadv_unicast_packet *)skb->data;
        if (is4addr)
                hdr_size = sizeof(*unicast_4addr_packet);
  
 -      if (batadv_check_unicast_packet(bat_priv, skb, hdr_size) < 0)
 +      /* function returns -EREMOTE for promiscuous packets */
 +      check = batadv_check_unicast_packet(bat_priv, skb, hdr_size);
 +
 +      /* Even though the packet is not for us, we might save it to use for
 +       * decoding a later received coded packet
 +       */
 +      if (check == -EREMOTE)
 +              batadv_nc_skb_store_sniffed_unicast(bat_priv, skb);
 +
 +      if (check < 0)
                return NET_RX_DROP;
-       if (!batadv_check_unicast_ttvn(bat_priv, skb))
+       if (!batadv_check_unicast_ttvn(bat_priv, skb, hdr_size))
                return NET_RX_DROP;
  
        /* packet for me */
@@@ -1117,7 -1092,7 +1116,7 @@@ int batadv_recv_ucast_frag_packet(struc
        if (batadv_check_unicast_packet(bat_priv, skb, hdr_size) < 0)
                return NET_RX_DROP;
  
-       if (!batadv_check_unicast_ttvn(bat_priv, skb))
+       if (!batadv_check_unicast_ttvn(bat_priv, skb, hdr_size))
                return NET_RX_DROP;
  
        unicast_packet = (struct batadv_unicast_frag_packet *)skb->data;
diff --combined net/core/datagram.c
index ebba65d7e0da58b077bfe85a7c83f3805d52a55a,b5d48ac2a9c1f2e8be7dcb503799f5143b3c8734..b71423db77851eed9bd91f06a6ac0630f967a3e5
@@@ -78,9 -78,10 +78,10 @@@ static int receiver_wake_function(wait_
        return autoremove_wake_function(wait, mode, sync, key);
  }
  /*
-  * Wait for a packet..
+  * Wait for the last received packet to be different from skb
   */
- static int wait_for_packet(struct sock *sk, int *err, long *timeo_p)
+ static int wait_for_more_packets(struct sock *sk, int *err, long *timeo_p,
+                                const struct sk_buff *skb)
  {
        int error;
        DEFINE_WAIT_FUNC(wait, receiver_wake_function);
@@@ -92,7 -93,7 +93,7 @@@
        if (error)
                goto out_err;
  
-       if (!skb_queue_empty(&sk->sk_receive_queue))
+       if (sk->sk_receive_queue.prev != skb)
                goto out;
  
        /* Socket shut down? */
@@@ -131,9 -132,9 +132,9 @@@ out_noerr
   *    __skb_recv_datagram - Receive a datagram skbuff
   *    @sk: socket
   *    @flags: MSG_ flags
+  *    @peeked: returns non-zero if this packet has been seen before
   *    @off: an offset in bytes to peek skb from. Returns an offset
   *          within an skb where data actually starts
-  *    @peeked: returns non-zero if this packet has been seen before
   *    @err: error code returned
   *
   *    Get a datagram skbuff, understands the peeking, nonblocking wakeups
  struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
                                    int *peeked, int *off, int *err)
  {
-       struct sk_buff *skb;
+       struct sk_buff *skb, *last;
        long timeo;
        /*
         * Caller is allowed not to check sk->sk_err before skb_recv_datagram()
                 */
                unsigned long cpu_flags;
                struct sk_buff_head *queue = &sk->sk_receive_queue;
+               int _off = *off;
  
+               last = (struct sk_buff *)queue;
                spin_lock_irqsave(&queue->lock, cpu_flags);
                skb_queue_walk(queue, skb) {
+                       last = skb;
                        *peeked = skb->peeked;
                        if (flags & MSG_PEEK) {
-                               if (*off >= skb->len && skb->len) {
-                                       *off -= skb->len;
+                               if (_off >= skb->len && (skb->len || _off ||
+                                                        skb->peeked)) {
+                                       _off -= skb->len;
                                        continue;
                                }
                                skb->peeked = 1;
                                __skb_unlink(skb, queue);
  
                        spin_unlock_irqrestore(&queue->lock, cpu_flags);
+                       *off = _off;
                        return skb;
                }
                spin_unlock_irqrestore(&queue->lock, cpu_flags);
                if (!timeo)
                        goto no_packet;
  
-       } while (!wait_for_packet(sk, err, &timeo));
+       } while (!wait_for_more_packets(sk, err, &timeo, last));
  
        return NULL;
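
The peek path now walks with a private offset copy (_off) and commits it back only when an skb is actually returned, while 'last' remembers the queue tail so the wait loop sleeps until something genuinely new arrives. The offset-skipping rule in isolation, simplified (the kernel additionally special-cases zero-length and already-peeked skbs):

#include <stddef.h>

/* Find which packet a MSG_PEEK at byte offset *off lands in, given the
 * queued packets' lengths; returns its index and leaves the residual
 * offset in *off, or -1 when the queue is exhausted. */
static int peek_index(const size_t *len, size_t n, size_t *off)
{
	size_t _off = *off;	/* work on a copy, like the kernel code */
	size_t i;

	for (i = 0; i < n; i++) {
		if (_off >= len[i] && len[i]) {
			_off -= len[i];
			continue;
		}
		*off = _off;	/* commit only when a packet is returned */
		return (int)i;
	}
	return -1;
}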
  
@@@ -749,9 -755,7 +755,9 @@@ unsigned int datagram_poll(struct file 
  
        /* exceptional events? */
        if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
 -              mask |= POLLERR;
 +              mask |= POLLERR |
 +                      (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0);
 +
        if (sk->sk_shutdown & RCV_SHUTDOWN)
                mask |= POLLRDHUP | POLLIN | POLLRDNORM;
        if (sk->sk_shutdown == SHUTDOWN_MASK)
diff --combined net/ipv4/tcp_ipv4.c
index 8ea9751645968d075e3808b22c86c02c016d0094,d09203c63264d2d8c11f7b40d42200f82934213b..d979657b8a122fe869c807f8d3e796651f668781
@@@ -838,6 -838,7 +838,6 @@@ static void tcp_v4_reqsk_send_ack(struc
   */
  static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
                              struct request_sock *req,
 -                            struct request_values *rvp,
                              u16 queue_mapping,
                              bool nocache)
  {
        if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
                return -1;
  
 -      skb = tcp_make_synack(sk, dst, req, rvp, NULL);
 +      skb = tcp_make_synack(sk, dst, req, NULL);
  
        if (skb) {
                __tcp_v4_send_check(skb, ireq->loc_addr, ireq->rmt_addr);
        return err;
  }
  
 -static int tcp_v4_rtx_synack(struct sock *sk, struct request_sock *req,
 -                           struct request_values *rvp)
 +static int tcp_v4_rtx_synack(struct sock *sk, struct request_sock *req)
  {
 -      int res = tcp_v4_send_synack(sk, NULL, req, rvp, 0, false);
 +      int res = tcp_v4_send_synack(sk, NULL, req, 0, false);
  
        if (!res)
                TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
@@@ -1369,7 -1371,8 +1369,7 @@@ static bool tcp_fastopen_check(struct s
  static int tcp_v4_conn_req_fastopen(struct sock *sk,
                                    struct sk_buff *skb,
                                    struct sk_buff *skb_synack,
 -                                  struct request_sock *req,
 -                                  struct request_values *rvp)
 +                                  struct request_sock *req)
  {
        struct tcp_sock *tp = tcp_sk(sk);
        struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
  
  int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
  {
 -      struct tcp_extend_values tmp_ext;
        struct tcp_options_received tmp_opt;
 -      const u8 *hash_location;
        struct request_sock *req;
        struct inet_request_sock *ireq;
        struct tcp_sock *tp = tcp_sk(sk);
        tcp_clear_options(&tmp_opt);
        tmp_opt.mss_clamp = TCP_MSS_DEFAULT;
        tmp_opt.user_mss  = tp->rx_opt.user_mss;
 -      tcp_parse_options(skb, &tmp_opt, &hash_location, 0,
 -          want_cookie ? NULL : &foc);
 -
 -      if (tmp_opt.cookie_plus > 0 &&
 -          tmp_opt.saw_tstamp &&
 -          !tp->rx_opt.cookie_out_never &&
 -          (sysctl_tcp_cookie_size > 0 ||
 -           (tp->cookie_values != NULL &&
 -            tp->cookie_values->cookie_desired > 0))) {
 -              u8 *c;
 -              u32 *mess = &tmp_ext.cookie_bakery[COOKIE_DIGEST_WORDS];
 -              int l = tmp_opt.cookie_plus - TCPOLEN_COOKIE_BASE;
 -
 -              if (tcp_cookie_generator(&tmp_ext.cookie_bakery[0]) != 0)
 -                      goto drop_and_release;
 -
 -              /* Secret recipe starts with IP addresses */
 -              *mess++ ^= (__force u32)daddr;
 -              *mess++ ^= (__force u32)saddr;
 -
 -              /* plus variable length Initiator Cookie */
 -              c = (u8 *)mess;
 -              while (l-- > 0)
 -                      *c++ ^= *hash_location++;
 -
 -              want_cookie = false;    /* not our kind of cookie */
 -              tmp_ext.cookie_out_never = 0; /* false */
 -              tmp_ext.cookie_plus = tmp_opt.cookie_plus;
 -      } else if (!tp->rx_opt.cookie_in_always) {
 -              /* redundant indications, but ensure initialization. */
 -              tmp_ext.cookie_out_never = 1; /* true */
 -              tmp_ext.cookie_plus = 0;
 -      } else {
 -              goto drop_and_release;
 -      }
 -      tmp_ext.cookie_in_always = tp->rx_opt.cookie_in_always;
 +      tcp_parse_options(skb, &tmp_opt, 0, want_cookie ? NULL : &foc);
  
        if (want_cookie && !tmp_opt.saw_tstamp)
                tcp_clear_options(&tmp_opt);
         * of tcp_v4_send_synack()->tcp_select_initial_window().
         */
        skb_synack = tcp_make_synack(sk, dst, req,
 -          (struct request_values *)&tmp_ext,
            fastopen_cookie_present(&valid_foc) ? &valid_foc : NULL);
  
        if (skb_synack) {
                if (fastopen_cookie_present(&foc) && foc.len != 0)
                        NET_INC_STATS_BH(sock_net(sk),
                            LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
 -      } else if (tcp_v4_conn_req_fastopen(sk, skb, skb_synack, req,
 -          (struct request_values *)&tmp_ext))
 +      } else if (tcp_v4_conn_req_fastopen(sk, skb, skb_synack, req))
                goto drop_and_free;
  
        return 0;
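
The large deletion above is fallout from TCP Cookie Transactions (TCPCT)
being removed this cycle: tcp_parse_options() no longer reports a
hash_location, and the request_values plumbing disappears from the synack
paths. The surviving option-parsing step reduces to roughly this (a sketch
mirroring the hunk, signatures per this tree's include/net/tcp.h):

	struct tcp_options_received tmp_opt;
	struct tcp_fastopen_cookie foc = { .len = -1 };

	tcp_clear_options(&tmp_opt);
	tmp_opt.mss_clamp = TCP_MSS_DEFAULT;
	tmp_opt.user_mss  = tp->rx_opt.user_mss;
	/* Fast Open cookie parsing replaces the old cookie "bakery" */
	tcp_parse_options(skb, &tmp_opt, 0, want_cookie ? NULL : &foc);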
@@@ -1866,7 -1908,6 +1866,7 @@@ discard
        return 0;
  
  csum_err:
 +      TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS);
        TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
        goto discard;
  }
@@@ -1909,50 -1950,6 +1909,51 @@@ void tcp_v4_early_demux(struct sk_buff 
        }
  }
  
 +/* Packet is added to VJ-style prequeue for processing in process
 + * context, if a reader task is waiting. Apparently, this exciting
 + * idea (VJ's mail "Re: query about TCP header on tcp-ip" of 07 Sep 93)
 + * failed somewhere. Latency? Burstiness? Well, at least now we will
 + * see, why it failed. 8)8)                             --ANK
 + *
 + */
 +bool tcp_prequeue(struct sock *sk, struct sk_buff *skb)
 +{
 +      struct tcp_sock *tp = tcp_sk(sk);
 +
 +      if (sysctl_tcp_low_latency || !tp->ucopy.task)
 +              return false;
 +
 +      if (skb->len <= tcp_hdrlen(skb) &&
 +          skb_queue_len(&tp->ucopy.prequeue) == 0)
 +              return false;
 +
++      skb_dst_force(skb);
 +      __skb_queue_tail(&tp->ucopy.prequeue, skb);
 +      tp->ucopy.memory += skb->truesize;
 +      if (tp->ucopy.memory > sk->sk_rcvbuf) {
 +              struct sk_buff *skb1;
 +
 +              BUG_ON(sock_owned_by_user(sk));
 +
 +              while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
 +                      sk_backlog_rcv(sk, skb1);
 +                      NET_INC_STATS_BH(sock_net(sk),
 +                                       LINUX_MIB_TCPPREQUEUEDROPPED);
 +              }
 +
 +              tp->ucopy.memory = 0;
 +      } else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
 +              wake_up_interruptible_sync_poll(sk_sleep(sk),
 +                                         POLLIN | POLLRDNORM | POLLRDBAND);
 +              if (!inet_csk_ack_scheduled(sk))
 +                      inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
 +                                                (3 * tcp_rto_min(sk)) / 4,
 +                                                TCP_RTO_MAX);
 +      }
 +      return true;
 +}
 +EXPORT_SYMBOL(tcp_prequeue);
 +
  /*
   *    From tcp_input.c
   */
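
tcp_prequeue() moves out of line and is exported here; its caller contract
in tcp_v4_rcv() is unchanged. A simplified sketch of that call site
(NET_DMA details omitted, so this is a sketch rather than the verbatim
source):

	bh_lock_sock_nested(sk);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
		/* Reader waiting in process context? Queue for it. */
		if (!tcp_prequeue(sk, skb))
			ret = tcp_v4_do_rcv(sk, skb);
	} else if (unlikely(sk_add_backlog(sk, skb, sk->sk_rcvbuf))) {
		bh_unlock_sock(sk);
		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
		goto discard_and_relse;
	}
	bh_unlock_sock(sk);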
@@@ -1986,7 -1983,7 +1987,7 @@@ int tcp_v4_rcv(struct sk_buff *skb
         * provided case of th->doff==0 is eliminated.
         * So, we defer the checks. */
        if (!skb_csum_unnecessary(skb) && tcp_v4_checksum_init(skb))
 -              goto bad_packet;
 +              goto csum_error;
  
        th = tcp_hdr(skb);
        iph = ip_hdr(skb);
@@@ -2052,8 -2049,6 +2053,8 @@@ no_tcp_socket
                goto discard_it;
  
        if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
 +csum_error:
 +              TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS);
  bad_packet:
                TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
        } else {
@@@ -2075,13 -2070,10 +2076,13 @@@ do_time_wait
                goto discard_it;
        }
  
 -      if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
 -              TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
 +      if (skb->len < (th->doff << 2)) {
                inet_twsk_put(inet_twsk(sk));
 -              goto discard_it;
 +              goto bad_packet;
 +      }
 +      if (tcp_checksum_complete(skb)) {
 +              inet_twsk_put(inet_twsk(sk));
 +              goto csum_error;
        }
        switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
        case TCP_TW_SYN: {
@@@ -2205,6 -2197,12 +2206,6 @@@ void tcp_v4_destroy_sock(struct sock *s
        if (inet_csk(sk)->icsk_bind_hash)
                inet_put_port(sk);
  
 -      /* TCP Cookie Transactions */
 -      if (tp->cookie_values != NULL) {
 -              kref_put(&tp->cookie_values->kref,
 -                       tcp_cookie_values_release);
 -              tp->cookie_values = NULL;
 -      }
        BUG_ON(tp->fastopen_rsk != NULL);
  
        /* If socket is aborted during connect operation */
@@@ -2661,9 -2659,7 +2662,9 @@@ static void get_tcp4_sock(struct sock *
        __u16 srcp = ntohs(inet->inet_sport);
        int rx_queue;
  
 -      if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
 +      if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
 +          icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
 +          icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
                timer_active    = 1;
                timer_expires   = icsk->icsk_timeout;
        } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
diff --combined net/mac802154/mac802154.h
index 5c9e021994ba89243dd3356dd0e454659d3f85a5,703c1210d2230760f00629cd7302d2909713f235..d48422e271109a47d2e7139cf33e2bc727c38f3d
@@@ -88,7 -88,9 +88,7 @@@ struct mac802154_sub_if_data 
  
  #define mac802154_to_priv(_hw)        container_of(_hw, struct mac802154_priv, hw)
  
- #define MAC802154_CHAN_NONE           (~(u8)0) /* No channel is assigned */
 -#define MAC802154_MAX_XMIT_ATTEMPTS   3
 -
+ #define MAC802154_CHAN_NONE           0xff /* No channel is assigned */
  
  extern struct ieee802154_reduced_mlme_ops mac802154_mlme_reduced;
  extern struct ieee802154_mlme_ops mac802154_mlme_wpan;
@@@ -112,6 -114,5 +112,6 @@@ void mac802154_dev_set_ieee_addr(struc
  u16 mac802154_dev_get_pan_id(const struct net_device *dev);
  void mac802154_dev_set_pan_id(struct net_device *dev, u16 val);
  void mac802154_dev_set_page_channel(struct net_device *dev, u8 page, u8 chan);
 +u8 mac802154_dev_get_dsn(const struct net_device *dev);
  
  #endif /* MAC802154_H */
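
The MAC802154_CHAN_NONE change looks cosmetic but is not: ~(u8)0 undergoes
integer promotion and evaluates to the int -1, so comparing it against a
promoted u8 (range 0..255) can never match, while a plain 0xff compares as
intended. A standalone illustration, with hypothetical macro names:

	#include <stdio.h>

	typedef unsigned char u8;

	#define CHAN_NONE_OLD (~(u8)0)	/* promotes to int: value -1   */
	#define CHAN_NONE_NEW 0xff	/* plain int 255, fits in a u8 */

	int main(void)
	{
		u8 chan = CHAN_NONE_OLD;	/* stores 0xff either way */

		printf("%d\n", chan == CHAN_NONE_OLD);	/* 0: 255 == -1 */
		printf("%d\n", chan == CHAN_NONE_NEW);	/* 1 */
		return 0;
	}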
diff --combined net/netfilter/ipvs/ip_vs_pe_sip.c
index 9a8f4213e8a69af3c05176c203f1bbde62434b2c,e5920fb7ad01ac0d0927f6d7e1daffe85e597856..9ef22bdce9f192344f524c5337b60878dab8e480
@@@ -13,8 -13,7 +13,8 @@@ static const char *ip_vs_dbg_callid(cha
                                    const char *callid, size_t callid_len,
                                    int *idx)
  {
 -      size_t len = min(min(callid_len, (size_t)64), buf_len - *idx - 1);
 +      size_t max_len = 64;
 +      size_t len = min3(max_len, callid_len, buf_len - *idx - 1);
        memcpy(buf + *idx, callid, len);
        buf[*idx+len] = '\0';
        *idx += len + 1;
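
min3() from linux/kernel.h nests min() and inherits its strict type
checking, which is why the literal 64 becomes a size_t local above; the
computed value is unchanged from the old nested form (sketch):

	/* All three operands must share a type for min3()'s typecheck: */
	size_t max_len = 64;
	size_t len = min3(max_len, callid_len, buf_len - *idx - 1);
	/* ...identical in value to the previous
	 * min(min(callid_len, (size_t)64), buf_len - *idx - 1) */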
@@@ -38,14 -37,10 +38,10 @@@ static int get_callid(const char *dptr
                if (ret > 0)
                        break;
                if (!ret)
-                       return 0;
+                       return -EINVAL;
                dataoff += *matchoff;
        }
  
-       /* Empty callid is useless */
-       if (!*matchlen)
-               return -EINVAL;
        /* Too large is useless */
        if (*matchlen > IP_VS_PEDATA_MAXLEN)
                return -EINVAL;
@@@ -173,7 -168,6 +169,7 @@@ static int __init ip_vs_sip_init(void
  static void __exit ip_vs_sip_cleanup(void)
  {
        unregister_ip_vs_pe(&ip_vs_sip_pe);
 +      synchronize_rcu();
  }
  
  module_init(ip_vs_sip_init);
diff --combined net/nfc/llcp_sock.c
index 38f08c31cdd878dea45df5dd8d01d2c21d90911f,e16315719eafe731366a6dfd3e6b6f2aee7a22ed..380253eccb74a87c3ca01f98daacd79c0a9deeea
@@@ -24,7 -24,7 +24,7 @@@
  #include <linux/module.h>
  #include <linux/nfc.h>
  
 -#include "../nfc.h"
 +#include "nfc.h"
  #include "llcp.h"
  
  static int sock_wait_state(struct sock *sk, int state, unsigned long timeo)
@@@ -223,156 -223,6 +223,156 @@@ error
        return ret;
  }
  
 +static int nfc_llcp_setsockopt(struct socket *sock, int level, int optname,
 +                             char __user *optval, unsigned int optlen)
 +{
 +      struct sock *sk = sock->sk;
 +      struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk);
 +      u32 opt;
 +      int err = 0;
 +
 +      pr_debug("%p optname %d\n", sk, optname);
 +
 +      if (level != SOL_NFC)
 +              return -ENOPROTOOPT;
 +
 +      lock_sock(sk);
 +
 +      switch (optname) {
 +      case NFC_LLCP_RW:
 +              if (sk->sk_state == LLCP_CONNECTED ||
 +                  sk->sk_state == LLCP_BOUND ||
 +                  sk->sk_state == LLCP_LISTEN) {
 +                      err = -EINVAL;
 +                      break;
 +              }
 +
 +              if (get_user(opt, (u32 __user *) optval)) {
 +                      err = -EFAULT;
 +                      break;
 +              }
 +
 +              if (opt > LLCP_MAX_RW) {
 +                      err = -EINVAL;
 +                      break;
 +              }
 +
 +              llcp_sock->rw = (u8) opt;
 +
 +              break;
 +
 +      case NFC_LLCP_MIUX:
 +              if (sk->sk_state == LLCP_CONNECTED ||
 +                  sk->sk_state == LLCP_BOUND ||
 +                  sk->sk_state == LLCP_LISTEN) {
 +                      err = -EINVAL;
 +                      break;
 +              }
 +
 +              if (get_user(opt, (u32 __user *) optval)) {
 +                      err = -EFAULT;
 +                      break;
 +              }
 +
 +              if (opt > LLCP_MAX_MIUX) {
 +                      err = -EINVAL;
 +                      break;
 +              }
 +
 +              llcp_sock->miux = cpu_to_be16((u16) opt);
 +
 +              break;
 +
 +      default:
 +              err = -ENOPROTOOPT;
 +              break;
 +      }
 +
 +      release_sock(sk);
 +
 +      pr_debug("%p rw %d miux %d\n", llcp_sock,
 +               llcp_sock->rw, llcp_sock->miux);
 +
 +      return err;
 +}
 +
 +static int nfc_llcp_getsockopt(struct socket *sock, int level, int optname,
 +                             char __user *optval, int __user *optlen)
 +{
 +      struct nfc_llcp_local *local;
 +      struct sock *sk = sock->sk;
 +      struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk);
 +      int len, err = 0;
 +      u16 miux, remote_miu;
 +      u8 rw;
 +
 +      pr_debug("%p optname %d\n", sk, optname);
 +
 +      if (level != SOL_NFC)
 +              return -ENOPROTOOPT;
 +
 +      if (get_user(len, optlen))
 +              return -EFAULT;
 +
 +      local = llcp_sock->local;
 +      if (!local)
 +              return -ENODEV;
 +
 +      len = min_t(u32, len, sizeof(u32));
 +
 +      lock_sock(sk);
 +
 +      switch (optname) {
 +      case NFC_LLCP_RW:
 +              rw = llcp_sock->rw > LLCP_MAX_RW ? local->rw : llcp_sock->rw;
 +              if (put_user(rw, (u32 __user *) optval))
 +                      err = -EFAULT;
 +
 +              break;
 +
 +      case NFC_LLCP_MIUX:
 +              miux = be16_to_cpu(llcp_sock->miux) > LLCP_MAX_MIUX ?
 +                      be16_to_cpu(local->miux) : be16_to_cpu(llcp_sock->miux);
 +
 +              if (put_user(miux, (u32 __user *) optval))
 +                      err = -EFAULT;
 +
 +              break;
 +
 +      case NFC_LLCP_REMOTE_MIU:
 +              remote_miu = llcp_sock->remote_miu > LLCP_MAX_MIU ?
 +                              local->remote_miu : llcp_sock->remote_miu;
 +
 +              if (put_user(remote_miu, (u32 __user *) optval))
 +                      err = -EFAULT;
 +
 +              break;
 +
 +      case NFC_LLCP_REMOTE_LTO:
 +              if (put_user(local->remote_lto / 10, (u32 __user *) optval))
 +                      err = -EFAULT;
 +
 +              break;
 +
 +      case NFC_LLCP_REMOTE_RW:
 +              if (put_user(llcp_sock->remote_rw, (u32 __user *) optval))
 +                      err = -EFAULT;
 +
 +              break;
 +
 +      default:
 +              err = -ENOPROTOOPT;
 +              break;
 +      }
 +
 +      release_sock(sk);
 +
 +      if (put_user(len, optlen))
 +              return -EFAULT;
 +
 +      return err;
 +}
 +
  void nfc_llcp_accept_unlink(struct sock *sk)
  {
        struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk);
@@@ -508,12 -358,13 +508,13 @@@ static int llcp_sock_getname(struct soc
        pr_debug("%p %d %d %d\n", sk, llcp_sock->target_idx,
                 llcp_sock->dsap, llcp_sock->ssap);
  
-       uaddr->sa_family = AF_NFC;
+       memset(llcp_addr, 0, sizeof(*llcp_addr));
        *len = sizeof(struct sockaddr_nfc_llcp);
  
+       llcp_addr->sa_family = AF_NFC;
        llcp_addr->dev_idx = llcp_sock->dev->idx;
        llcp_addr->target_idx = llcp_sock->target_idx;
+       llcp_addr->nfc_protocol = llcp_sock->nfc_protocol;
        llcp_addr->dsap = llcp_sock->dsap;
        llcp_addr->ssap = llcp_sock->ssap;
        llcp_addr->service_name_len = llcp_sock->service_name_len;
@@@ -555,8 -406,7 +556,8 @@@ static unsigned int llcp_sock_poll(stru
                return llcp_accept_poll(sk);
  
        if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
 -              mask |= POLLERR;
 +              mask |= POLLERR |
 +                      (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0);
  
        if (!skb_queue_empty(&sk->sk_receive_queue))
                mask |= POLLIN | POLLRDNORM;
@@@ -694,7 -544,7 +695,7 @@@ static int llcp_sock_connect(struct soc
  
        llcp_sock->dev = dev;
        llcp_sock->local = nfc_llcp_local_get(local);
 -      llcp_sock->miu = llcp_sock->local->remote_miu;
 +      llcp_sock->remote_miu = llcp_sock->local->remote_miu;
        llcp_sock->ssap = nfc_llcp_get_local_ssap(local);
        if (llcp_sock->ssap == LLCP_SAP_MAX) {
                ret = -ENOMEM;
@@@ -891,8 -741,8 +892,8 @@@ static const struct proto_ops llcp_sock
        .ioctl          = sock_no_ioctl,
        .listen         = llcp_sock_listen,
        .shutdown       = sock_no_shutdown,
 -      .setsockopt     = sock_no_setsockopt,
 -      .getsockopt     = sock_no_getsockopt,
 +      .setsockopt     = nfc_llcp_setsockopt,
 +      .getsockopt     = nfc_llcp_getsockopt,
        .sendmsg        = llcp_sock_sendmsg,
        .recvmsg        = llcp_sock_recvmsg,
        .mmap           = sock_no_mmap,
@@@ -956,13 -806,12 +957,13 @@@ struct sock *nfc_llcp_sock_alloc(struc
  
        llcp_sock->ssap = 0;
        llcp_sock->dsap = LLCP_SAP_SDP;
 -      llcp_sock->rw = LLCP_DEFAULT_RW;
 -      llcp_sock->miu = LLCP_DEFAULT_MIU;
 +      llcp_sock->rw = LLCP_MAX_RW + 1;
 +      llcp_sock->miux = cpu_to_be16(LLCP_MAX_MIUX + 1);
        llcp_sock->send_n = llcp_sock->send_ack_n = 0;
        llcp_sock->recv_n = llcp_sock->recv_ack_n = 0;
        llcp_sock->remote_ready = 1;
        llcp_sock->reserved_ssap = LLCP_SAP_MAX;
 +      nfc_llcp_socket_remote_param_init(llcp_sock);
        skb_queue_head_init(&llcp_sock->tx_queue);
        skb_queue_head_init(&llcp_sock->tx_pending_queue);
        INIT_LIST_HEAD(&llcp_sock->accept_queue);
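
With nfc_llcp_setsockopt()/nfc_llcp_getsockopt() wired into llcp_sock_ops,
RW and MIUX become tunable from userspace, but only before the socket is
bound, listening, or connected (the -EINVAL checks above). A hedged usage
sketch; the constant values are assumed from this era's linux/socket.h and
linux/nfc.h:

	#include <sys/socket.h>
	#include <linux/nfc.h>

	#ifndef SOL_NFC
	#define SOL_NFC 280	/* assumed linux/socket.h value */
	#endif

	/* Tune receive window and MIU extension on a fresh LLCP socket,
	 * before bind()/connect() so the kernel accepts the change. */
	static int llcp_tune(int fd, unsigned int rw, unsigned int miux)
	{
		if (setsockopt(fd, SOL_NFC, NFC_LLCP_RW,
			       &rw, sizeof(rw)) < 0)
			return -1;
		return setsockopt(fd, SOL_NFC, NFC_LLCP_MIUX,
				  &miux, sizeof(miux));
	}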
diff --combined net/unix/af_unix.c
index 9efe01113c5c5bc898dd6b26a4e0ced9733e1c01,1a02af0e3049eb42f9c66e2e2b70ddf26d1e5d70..826e09938bff4203def6e5af33db49c4360f3019
@@@ -1340,6 -1340,7 +1340,6 @@@ static void unix_destruct_scm(struct sk
        struct scm_cookie scm;
        memset(&scm, 0, sizeof(scm));
        scm.pid  = UNIXCB(skb).pid;
 -      scm.cred = UNIXCB(skb).cred;
        if (UNIXCB(skb).fp)
                unix_detach_fds(&scm, skb);
  
@@@ -1390,8 -1391,8 +1390,8 @@@ static int unix_scm_to_skb(struct scm_c
        int err = 0;
  
        UNIXCB(skb).pid  = get_pid(scm->pid);
 -      if (scm->cred)
 -              UNIXCB(skb).cred = get_cred(scm->cred);
 +      UNIXCB(skb).uid = scm->creds.uid;
 +      UNIXCB(skb).gid = scm->creds.gid;
        UNIXCB(skb).fp = NULL;
        if (scm->fp && send_fds)
                err = unix_attach_fds(scm, skb);
  static void maybe_add_creds(struct sk_buff *skb, const struct socket *sock,
                            const struct sock *other)
  {
 -      if (UNIXCB(skb).cred)
 +      if (UNIXCB(skb).pid)
                return;
        if (test_bit(SOCK_PASSCRED, &sock->flags) ||
            !other->sk_socket ||
            test_bit(SOCK_PASSCRED, &other->sk_socket->flags)) {
                UNIXCB(skb).pid  = get_pid(task_tgid(current));
 -              UNIXCB(skb).cred = get_current_cred();
 +              current_uid_gid(&UNIXCB(skb).uid, &UNIXCB(skb).gid);
        }
  }
  
@@@ -1818,7 -1819,7 +1818,7 @@@ static int unix_dgram_recvmsg(struct ki
                siocb->scm = &tmp_scm;
                memset(&tmp_scm, 0, sizeof(tmp_scm));
        }
 -      scm_set_cred(siocb->scm, UNIXCB(skb).pid, UNIXCB(skb).cred);
 +      scm_set_cred(siocb->scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid);
        unix_set_secdata(siocb->scm, skb);
  
        if (!(flags & MSG_PEEK)) {
  }
  
  /*
-  *    Sleep until data has arrive. But check for races..
+  *    Sleep until more data has arrived. But check for races..
   */
- static long unix_stream_data_wait(struct sock *sk, long timeo)
+ static long unix_stream_data_wait(struct sock *sk, long timeo,
+                                   struct sk_buff *last)
  {
        DEFINE_WAIT(wait);
  
        for (;;) {
                prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
  
-               if (!skb_queue_empty(&sk->sk_receive_queue) ||
+               if (skb_peek_tail(&sk->sk_receive_queue) != last ||
                    sk->sk_err ||
                    (sk->sk_shutdown & RCV_SHUTDOWN) ||
                    signal_pending(current) ||
        return timeo;
  }
  
  static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
                               struct msghdr *msg, size_t size,
                               int flags)
                goto out;
        }
  
-       skip = sk_peek_offset(sk, flags);
        do {
                int chunk;
-               struct sk_buff *skb;
+               struct sk_buff *skb, *last;
  
                unix_state_lock(sk);
-               skb = skb_peek(&sk->sk_receive_queue);
+               last = skb = skb_peek(&sk->sk_receive_queue);
  again:
                if (skb == NULL) {
                        unix_sk(sk)->recursion_level = 0;
                                break;
                        mutex_unlock(&u->readlock);
  
-                       timeo = unix_stream_data_wait(sk, timeo);
+                       timeo = unix_stream_data_wait(sk, timeo, last);
  
                        if (signal_pending(current)
                            ||  mutex_lock_interruptible(&u->readlock)) {
                        break;
                }
  
-               if (skip >= skb->len) {
+               skip = sk_peek_offset(sk, flags);
+               while (skip >= skb->len) {
                        skip -= skb->len;
+                       last = skb;
                        skb = skb_peek_next(skb, &sk->sk_receive_queue);
-                       goto again;
+                       if (!skb)
+                               goto again;
                }
  
                unix_state_unlock(sk);
                if (check_creds) {
                        /* Never glue messages from different writers */
                        if ((UNIXCB(skb).pid  != siocb->scm->pid) ||
 -                          (UNIXCB(skb).cred != siocb->scm->cred))
 +                          !uid_eq(UNIXCB(skb).uid, siocb->scm->creds.uid) ||
 +                          !gid_eq(UNIXCB(skb).gid, siocb->scm->creds.gid))
                                break;
                } else if (test_bit(SOCK_PASSCRED, &sock->flags)) {
                        /* Copy credentials */
 -                      scm_set_cred(siocb->scm, UNIXCB(skb).pid, UNIXCB(skb).cred);
 +                      scm_set_cred(siocb->scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid);
                        check_creds = 1;
                }
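
The last-skb plumbing above exists for peeking readers: with SO_PEEK_OFF, a
reader that has already peeked everything queued must sleep until the queue
tail actually moves, not merely until the queue is non-empty. A hedged
userspace illustration of the pattern this serves; the option value is
assumed from asm-generic/socket.h:

	#include <sys/socket.h>

	#ifndef SO_PEEK_OFF
	#define SO_PEEK_OFF 42	/* assumed asm-generic value */
	#endif

	/* Peek at stream data starting at `off` without consuming it; on
	 * a blocking AF_UNIX socket this now sleeps until genuinely new
	 * data is appended past what was already peeked. */
	static ssize_t peek_at(int fd, void *buf, size_t len, int off)
	{
		if (setsockopt(fd, SOL_SOCKET, SO_PEEK_OFF,
			       &off, sizeof(off)) < 0)
			return -1;
		return recv(fd, buf, len, MSG_PEEK);
	}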
  
@@@ -2196,9 -2195,7 +2195,9 @@@ static unsigned int unix_dgram_poll(str
  
        /* exceptional events? */
        if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
 -              mask |= POLLERR;
 +              mask |= POLLERR |
 +                      (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0);
 +
        if (sk->sk_shutdown & RCV_SHUTDOWN)
                mask |= POLLRDHUP | POLLIN | POLLRDNORM;
        if (sk->sk_shutdown == SHUTDOWN_MASK)