Pileus Git - ~andy/linux/blobdiff - drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
bridge: Add vlan support to static neighbors
[~andy/linux] / drivers / net / ethernet / intel / ixgbe / ixgbe_main.c
index 20a5af6d87d0e8747027964ba01a03bd7e3ca24c..1c0efcb7920f18053129c67b9a27998616841401 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2012 Intel Corporation.
+  Copyright(c) 1999 - 2013 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -66,7 +66,7 @@ static char ixgbe_default_device_descr[] =
 #define DRV_VERSION "3.11.33-k"
 const char ixgbe_driver_version[] = DRV_VERSION;
 static const char ixgbe_copyright[] =
-                               "Copyright (c) 1999-2012 Intel Corporation.";
+                               "Copyright (c) 1999-2013 Intel Corporation.";
 
 static const struct ixgbe_info *ixgbe_info_tbl[] = {
        [board_82598] = &ixgbe_82598_info,
@@ -803,6 +803,7 @@ static void ixgbe_tx_timeout_reset(struct ixgbe_adapter *adapter)
        /* Do the reset outside of interrupt context */
        if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
                adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED;
+               e_warn(drv, "initiating reset due to tx timeout\n");
                ixgbe_service_event_schedule(adapter);
        }
 }
@@ -837,7 +838,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
                        break;
 
                /* prevent any other reads prior to eop_desc */
-               rmb();
+               read_barrier_depends();
 
                /* if DD is not set pending work has not been completed */
                if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
@@ -850,9 +851,6 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
                total_bytes += tx_buffer->bytecount;
                total_packets += tx_buffer->gso_segs;
 
-               if (unlikely(tx_buffer->tx_flags & IXGBE_TX_FLAGS_TSTAMP))
-                       ixgbe_ptp_tx_hwtstamp(q_vector, tx_buffer->skb);
-
                /* free the skb */
                dev_kfree_skb_any(tx_buffer->skb);
 
@@ -1401,6 +1399,7 @@ static void ixgbe_set_rsc_gso_size(struct ixgbe_ring *ring,
        /* set gso_size to avoid messing up TCP MSS */
        skb_shinfo(skb)->gso_size = DIV_ROUND_UP((skb->len - hdr_len),
                                                 IXGBE_CB(skb)->append_cnt);
+       skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
 }
 
 static void ixgbe_update_rsc_stats(struct ixgbe_ring *rx_ring,
@@ -1441,7 +1440,7 @@ static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
 
        ixgbe_rx_checksum(rx_ring, rx_desc, skb);
 
-       ixgbe_ptp_rx_hwtstamp(rx_ring->q_vector, rx_desc, skb);
+       ixgbe_ptp_rx_hwtstamp(rx_ring, rx_desc, skb);
 
        if ((dev->features & NETIF_F_HW_VLAN_RX) &&
            ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) {
@@ -2180,10 +2179,10 @@ static void ixgbe_check_overtemp_subtask(struct ixgbe_adapter *adapter)
                        return;
 
                if (!(eicr & IXGBE_EICR_LSC) && hw->mac.ops.check_link) {
-                       u32 autoneg;
+                       u32 speed;
                        bool link_up = false;
 
-                       hw->mac.ops.check_link(hw, &autoneg, &link_up, false);
+                       hw->mac.ops.check_link(hw, &speed, &link_up, false);
 
                        if (link_up)
                                return;
@@ -3996,25 +3995,25 @@ static void ixgbe_sfp_link_config(struct ixgbe_adapter *adapter)
  **/
 static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw)
 {
-       u32 autoneg;
-       bool negotiation, link_up = false;
+       u32 speed;
+       bool autoneg, link_up = false;
        u32 ret = IXGBE_ERR_LINK_SETUP;
 
        if (hw->mac.ops.check_link)
-               ret = hw->mac.ops.check_link(hw, &autoneg, &link_up, false);
+               ret = hw->mac.ops.check_link(hw, &speed, &link_up, false);
 
        if (ret)
                goto link_cfg_out;
 
-       autoneg = hw->phy.autoneg_advertised;
-       if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
-               ret = hw->mac.ops.get_link_capabilities(hw, &autoneg,
-                                                       &negotiation);
+       speed = hw->phy.autoneg_advertised;
+       if ((!speed) && (hw->mac.ops.get_link_capabilities))
+               ret = hw->mac.ops.get_link_capabilities(hw, &speed,
+                                                       &autoneg);
        if (ret)
                goto link_cfg_out;
 
        if (hw->mac.ops.setup_link)
-               ret = hw->mac.ops.setup_link(hw, autoneg, negotiation, link_up);
+               ret = hw->mac.ops.setup_link(hw, speed, link_up);
 link_cfg_out:
        return ret;
 }
@@ -4871,7 +4870,7 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
         */
        if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) &&
            (adapter->hw.mac.type == ixgbe_mac_82599EB) &&
-           (max_frame > MAXIMUM_ETHERNET_VLAN_SIZE))
+           (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)))
                e_warn(probe, "Setting MTU > 1500 will disable legacy VFs\n");
 
        e_info(probe, "changing MTU from %d to %d\n", netdev->mtu, new_mtu);
@@ -5534,6 +5533,8 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
                break;
        }
 
+       adapter->last_rx_ptp_check = jiffies;
+
        if (adapter->flags2 & IXGBE_FLAG2_PTP_ENABLED)
                ixgbe_ptp_start_cyclecounter(adapter);
 
@@ -5614,6 +5615,7 @@ static void ixgbe_watchdog_flush_tx(struct ixgbe_adapter *adapter)
                         * to get done, so reset controller to flush Tx.
                         * (Do the reset outside of interrupt context).
                         */
+                       e_warn(drv, "initiating reset to clear Tx work after link loss\n");
                        adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED;
                }
        }
@@ -5738,8 +5740,8 @@ sfp_out:
 static void ixgbe_sfp_link_config_subtask(struct ixgbe_adapter *adapter)
 {
        struct ixgbe_hw *hw = &adapter->hw;
-       u32 autoneg;
-       bool negotiation;
+       u32 speed;
+       bool autoneg = false;
 
        if (!(adapter->flags & IXGBE_FLAG_NEED_LINK_CONFIG))
                return;
@@ -5750,11 +5752,11 @@ static void ixgbe_sfp_link_config_subtask(struct ixgbe_adapter *adapter)
 
        adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG;
 
-       autoneg = hw->phy.autoneg_advertised;
-       if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
-               hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiation);
+       speed = hw->phy.autoneg_advertised;
+       if ((!speed) && (hw->mac.ops.get_link_capabilities))
+               hw->mac.ops.get_link_capabilities(hw, &speed, &autoneg);
        if (hw->mac.ops.setup_link)
-               hw->mac.ops.setup_link(hw, autoneg, negotiation, true);
+               hw->mac.ops.setup_link(hw, speed, true);
 
        adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
        adapter->link_check_timeout = jiffies;
@@ -5878,7 +5880,6 @@ static void ixgbe_service_task(struct work_struct *work)
        struct ixgbe_adapter *adapter = container_of(work,
                                                     struct ixgbe_adapter,
                                                     service_task);
-
        ixgbe_reset_subtask(adapter);
        ixgbe_sfp_detection_subtask(adapter);
        ixgbe_sfp_link_config_subtask(adapter);
@@ -5886,7 +5887,11 @@ static void ixgbe_service_task(struct work_struct *work)
        ixgbe_watchdog_subtask(adapter);
        ixgbe_fdir_reinit_subtask(adapter);
        ixgbe_check_hang_subtask(adapter);
-       ixgbe_ptp_overflow_check(adapter);
+
+       if (adapter->flags2 & IXGBE_FLAG2_PTP_ENABLED) {
+               ixgbe_ptp_overflow_check(adapter);
+               ixgbe_ptp_rx_hang(adapter);
+       }
 
        ixgbe_service_event_complete(adapter);
 }
@@ -5899,6 +5904,9 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring,
        u32 vlan_macip_lens, type_tucmd;
        u32 mss_l4len_idx, l4len;
 
+       if (skb->ip_summed != CHECKSUM_PARTIAL)
+               return 0;
+
        if (!skb_is_gso(skb))
                return 0;
 
@@ -5941,10 +5949,9 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring,
        first->gso_segs = skb_shinfo(skb)->gso_segs;
        first->bytecount += (first->gso_segs - 1) * *hdr_len;
 
-       /* mss_l4len_id: use 1 as index for TSO */
+       /* mss_l4len_id: use 0 as index for TSO */
        mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT;
        mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
-       mss_l4len_idx |= 1 << IXGBE_ADVTXD_IDX_SHIFT;
 
        /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
        vlan_macip_lens = skb_network_header_len(skb);
@@ -5966,12 +5973,9 @@ static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring,
        u32 type_tucmd = 0;
 
        if (skb->ip_summed != CHECKSUM_PARTIAL) {
-               if (!(first->tx_flags & IXGBE_TX_FLAGS_HW_VLAN)) {
-                       if (unlikely(skb->no_fcs))
-                               first->tx_flags |= IXGBE_TX_FLAGS_NO_IFCS;
-                       if (!(first->tx_flags & IXGBE_TX_FLAGS_TXSW))
-                               return;
-               }
+               if (!(first->tx_flags & IXGBE_TX_FLAGS_HW_VLAN) &&
+                   !(first->tx_flags & IXGBE_TX_FLAGS_CC))
+                       return;
        } else {
                u8 l4_hdr = 0;
                switch (first->protocol) {
@@ -6029,30 +6033,32 @@ static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring,
                          type_tucmd, mss_l4len_idx);
 }
 
-static __le32 ixgbe_tx_cmd_type(u32 tx_flags)
+#define IXGBE_SET_FLAG(_input, _flag, _result) \
+       ((_flag <= _result) ? \
+        ((u32)(_input & _flag) * (_result / _flag)) : \
+        ((u32)(_input & _flag) / (_flag / _result)))
+
+static u32 ixgbe_tx_cmd_type(struct sk_buff *skb, u32 tx_flags)
 {
        /* set type for advanced descriptor with frame checksum insertion */
-       __le32 cmd_type = cpu_to_le32(IXGBE_ADVTXD_DTYP_DATA |
-                                     IXGBE_ADVTXD_DCMD_DEXT);
+       u32 cmd_type = IXGBE_ADVTXD_DTYP_DATA |
+                      IXGBE_ADVTXD_DCMD_DEXT |
+                      IXGBE_ADVTXD_DCMD_IFCS;
 
        /* set HW vlan bit if vlan is present */
-       if (tx_flags & IXGBE_TX_FLAGS_HW_VLAN)
-               cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_VLE);
-
-       if (tx_flags & IXGBE_TX_FLAGS_TSTAMP)
-               cmd_type |= cpu_to_le32(IXGBE_ADVTXD_MAC_TSTAMP);
+       cmd_type |= IXGBE_SET_FLAG(tx_flags, IXGBE_TX_FLAGS_HW_VLAN,
+                                  IXGBE_ADVTXD_DCMD_VLE);
 
        /* set segmentation enable bits for TSO/FSO */
-#ifdef IXGBE_FCOE
-       if (tx_flags & (IXGBE_TX_FLAGS_TSO | IXGBE_TX_FLAGS_FSO))
-#else
-       if (tx_flags & IXGBE_TX_FLAGS_TSO)
-#endif
-               cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_TSE);
+       cmd_type |= IXGBE_SET_FLAG(tx_flags, IXGBE_TX_FLAGS_TSO,
+                                  IXGBE_ADVTXD_DCMD_TSE);
+
+       /* set timestamp bit if present */
+       cmd_type |= IXGBE_SET_FLAG(tx_flags, IXGBE_TX_FLAGS_TSTAMP,
+                                  IXGBE_ADVTXD_MAC_TSTAMP);
 
        /* insert frame checksum */
-       if (!(tx_flags & IXGBE_TX_FLAGS_NO_IFCS))
-               cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_IFCS);
+       cmd_type ^= IXGBE_SET_FLAG(skb->no_fcs, 1, IXGBE_ADVTXD_DCMD_IFCS);
 
        return cmd_type;
 }
@@ -6060,36 +6066,27 @@ static __le32 ixgbe_tx_cmd_type(u32 tx_flags)
 static void ixgbe_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc,
                                   u32 tx_flags, unsigned int paylen)
 {
-       __le32 olinfo_status = cpu_to_le32(paylen << IXGBE_ADVTXD_PAYLEN_SHIFT);
+       u32 olinfo_status = paylen << IXGBE_ADVTXD_PAYLEN_SHIFT;
 
        /* enable L4 checksum for TSO and TX checksum offload */
-       if (tx_flags & IXGBE_TX_FLAGS_CSUM)
-               olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_TXSM);
+       olinfo_status |= IXGBE_SET_FLAG(tx_flags,
+                                       IXGBE_TX_FLAGS_CSUM,
+                                       IXGBE_ADVTXD_POPTS_TXSM);
 
        /* enble IPv4 checksum for TSO */
-       if (tx_flags & IXGBE_TX_FLAGS_IPV4)
-               olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_IXSM);
-
-       /* use index 1 context for TSO/FSO/FCOE */
-#ifdef IXGBE_FCOE
-       if (tx_flags & (IXGBE_TX_FLAGS_TSO | IXGBE_TX_FLAGS_FCOE))
-#else
-       if (tx_flags & IXGBE_TX_FLAGS_TSO)
-#endif
-               olinfo_status |= cpu_to_le32(1 << IXGBE_ADVTXD_IDX_SHIFT);
+       olinfo_status |= IXGBE_SET_FLAG(tx_flags,
+                                       IXGBE_TX_FLAGS_IPV4,
+                                       IXGBE_ADVTXD_POPTS_IXSM);
 
        /*
         * Check Context must be set if Tx switch is enabled, which it
         * always is for case where virtual functions are running
         */
-#ifdef IXGBE_FCOE
-       if (tx_flags & (IXGBE_TX_FLAGS_TXSW | IXGBE_TX_FLAGS_FCOE))
-#else
-       if (tx_flags & IXGBE_TX_FLAGS_TXSW)
-#endif
-               olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_CC);
+       olinfo_status |= IXGBE_SET_FLAG(tx_flags,
+                                       IXGBE_TX_FLAGS_CC,
+                                       IXGBE_ADVTXD_CC);
 
-       tx_desc->read.olinfo_status = olinfo_status;
+       tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
 }
 
 #define IXGBE_TXD_CMD (IXGBE_TXD_CMD_EOP | \
@@ -6099,22 +6096,22 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring,
                         struct ixgbe_tx_buffer *first,
                         const u8 hdr_len)
 {
-       dma_addr_t dma;
        struct sk_buff *skb = first->skb;
        struct ixgbe_tx_buffer *tx_buffer;
        union ixgbe_adv_tx_desc *tx_desc;
-       struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
-       unsigned int data_len = skb->data_len;
-       unsigned int size = skb_headlen(skb);
-       unsigned int paylen = skb->len - hdr_len;
+       struct skb_frag_struct *frag;
+       dma_addr_t dma;
+       unsigned int data_len, size;
        u32 tx_flags = first->tx_flags;
-       __le32 cmd_type;
+       u32 cmd_type = ixgbe_tx_cmd_type(skb, tx_flags);
        u16 i = tx_ring->next_to_use;
 
        tx_desc = IXGBE_TX_DESC(tx_ring, i);
 
-       ixgbe_tx_olinfo_status(tx_desc, tx_flags, paylen);
-       cmd_type = ixgbe_tx_cmd_type(tx_flags);
+       ixgbe_tx_olinfo_status(tx_desc, tx_flags, skb->len - hdr_len);
+
+       size = skb_headlen(skb);
+       data_len = skb->data_len;
 
 #ifdef IXGBE_FCOE
        if (tx_flags & IXGBE_TX_FLAGS_FCOE) {
@@ -6128,19 +6125,22 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring,
 
 #endif
        dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
-       if (dma_mapping_error(tx_ring->dev, dma))
-               goto dma_error;
 
-       /* record length, and DMA address */
-       dma_unmap_len_set(first, len, size);
-       dma_unmap_addr_set(first, dma, dma);
+       tx_buffer = first;
 
-       tx_desc->read.buffer_addr = cpu_to_le64(dma);
+       for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
+               if (dma_mapping_error(tx_ring->dev, dma))
+                       goto dma_error;
+
+               /* record length, and DMA address */
+               dma_unmap_len_set(tx_buffer, len, size);
+               dma_unmap_addr_set(tx_buffer, dma, dma);
+
+               tx_desc->read.buffer_addr = cpu_to_le64(dma);
 
-       for (;;) {
                while (unlikely(size > IXGBE_MAX_DATA_PER_TXD)) {
                        tx_desc->read.cmd_type_len =
-                               cmd_type | cpu_to_le32(IXGBE_MAX_DATA_PER_TXD);
+                               cpu_to_le32(cmd_type ^ IXGBE_MAX_DATA_PER_TXD);
 
                        i++;
                        tx_desc++;
@@ -6148,18 +6148,18 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring,
                                tx_desc = IXGBE_TX_DESC(tx_ring, 0);
                                i = 0;
                        }
+                       tx_desc->read.olinfo_status = 0;
 
                        dma += IXGBE_MAX_DATA_PER_TXD;
                        size -= IXGBE_MAX_DATA_PER_TXD;
 
                        tx_desc->read.buffer_addr = cpu_to_le64(dma);
-                       tx_desc->read.olinfo_status = 0;
                }
 
                if (likely(!data_len))
                        break;
 
-               tx_desc->read.cmd_type_len = cmd_type | cpu_to_le32(size);
+               tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size);
 
                i++;
                tx_desc++;
@@ -6167,6 +6167,7 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring,
                        tx_desc = IXGBE_TX_DESC(tx_ring, 0);
                        i = 0;
                }
+               tx_desc->read.olinfo_status = 0;
 
 #ifdef IXGBE_FCOE
                size = min_t(unsigned int, data_len, skb_frag_size(frag));
@@ -6177,22 +6178,13 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring,
 
                dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
                                       DMA_TO_DEVICE);
-               if (dma_mapping_error(tx_ring->dev, dma))
-                       goto dma_error;
 
                tx_buffer = &tx_ring->tx_buffer_info[i];
-               dma_unmap_len_set(tx_buffer, len, size);
-               dma_unmap_addr_set(tx_buffer, dma, dma);
-
-               tx_desc->read.buffer_addr = cpu_to_le64(dma);
-               tx_desc->read.olinfo_status = 0;
-
-               frag++;
        }
 
        /* write last descriptor with RS and EOP bits */
-       cmd_type |= cpu_to_le32(size) | cpu_to_le32(IXGBE_TXD_CMD);
-       tx_desc->read.cmd_type_len = cmd_type;
+       cmd_type |= size | IXGBE_TXD_CMD;
+       tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
 
        netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
 
@@ -6445,6 +6437,11 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
        if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
                skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
                tx_flags |= IXGBE_TX_FLAGS_TSTAMP;
+
+               /* schedule check for Tx timestamp */
+               adapter->ptp_tx_skb = skb_get(skb);
+               adapter->ptp_tx_start = jiffies;
+               schedule_work(&adapter->ptp_tx_work);
        }
 
 #ifdef CONFIG_PCI_IOV
@@ -6453,7 +6450,7 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
         * Tx switch had been disabled.
         */
        if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
-               tx_flags |= IXGBE_TX_FLAGS_TXSW;
+               tx_flags |= IXGBE_TX_FLAGS_CC;
 
 #endif
        /* DCB maps skb priorities 0-7 onto 3 bit PCP of VLAN tag. */
@@ -6840,6 +6837,26 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
 }
 
 #endif /* CONFIG_IXGBE_DCB */
+#ifdef CONFIG_PCI_IOV
+void ixgbe_sriov_reinit(struct ixgbe_adapter *adapter)
+{
+       struct net_device *netdev = adapter->netdev;
+
+       rtnl_lock();
+#ifdef CONFIG_IXGBE_DCB
+       ixgbe_setup_tc(netdev, netdev_get_num_tc(netdev));
+#else
+       if (netif_running(netdev))
+               ixgbe_close(netdev);
+       ixgbe_clear_interrupt_scheme(adapter);
+       ixgbe_init_interrupt_scheme(adapter);
+       if (netif_running(netdev))
+               ixgbe_open(netdev);
+#endif
+       rtnl_unlock();
+}
+
+#endif
 void ixgbe_do_reset(struct net_device *netdev)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
@@ -6985,7 +7002,7 @@ static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
        return err;
 }
 
-static int ixgbe_ndo_fdb_del(struct ndmsg *ndm,
+static int ixgbe_ndo_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
                             struct net_device *dev,
                             const unsigned char *addr)
 {
@@ -7062,7 +7079,8 @@ static int ixgbe_ndo_bridge_setlink(struct net_device *dev,
 }
 
 static int ixgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
-                                   struct net_device *dev)
+                                   struct net_device *dev,
+                                   u32 filter_mask)
 {
        struct ixgbe_adapter *adapter = netdev_priv(dev);
        u16 mode;
@@ -7366,7 +7384,15 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        }
 
 #ifdef CONFIG_PCI_IOV
-       ixgbe_enable_sriov(adapter, ii);
+       /* SR-IOV not supported on the 82598 */
+       if (adapter->hw.mac.type == ixgbe_mac_82598EB)
+               goto skip_sriov;
+       /* Mailbox */
+       ixgbe_init_mbx_params_pf(hw);
+       memcpy(&hw->mbx.ops, ii->mbx_ops, sizeof(hw->mbx.ops));
+       ixgbe_enable_sriov(adapter);
+       pci_sriov_set_totalvfs(pdev, 63);
+skip_sriov:
 
 #endif
        netdev->features = NETIF_F_SG |
@@ -7444,9 +7470,8 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        }
 
        memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len);
-       memcpy(netdev->perm_addr, hw->mac.perm_addr, netdev->addr_len);
 
-       if (!is_valid_ether_addr(netdev->perm_addr)) {
+       if (!is_valid_ether_addr(netdev->dev_addr)) {
                e_dev_err("invalid MAC address\n");
                err = -EIO;
                goto err_sw_init;
@@ -7623,8 +7648,14 @@ static void ixgbe_remove(struct pci_dev *pdev)
        if (netdev->reg_state == NETREG_REGISTERED)
                unregister_netdev(netdev);
 
-       ixgbe_disable_sriov(adapter);
-
+#ifdef CONFIG_PCI_IOV
+       /*
+        * Only disable SR-IOV on unload if the user specified the now
+        * deprecated max_vfs module parameter.
+        */
+       if (max_vfs)
+               ixgbe_disable_sriov(adapter);
+#endif
        ixgbe_clear_interrupt_scheme(adapter);
 
        ixgbe_release_hw_control(adapter);
@@ -7729,6 +7760,8 @@ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
                if (vfdev) {
                        e_dev_err("Issuing VFLR to VF %d\n", vf);
                        pci_write_config_dword(vfdev, 0xA8, 0x00008000);
+                       /* Free device reference count */
+                       pci_dev_put(vfdev);
                }
 
                pci_cleanup_aer_uncorrect_error_status(pdev);
@@ -7838,6 +7871,7 @@ static struct pci_driver ixgbe_driver = {
        .resume   = ixgbe_resume,
 #endif
        .shutdown = ixgbe_shutdown,
+       .sriov_configure = ixgbe_pci_sriov_configure,
        .err_handler = &ixgbe_err_handler
 };