Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index a8f1ee31de774cfbd85715daf5ee776a56f0c4a5..a024eec94be1eb166694d4d220c7f7d943d92676 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -75,8 +75,6 @@
 #define FW_FILE_NAME_E1H       "bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw"
 #define FW_FILE_NAME_E2                "bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw"
 
-#define MAC_LEADING_ZERO_CNT (ALIGN(ETH_ALEN, sizeof(u32)) - ETH_ALEN)
-
 /* Time in jiffies before concluding the transmitter is hung */
 #define TX_TIMEOUT             (5*HZ)
 
@@ -2955,14 +2953,16 @@ static unsigned long bnx2x_get_common_flags(struct bnx2x *bp,
        __set_bit(BNX2X_Q_FLG_ACTIVE, &flags);
 
        /* tx only connections collect statistics (on the same index as the
-        *  parent connection). The statistics are zeroed when the parent
-        *  connection is initialized.
+        * parent connection). The statistics are zeroed when the parent
+        * connection is initialized.
         */
 
        __set_bit(BNX2X_Q_FLG_STATS, &flags);
        if (zero_stats)
                __set_bit(BNX2X_Q_FLG_ZERO_STATS, &flags);
 
+       __set_bit(BNX2X_Q_FLG_PCSUM_ON_PKT, &flags);
+       __set_bit(BNX2X_Q_FLG_TUN_INC_INNER_IP_ID, &flags);
 
 #ifdef BNX2X_STOP_ON_ERROR
        __set_bit(BNX2X_Q_FLG_TX_SEC, &flags);
@@ -3227,16 +3227,29 @@ static void bnx2x_drv_info_ether_stat(struct bnx2x *bp)
 {
        struct eth_stats_info *ether_stat =
                &bp->slowpath->drv_info_to_mcp.ether_stat;
+       struct bnx2x_vlan_mac_obj *mac_obj =
+               &bp->sp_objs->mac_obj;
+       int i;
 
        strlcpy(ether_stat->version, DRV_MODULE_VERSION,
                ETH_STAT_INFO_VERSION_LEN);
 
-       bp->sp_objs[0].mac_obj.get_n_elements(bp, &bp->sp_objs[0].mac_obj,
-                                       DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED,
-                                       ether_stat->mac_local);
-
+       /* Get DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED MACs, placing them in the
+        * mac_local field of the ether_stat struct. The base address is
+        * offset by 2 bytes to account for each field being 8 bytes while a
+        * MAC address is only 6 bytes. Likewise, the stride passed to
+        * get_n_elements is 2 bytes, making up the difference between the
+        * 6 bytes of a MAC and the 8 bytes the ether_stat struct allocates,
+        * so the MACs land in their proper positions.
+        */
+       for (i = 0; i < DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED; i++)
+               memset(ether_stat->mac_local + i, 0,
+                      sizeof(ether_stat->mac_local[0]));
+       mac_obj->get_n_elements(bp, &bp->sp_objs[0].mac_obj,
+                               DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED,
+                               ether_stat->mac_local + MAC_PAD, MAC_PAD,
+                               ETH_ALEN);
        ether_stat->mtu_size = bp->dev->mtu;
-
        if (bp->dev->features & NETIF_F_RXCSUM)
                ether_stat->feature_flags |= FEATURE_ETH_CHKSUM_OFFLOAD_MASK;
        if (bp->dev->features & NETIF_F_TSO)
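
The padded layout described in the comment above can be illustrated with a small stand-alone sketch. This is only an illustration, not driver code; it assumes MAC_PAD evaluates to the same 2 bytes as the MAC_LEADING_ZERO_CNT macro removed at the top of this patch (ALIGN(ETH_ALEN, sizeof(u32)) - ETH_ALEN), and the helper name is made up:

#include <string.h>

/* Illustrative sketch: each mac_local slot is pad + ETH_ALEN = 8 bytes
 * wide and the MAC occupies the last 6 bytes of its slot, which is what
 * offsetting the base by MAC_PAD and passing a MAC_PAD stride to
 * get_n_elements() achieves above.
 */
#define SKETCH_ETH_ALEN	6
#define SKETCH_MAC_PAD	2
#define SKETCH_SLOT_LEN	(SKETCH_MAC_PAD + SKETCH_ETH_ALEN)

static void sketch_fill_padded_macs(unsigned char *dst,
				    const unsigned char (*macs)[SKETCH_ETH_ALEN],
				    int n)
{
	int i;

	for (i = 0; i < n; i++) {
		/* zero the leading pad, then copy the 6-byte MAC after it */
		memset(dst + i * SKETCH_SLOT_LEN, 0, SKETCH_MAC_PAD);
		memcpy(dst + i * SKETCH_SLOT_LEN + SKETCH_MAC_PAD,
		       macs[i], SKETCH_ETH_ALEN);
	}
}
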
@@ -3258,8 +3271,7 @@ static void bnx2x_drv_info_fcoe_stat(struct bnx2x *bp)
        if (!CNIC_LOADED(bp))
                return;
 
-       memcpy(fcoe_stat->mac_local + MAC_LEADING_ZERO_CNT,
-              bp->fip_mac, ETH_ALEN);
+       memcpy(fcoe_stat->mac_local + MAC_PAD, bp->fip_mac, ETH_ALEN);
 
        fcoe_stat->qos_priority =
                app->traffic_type_priority[LLFC_TRAFFIC_TYPE_FCOE];
@@ -3361,8 +3373,8 @@ static void bnx2x_drv_info_iscsi_stat(struct bnx2x *bp)
        if (!CNIC_LOADED(bp))
                return;
 
-       memcpy(iscsi_stat->mac_local + MAC_LEADING_ZERO_CNT,
-              bp->cnic_eth_dev.iscsi_mac, ETH_ALEN);
+       memcpy(iscsi_stat->mac_local + MAC_PAD, bp->cnic_eth_dev.iscsi_mac,
+              ETH_ALEN);
 
        iscsi_stat->qos_priority =
                app->traffic_type_priority[LLFC_TRAFFIC_TYPE_ISCSI];
@@ -6031,6 +6043,11 @@ void bnx2x_pre_irq_nic_init(struct bnx2x *bp)
        bnx2x_init_rx_rings(bp);
        bnx2x_init_tx_rings(bp);
 
+       if (IS_VF(bp)) {
+               bnx2x_memset_stats(bp);
+               return;
+       }
+
        if (IS_PF(bp)) {
                /* Initialize MOD_ABS interrupts */
                bnx2x_init_mod_abs_int(bp, &bp->link_vars, bp->common.chip_id,
@@ -7773,7 +7790,7 @@ int bnx2x_alloc_mem_cnic(struct bnx2x *bp)
                                sizeof(struct
                                       host_hc_status_block_e1x));
 
-       if (CONFIGURE_NIC_MODE(bp))
+       if (CONFIGURE_NIC_MODE(bp) && !bp->t2)
                /* allocate searcher T2 table, as it wasn't allocated before */
                BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ);
 
@@ -7796,7 +7813,7 @@ int bnx2x_alloc_mem(struct bnx2x *bp)
 {
        int i, allocated, context_size;
 
-       if (!CONFIGURE_NIC_MODE(bp))
+       if (!CONFIGURE_NIC_MODE(bp) && !bp->t2)
                /* allocate searcher T2 table */
                BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ);
 
@@ -7917,8 +7934,6 @@ int bnx2x_del_all_macs(struct bnx2x *bp,
 
 int bnx2x_set_eth_mac(struct bnx2x *bp, bool set)
 {
-       unsigned long ramrod_flags = 0;
-
        if (is_zero_ether_addr(bp->dev->dev_addr) &&
            (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) {
                DP(NETIF_MSG_IFUP | NETIF_MSG_IFDOWN,
@@ -7926,12 +7941,18 @@ int bnx2x_set_eth_mac(struct bnx2x *bp, bool set)
                return 0;
        }
 
-       DP(NETIF_MSG_IFUP, "Adding Eth MAC\n");
+       if (IS_PF(bp)) {
+               unsigned long ramrod_flags = 0;
 
-       __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
-       /* Eth MAC is set on RSS leading client (fp[0]) */
-       return bnx2x_set_mac_one(bp, bp->dev->dev_addr, &bp->sp_objs->mac_obj,
-                                set, BNX2X_ETH_MAC, &ramrod_flags);
+               DP(NETIF_MSG_IFUP, "Adding Eth MAC\n");
+               __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
+               return bnx2x_set_mac_one(bp, bp->dev->dev_addr,
+                                        &bp->sp_objs->mac_obj, set,
+                                        BNX2X_ETH_MAC, &ramrod_flags);
+       } else { /* vf */
+               return bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr,
+                                            bp->fp->index, true);
+       }
 }
 
 int bnx2x_setup_leading(struct bnx2x *bp)
@@ -9525,6 +9546,10 @@ sp_rtnl_not_reset:
                bnx2x_vfpf_storm_rx_mode(bp);
        }
 
+       if (test_and_clear_bit(BNX2X_SP_RTNL_HYPERVISOR_VLAN,
+                              &bp->sp_rtnl_state))
+               bnx2x_pf_set_vfs_vlan(bp);
+
        /* work which needs rtnl lock not-taken (as it takes the lock itself and
         * can be called from other contexts as well)
         */
@@ -9532,8 +9557,10 @@ sp_rtnl_not_reset:
 
        /* enable SR-IOV if applicable */
        if (IS_SRIOV(bp) && test_and_clear_bit(BNX2X_SP_RTNL_ENABLE_SRIOV,
-                                              &bp->sp_rtnl_state))
+                                              &bp->sp_rtnl_state)) {
+               bnx2x_disable_sriov(bp);
                bnx2x_enable_sriov(bp);
+       }
 }
 
 static void bnx2x_period_task(struct work_struct *work)
@@ -9701,6 +9728,31 @@ static struct bnx2x_prev_path_list *
        return NULL;
 }
 
+static int bnx2x_prev_path_mark_eeh(struct bnx2x *bp)
+{
+       struct bnx2x_prev_path_list *tmp_list;
+       int rc;
+
+       rc = down_interruptible(&bnx2x_prev_sem);
+       if (rc) {
+               BNX2X_ERR("Received %d when tried to take lock\n", rc);
+               return rc;
+       }
+
+       tmp_list = bnx2x_prev_path_get_entry(bp);
+       if (tmp_list) {
+               tmp_list->aer = 1;
+               rc = 0;
+       } else {
+               BNX2X_ERR("path %d: Entry does not exist for eeh; Flow occurs before initial insmod is over ?\n",
+                         BP_PATH(bp));
+       }
+
+       up(&bnx2x_prev_sem);
+
+       return rc;
+}
+
 static bool bnx2x_prev_is_path_marked(struct bnx2x *bp)
 {
        struct bnx2x_prev_path_list *tmp_list;
@@ -9709,14 +9761,15 @@ static bool bnx2x_prev_is_path_marked(struct bnx2x *bp)
        if (down_trylock(&bnx2x_prev_sem))
                return false;
 
-       list_for_each_entry(tmp_list, &bnx2x_prev_list, list) {
-               if (PCI_SLOT(bp->pdev->devfn) == tmp_list->slot &&
-                   bp->pdev->bus->number == tmp_list->bus &&
-                   BP_PATH(bp) == tmp_list->path) {
+       tmp_list = bnx2x_prev_path_get_entry(bp);
+       if (tmp_list) {
+               if (tmp_list->aer) {
+                       DP(NETIF_MSG_HW, "Path %d was marked by AER\n",
+                          BP_PATH(bp));
+               } else {
                        rc = true;
                        BNX2X_DEV_INFO("Path %d was already cleaned from previous drivers\n",
                                       BP_PATH(bp));
-                       break;
                }
        }
 
@@ -9730,6 +9783,28 @@ static int bnx2x_prev_mark_path(struct bnx2x *bp, bool after_undi)
        struct bnx2x_prev_path_list *tmp_list;
        int rc;
 
+       rc = down_interruptible(&bnx2x_prev_sem);
+       if (rc) {
+               BNX2X_ERR("Received %d when tried to take lock\n", rc);
+               return rc;
+       }
+
+       /* Check whether the entry for this path already exists */
+       tmp_list = bnx2x_prev_path_get_entry(bp);
+       if (tmp_list) {
+               if (!tmp_list->aer) {
+                       BNX2X_ERR("Re-Marking the path.\n");
+               } else {
+                       DP(NETIF_MSG_HW, "Removing AER indication from path %d\n",
+                          BP_PATH(bp));
+                       tmp_list->aer = 0;
+               }
+               up(&bnx2x_prev_sem);
+               return 0;
+       }
+       up(&bnx2x_prev_sem);
+
+       /* Create an entry for this path and add it */
        tmp_list = kmalloc(sizeof(struct bnx2x_prev_path_list), GFP_KERNEL);
        if (!tmp_list) {
                BNX2X_ERR("Failed to allocate 'bnx2x_prev_path_list'\n");
@@ -9739,6 +9814,7 @@ static int bnx2x_prev_mark_path(struct bnx2x *bp, bool after_undi)
        tmp_list->bus = bp->pdev->bus->number;
        tmp_list->slot = PCI_SLOT(bp->pdev->devfn);
        tmp_list->path = BP_PATH(bp);
+       tmp_list->aer = 0;
        tmp_list->undi = after_undi ? (1 << BP_PORT(bp)) : 0;
 
        rc = down_interruptible(&bnx2x_prev_sem);
@@ -9746,8 +9822,8 @@ static int bnx2x_prev_mark_path(struct bnx2x *bp, bool after_undi)
                BNX2X_ERR("Received %d when tried to take lock\n", rc);
                kfree(tmp_list);
        } else {
-               BNX2X_DEV_INFO("Marked path [%d] - finished previous unload\n",
-                               BP_PATH(bp));
+               DP(NETIF_MSG_HW, "Marked path [%d] - finished previous unload\n",
+                  BP_PATH(bp));
                list_add(&tmp_list->list, &bnx2x_prev_list);
                up(&bnx2x_prev_sem);
        }
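
For orientation, the list entries created and re-marked above presumably carry the fields assigned in bnx2x_prev_mark_path(). A minimal sketch of that shape, with the field set inferred from this hunk (the real definition lives in bnx2x.h and its layout may differ):

#include <linux/list.h>
#include <linux/types.h>

/* Sketch of the per-path bookkeeping entry, inferred from the
 * assignments above; not the authoritative definition.
 */
struct sketch_prev_path_list {
	u8 bus;			/* PCI bus number of the device */
	u8 slot;		/* PCI slot, PCI_SLOT(devfn) */
	u8 path;		/* engine/path id, BP_PATH() */
	u8 aer;			/* set while the path is held for AER/EEH recovery */
	u8 undi;		/* per-port bitmask left by a previous UNDI unload */
	struct list_head list;	/* linkage into the global bnx2x_prev_list */
};
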
@@ -9990,6 +10066,7 @@ static int bnx2x_prev_unload(struct bnx2x *bp)
        }
 
        do {
+               int aer = 0;
                /* Lock MCP using an unload request */
                fw = bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS, 0);
                if (!fw) {
@@ -9998,7 +10075,18 @@ static int bnx2x_prev_unload(struct bnx2x *bp)
                        break;
                }
 
-               if (fw == FW_MSG_CODE_DRV_UNLOAD_COMMON) {
+               rc = down_interruptible(&bnx2x_prev_sem);
+               if (rc) {
+                       BNX2X_ERR("Cannot check for AER; Received %d when tried to take lock\n",
+                                 rc);
+               } else {
+                       /* If Path is marked by EEH, ignore unload status */
+                       aer = !!(bnx2x_prev_path_get_entry(bp) &&
+                                bnx2x_prev_path_get_entry(bp)->aer);
+                       up(&bnx2x_prev_sem);
+               }
+
+               if (fw == FW_MSG_CODE_DRV_UNLOAD_COMMON || aer) {
                        rc = bnx2x_prev_unload_common(bp);
                        break;
                }
@@ -10038,8 +10126,12 @@ static void bnx2x_get_common_hwinfo(struct bnx2x *bp)
        id = ((val & 0xffff) << 16);
        val = REG_RD(bp, MISC_REG_CHIP_REV);
        id |= ((val & 0xf) << 12);
-       val = REG_RD(bp, MISC_REG_CHIP_METAL);
-       id |= ((val & 0xff) << 4);
+
+       /* Metal is read from PCI regs, but we can't access >=0x400 from
+        * the configuration space (so we need to reg_rd)
+        */
+       val = REG_RD(bp, PCICFG_OFFSET + PCI_ID_VAL3);
+       id |= (((val >> 24) & 0xf) << 4);
        val = REG_RD(bp, MISC_REG_BOND_ID);
        id |= (val & 0xf);
        bp->common.chip_id = id;
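
The chip_id word assembled above packs four fields into 32 bits. A minimal sketch of the same composition, with field positions taken from the shifts above (the helper name is hypothetical):

#include <linux/types.h>

/* Sketch of the chip_id layout composed above: bits 31:16 chip number,
 * 15:12 revision, 11:4 metal (after this patch only the nibble read from
 * PCI_ID_VAL3 bits 27:24 is filled in), 3:0 bond id.
 */
static u32 sketch_compose_chip_id(u32 chip_num, u32 rev, u32 metal, u32 bond)
{
	return ((chip_num & 0xffff) << 16) |
	       ((rev & 0xf) << 12) |
	       ((metal & 0xff) << 4) |
	       (bond & 0xf);
}
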
@@ -10703,6 +10795,12 @@ static void bnx2x_get_fcoe_info(struct bnx2x *bp)
                (max_fcoe_conn & BNX2X_MAX_FCOE_INIT_CONN_MASK) >>
                BNX2X_MAX_FCOE_INIT_CONN_SHIFT;
 
+       /* Calculate the number of maximum allowed FCoE tasks */
+       bp->cnic_eth_dev.max_fcoe_exchanges = MAX_NUM_FCOE_TASKS_PER_ENGINE;
+       if (IS_MF(bp) || CHIP_MODE_IS_4_PORT(bp))
+               bp->cnic_eth_dev.max_fcoe_exchanges /=
+                                               MAX_FCOE_FUNCS_PER_ENGINE;
+
        /* Read the WWN: */
        if (!IS_MF(bp)) {
                /* Port info */
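
As a worked example for the max_fcoe_exchanges calculation above (the constant values here are assumptions; the real ones come from the driver headers): if MAX_NUM_FCOE_TASKS_PER_ENGINE were 4096 and MAX_FCOE_FUNCS_PER_ENGINE were 2, a multi-function or 4-port device would advertise 4096 / 2 = 2048 FCoE exchanges per function, while a single-function 2-port device would keep the full 4096.
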
@@ -10816,14 +10914,12 @@ static void bnx2x_get_cnic_mac_hwinfo(struct bnx2x *bp)
                        }
                }
 
-               if (IS_MF_STORAGE_SD(bp))
-                       /* Zero primary MAC configuration */
-                       memset(bp->dev->dev_addr, 0, ETH_ALEN);
-
-               if (IS_MF_FCOE_AFEX(bp) || IS_MF_FCOE_SD(bp))
-                       /* use FIP MAC as primary MAC */
+               /* If this is a storage-only interface, use SAN mac as
+                * primary MAC. Notice that for SD this is already the case,
+                * as the SAN mac was copied from the primary MAC.
+                */
+               if (IS_MF_FCOE_AFEX(bp))
                        memcpy(bp->dev->dev_addr, fip_mac, ETH_ALEN);
-
        } else {
                val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].
                                iscsi_mac_upper);
@@ -11060,6 +11156,9 @@ static int bnx2x_get_hwinfo(struct bnx2x *bp)
                                } else
                                        BNX2X_DEV_INFO("illegal OV for SD\n");
                                break;
+                       case SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF:
+                               bp->mf_config[vn] = 0;
+                               break;
                        default:
                                /* Unknown configuration: reset mf_config */
                                bp->mf_config[vn] = 0;
@@ -11406,26 +11505,6 @@ static int bnx2x_init_bp(struct bnx2x *bp)
  * net_device service functions
  */
 
-static int bnx2x_open_epilog(struct bnx2x *bp)
-{
-       /* Enable sriov via delayed work. This must be done via delayed work
-        * because it causes the probe of the vf devices to be run, which invoke
-        * register_netdevice which must have rtnl lock taken. As we are holding
-        * the lock right now, that could only work if the probe would not take
-        * the lock. However, as the probe of the vf may be called from other
-        * contexts as well (such as passthrough to vm failes) it can't assume
-        * the lock is being held for it. Using delayed work here allows the
-        * probe code to simply take the lock (i.e. wait for it to be released
-        * if it is being held).
-        */
-       smp_mb__before_clear_bit();
-       set_bit(BNX2X_SP_RTNL_ENABLE_SRIOV, &bp->sp_rtnl_state);
-       smp_mb__after_clear_bit();
-       schedule_delayed_work(&bp->sp_rtnl_task, 0);
-
-       return 0;
-}
-
 /* called with rtnl_lock */
 static int bnx2x_open(struct net_device *dev)
 {
@@ -11795,6 +11874,8 @@ static const struct net_device_ops bnx2x_netdev_ops = {
        .ndo_setup_tc           = bnx2x_setup_tc,
 #ifdef CONFIG_BNX2X_SRIOV
        .ndo_set_vf_mac         = bnx2x_set_vf_mac,
+       .ndo_set_vf_vlan        = bnx2x_set_vf_vlan,
+       .ndo_get_vf_config      = bnx2x_get_vf_config,
 #endif
 #ifdef NETDEV_FCOE_WWNN
        .ndo_fcoe_get_wwn       = bnx2x_fcoe_get_wwn,
@@ -11957,19 +12038,26 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
        dev->watchdog_timeo = TX_TIMEOUT;
 
        dev->netdev_ops = &bnx2x_netdev_ops;
-       bnx2x_set_ethtool_ops(dev);
+       bnx2x_set_ethtool_ops(bp, dev);
 
        dev->priv_flags |= IFF_UNICAST_FLT;
 
        dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
                NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 |
                NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO |
-               NETIF_F_RXHASH | NETIF_F_HW_VLAN_TX;
+               NETIF_F_RXHASH | NETIF_F_HW_VLAN_CTAG_TX;
+       if (!CHIP_IS_E1x(bp)) {
+               dev->hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL;
+               dev->hw_enc_features =
+                       NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
+                       NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 |
+                       NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL;
+       }
 
        dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
                NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_HIGHDMA;
 
-       dev->features |= dev->hw_features | NETIF_F_HW_VLAN_RX;
+       dev->features |= dev->hw_features | NETIF_F_HW_VLAN_CTAG_RX;
        if (bp->flags & USING_DAC_FLAG)
                dev->features |= NETIF_F_HIGHDMA;
 
@@ -12451,7 +12539,7 @@ static int bnx2x_init_one(struct pci_dev *pdev,
         * l2 connections.
         */
        if (IS_VF(bp)) {
-               bnx2x_vf_map_doorbells(bp);
+               bp->doorbells = bnx2x_vf_doorbells(bp);
                rc = bnx2x_vf_pci_alloc(bp);
                if (rc)
                        goto init_one_exit;
@@ -12479,13 +12567,8 @@ static int bnx2x_init_one(struct pci_dev *pdev,
                        goto init_one_exit;
        }
 
-       /* Enable SRIOV if capability found in configuration space.
-        * Once the generic SR-IOV framework makes it in from the
-        * pci tree this will be revised, to allow dynamic control
-        * over the number of VFs. Right now, change the num of vfs
-        * param below to enable SR-IOV.
-        */
-       rc = bnx2x_iov_init_one(bp, int_mode, 0/*num vfs*/);
+       /* Enable SRIOV if capability found in configuration space */
+       rc = bnx2x_iov_init_one(bp, int_mode, BNX2X_MAX_NUM_OF_VFS);
        if (rc)
                goto init_one_exit;
 
@@ -12497,16 +12580,6 @@ static int bnx2x_init_one(struct pci_dev *pdev,
        if (CHIP_IS_E1x(bp))
                bp->flags |= NO_FCOE_FLAG;
 
-       /* disable FCOE for 57840 device, until FW supports it */
-       switch (ent->driver_data) {
-       case BCM57840_O:
-       case BCM57840_4_10:
-       case BCM57840_2_20:
-       case BCM57840_MFO:
-       case BCM57840_MF:
-               bp->flags |= NO_FCOE_FLAG;
-       }
-
        /* Set bp->num_queues for MSI-X mode*/
        bnx2x_set_num_queues(bp);
 
@@ -12640,9 +12713,7 @@ static void bnx2x_remove_one(struct pci_dev *pdev)
 
 static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
 {
-       int i;
-
-       bp->state = BNX2X_STATE_ERROR;
+       bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
 
        bp->rx_mode = BNX2X_RX_MODE_NONE;
 
@@ -12651,29 +12722,21 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
 
        /* Stop Tx */
        bnx2x_tx_disable(bp);
-
-       bnx2x_netif_stop(bp, 0);
        /* Delete all NAPI objects */
        bnx2x_del_all_napi(bp);
        if (CNIC_LOADED(bp))
                bnx2x_del_all_napi_cnic(bp);
+       netdev_reset_tc(bp->dev);
 
        del_timer_sync(&bp->timer);
+       cancel_delayed_work(&bp->sp_task);
+       cancel_delayed_work(&bp->period_task);
 
-       bnx2x_stats_handle(bp, STATS_EVENT_STOP);
-
-       /* Release IRQs */
-       bnx2x_free_irq(bp);
-
-       /* Free SKBs, SGEs, TPA pool and driver internals */
-       bnx2x_free_skbs(bp);
-
-       for_each_rx_queue(bp, i)
-               bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
-
-       bnx2x_free_mem(bp);
+       spin_lock_bh(&bp->stats_lock);
+       bp->stats_state = STATS_STATE_DISABLED;
+       spin_unlock_bh(&bp->stats_lock);
 
-       bp->state = BNX2X_STATE_CLOSED;
+       bnx2x_save_statistics(bp);
 
        netif_carrier_off(bp->dev);
 
@@ -12709,6 +12772,8 @@ static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
 
        rtnl_lock();
 
+       BNX2X_ERR("IO error detected\n");
+
        netif_device_detach(dev);
 
        if (state == pci_channel_io_perm_failure) {
@@ -12719,6 +12784,8 @@ static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
        if (netif_running(dev))
                bnx2x_eeh_nic_unload(bp);
 
+       bnx2x_prev_path_mark_eeh(bp);
+
        pci_disable_device(pdev);
 
        rtnl_unlock();
@@ -12737,9 +12804,10 @@ static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
 {
        struct net_device *dev = pci_get_drvdata(pdev);
        struct bnx2x *bp = netdev_priv(dev);
+       int i;
 
        rtnl_lock();
-
+       BNX2X_ERR("IO slot reset initializing...\n");
        if (pci_enable_device(pdev)) {
                dev_err(&pdev->dev,
                        "Cannot re-enable PCI device after reset\n");
@@ -12749,10 +12817,47 @@ static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
 
        pci_set_master(pdev);
        pci_restore_state(pdev);
+       pci_save_state(pdev);
 
        if (netif_running(dev))
                bnx2x_set_power_state(bp, PCI_D0);
 
+       if (netif_running(dev)) {
+               BNX2X_ERR("IO slot reset --> driver unload\n");
+               if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
+                       u32 v;
+
+                       v = SHMEM2_RD(bp,
+                                     drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
+                       SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
+                                 v & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
+               }
+               bnx2x_drain_tx_queues(bp);
+               bnx2x_send_unload_req(bp, UNLOAD_RECOVERY);
+               bnx2x_netif_stop(bp, 1);
+               bnx2x_free_irq(bp);
+
+               /* Report UNLOAD_DONE to MCP */
+               bnx2x_send_unload_done(bp, true);
+
+               bp->sp_state = 0;
+               bp->port.pmf = 0;
+
+               bnx2x_prev_unload(bp);
+
+               /* We should have reset the engine, so it's fair to
+                * assume the FW will no longer write to the bnx2x driver.
+                */
+               bnx2x_squeeze_objects(bp);
+               bnx2x_free_skbs(bp);
+               for_each_rx_queue(bp, i)
+                       bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
+               bnx2x_free_fp_mem(bp);
+               bnx2x_free_mem(bp);
+
+               bp->state = BNX2X_STATE_CLOSED;
+       }
+
        rtnl_unlock();
 
        return PCI_ERS_RESULT_RECOVERED;
@@ -12779,6 +12884,9 @@ static void bnx2x_io_resume(struct pci_dev *pdev)
 
        bnx2x_eeh_recover(bp);
 
+       bp->fw_seq = SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
+                                                       DRV_MSG_SEQ_NUMBER_MASK;
+
        if (netif_running(dev))
                bnx2x_nic_load(bp, LOAD_NORMAL);
 
@@ -12801,6 +12909,9 @@ static struct pci_driver bnx2x_pci_driver = {
        .suspend     = bnx2x_suspend,
        .resume      = bnx2x_resume,
        .err_handler = &bnx2x_err_handler,
+#ifdef CONFIG_BNX2X_SRIOV
+       .sriov_configure = bnx2x_sriov_configure,
+#endif
 };
 
 static int __init bnx2x_init(void)