Merge branch 'ixgbe-next'
author     David S. Miller <davem@davemloft.net>
           Thu, 16 Jan 2014 23:35:08 +0000 (15:35 -0800)
committer  David S. Miller <davem@davemloft.net>
           Thu, 16 Jan 2014 23:35:08 +0000 (15:35 -0800)
Aaron Brown says:

====================
Intel Wired LAN Driver Updates

This series contains updates to ixgbe and ixgbevf.

John adds rtnl lock/unlock semantics around ixgbe_reinit_locked(),
which was being called without the rtnl lock held.

Jacob corrects an issue where the ixgbevf_qv_disable function does not
set the disabled bit correctly.

From the community, Wei corrects the struct type used for the PCI
driver-private data in the suspend/resume path: the drvdata holds the
net_device, not the adapter.

Don changes the way we store ring arrays, in a manner that allows
support of multiple queues on multiple nodes, and creates new ring
initialization functions for work previously done across multiple
functions, bringing the code closer to ixgbe and hopefully making it
more readable. He also fixes incorrect fiber EEPROM write logic.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
drivers/net/ethernet/intel/ixgbevf/defines.h
drivers/net/ethernet/intel/ixgbevf/ethtool.c
drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c

diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
index 007a0083a636785fcb473e87aafdc608b213ba51..edda6814108c9d120fe994740edfcc04f710943d 100644
@@ -626,7 +626,7 @@ static void ixgbe_set_fiber_fixed_speed(struct ixgbe_hw *hw,
                goto out;
        }
 
-       eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) & rs;
+       eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs;
 
        status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
                                            IXGBE_I2C_EEPROM_DEV_ADDR2,
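
The one-character change above is a read-modify-write fix: the rate-select field must be cleared with the inverted mask and the new value OR-ed in. AND-ing rs into the byte, as the old code did, also clears every bit outside rs, corrupting the rest of the EEPROM byte. A standalone sketch with hypothetical mask and field values (the real ones come from the SFF-8472 defines):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            uint8_t mask = 0x08;    /* hypothetical rate-select field */
            uint8_t data = 0xf7;    /* field clear, all other bits set */
            uint8_t rs   = 0x08;    /* new field value */

            /* broken: AND can only clear bits, the whole byte is wiped */
            assert(((data & ~mask) & rs) == 0x00);

            /* fixed: clear the field, then OR in the new value */
            assert(((data & ~mask) | rs) == 0xff);
            return 0;
    }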
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 3ca59d21d0b2ee9ccde0488b6c6547975bb6e07a..b445ad121de1f5daf8e5d275f423b378d1d54e57 100644
@@ -6392,7 +6392,9 @@ static void ixgbe_reset_subtask(struct ixgbe_adapter *adapter)
        netdev_err(adapter->netdev, "Reset adapter\n");
        adapter->tx_timeout_count++;
 
+       rtnl_lock();
        ixgbe_reinit_locked(adapter);
+       rtnl_unlock();
 }
 
 /**
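
ixgbe_reinit_locked(), despite its name, expects the caller to already hold RTNL; the hunk above makes the reset subtask satisfy that. A defensive complement (an assumption, not part of this series) would be to assert the lock inside the helper so any future unlocked caller warns loudly:

    #include <linux/rtnetlink.h>

    static void example_reinit(struct ixgbe_adapter *adapter)
    {
            ASSERT_RTNL();  /* WARNs if rtnl_lock() was not taken */
            /* ... reset work serialized by RTNL ... */
    }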
diff --git a/drivers/net/ethernet/intel/ixgbevf/defines.h b/drivers/net/ethernet/intel/ixgbevf/defines.h
index 3147795bd135b57137105a3b4769d0f34a7c6faf..5426b2dee6a610aea8fedb89248b0b9a7673d876 100644
@@ -277,4 +277,21 @@ struct ixgbe_adv_tx_context_desc {
 #define IXGBE_ERR_RESET_FAILED                  -2
 #define IXGBE_ERR_INVALID_ARGUMENT              -3
 
+/* Transmit Config masks */
+#define IXGBE_TXDCTL_ENABLE            0x02000000 /* Ena specific Tx Queue */
+#define IXGBE_TXDCTL_SWFLSH            0x04000000 /* Tx Desc. wr-bk flushing */
+#define IXGBE_TXDCTL_WTHRESH_SHIFT     16         /* shift to WTHRESH bits */
+
+#define IXGBE_DCA_RXCTRL_DESC_DCA_EN   (1 << 5)  /* Rx Desc enable */
+#define IXGBE_DCA_RXCTRL_HEAD_DCA_EN   (1 << 6)  /* Rx Desc header ena */
+#define IXGBE_DCA_RXCTRL_DATA_DCA_EN   (1 << 7)  /* Rx Desc payload ena */
+#define IXGBE_DCA_RXCTRL_DESC_RRO_EN   (1 << 9)  /* Rx rd Desc Relax Order */
+#define IXGBE_DCA_RXCTRL_DATA_WRO_EN   (1 << 13) /* Rx wr data Relax Order */
+#define IXGBE_DCA_RXCTRL_HEAD_WRO_EN   (1 << 15) /* Rx wr header RO */
+
+#define IXGBE_DCA_TXCTRL_DESC_DCA_EN   (1 << 5)  /* DCA Tx Desc enable */
+#define IXGBE_DCA_TXCTRL_DESC_RRO_EN   (1 << 9)  /* Tx rd Desc Relax Order */
+#define IXGBE_DCA_TXCTRL_DESC_WRO_EN   (1 << 11) /* Tx Desc writeback RO bit */
+#define IXGBE_DCA_TXCTRL_DATA_RRO_EN   (1 << 13) /* Tx rd data Relax Order */
+
 #endif /* _IXGBEVF_DEFINES_H_ */
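
The new IXGBE_TXDCTL_WTHRESH_SHIFT define names the field position that ixgbevf_configure_tx_ring() below still programs as a bare (8 << 16). A hedged sketch of the equivalent symbolic form (same arithmetic, illustrative only):

    u32 txdctl = IXGBE_TXDCTL_ENABLE |
                 (8 << IXGBE_TXDCTL_WTHRESH_SHIFT) | /* WTHRESH = 8 */
                 (1 << 8) |                          /* HTHRESH = 1 */
                 32;                                 /* PTHRESH = 32 */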
diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
index 54d9acef9c4e8dab993d7a810a1fe896e22070cb..515ba4e29760c85e00a4afe264169d2a4152262c 100644
@@ -286,9 +286,9 @@ static int ixgbevf_set_ringparam(struct net_device *netdev,
 
        if (!netif_running(adapter->netdev)) {
                for (i = 0; i < adapter->num_tx_queues; i++)
-                       adapter->tx_ring[i].count = new_tx_count;
+                       adapter->tx_ring[i]->count = new_tx_count;
                for (i = 0; i < adapter->num_rx_queues; i++)
-                       adapter->rx_ring[i].count = new_rx_count;
+                       adapter->rx_ring[i]->count = new_rx_count;
                adapter->tx_ring_count = new_tx_count;
                adapter->rx_ring_count = new_rx_count;
                goto clear_reset;
@@ -303,7 +303,7 @@ static int ixgbevf_set_ringparam(struct net_device *netdev,
 
                for (i = 0; i < adapter->num_tx_queues; i++) {
                        /* clone ring and setup updated count */
-                       tx_ring[i] = adapter->tx_ring[i];
+                       tx_ring[i] = *adapter->tx_ring[i];
                        tx_ring[i].count = new_tx_count;
                        err = ixgbevf_setup_tx_resources(adapter, &tx_ring[i]);
                        if (!err)
@@ -329,7 +329,7 @@ static int ixgbevf_set_ringparam(struct net_device *netdev,
 
                for (i = 0; i < adapter->num_rx_queues; i++) {
                        /* clone ring and setup updated count */
-                       rx_ring[i] = adapter->rx_ring[i];
+                       rx_ring[i] = *adapter->rx_ring[i];
                        rx_ring[i].count = new_rx_count;
                        err = ixgbevf_setup_rx_resources(adapter, &rx_ring[i]);
                        if (!err)
@@ -352,9 +352,8 @@ static int ixgbevf_set_ringparam(struct net_device *netdev,
        /* Tx */
        if (tx_ring) {
                for (i = 0; i < adapter->num_tx_queues; i++) {
-                       ixgbevf_free_tx_resources(adapter,
-                                                 &adapter->tx_ring[i]);
-                       adapter->tx_ring[i] = tx_ring[i];
+                       ixgbevf_free_tx_resources(adapter, adapter->tx_ring[i]);
+                       *adapter->tx_ring[i] = tx_ring[i];
                }
                adapter->tx_ring_count = new_tx_count;
 
@@ -365,9 +364,8 @@ static int ixgbevf_set_ringparam(struct net_device *netdev,
        /* Rx */
        if (rx_ring) {
                for (i = 0; i < adapter->num_rx_queues; i++) {
-                       ixgbevf_free_rx_resources(adapter,
-                                                 &adapter->rx_ring[i]);
-                       adapter->rx_ring[i] = rx_ring[i];
+                       ixgbevf_free_rx_resources(adapter, adapter->rx_ring[i]);
+                       *adapter->rx_ring[i] = rx_ring[i];
                }
                adapter->rx_ring_count = new_rx_count;
 
@@ -413,15 +411,15 @@ static void ixgbevf_get_ethtool_stats(struct net_device *netdev,
            tx_yields = 0, tx_cleaned = 0, tx_missed = 0;
 
        for (i = 0; i < adapter->num_rx_queues; i++) {
-               rx_yields += adapter->rx_ring[i].bp_yields;
-               rx_cleaned += adapter->rx_ring[i].bp_cleaned;
-               rx_yields += adapter->rx_ring[i].bp_yields;
+               rx_yields += adapter->rx_ring[i]->bp_yields;
+               rx_cleaned += adapter->rx_ring[i]->bp_cleaned;
+               rx_yields += adapter->rx_ring[i]->bp_yields;
        }
 
        for (i = 0; i < adapter->num_tx_queues; i++) {
-               tx_yields += adapter->tx_ring[i].bp_yields;
-               tx_cleaned += adapter->tx_ring[i].bp_cleaned;
-               tx_yields += adapter->tx_ring[i].bp_yields;
+               tx_yields += adapter->tx_ring[i]->bp_yields;
+               tx_cleaned += adapter->tx_ring[i]->bp_cleaned;
+               tx_yields += adapter->tx_ring[i]->bp_yields;
        }
 
        adapter->bp_rx_yields = rx_yields;
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index bb76e96f8278caf09c11fbd049ccac83b1acc32c..0547e40980cb490f30fd2402c03faf16c42e0b6d 100644
@@ -260,6 +260,7 @@ static inline bool ixgbevf_qv_disable(struct ixgbevf_q_vector *q_vector)
        spin_lock_bh(&q_vector->lock);
        if (q_vector->state & IXGBEVF_QV_OWNED)
                rc = false;
+       q_vector->state |= IXGBEVF_QV_STATE_DISABLED;
        spin_unlock_bh(&q_vector->lock);
        return rc;
 }
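
Without the added line, a q_vector torn down while idle never carried the DISABLED mark, so a later busy-poll lock attempt could still acquire it. The acquire side only has to test the combined locked mask; a sketch of that side, assuming the usual busy-poll bit layout where IXGBEVF_QV_LOCKED includes IXGBEVF_QV_STATE_DISABLED (an assumption; this diff does not show those helpers):

    static inline bool example_qv_lock_napi(struct ixgbevf_q_vector *q_vector)
    {
            bool rc = true;

            spin_lock_bh(&q_vector->lock);
            if (q_vector->state & IXGBEVF_QV_LOCKED)  /* DISABLED lands here */
                    rc = false;
            else
                    q_vector->state |= IXGBEVF_QV_STATE_NAPI;
            spin_unlock_bh(&q_vector->lock);
            return rc;
    }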
@@ -326,7 +327,7 @@ struct ixgbevf_adapter {
        u32 eims_other;
 
        /* TX */
-       struct ixgbevf_ring *tx_ring;   /* One per active queue */
+       struct ixgbevf_ring *tx_ring[MAX_TX_QUEUES]; /* One per active queue */
        int num_tx_queues;
        u64 restart_queue;
        u64 hw_csum_tx_good;
@@ -336,7 +337,7 @@ struct ixgbevf_adapter {
        u32 tx_timeout_count;
 
        /* RX */
-       struct ixgbevf_ring *rx_ring;   /* One per active queue */
+       struct ixgbevf_ring *rx_ring[MAX_TX_QUEUES]; /* One per active queue */
        int num_rx_queues;
        u64 hw_csum_rx_error;
        u64 hw_rx_no_dma_resources;
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index a5d31674ff42430a7cbb9107cae8b26749c6bcc6..6cf41207a31d0880dd4c29a709451e5cf2c0cfe4 100644
@@ -848,8 +848,8 @@ static inline void map_vector_to_rxq(struct ixgbevf_adapter *a, int v_idx,
 {
        struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];
 
-       a->rx_ring[r_idx].next = q_vector->rx.ring;
-       q_vector->rx.ring = &a->rx_ring[r_idx];
+       a->rx_ring[r_idx]->next = q_vector->rx.ring;
+       q_vector->rx.ring = a->rx_ring[r_idx];
        q_vector->rx.count++;
 }
 
@@ -858,8 +858,8 @@ static inline void map_vector_to_txq(struct ixgbevf_adapter *a, int v_idx,
 {
        struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];
 
-       a->tx_ring[t_idx].next = q_vector->tx.ring;
-       q_vector->tx.ring = &a->tx_ring[t_idx];
+       a->tx_ring[t_idx]->next = q_vector->tx.ring;
+       q_vector->tx.ring = a->tx_ring[t_idx];
        q_vector->tx.count++;
 }
 
@@ -1086,6 +1086,70 @@ static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter)
        IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_enable_mask);
 }
 
+/**
+ * ixgbevf_configure_tx_ring - Configure 82599 VF Tx ring after Reset
+ * @adapter: board private structure
+ * @ring: structure containing ring specific data
+ *
+ * Configure the Tx descriptor ring after a reset.
+ **/
+static void ixgbevf_configure_tx_ring(struct ixgbevf_adapter *adapter,
+                                     struct ixgbevf_ring *ring)
+{
+       struct ixgbe_hw *hw = &adapter->hw;
+       u64 tdba = ring->dma;
+       int wait_loop = 10;
+       u32 txdctl = IXGBE_TXDCTL_ENABLE;
+       u8 reg_idx = ring->reg_idx;
+
+       /* disable queue to avoid issues while updating state */
+       IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
+       IXGBE_WRITE_FLUSH(hw);
+
+       IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(reg_idx), tdba & DMA_BIT_MASK(32));
+       IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(reg_idx), tdba >> 32);
+       IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(reg_idx),
+                       ring->count * sizeof(union ixgbe_adv_tx_desc));
+
+       /* disable head writeback */
+       IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAH(reg_idx), 0);
+       IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAL(reg_idx), 0);
+
+       /* enable relaxed ordering */
+       IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(reg_idx),
+                       (IXGBE_DCA_TXCTRL_DESC_RRO_EN |
+                        IXGBE_DCA_TXCTRL_DATA_RRO_EN));
+
+       /* reset head and tail pointers */
+       IXGBE_WRITE_REG(hw, IXGBE_VFTDH(reg_idx), 0);
+       IXGBE_WRITE_REG(hw, IXGBE_VFTDT(reg_idx), 0);
+       ring->tail = hw->hw_addr + IXGBE_VFTDT(reg_idx);
+
+       /* reset ntu and ntc to place SW in sync with hardware */
+       ring->next_to_clean = 0;
+       ring->next_to_use = 0;
+
+       /* In order to avoid issues WTHRESH + PTHRESH should always be equal
+        * to or less than the number of on chip descriptors, which is
+        * currently 40.
+        */
+       txdctl |= (8 << 16);    /* WTHRESH = 8 */
+
+       /* Setting PTHRESH to 32 both improves performance
+        * and avoids a Tx hang with DFP enabled
+        */
+       txdctl |= (1 << 8) |    /* HTHRESH = 1 */
+                 32;          /* PTHRESH = 32 */
+
+       IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), txdctl);
+
+       /* poll to verify queue is enabled */
+       do {
+               usleep_range(1000, 2000);
+               txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(reg_idx));
+       } while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE));
+       if (!wait_loop)
+               pr_err("Could not enable Tx Queue %d\n", reg_idx);
+}
+
 /**
  * ixgbevf_configure_tx - Configure 82599 VF Transmit Unit after Reset
  * @adapter: board private structure
@@ -1094,32 +1158,11 @@ static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter)
  **/
 static void ixgbevf_configure_tx(struct ixgbevf_adapter *adapter)
 {
-       u64 tdba;
-       struct ixgbe_hw *hw = &adapter->hw;
-       u32 i, j, tdlen, txctrl;
+       u32 i;
 
        /* Setup the HW Tx Head and Tail descriptor pointers */
-       for (i = 0; i < adapter->num_tx_queues; i++) {
-               struct ixgbevf_ring *ring = &adapter->tx_ring[i];
-               j = ring->reg_idx;
-               tdba = ring->dma;
-               tdlen = ring->count * sizeof(union ixgbe_adv_tx_desc);
-               IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(j),
-                               (tdba & DMA_BIT_MASK(32)));
-               IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(j), (tdba >> 32));
-               IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(j), tdlen);
-               IXGBE_WRITE_REG(hw, IXGBE_VFTDH(j), 0);
-               IXGBE_WRITE_REG(hw, IXGBE_VFTDT(j), 0);
-               ring->tail = hw->hw_addr + IXGBE_VFTDT(j);
-               ring->next_to_clean = 0;
-               ring->next_to_use = 0;
-               /* Disable Tx Head Writeback RO bit, since this hoses
-                * bookkeeping if things aren't delivered in order.
-                */
-               txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(j));
-               txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
-               IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(j), txctrl);
-       }
+       for (i = 0; i < adapter->num_tx_queues; i++)
+               ixgbevf_configure_tx_ring(adapter, adapter->tx_ring[i]);
 }
 
 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT        2
@@ -1130,7 +1173,7 @@ static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index)
        struct ixgbe_hw *hw = &adapter->hw;
        u32 srrctl;
 
-       rx_ring = &adapter->rx_ring[index];
+       rx_ring = adapter->rx_ring[index];
 
        srrctl = IXGBE_SRRCTL_DROP_EN;
 
@@ -1188,7 +1231,93 @@ static void ixgbevf_set_rx_buffer_len(struct ixgbevf_adapter *adapter)
                rx_buf_len = IXGBEVF_RXBUFFER_10K;
 
        for (i = 0; i < adapter->num_rx_queues; i++)
-               adapter->rx_ring[i].rx_buf_len = rx_buf_len;
+               adapter->rx_ring[i]->rx_buf_len = rx_buf_len;
+}
+
+#define IXGBEVF_MAX_RX_DESC_POLL 10
+static void ixgbevf_disable_rx_queue(struct ixgbevf_adapter *adapter,
+                                    struct ixgbevf_ring *ring)
+{
+       struct ixgbe_hw *hw = &adapter->hw;
+       int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
+       u32 rxdctl;
+       u8 reg_idx = ring->reg_idx;
+
+       rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
+       rxdctl &= ~IXGBE_RXDCTL_ENABLE;
+
+       /* write value back with RXDCTL.ENABLE bit cleared */
+       IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);
+
+       /* the hardware may take up to 100us to really disable the rx queue */
+       do {
+               udelay(10);
+               rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
+       } while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE));
+
+       if (!wait_loop)
+               pr_err("RXDCTL.ENABLE queue %d not cleared while polling\n",
+                      reg_idx);
+}
+
+static void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
+                                        struct ixgbevf_ring *ring)
+{
+       struct ixgbe_hw *hw = &adapter->hw;
+       int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
+       u32 rxdctl;
+       u8 reg_idx = ring->reg_idx;
+
+       do {
+               usleep_range(1000, 2000);
+               rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
+       } while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE));
+
+       if (!wait_loop)
+               pr_err("RXDCTL.ENABLE queue %d not set while polling\n",
+                      reg_idx);
+}
+
+static void ixgbevf_configure_rx_ring(struct ixgbevf_adapter *adapter,
+                                     struct ixgbevf_ring *ring)
+{
+       struct ixgbe_hw *hw = &adapter->hw;
+       u64 rdba = ring->dma;
+       u32 rxdctl;
+       u8 reg_idx = ring->reg_idx;
+
+       /* disable queue to avoid issues while updating state */
+       rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
+       ixgbevf_disable_rx_queue(adapter, ring);
+
+       IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(reg_idx), rdba & DMA_BIT_MASK(32));
+       IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(reg_idx), rdba >> 32);
+       IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(reg_idx),
+                       ring->count * sizeof(union ixgbe_adv_rx_desc));
+
+       /* enable relaxed ordering */
+       IXGBE_WRITE_REG(hw, IXGBE_VFDCA_RXCTRL(reg_idx),
+                       IXGBE_DCA_RXCTRL_DESC_RRO_EN);
+
+       /* reset head and tail pointers */
+       IXGBE_WRITE_REG(hw, IXGBE_VFRDH(reg_idx), 0);
+       IXGBE_WRITE_REG(hw, IXGBE_VFRDT(reg_idx), 0);
+       ring->tail = hw->hw_addr + IXGBE_VFRDT(reg_idx);
+
+       /* reset ntu and ntc to place SW in sync with hardware */
+       ring->next_to_clean = 0;
+       ring->next_to_use = 0;
+
+       ixgbevf_configure_srrctl(adapter, reg_idx);
+
+       /* prevent DMA from exceeding buffer space available */
+       rxdctl &= ~IXGBE_RXDCTL_RLPMLMASK;
+       rxdctl |= ring->rx_buf_len | IXGBE_RXDCTL_RLPML_EN;
+       rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
+       IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);
+
+       ixgbevf_rx_desc_queue_enable(adapter, ring);
+       ixgbevf_alloc_rx_buffers(adapter, ring, ixgbevf_desc_unused(ring));
 }
 
 /**
@@ -1199,10 +1328,7 @@ static void ixgbevf_set_rx_buffer_len(struct ixgbevf_adapter *adapter)
  **/
 static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
 {
-       u64 rdba;
-       struct ixgbe_hw *hw = &adapter->hw;
-       int i, j;
-       u32 rdlen;
+       int i;
 
        ixgbevf_setup_psrtype(adapter);
 
@@ -1211,23 +1337,8 @@ static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
 
        /* Setup the HW Rx Head and Tail Descriptor Pointers and
         * the Base and Length of the Rx Descriptor Ring */
-       for (i = 0; i < adapter->num_rx_queues; i++) {
-               struct ixgbevf_ring *ring = &adapter->rx_ring[i];
-               rdba = ring->dma;
-               j = ring->reg_idx;
-               rdlen = ring->count * sizeof(union ixgbe_adv_rx_desc);
-               IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(j),
-                               (rdba & DMA_BIT_MASK(32)));
-               IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(j), (rdba >> 32));
-               IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(j), rdlen);
-               IXGBE_WRITE_REG(hw, IXGBE_VFRDH(j), 0);
-               IXGBE_WRITE_REG(hw, IXGBE_VFRDT(j), 0);
-               ring->tail = hw->hw_addr + IXGBE_VFRDT(j);
-               ring->next_to_clean = 0;
-               ring->next_to_use = 0;
-
-               ixgbevf_configure_srrctl(adapter, j);
-       }
+       for (i = 0; i < adapter->num_rx_queues; i++)
+               ixgbevf_configure_rx_ring(adapter, adapter->rx_ring[i]);
 }
 
 static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev,
@@ -1389,7 +1500,7 @@ static int ixgbevf_configure_dcb(struct ixgbevf_adapter *adapter)
 
        if (num_tcs > 1) {
                /* update default Tx ring register index */
-               adapter->tx_ring[0].reg_idx = def_q;
+               adapter->tx_ring[0]->reg_idx = def_q;
 
                /* we need as many queues as traffic classes */
                num_rx_queues = num_tcs;
@@ -1409,69 +1520,14 @@ static int ixgbevf_configure_dcb(struct ixgbevf_adapter *adapter)
 
 static void ixgbevf_configure(struct ixgbevf_adapter *adapter)
 {
-       struct net_device *netdev = adapter->netdev;
-       int i;
-
        ixgbevf_configure_dcb(adapter);
 
-       ixgbevf_set_rx_mode(netdev);
+       ixgbevf_set_rx_mode(adapter->netdev);
 
        ixgbevf_restore_vlan(adapter);
 
        ixgbevf_configure_tx(adapter);
        ixgbevf_configure_rx(adapter);
-       for (i = 0; i < adapter->num_rx_queues; i++) {
-               struct ixgbevf_ring *ring = &adapter->rx_ring[i];
-               ixgbevf_alloc_rx_buffers(adapter, ring,
-                                        ixgbevf_desc_unused(ring));
-       }
-}
-
-#define IXGBEVF_MAX_RX_DESC_POLL 10
-static void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
-                                        int rxr)
-{
-       struct ixgbe_hw *hw = &adapter->hw;
-       int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
-       u32 rxdctl;
-       int j = adapter->rx_ring[rxr].reg_idx;
-
-       do {
-               usleep_range(1000, 2000);
-               rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
-       } while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE));
-
-       if (!wait_loop)
-               hw_dbg(hw, "RXDCTL.ENABLE queue %d not set while polling\n",
-                      rxr);
-
-       ixgbevf_release_rx_desc(&adapter->rx_ring[rxr],
-                               (adapter->rx_ring[rxr].count - 1));
-}
-
-static void ixgbevf_disable_rx_queue(struct ixgbevf_adapter *adapter,
-                                    struct ixgbevf_ring *ring)
-{
-       struct ixgbe_hw *hw = &adapter->hw;
-       int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
-       u32 rxdctl;
-       u8 reg_idx = ring->reg_idx;
-
-       rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
-       rxdctl &= ~IXGBE_RXDCTL_ENABLE;
-
-       /* write value back with RXDCTL.ENABLE bit cleared */
-       IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);
-
-       /* the hardware may take up to 100us to really disable the rx queue */
-       do {
-               udelay(10);
-               rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
-       } while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE));
-
-       if (!wait_loop)
-               hw_dbg(hw, "RXDCTL.ENABLE queue %d not cleared while polling\n",
-                      reg_idx);
 }
 
 static void ixgbevf_save_reset_stats(struct ixgbevf_adapter *adapter)
@@ -1536,37 +1592,6 @@ static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
 {
        struct net_device *netdev = adapter->netdev;
        struct ixgbe_hw *hw = &adapter->hw;
-       int i, j = 0;
-       int num_rx_rings = adapter->num_rx_queues;
-       u32 txdctl, rxdctl;
-
-       for (i = 0; i < adapter->num_tx_queues; i++) {
-               j = adapter->tx_ring[i].reg_idx;
-               txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
-               /* enable WTHRESH=8 descriptors, to encourage burst writeback */
-               txdctl |= (8 << 16);
-               IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
-       }
-
-       for (i = 0; i < adapter->num_tx_queues; i++) {
-               j = adapter->tx_ring[i].reg_idx;
-               txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
-               txdctl |= IXGBE_TXDCTL_ENABLE;
-               IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
-       }
-
-       for (i = 0; i < num_rx_rings; i++) {
-               j = adapter->rx_ring[i].reg_idx;
-               rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
-               rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
-               if (hw->mac.type == ixgbe_mac_X540_vf) {
-                       rxdctl &= ~IXGBE_RXDCTL_RLPMLMASK;
-                       rxdctl |= ((netdev->mtu + ETH_HLEN + ETH_FCS_LEN) |
-                                  IXGBE_RXDCTL_RLPML_EN);
-               }
-               IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
-               ixgbevf_rx_desc_queue_enable(adapter, i);
-       }
 
        ixgbevf_configure_msix(adapter);
 
@@ -1686,7 +1711,7 @@ static void ixgbevf_clean_all_rx_rings(struct ixgbevf_adapter *adapter)
        int i;
 
        for (i = 0; i < adapter->num_rx_queues; i++)
-               ixgbevf_clean_rx_ring(adapter, &adapter->rx_ring[i]);
+               ixgbevf_clean_rx_ring(adapter, adapter->rx_ring[i]);
 }
 
 /**
@@ -1698,22 +1723,21 @@ static void ixgbevf_clean_all_tx_rings(struct ixgbevf_adapter *adapter)
        int i;
 
        for (i = 0; i < adapter->num_tx_queues; i++)
-               ixgbevf_clean_tx_ring(adapter, &adapter->tx_ring[i]);
+               ixgbevf_clean_tx_ring(adapter, adapter->tx_ring[i]);
 }
 
 void ixgbevf_down(struct ixgbevf_adapter *adapter)
 {
        struct net_device *netdev = adapter->netdev;
        struct ixgbe_hw *hw = &adapter->hw;
-       u32 txdctl;
-       int i, j;
+       int i;
 
        /* signal that we are down to the interrupt handler */
        set_bit(__IXGBEVF_DOWN, &adapter->state);
 
        /* disable all enabled rx queues */
        for (i = 0; i < adapter->num_rx_queues; i++)
-               ixgbevf_disable_rx_queue(adapter, &adapter->rx_ring[i]);
+               ixgbevf_disable_rx_queue(adapter, adapter->rx_ring[i]);
 
        netif_tx_disable(netdev);
 
@@ -1734,10 +1758,10 @@ void ixgbevf_down(struct ixgbevf_adapter *adapter)
 
        /* disable transmits in the hardware now that interrupts are off */
        for (i = 0; i < adapter->num_tx_queues; i++) {
-               j = adapter->tx_ring[i].reg_idx;
-               txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
-               IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j),
-                               (txdctl & ~IXGBE_TXDCTL_ENABLE));
+               u8 reg_idx = adapter->tx_ring[i]->reg_idx;
+
+               IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx),
+                               IXGBE_TXDCTL_SWFLSH);
        }
 
        netif_carrier_off(netdev);
@@ -1875,40 +1899,50 @@ static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
  **/
 static int ixgbevf_alloc_queues(struct ixgbevf_adapter *adapter)
 {
-       int i;
+       struct ixgbevf_ring *ring;
+       int rx = 0, tx = 0;
 
-       adapter->tx_ring = kcalloc(adapter->num_tx_queues,
-                                  sizeof(struct ixgbevf_ring), GFP_KERNEL);
-       if (!adapter->tx_ring)
-               goto err_tx_ring_allocation;
+       for (; tx < adapter->num_tx_queues; tx++) {
+               ring = kzalloc(sizeof(*ring), GFP_KERNEL);
+               if (!ring)
+                       goto err_allocation;
 
-       adapter->rx_ring = kcalloc(adapter->num_rx_queues,
-                                  sizeof(struct ixgbevf_ring), GFP_KERNEL);
-       if (!adapter->rx_ring)
-               goto err_rx_ring_allocation;
+               ring->dev = &adapter->pdev->dev;
+               ring->netdev = adapter->netdev;
+               ring->count = adapter->tx_ring_count;
+               ring->queue_index = tx;
+               ring->reg_idx = tx;
 
-       for (i = 0; i < adapter->num_tx_queues; i++) {
-               adapter->tx_ring[i].count = adapter->tx_ring_count;
-               adapter->tx_ring[i].queue_index = i;
-               /* reg_idx may be remapped later by DCB config */
-               adapter->tx_ring[i].reg_idx = i;
-               adapter->tx_ring[i].dev = &adapter->pdev->dev;
-               adapter->tx_ring[i].netdev = adapter->netdev;
+               adapter->tx_ring[tx] = ring;
        }
 
-       for (i = 0; i < adapter->num_rx_queues; i++) {
-               adapter->rx_ring[i].count = adapter->rx_ring_count;
-               adapter->rx_ring[i].queue_index = i;
-               adapter->rx_ring[i].reg_idx = i;
-               adapter->rx_ring[i].dev = &adapter->pdev->dev;
-               adapter->rx_ring[i].netdev = adapter->netdev;
+       for (; rx < adapter->num_rx_queues; rx++) {
+               ring = kzalloc(sizeof(*ring), GFP_KERNEL);
+               if (!ring)
+                       goto err_allocation;
+
+               ring->dev = &adapter->pdev->dev;
+               ring->netdev = adapter->netdev;
+
+               ring->count = adapter->rx_ring_count;
+               ring->queue_index = rx;
+               ring->reg_idx = rx;
+
+               adapter->rx_ring[rx] = ring;
        }
 
        return 0;
 
-err_rx_ring_allocation:
-       kfree(adapter->tx_ring);
-err_tx_ring_allocation:
+err_allocation:
+       while (tx) {
+               kfree(adapter->tx_ring[--tx]);
+               adapter->tx_ring[tx] = NULL;
+       }
+
+       while (rx) {
+               kfree(adapter->rx_ring[--rx]);
+               adapter->rx_ring[rx] = NULL;
+       }
        return -ENOMEM;
 }
 
@@ -2099,6 +2133,17 @@ err_set_interrupt:
  **/
 static void ixgbevf_clear_interrupt_scheme(struct ixgbevf_adapter *adapter)
 {
+       int i;
+
+       for (i = 0; i < adapter->num_tx_queues; i++) {
+               kfree(adapter->tx_ring[i]);
+               adapter->tx_ring[i] = NULL;
+       }
+       for (i = 0; i < adapter->num_rx_queues; i++) {
+               kfree(adapter->rx_ring[i]);
+               adapter->rx_ring[i] = NULL;
+       }
+
        adapter->num_tx_queues = 0;
        adapter->num_rx_queues = 0;
 
@@ -2229,11 +2274,11 @@ void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
 
        for (i = 0;  i  < adapter->num_rx_queues;  i++) {
                adapter->hw_csum_rx_error +=
-                       adapter->rx_ring[i].hw_csum_rx_error;
+                       adapter->rx_ring[i]->hw_csum_rx_error;
                adapter->hw_csum_rx_good +=
-                       adapter->rx_ring[i].hw_csum_rx_good;
-               adapter->rx_ring[i].hw_csum_rx_error = 0;
-               adapter->rx_ring[i].hw_csum_rx_good = 0;
+                       adapter->rx_ring[i]->hw_csum_rx_good;
+               adapter->rx_ring[i]->hw_csum_rx_error = 0;
+               adapter->rx_ring[i]->hw_csum_rx_good = 0;
        }
 }
 
@@ -2396,6 +2441,10 @@ void ixgbevf_free_tx_resources(struct ixgbevf_adapter *adapter,
        vfree(tx_ring->tx_buffer_info);
        tx_ring->tx_buffer_info = NULL;
 
+       /* if not set, then don't free */
+       if (!tx_ring->desc)
+               return;
+
        dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
                          tx_ring->dma);
 
@@ -2413,10 +2462,8 @@ static void ixgbevf_free_all_tx_resources(struct ixgbevf_adapter *adapter)
        int i;
 
        for (i = 0; i < adapter->num_tx_queues; i++)
-               if (adapter->tx_ring[i].desc)
-                       ixgbevf_free_tx_resources(adapter,
-                                                 &adapter->tx_ring[i]);
-
+               if (adapter->tx_ring[i]->desc)
+                       ixgbevf_free_tx_resources(adapter, adapter->tx_ring[i]);
 }
 
 /**
@@ -2471,7 +2518,7 @@ static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter)
        int i, err = 0;
 
        for (i = 0; i < adapter->num_tx_queues; i++) {
-               err = ixgbevf_setup_tx_resources(adapter, &adapter->tx_ring[i]);
+               err = ixgbevf_setup_tx_resources(adapter, adapter->tx_ring[i]);
                if (!err)
                        continue;
                hw_dbg(&adapter->hw,
@@ -2533,7 +2580,7 @@ static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter)
        int i, err = 0;
 
        for (i = 0; i < adapter->num_rx_queues; i++) {
-               err = ixgbevf_setup_rx_resources(adapter, &adapter->rx_ring[i]);
+               err = ixgbevf_setup_rx_resources(adapter, adapter->rx_ring[i]);
                if (!err)
                        continue;
                hw_dbg(&adapter->hw,
@@ -2577,9 +2624,8 @@ static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter)
        int i;
 
        for (i = 0; i < adapter->num_rx_queues; i++)
-               if (adapter->rx_ring[i].desc)
-                       ixgbevf_free_rx_resources(adapter,
-                                                 &adapter->rx_ring[i]);
+               if (adapter->rx_ring[i]->desc)
+                       ixgbevf_free_rx_resources(adapter, adapter->rx_ring[i]);
 }
 
 /**
@@ -3069,7 +3115,7 @@ static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
                return NETDEV_TX_OK;
        }
 
-       tx_ring = &adapter->tx_ring[r_idx];
+       tx_ring = adapter->tx_ring[r_idx];
 
        /*
         * need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD,
@@ -3222,8 +3268,8 @@ static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state)
 #ifdef CONFIG_PM
 static int ixgbevf_resume(struct pci_dev *pdev)
 {
-       struct ixgbevf_adapter *adapter = pci_get_drvdata(pdev);
-       struct net_device *netdev = adapter->netdev;
+       struct net_device *netdev = pci_get_drvdata(pdev);
+       struct ixgbevf_adapter *adapter = netdev_priv(netdev);
        u32 err;
 
        pci_set_power_state(pdev, PCI_D0);
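
The two-line change works because the PCI driver-private data holds the net_device, not the adapter (inferred from this fix; the probe side is not shown in this diff), so the adapter must be derived through netdev_priv():

    /* probe side, assumed per the usual netdev-driver pattern */
    pci_set_drvdata(pdev, netdev);

    /* any PM callback then unwraps in the same order */
    struct net_device *netdev = pci_get_drvdata(pdev);
    struct ixgbevf_adapter *adapter = netdev_priv(netdev);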
@@ -3282,7 +3328,7 @@ static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev,
        stats->multicast = adapter->stats.vfmprc - adapter->stats.base_vfmprc;
 
        for (i = 0; i < adapter->num_rx_queues; i++) {
-               ring = &adapter->rx_ring[i];
+               ring = adapter->rx_ring[i];
                do {
                        start = u64_stats_fetch_begin_bh(&ring->syncp);
                        bytes = ring->total_bytes;
@@ -3293,7 +3339,7 @@ static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev,
        }
 
        for (i = 0; i < adapter->num_tx_queues; i++) {
-               ring = &adapter->tx_ring[i];
+               ring = adapter->tx_ring[i];
                do {
                        start = u64_stats_fetch_begin_bh(&ring->syncp);
                        bytes = ring->total_bytes;
@@ -3528,9 +3574,6 @@ static void ixgbevf_remove(struct pci_dev *pdev)
 
        hw_dbg(&adapter->hw, "Remove complete\n");
 
-       kfree(adapter->tx_ring);
-       kfree(adapter->rx_ring);
-
        free_netdev(netdev);
 
        pci_disable_device(pdev);