ixgbevf: Reduce size of maximum rx buffer
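
This diff aggregates several ixgbevf_main.c changes. The headline one replaces the 3K/7K/15K receive-buffer ladder (sized as "power of two minus 1K" for skb->head) with 2K/4K/8K capped at 10K, since the hardware's maximum frame is only 9.5K. Alongside it: the driver version moves to 2.7.12-k, mailbox API 1.1 is negotiated with the PF (enabling ixgbevf_get_queues() based queue configuration and jumbo frames independent of MAC type), every mbx_lock site becomes BH-safe, Rx DMA mapping failures are handled, a VEPA multicast source-pruning workaround is added, and MSI-X vector acquisition now propagates errors.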
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index de1ad506665d6a33c223c8a73fbada54f39df832..9d88153023633c5426b9b1841b269d7738f98c34 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -58,7 +58,7 @@ const char ixgbevf_driver_name[] = "ixgbevf";
 static const char ixgbevf_driver_string[] =
        "Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver";
 
-#define DRV_VERSION "2.6.0-k"
+#define DRV_VERSION "2.7.12-k"
 const char ixgbevf_driver_version[] = DRV_VERSION;
 static char ixgbevf_copyright[] =
        "Copyright (c) 2009 - 2012 Intel Corporation.";
@@ -99,6 +99,7 @@ MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
 
 /* forward decls */
 static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector);
+static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter);
 
 static inline void ixgbevf_release_rx_desc(struct ixgbe_hw *hw,
                                           struct ixgbevf_ring *rx_ring,
@@ -287,7 +288,10 @@ static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector,
        if (is_vlan && test_bit(tag & VLAN_VID_MASK, adapter->active_vlans))
                __vlan_hwaccel_put_tag(skb, tag);
 
-       napi_gro_receive(&q_vector->napi, skb);
+       if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL))
+               napi_gro_receive(&q_vector->napi, skb);
+       else
+               netif_rx(skb);
 }
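
GRO is not safe while netpoll is draining the ring, so delivery falls back to netif_rx(), which merely queues the skb for later softirq processing; the IXGBE_FLAG_IN_NETPOLL flag that selects this path is set around the Rx cleanup in ixgbevf_poll() below.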
 
 /**
@@ -358,6 +362,12 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
                        bi->dma = dma_map_single(&pdev->dev, skb->data,
                                                 rx_ring->rx_buf_len,
                                                 DMA_FROM_DEVICE);
+                       if (dma_mapping_error(&pdev->dev, bi->dma)) {
+                               dev_kfree_skb(skb);
+                               bi->skb = NULL;
+                               dev_err(&pdev->dev, "RX DMA map failed\n");
+                               break;
+                       }
                }
                rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
 
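dma_map_single() can fail (an exhausted IOMMU or swiotlb, for instance), and CONFIG_DMA_API_DEBUG warns whenever a mapping is consumed without a dma_mapping_error() check. On failure the skb is freed, bi->skb is cleared so the slot presumably gets retried on a later refill pass, and the loop bails out early rather than posting an invalid address to the hardware.
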
@@ -471,6 +481,16 @@ static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
                }
                skb->protocol = eth_type_trans(skb, rx_ring->netdev);
 
+               /* Workaround hardware that can't do proper VEPA multicast
+                * source pruning.
+                */
+               if ((skb->pkt_type & (PACKET_BROADCAST | PACKET_MULTICAST)) &&
+                   !(compare_ether_addr(adapter->netdev->dev_addr,
+                                       eth_hdr(skb)->h_source))) {
+                       dev_kfree_skb_irq(skb);
+                       goto next_desc;
+               }
+
                ixgbevf_receive_skb(q_vector, skb, staterr, rx_desc);
 
 next_desc:
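
In VEPA mode the adjacent switch reflects multicast and broadcast traffic back down the same port, so a VF can see copies of its own transmissions. Hardware that cannot prune those reflections is worked around in software by dropping any broadcast/multicast frame whose Ethernet source address is the VF's own MAC; note that compare_ether_addr() returns zero on a match, hence the negation.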
@@ -533,9 +553,11 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget)
        else
                per_ring_budget = budget;
 
+       adapter->flags |= IXGBE_FLAG_IN_NETPOLL;
        ixgbevf_for_each_ring(ring, q_vector->rx)
                clean_complete &= ixgbevf_clean_rx_irq(q_vector, ring,
                                                       per_ring_budget);
+       adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL;
 
        /* If all work not completed, return budget and keep polling */
        if (!clean_complete)
@@ -1065,20 +1087,20 @@ static void ixgbevf_set_rx_buffer_len(struct ixgbevf_adapter *adapter)
        max_frame += VLAN_HLEN;
 
        /*
-        * Make best use of allocation by using all but 1K of a
-        * power of 2 allocation that will be used for skb->head.
+        * Allocate buffer sizes that fit well into 32K and
+        * take into account max frame size of 9.5K
         */
        if ((hw->mac.type == ixgbe_mac_X540_vf) &&
            (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE))
                rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
-       else if (max_frame <= IXGBEVF_RXBUFFER_3K)
-               rx_buf_len = IXGBEVF_RXBUFFER_3K;
-       else if (max_frame <= IXGBEVF_RXBUFFER_7K)
-               rx_buf_len = IXGBEVF_RXBUFFER_7K;
-       else if (max_frame <= IXGBEVF_RXBUFFER_15K)
-               rx_buf_len = IXGBEVF_RXBUFFER_15K;
+       else if (max_frame <= IXGBEVF_RXBUFFER_2K)
+               rx_buf_len = IXGBEVF_RXBUFFER_2K;
+       else if (max_frame <= IXGBEVF_RXBUFFER_4K)
+               rx_buf_len = IXGBEVF_RXBUFFER_4K;
+       else if (max_frame <= IXGBEVF_RXBUFFER_8K)
+               rx_buf_len = IXGBEVF_RXBUFFER_8K;
        else
-               rx_buf_len = IXGBEVF_MAX_RXBUFFER;
+               rx_buf_len = IXGBEVF_RXBUFFER_10K;
 
        for (i = 0; i < adapter->num_rx_queues; i++)
                adapter->rx_ring[i].rx_buf_len = rx_buf_len;
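
The old ladder deliberately used all but 1K of a power-of-two allocation, but its 15K top end is oversized for hardware whose maximum frame is 9.5K. A minimal userspace sketch of the new selection, assuming the IXGBEVF_RXBUFFER_* constants carry their face values (2048/4096/8192/10240, not confirmed here):

	/* hypothetical mirror of the rx_buf_len ladder, for illustration */
	static unsigned int pick_rx_buf_len(unsigned int max_frame)
	{
		if (max_frame <= 2048)
			return 2048;	/* IXGBEVF_RXBUFFER_2K */
		if (max_frame <= 4096)
			return 4096;	/* IXGBEVF_RXBUFFER_4K */
		if (max_frame <= 8192)
			return 8192;	/* IXGBEVF_RXBUFFER_8K */
		return 10240;		/* IXGBEVF_RXBUFFER_10K */
	}

For a 9000-byte MTU, max_frame = 9000 + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN) + 4 (VLAN_HLEN) = 9022, which falls through to the 10K bucket.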
@@ -1131,12 +1153,12 @@ static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
        if (!hw->mac.ops.set_vfta)
                return -EOPNOTSUPP;
 
-       spin_lock(&adapter->mbx_lock);
+       spin_lock_bh(&adapter->mbx_lock);
 
        /* add VID to filter table */
        err = hw->mac.ops.set_vfta(hw, vid, 0, true);
 
-       spin_unlock(&adapter->mbx_lock);
+       spin_unlock_bh(&adapter->mbx_lock);
 
        /* translate error return types so error makes sense */
        if (err == IXGBE_ERR_MBX)
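
Converting every mbx_lock site from spin_lock() to spin_lock_bh() disables bottom halves across the mailbox transaction; presumably the lock can now also be taken from softirq context, where a plain spin_lock() against a process-context holder on the same CPU would deadlock. The same substitution recurs throughout the rest of this diff.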
@@ -1156,13 +1178,13 @@ static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
        struct ixgbe_hw *hw = &adapter->hw;
        int err = -EOPNOTSUPP;
 
-       spin_lock(&adapter->mbx_lock);
+       spin_lock_bh(&adapter->mbx_lock);
 
        /* remove VID from filter table */
        if (hw->mac.ops.set_vfta)
                err = hw->mac.ops.set_vfta(hw, vid, 0, false);
 
-       spin_unlock(&adapter->mbx_lock);
+       spin_unlock_bh(&adapter->mbx_lock);
 
        clear_bit(vid, adapter->active_vlans);
 
@@ -1218,7 +1240,7 @@ static void ixgbevf_set_rx_mode(struct net_device *netdev)
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_hw *hw = &adapter->hw;
 
-       spin_lock(&adapter->mbx_lock);
+       spin_lock_bh(&adapter->mbx_lock);
 
        /* reprogram multicast list */
        if (hw->mac.ops.update_mc_addr_list)
@@ -1226,7 +1248,7 @@ static void ixgbevf_set_rx_mode(struct net_device *netdev)
 
        ixgbevf_write_uc_addr_list(netdev);
 
-       spin_unlock(&adapter->mbx_lock);
+       spin_unlock_bh(&adapter->mbx_lock);
 }
 
 static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
@@ -1335,11 +1357,12 @@ static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter)
 static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
 {
        struct ixgbe_hw *hw = &adapter->hw;
-       int api[] = { ixgbe_mbox_api_10,
+       int api[] = { ixgbe_mbox_api_11,
+                     ixgbe_mbox_api_10,
                      ixgbe_mbox_api_unknown };
        int err = 0, idx = 0;
 
-       spin_lock(&adapter->mbx_lock);
+       spin_lock_bh(&adapter->mbx_lock);
 
        while (api[idx] != ixgbe_mbox_api_unknown) {
                err = ixgbevf_negotiate_api_version(hw, api[idx]);
@@ -1348,7 +1371,7 @@ static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
                idx++;
        }
 
-       spin_unlock(&adapter->mbx_lock);
+       spin_unlock_bh(&adapter->mbx_lock);
 }
 
 static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
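
The negotiation list is walked newest-first, so a PF that understands mailbox API 1.1 gets it and an older PF falls back to 1.0; ixgbe_mbox_api_unknown terminates the walk. Settling on 1.1 is what later unlocks ixgbevf_get_queues() (used by the queue-setup functions below) and jumbo frames in ixgbevf_change_mtu().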
@@ -1389,7 +1412,7 @@ static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
 
        ixgbevf_configure_msix(adapter);
 
-       spin_lock(&adapter->mbx_lock);
+       spin_lock_bh(&adapter->mbx_lock);
 
        if (hw->mac.ops.set_rar) {
                if (is_valid_ether_addr(hw->mac.addr))
@@ -1398,7 +1421,7 @@ static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
                        hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0);
        }
 
-       spin_unlock(&adapter->mbx_lock);
+       spin_unlock_bh(&adapter->mbx_lock);
 
        clear_bit(__IXGBEVF_DOWN, &adapter->state);
        ixgbevf_napi_enable_all(adapter);
@@ -1413,12 +1436,87 @@ static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
        mod_timer(&adapter->watchdog_timer, jiffies);
 }
 
+static int ixgbevf_reset_queues(struct ixgbevf_adapter *adapter)
+{
+       struct ixgbe_hw *hw = &adapter->hw;
+       struct ixgbevf_ring *rx_ring;
+       unsigned int def_q = 0;
+       unsigned int num_tcs = 0;
+       unsigned int num_rx_queues = 1;
+       int err, i;
+
+       spin_lock_bh(&adapter->mbx_lock);
+
+       /* fetch queue configuration from the PF */
+       err = ixgbevf_get_queues(hw, &num_tcs, &def_q);
+
+       spin_unlock_bh(&adapter->mbx_lock);
+
+       if (err)
+               return err;
+
+       if (num_tcs > 1) {
+               /* update default Tx ring register index */
+               adapter->tx_ring[0].reg_idx = def_q;
+
+               /* we need as many queues as traffic classes */
+               num_rx_queues = num_tcs;
+       }
+
+       /* nothing to do if we have the correct number of queues */
+       if (adapter->num_rx_queues == num_rx_queues)
+               return 0;
+
+       /* allocate new rings */
+       rx_ring = kcalloc(num_rx_queues,
+                         sizeof(struct ixgbevf_ring), GFP_KERNEL);
+       if (!rx_ring)
+               return -ENOMEM;
+
+       /* setup ring fields */
+       for (i = 0; i < num_rx_queues; i++) {
+               rx_ring[i].count = adapter->rx_ring_count;
+               rx_ring[i].queue_index = i;
+               rx_ring[i].reg_idx = i;
+               rx_ring[i].dev = &adapter->pdev->dev;
+               rx_ring[i].netdev = adapter->netdev;
+
+               /* allocate resources on the ring */
+               err = ixgbevf_setup_rx_resources(adapter, &rx_ring[i]);
+               if (err) {
+                       while (i) {
+                               i--;
+                               ixgbevf_free_rx_resources(adapter, &rx_ring[i]);
+                       }
+                       kfree(rx_ring);
+                       return err;
+               }
+       }
+
+       /* free the existing rings and queues */
+       ixgbevf_free_all_rx_resources(adapter);
+       adapter->num_rx_queues = 0;
+       kfree(adapter->rx_ring);
+
+       /* move new rings into position on the adapter struct */
+       adapter->rx_ring = rx_ring;
+       adapter->num_rx_queues = num_rx_queues;
+
+       /* reset ring to vector mapping */
+       ixgbevf_reset_q_vectors(adapter);
+       ixgbevf_map_rings_to_vectors(adapter);
+
+       return 0;
+}
+
 void ixgbevf_up(struct ixgbevf_adapter *adapter)
 {
        struct ixgbe_hw *hw = &adapter->hw;
 
        ixgbevf_negotiate_api(adapter);
 
+       ixgbevf_reset_queues(adapter);
+
        ixgbevf_configure(adapter);
 
        ixgbevf_up_complete(adapter);
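
ixgbevf_reset_queues() queries the PF for the traffic-class count; with DCB active the VF needs one Rx queue per TC, and the default Tx ring is re-pointed at the PF-assigned register index (def_q). New rings are allocated and given Rx resources before the old set is torn down, so a mid-allocation failure unwinds the partial work and leaves the running configuration untouched. Its return value is not checked in ixgbevf_up(), however.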
@@ -1611,14 +1709,14 @@ void ixgbevf_reset(struct ixgbevf_adapter *adapter)
        struct ixgbe_hw *hw = &adapter->hw;
        struct net_device *netdev = adapter->netdev;
 
-       spin_lock(&adapter->mbx_lock);
+       spin_lock_bh(&adapter->mbx_lock);
 
        if (hw->mac.ops.reset_hw(hw))
                hw_dbg(hw, "PF still resetting\n");
        else
                hw->mac.ops.init_hw(hw);
 
-       spin_unlock(&adapter->mbx_lock);
+       spin_unlock_bh(&adapter->mbx_lock);
 
        if (is_valid_ether_addr(adapter->hw.mac.addr)) {
                memcpy(netdev->dev_addr, adapter->hw.mac.addr,
@@ -1628,10 +1726,11 @@ void ixgbevf_reset(struct ixgbevf_adapter *adapter)
        }
 }
 
-static void ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
-                                        int vectors)
+static int ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
+                                       int vectors)
 {
-       int err, vector_threshold;
+       int err = 0;
+       int vector_threshold;
 
        /* We'll want at least 2 (vector_threshold):
         * 1) TxQ[0] + RxQ[0] handler
@@ -1647,21 +1746,18 @@ static void ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
        while (vectors >= vector_threshold) {
                err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
                                      vectors);
-               if (!err) /* Success in acquiring all requested vectors. */
+               if (!err || err < 0) /* Success or a nasty failure. */
                        break;
-               else if (err < 0)
-                       vectors = 0; /* Nasty failure, quit now */
                else /* err == number of vectors we should try again with */
                        vectors = err;
        }
 
-       if (vectors < vector_threshold) {
-               /* Can't allocate enough MSI-X interrupts?  Oh well.
-                * This just means we'll go with either a single MSI
-                * vector or fall back to legacy interrupts.
-                */
-               hw_dbg(&adapter->hw,
-                      "Unable to allocate MSI-X interrupts\n");
+       if (vectors < vector_threshold)
+               err = -ENOMEM;
+
+       if (err) {
+               dev_err(&adapter->pdev->dev,
+                       "Unable to allocate MSI-X interrupts\n");
                kfree(adapter->msix_entries);
                adapter->msix_entries = NULL;
        } else {
@@ -1672,6 +1768,7 @@ static void ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
                 */
                adapter->num_msix_vectors = vectors;
        }
+       return err;
 }
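
With the old pci_enable_msix() API, zero means success, a negative value is a hard failure, and a positive value reports how many vectors could actually be allocated, so the loop retries with that reduced count. Returning -ENOMEM below the threshold (instead of merely logging) lets ixgbevf_set_interrupt_capability() abort cleanly; the VF has no MSI or legacy-interrupt fallback.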
 
 /**
@@ -1717,6 +1814,7 @@ static int ixgbevf_alloc_queues(struct ixgbevf_adapter *adapter)
        for (i = 0; i < adapter->num_tx_queues; i++) {
                adapter->tx_ring[i].count = adapter->tx_ring_count;
                adapter->tx_ring[i].queue_index = i;
+               /* reg_idx may be remapped later by DCB config */
                adapter->tx_ring[i].reg_idx = i;
                adapter->tx_ring[i].dev = &adapter->pdev->dev;
                adapter->tx_ring[i].netdev = adapter->netdev;
@@ -1774,7 +1872,9 @@ static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter)
        for (vector = 0; vector < v_budget; vector++)
                adapter->msix_entries[vector].entry = vector;
 
-       ixgbevf_acquire_msix_vectors(adapter, v_budget);
+       err = ixgbevf_acquire_msix_vectors(adapter, v_budget);
+       if (err)
+               goto out;
 
        err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues);
        if (err)
@@ -1834,18 +1934,13 @@ err_out:
  **/
 static void ixgbevf_free_q_vectors(struct ixgbevf_adapter *adapter)
 {
-       int q_idx, num_q_vectors;
-       int napi_vectors;
-
-       num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
-       napi_vectors = adapter->num_rx_queues;
+       int q_idx, num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
 
        for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
                struct ixgbevf_q_vector *q_vector = adapter->q_vector[q_idx];
 
                adapter->q_vector[q_idx] = NULL;
-               if (q_idx < napi_vectors)
-                       netif_napi_del(&q_vector->napi);
+               netif_napi_del(&q_vector->napi);
                kfree(q_vector);
        }
 }
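
netif_napi_add() is called for every q_vector at allocation time, so capping netif_napi_del() at num_rx_queues could skip cleanup for vectors beyond the Rx count; the simplification deletes the NAPI instance for every vector unconditionally.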
@@ -1950,8 +2045,11 @@ static int __devinit ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
        hw->subsystem_device_id = pdev->subsystem_device;
 
        hw->mbx.ops.init_params(hw);
-       hw->mac.max_tx_queues = MAX_TX_QUEUES;
-       hw->mac.max_rx_queues = MAX_RX_QUEUES;
+
+       /* assume legacy case in which PF would only give VF 2 queues */
+       hw->mac.max_tx_queues = 2;
+       hw->mac.max_rx_queues = 2;
+
        err = hw->mac.ops.reset_hw(hw);
        if (err) {
                dev_info(&pdev->dev,
@@ -2113,12 +2211,12 @@ static void ixgbevf_watchdog_task(struct work_struct *work)
        if (hw->mac.ops.check_link) {
                s32 need_reset;
 
-               spin_lock(&adapter->mbx_lock);
+               spin_lock_bh(&adapter->mbx_lock);
 
                need_reset = hw->mac.ops.check_link(hw, &link_speed,
                                                    &link_up, false);
 
-               spin_unlock(&adapter->mbx_lock);
+               spin_unlock_bh(&adapter->mbx_lock);
 
                if (need_reset) {
                        adapter->link_up = link_up;
@@ -2377,6 +2475,63 @@ static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter)
                                                  &adapter->rx_ring[i]);
 }
 
+static int ixgbevf_setup_queues(struct ixgbevf_adapter *adapter)
+{
+       struct ixgbe_hw *hw = &adapter->hw;
+       struct ixgbevf_ring *rx_ring;
+       unsigned int def_q = 0;
+       unsigned int num_tcs = 0;
+       unsigned int num_rx_queues = 1;
+       int err, i;
+
+       spin_lock_bh(&adapter->mbx_lock);
+
+       /* fetch queue configuration from the PF */
+       err = ixgbevf_get_queues(hw, &num_tcs, &def_q);
+
+       spin_unlock_bh(&adapter->mbx_lock);
+
+       if (err)
+               return err;
+
+       if (num_tcs > 1) {
+               /* update default Tx ring register index */
+               adapter->tx_ring[0].reg_idx = def_q;
+
+               /* we need as many queues as traffic classes */
+               num_rx_queues = num_tcs;
+       }
+
+       /* nothing to do if we have the correct number of queues */
+       if (adapter->num_rx_queues == num_rx_queues)
+               return 0;
+
+       /* allocate new rings */
+       rx_ring = kcalloc(num_rx_queues,
+                         sizeof(struct ixgbevf_ring), GFP_KERNEL);
+       if (!rx_ring)
+               return -ENOMEM;
+
+       /* setup ring fields */
+       for (i = 0; i < num_rx_queues; i++) {
+               rx_ring[i].count = adapter->rx_ring_count;
+               rx_ring[i].queue_index = i;
+               rx_ring[i].reg_idx = i;
+               rx_ring[i].dev = &adapter->pdev->dev;
+               rx_ring[i].netdev = adapter->netdev;
+       }
+
+       /* free the existing ring and queues */
+       adapter->num_rx_queues = 0;
+       kfree(adapter->rx_ring);
+
+       /* move new rings into position on the adapter struct */
+       adapter->rx_ring = rx_ring;
+       adapter->num_rx_queues = num_rx_queues;
+
+       return 0;
+}
+
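ixgbevf_setup_queues() is the ixgbevf_open() counterpart of ixgbevf_reset_queues() above: it performs the same PF query and ring re-sizing, but since no Rx resources exist yet at open time it only positions the new ring array, leaving allocation to ixgbevf_setup_all_rx_resources() later in the open path.
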
 /**
  * ixgbevf_open - Called when a network interface is made active
  * @netdev: network interface device structure
@@ -2413,6 +2568,11 @@ static int ixgbevf_open(struct net_device *netdev)
 
        ixgbevf_negotiate_api(adapter);
 
+       /* setup queue reg_idx and Rx queue count */
+       err = ixgbevf_setup_queues(adapter);
+       if (err)
+               goto err_setup_queues;
+
        /* allocate transmit descriptors */
        err = ixgbevf_setup_all_tx_resources(adapter);
        if (err)
@@ -2451,6 +2611,7 @@ err_setup_rx:
        ixgbevf_free_all_rx_resources(adapter);
 err_setup_tx:
        ixgbevf_free_all_tx_resources(adapter);
+err_setup_queues:
        ixgbevf_reset(adapter);
 
 err_setup_reset:
@@ -2678,10 +2839,10 @@ static int ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
                        tx_buffer_info->dma =
                                skb_frag_dma_map(tx_ring->dev, frag,
                                                 offset, size, DMA_TO_DEVICE);
-                       tx_buffer_info->mapped_as_page = true;
                        if (dma_mapping_error(tx_ring->dev,
                                              tx_buffer_info->dma))
                                goto dma_error;
+                       tx_buffer_info->mapped_as_page = true;
                        tx_buffer_info->next_to_watch = i;
 
                        len -= size;
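
Moving the mapped_as_page assignment after the dma_mapping_error() check means a fragment whose mapping failed is never flagged as mapped; presumably this keeps the dma_error unwind from issuing a dma_unmap_page() on an address that was never valid.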
@@ -2823,6 +2984,11 @@ static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 #if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
        unsigned short f;
 #endif
+       u8 *dst_mac = skb_header_pointer(skb, 0, 0, NULL);
+       if (!dst_mac || is_link_local_ether_addr(dst_mac)) {
+               dev_kfree_skb(skb);
+               return NETDEV_TX_OK;
+       }
 
        tx_ring = &adapter->tx_ring[r_idx];
 
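Frames destined to the 01:80:C2 link-local block (STP, LACP, LLDP and friends) belong to the physical port, not to a VF, so the transmit path now drops them up front; skb_header_pointer(skb, 0, 0, NULL) is a cheap way to reach the destination MAC at offset zero.
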
@@ -2902,12 +3068,12 @@ static int ixgbevf_set_mac(struct net_device *netdev, void *p)
        memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
        memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
 
-       spin_lock(&adapter->mbx_lock);
+       spin_lock_bh(&adapter->mbx_lock);
 
        if (hw->mac.ops.set_rar)
                hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
 
-       spin_unlock(&adapter->mbx_lock);
+       spin_unlock_bh(&adapter->mbx_lock);
 
        return 0;
 }
@@ -2925,8 +3091,15 @@ static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
        int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
        int max_possible_frame = MAXIMUM_ETHERNET_VLAN_SIZE;
 
-       if (adapter->hw.mac.type == ixgbe_mac_X540_vf)
+       switch (adapter->hw.api_version) {
+       case ixgbe_mbox_api_11:
                max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
+               break;
+       default:
+               if (adapter->hw.mac.type == ixgbe_mac_X540_vf)
+                       max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
+               break;
+       }
 
        /* MTU < 68 is an error and causes problems on some kernels */
        if ((new_mtu < 68) || (max_frame > max_possible_frame))
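
Under mailbox API 1.1 the PF permits jumbo frames for any VF, so max_possible_frame becomes IXGBE_MAX_JUMBO_FRAME_SIZE (9728 in the ixgbe headers, i.e. an MTU ceiling of 9710 once the 14-byte Ethernet header and 4-byte FCS are subtracted); on older APIs the X540 VF keeps its existing jumbo allowance and everyone else stays at the standard VLAN-tagged 1500-byte frame.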