/*******************************************************************************

  Intel 82599 Virtual Function driver
  Copyright(c) 1999 - 2012 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/
/******************************************************************************
 Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code
******************************************************************************/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/sctp.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/ethtool.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/prefetch.h>

#include "ixgbevf.h"
const char ixgbevf_driver_name[] = "ixgbevf";
static const char ixgbevf_driver_string[] =
	"Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver";

#define DRV_VERSION "2.11.3-k"
const char ixgbevf_driver_version[] = DRV_VERSION;
static char ixgbevf_copyright[] =
	"Copyright (c) 2009 - 2012 Intel Corporation.";
static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
	[board_82599_vf] = &ixgbevf_82599_vf_info,
	[board_X540_vf]  = &ixgbevf_X540_vf_info,
};
/* ixgbevf_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static DEFINE_PCI_DEVICE_TABLE(ixgbevf_pci_tbl) = {
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF), board_82599_vf },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF), board_X540_vf },
	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, ixgbevf_pci_tbl);
MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) 82599 Virtual Function Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector);
static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter);
static inline void ixgbevf_release_rx_desc(struct ixgbe_hw *hw,
					   struct ixgbevf_ring *rx_ring,
					   u32 val)
{
	/*
	 * Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();
	IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rx_ring->reg_idx), val);
}
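/* Illustrative note (not from the original source): this is the producer
 * side of the descriptor ring protocol.  Software fills descriptors in
 * memory, the wmb() above makes those writes globally visible, and only
 * then is the tail register (VFRDT) bumped to pass ownership of the new
 * descriptors to the device; writing the tail first could let the NIC
 * fetch a half-initialized descriptor on weakly ordered architectures.
 */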
/**
 * ixgbevf_set_ivar - set IVAR registers - maps interrupt causes to vectors
 * @adapter: pointer to adapter struct
 * @direction: 0 for Rx, 1 for Tx, -1 for other causes
 * @queue: queue to map the corresponding interrupt to
 * @msix_vector: the vector to map to the corresponding queue
 **/
static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction,
			     u8 queue, u8 msix_vector)
{
	u32 ivar, index;
	struct ixgbe_hw *hw = &adapter->hw;

	if (direction == -1) {
		/* other causes */
		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
		ivar &= ~0xFF;
		ivar |= msix_vector;
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
	} else {
		/* tx or rx causes */
		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
		index = ((16 * (queue & 1)) + (8 * direction));
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1));
		ivar &= ~(0xFF << index);
		ivar |= (msix_vector << index);
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), ivar);
	}
}
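/* Worked example (illustrative, not part of the original source): each
 * VTIVAR register holds four 8-bit entries, two queues per register with
 * one Rx and one Tx byte each.  Mapping Tx queue 3 (direction = 1) to
 * MSI-X vector 2 gives
 *
 *	index = 16 * (3 & 1) + 8 * 1 = 24
 *	register = VTIVAR(3 >> 1) = VTIVAR(1)
 *
 * so bits 31:24 of VTIVAR(1) are set to 2 | IXGBE_IVAR_ALLOC_VAL.
 */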
static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_ring *tx_ring,
					       struct ixgbevf_tx_buffer
					       *tx_buffer_info)
{
	if (tx_buffer_info->dma) {
		if (tx_buffer_info->mapped_as_page)
			dma_unmap_page(tx_ring->dev,
				       tx_buffer_info->dma,
				       tx_buffer_info->length,
				       DMA_TO_DEVICE);
		else
			dma_unmap_single(tx_ring->dev,
					 tx_buffer_info->dma,
					 tx_buffer_info->length,
					 DMA_TO_DEVICE);
		tx_buffer_info->dma = 0;
	}
	if (tx_buffer_info->skb) {
		dev_kfree_skb_any(tx_buffer_info->skb);
		tx_buffer_info->skb = NULL;
	}
	tx_buffer_info->time_stamp = 0;
	/* tx_buffer_info must be completely set up in the transmit path */
}
#define IXGBE_MAX_TXD_PWR	14
#define IXGBE_MAX_DATA_PER_TXD	(1 << IXGBE_MAX_TXD_PWR)

/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD)
#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
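/* Worked example (illustrative): with IXGBE_MAX_DATA_PER_TXD = 16K, a
 * 60 KB linear chunk needs TXD_USE_COUNT(61440) = DIV_ROUND_UP(61440,
 * 16384) = 4 descriptors.  DESC_NEEDED budgets one descriptor per possible
 * fragment plus slack (presumably for context descriptors and the skb
 * head), which is the worst case the transmit path has to reserve for.
 */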
static void ixgbevf_tx_timeout(struct net_device *netdev);

/**
 * ixgbevf_clean_tx_irq - Reclaim resources after transmit completes
 * @q_vector: board private structure
 * @tx_ring: tx ring to clean
 **/
static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
				 struct ixgbevf_ring *tx_ring)
{
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
	struct ixgbevf_tx_buffer *tx_buffer_info;
	unsigned int i, count = 0;
	unsigned int total_bytes = 0, total_packets = 0;

	if (test_bit(__IXGBEVF_DOWN, &adapter->state))
		return true;

	i = tx_ring->next_to_clean;
	tx_buffer_info = &tx_ring->tx_buffer_info[i];
	eop_desc = tx_buffer_info->next_to_watch;

	do {
		bool cleaned = false;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		/* prevent any other reads prior to eop_desc */
		read_barrier_depends();

		/* if DD is not set pending work has not been completed */
		if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buffer_info->next_to_watch = NULL;

		for ( ; !cleaned; count++) {
			struct sk_buff *skb;
			tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
			cleaned = (tx_desc == eop_desc);
			skb = tx_buffer_info->skb;

			if (cleaned && skb) {
				unsigned int segs, bytecount;

				/* gso_segs is currently only valid for tcp */
				segs = skb_shinfo(skb)->gso_segs ?: 1;
				/* multiply data chunks by size of headers */
				bytecount = ((segs - 1) * skb_headlen(skb)) +
					    skb->len;
				total_packets += segs;
				total_bytes += bytecount;
			}

			ixgbevf_unmap_and_free_tx_resource(tx_ring,
							   tx_buffer_info);

			tx_desc->wb.status = 0;

			i++;
			if (i == tx_ring->count)
				i = 0;

			tx_buffer_info = &tx_ring->tx_buffer_info[i];
		}

		eop_desc = tx_buffer_info->next_to_watch;
	} while (count < tx_ring->count);

	tx_ring->next_to_clean = i;
#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
	if (unlikely(count && netif_carrier_ok(tx_ring->netdev) &&
		     (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->queue_index) &&
		    !test_bit(__IXGBEVF_DOWN, &adapter->state)) {
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);
			++adapter->restart_queue;
		}
	}

	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->total_bytes += total_bytes;
	tx_ring->total_packets += total_packets;
	u64_stats_update_end(&tx_ring->syncp);
	q_vector->tx.total_bytes += total_bytes;
	q_vector->tx.total_packets += total_packets;

	return count < tx_ring->count;
}
/**
 * ixgbevf_receive_skb - Send a completed packet up the stack
 * @q_vector: structure containing interrupt and ring information
 * @skb: packet to send up
 * @status: hardware indication of status of receive
 * @rx_desc: rx descriptor
 **/
static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector,
				struct sk_buff *skb, u8 status,
				union ixgbe_adv_rx_desc *rx_desc)
{
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	bool is_vlan = (status & IXGBE_RXD_STAT_VP);
	u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);

	if (is_vlan && test_bit(tag & VLAN_VID_MASK, adapter->active_vlans))
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tag);

	if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL))
		napi_gro_receive(&q_vector->napi, skb);
	else
		netif_rx(skb);
}
/**
 * ixgbevf_rx_skb - Helper function to determine proper Rx method
 * @q_vector: structure containing interrupt and ring information
 * @skb: packet to send up
 * @status: hardware indication of status of receive
 * @rx_desc: rx descriptor
 **/
static void ixgbevf_rx_skb(struct ixgbevf_q_vector *q_vector,
			   struct sk_buff *skb, u8 status,
			   union ixgbe_adv_rx_desc *rx_desc)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
	skb_mark_napi_id(skb, &q_vector->napi);

	if (ixgbevf_qv_busy_polling(q_vector)) {
		netif_receive_skb(skb);
		/* exit early if we busy polled */
		return;
	}
#endif /* CONFIG_NET_RX_BUSY_POLL */

	ixgbevf_receive_skb(q_vector, skb, status, rx_desc);
}
/**
 * ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum
 * @ring: pointer to Rx descriptor ring structure
 * @status_err: hardware indication of status of receive
 * @skb: skb currently being received and modified
 **/
static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring,
				       u32 status_err, struct sk_buff *skb)
{
	skb_checksum_none_assert(skb);

	/* Rx csum disabled */
	if (!(ring->netdev->features & NETIF_F_RXCSUM))
		return;

	/* if IP and error */
	if ((status_err & IXGBE_RXD_STAT_IPCS) &&
	    (status_err & IXGBE_RXDADV_ERR_IPE)) {
		ring->hw_csum_rx_error++;
		return;
	}

	if (!(status_err & IXGBE_RXD_STAT_L4CS))
		return;

	if (status_err & IXGBE_RXDADV_ERR_TCPE) {
		ring->hw_csum_rx_error++;
		return;
	}

	/* It must be a TCP or UDP packet with a valid checksum */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	ring->hw_csum_rx_good++;
}
/**
 * ixgbevf_alloc_rx_buffers - Replace used receive buffers; packet split
 * @adapter: address of board private structure
 **/
static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
				     struct ixgbevf_ring *rx_ring,
				     int cleaned_count)
{
	struct pci_dev *pdev = adapter->pdev;
	union ixgbe_adv_rx_desc *rx_desc;
	struct ixgbevf_rx_buffer *bi;
	unsigned int i = rx_ring->next_to_use;

	bi = &rx_ring->rx_buffer_info[i];

	while (cleaned_count--) {
		rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
		if (!bi->skb) {
			struct sk_buff *skb;

			skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
							rx_ring->rx_buf_len);
			if (!skb) {
				adapter->alloc_rx_buff_failed++;
				goto no_buffers;
			}
			bi->skb = skb;

			bi->dma = dma_map_single(&pdev->dev, skb->data,
						 rx_ring->rx_buf_len,
						 DMA_FROM_DEVICE);
			if (dma_mapping_error(&pdev->dev, bi->dma)) {
				dev_kfree_skb(skb);
				bi->skb = NULL;
				dev_err(&pdev->dev, "RX DMA map failed\n");
				break;
			}
		}
		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);

		i++;
		if (i == rx_ring->count)
			i = 0;
		bi = &rx_ring->rx_buffer_info[i];
	}

no_buffers:
	if (rx_ring->next_to_use != i) {
		rx_ring->next_to_use = i;
		ixgbevf_release_rx_desc(&adapter->hw, rx_ring, i);
	}
}
static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter,
					     u32 qmask)
{
	struct ixgbe_hw *hw = &adapter->hw;

	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, qmask);
}
static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
				struct ixgbevf_ring *rx_ring,
				int budget)
{
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct pci_dev *pdev = adapter->pdev;
	union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
	struct ixgbevf_rx_buffer *rx_buffer_info, *next_buffer;
	struct sk_buff *skb;
	unsigned int i;
	u32 len, staterr;
	int cleaned_count = 0;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	i = rx_ring->next_to_clean;
	rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	rx_buffer_info = &rx_ring->rx_buffer_info[i];

	while (staterr & IXGBE_RXD_STAT_DD) {
		if (!budget)
			break;
		budget--;

		rmb(); /* read descriptor and rx_buffer_info after status DD */
		len = le16_to_cpu(rx_desc->wb.upper.length);
		skb = rx_buffer_info->skb;
		prefetch(skb->data - NET_IP_ALIGN);
		rx_buffer_info->skb = NULL;

		if (rx_buffer_info->dma) {
			dma_unmap_single(&pdev->dev, rx_buffer_info->dma,
					 rx_ring->rx_buf_len,
					 DMA_FROM_DEVICE);
			rx_buffer_info->dma = 0;
			skb_put(skb, len);
		}

		i++;
		if (i == rx_ring->count)
			i = 0;

		next_rxd = IXGBEVF_RX_DESC(rx_ring, i);
		prefetch(next_rxd);
		cleaned_count++;

		next_buffer = &rx_ring->rx_buffer_info[i];

		if (!(staterr & IXGBE_RXD_STAT_EOP)) {
			skb->next = next_buffer->skb;
			IXGBE_CB(skb->next)->prev = skb;
			adapter->non_eop_descs++;
			goto next_desc;
		}

		/* we should not be chaining buffers, if we did drop the skb */
		if (IXGBE_CB(skb)->prev) {
			do {
				struct sk_buff *this = skb;
				skb = IXGBE_CB(skb)->prev;
				dev_kfree_skb(this);
			} while (skb);
			goto next_desc;
		}

		/* ERR_MASK will only have valid bits if EOP set */
		if (unlikely(staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK)) {
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		ixgbevf_rx_checksum(rx_ring, staterr, skb);

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;
		total_rx_packets++;

		/*
		 * Work around issue of some types of VM to VM loop back
		 * packets not getting split correctly
		 */
		if (staterr & IXGBE_RXD_STAT_LB) {
			u32 header_fixup_len = skb_headlen(skb);
			if (header_fixup_len < 14)
				skb_push(skb, header_fixup_len);
		}
		skb->protocol = eth_type_trans(skb, rx_ring->netdev);

		/* Workaround hardware that can't do proper VEPA multicast
		 * source pruning.
		 */
		if ((skb->pkt_type & (PACKET_BROADCAST | PACKET_MULTICAST)) &&
		    ether_addr_equal(adapter->netdev->dev_addr,
				     eth_hdr(skb)->h_source)) {
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		ixgbevf_rx_skb(q_vector, skb, staterr, rx_desc);

next_desc:
		rx_desc->wb.upper.status_error = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IXGBEVF_RX_BUFFER_WRITE) {
			ixgbevf_alloc_rx_buffers(adapter, rx_ring,
						 cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		rx_buffer_info = &rx_ring->rx_buffer_info[i];

		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	}

	rx_ring->next_to_clean = i;
	cleaned_count = IXGBE_DESC_UNUSED(rx_ring);

	if (cleaned_count)
		ixgbevf_alloc_rx_buffers(adapter, rx_ring, cleaned_count);

	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->total_packets += total_rx_packets;
	rx_ring->total_bytes += total_rx_bytes;
	u64_stats_update_end(&rx_ring->syncp);
	q_vector->rx.total_packets += total_rx_packets;
	q_vector->rx.total_bytes += total_rx_bytes;

	return total_rx_packets;
}
/**
 * ixgbevf_poll - NAPI polling callback
 * @napi: napi struct with our devices info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function cleans all Tx and Rx rings associated with a q_vector,
 * up to the given budget.
 **/
static int ixgbevf_poll(struct napi_struct *napi, int budget)
{
	struct ixgbevf_q_vector *q_vector =
		container_of(napi, struct ixgbevf_q_vector, napi);
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct ixgbevf_ring *ring;
	int per_ring_budget;
	bool clean_complete = true;

	ixgbevf_for_each_ring(ring, q_vector->tx)
		clean_complete &= ixgbevf_clean_tx_irq(q_vector, ring);

#ifdef CONFIG_NET_RX_BUSY_POLL
	if (!ixgbevf_qv_lock_napi(q_vector))
		return budget;
#endif

	/* attempt to distribute budget to each queue fairly, but don't allow
	 * the budget to go below 1 because we'll exit polling */
	if (q_vector->rx.count > 1)
		per_ring_budget = max(budget/q_vector->rx.count, 1);
	else
		per_ring_budget = budget;

	adapter->flags |= IXGBE_FLAG_IN_NETPOLL;
	ixgbevf_for_each_ring(ring, q_vector->rx)
		clean_complete &= (ixgbevf_clean_rx_irq(q_vector, ring,
							per_ring_budget)
				   < per_ring_budget);
	adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL;

#ifdef CONFIG_NET_RX_BUSY_POLL
	ixgbevf_qv_unlock_napi(q_vector);
#endif

	/* If all work not completed, return budget and keep polling */
	if (!clean_complete)
		return budget;
	/* all work done, exit the polling mode */
	napi_complete(napi);
	if (adapter->rx_itr_setting & 1)
		ixgbevf_set_itr(q_vector);
	if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
		ixgbevf_irq_enable_queues(adapter,
					  1 << q_vector->v_idx);

	return 0;
}
/**
 * ixgbevf_write_eitr - write VTEITR register in hardware specific way
 * @q_vector: structure containing interrupt and ring information
 **/
void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector)
{
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct ixgbe_hw *hw = &adapter->hw;
	int v_idx = q_vector->v_idx;
	u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR;

	/*
	 * set the WDIS bit to not clear the timer bits and cause an
	 * immediate assertion of the interrupt
	 */
	itr_reg |= IXGBE_EITR_CNT_WDIS;

	IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg);
}
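/* Illustrative note: the ITR code below treats (q_vector->itr >> 2) as the
 * interrupt interval in microseconds.  Assuming IXGBE_20K_ITR = 200, the
 * programmed interval is 200 >> 2 = 50 us, i.e. at most 20000 interrupts
 * per second from this vector.
 */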
#ifdef CONFIG_NET_RX_BUSY_POLL
/* must be called with local_bh_disable()d */
static int ixgbevf_busy_poll_recv(struct napi_struct *napi)
{
	struct ixgbevf_q_vector *q_vector =
		container_of(napi, struct ixgbevf_q_vector, napi);
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct ixgbevf_ring *ring;
	int found = 0;

	if (test_bit(__IXGBEVF_DOWN, &adapter->state))
		return LL_FLUSH_FAILED;

	if (!ixgbevf_qv_lock_poll(q_vector))
		return LL_FLUSH_BUSY;

	ixgbevf_for_each_ring(ring, q_vector->rx) {
		found = ixgbevf_clean_rx_irq(q_vector, ring, 4);
#ifdef BP_EXTENDED_STATS
		if (found)
			ring->bp_cleaned += found;
		else
			ring->bp_misses++;
#endif
		if (found)
			break;
	}

	ixgbevf_qv_unlock_poll(q_vector);

	return found;
}
#endif /* CONFIG_NET_RX_BUSY_POLL */
/**
 * ixgbevf_configure_msix - Configure MSI-X hardware
 * @adapter: board private structure
 *
 * ixgbevf_configure_msix sets up the hardware to properly generate MSI-X
 * interrupts.
 **/
static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter)
{
	struct ixgbevf_q_vector *q_vector;
	int q_vectors, v_idx;

	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
	adapter->eims_enable_mask = 0;

	/*
	 * Populate the IVAR table and set the ITR values to the
	 * corresponding register.
	 */
	for (v_idx = 0; v_idx < q_vectors; v_idx++) {
		struct ixgbevf_ring *ring;
		q_vector = adapter->q_vector[v_idx];

		ixgbevf_for_each_ring(ring, q_vector->rx)
			ixgbevf_set_ivar(adapter, 0, ring->reg_idx, v_idx);

		ixgbevf_for_each_ring(ring, q_vector->tx)
			ixgbevf_set_ivar(adapter, 1, ring->reg_idx, v_idx);

		if (q_vector->tx.ring && !q_vector->rx.ring) {
			/* tx only vector */
			if (adapter->tx_itr_setting == 1)
				q_vector->itr = IXGBE_10K_ITR;
			else
				q_vector->itr = adapter->tx_itr_setting;
		} else {
			/* rx or rx/tx vector */
			if (adapter->rx_itr_setting == 1)
				q_vector->itr = IXGBE_20K_ITR;
			else
				q_vector->itr = adapter->rx_itr_setting;
		}

		/* add q_vector eims value to global eims_enable_mask */
		adapter->eims_enable_mask |= 1 << v_idx;

		ixgbevf_write_eitr(q_vector);
	}

	ixgbevf_set_ivar(adapter, -1, 1, v_idx);
	/* setup eims_other and add value to global eims_enable_mask */
	adapter->eims_other = 1 << v_idx;
	adapter->eims_enable_mask |= adapter->eims_other;
}
enum latency_range {
	lowest_latency = 0,
	low_latency = 1,
	bulk_latency = 2,
	latency_invalid = 255
};
/**
 * ixgbevf_update_itr - update the dynamic ITR value based on statistics
 * @q_vector: structure containing interrupt and ring information
 * @ring_container: structure containing ring performance data
 *
 * Stores a new ITR value based on packets and byte
 * counts during the last interrupt.  The advantage of per interrupt
 * computation is faster updates and more accurate ITR for the current
 * traffic pattern.  Constants in this function were computed
 * based on theoretical maximum wire speed and thresholds were set based
 * on testing data as well as attempting to minimize response time
 * while increasing bulk throughput.
 **/
static void ixgbevf_update_itr(struct ixgbevf_q_vector *q_vector,
			       struct ixgbevf_ring_container *ring_container)
{
	int bytes = ring_container->total_bytes;
	int packets = ring_container->total_packets;
	u32 timepassed_us;
	u64 bytes_perint;
	u8 itr_setting = ring_container->itr;

	if (packets == 0)
		return;

	/* simple throttlerate management
	 *    0-20MB/s lowest (100000 ints/s)
	 *   20-100MB/s low   (20000 ints/s)
	 *  100-1249MB/s bulk (8000 ints/s)
	 */
	/* what was last interrupt timeslice? */
	timepassed_us = q_vector->itr >> 2;
	bytes_perint = bytes / timepassed_us; /* bytes/usec */
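	/* Worked example (illustrative): at IXGBE_20K_ITR (assumed value
	 * 200) the last timeslice was 200 >> 2 = 50 us; receiving 20000
	 * bytes in that window gives bytes_perint = 400, well above the
	 * bulk_latency threshold of 20 used below.
	 */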
	switch (itr_setting) {
	case lowest_latency:
		if (bytes_perint > 10)
			itr_setting = low_latency;
		break;
	case low_latency:
		if (bytes_perint > 20)
			itr_setting = bulk_latency;
		else if (bytes_perint <= 10)
			itr_setting = lowest_latency;
		break;
	case bulk_latency:
		if (bytes_perint <= 20)
			itr_setting = low_latency;
		break;
	}

	/* clear work counters since we have the values we need */
	ring_container->total_bytes = 0;
	ring_container->total_packets = 0;

	/* write updated itr to ring container */
	ring_container->itr = itr_setting;
}
static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector)
{
	u32 new_itr = q_vector->itr;
	u8 current_itr;

	ixgbevf_update_itr(q_vector, &q_vector->tx);
	ixgbevf_update_itr(q_vector, &q_vector->rx);

	current_itr = max(q_vector->rx.itr, q_vector->tx.itr);

	switch (current_itr) {
	/* counts and packets in update_itr are dependent on these numbers */
	case lowest_latency:
		new_itr = IXGBE_100K_ITR;
		break;
	case low_latency:
		new_itr = IXGBE_20K_ITR;
		break;
	case bulk_latency:
	default:
		new_itr = IXGBE_8K_ITR;
		break;
	}

	if (new_itr != q_vector->itr) {
		/* do an exponential smoothing */
		new_itr = (10 * new_itr * q_vector->itr) /
			  ((9 * new_itr) + q_vector->itr);
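		/* Worked example (illustrative): stepping from bulk (old
		 * itr 500) toward 100K (target 40), one pass yields
		 * (10 * 40 * 500) / ((9 * 40) + 500) = 200000 / 860 = 232,
		 * so the rate ramps toward the target rather than jumping.
		 */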
		/* save the algorithm value here */
		q_vector->itr = new_itr;

		ixgbevf_write_eitr(q_vector);
	}
}
static irqreturn_t ixgbevf_msix_other(int irq, void *data)
{
	struct ixgbevf_adapter *adapter = data;
	struct ixgbe_hw *hw = &adapter->hw;

	hw->mac.get_link_status = 1;

	if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
		mod_timer(&adapter->watchdog_timer, jiffies);

	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_other);

	return IRQ_HANDLED;
}

/**
 * ixgbevf_msix_clean_rings - single unshared vector rx clean (all queues)
 * @irq: unused
 * @data: pointer to our q_vector struct for this interrupt vector
 **/
static irqreturn_t ixgbevf_msix_clean_rings(int irq, void *data)
{
	struct ixgbevf_q_vector *q_vector = data;

	/* EIAM disabled interrupts (on this vector) for us */
	if (q_vector->rx.ring || q_vector->tx.ring)
		napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}
static inline void map_vector_to_rxq(struct ixgbevf_adapter *a, int v_idx,
				     int r_idx)
{
	struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];

	a->rx_ring[r_idx].next = q_vector->rx.ring;
	q_vector->rx.ring = &a->rx_ring[r_idx];
	q_vector->rx.count++;
}

static inline void map_vector_to_txq(struct ixgbevf_adapter *a, int v_idx,
				     int t_idx)
{
	struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];

	a->tx_ring[t_idx].next = q_vector->tx.ring;
	q_vector->tx.ring = &a->tx_ring[t_idx];
	q_vector->tx.count++;
}
/**
 * ixgbevf_map_rings_to_vectors - Maps descriptor rings to vectors
 * @adapter: board private structure to initialize
 *
 * This function maps descriptor rings to the queue-specific vectors
 * we were allotted through the MSI-X enabling code.  Ideally, we'd have
 * one vector per ring/queue, but on a constrained vector budget, we
 * group the rings as "efficiently" as possible.  You would add new
 * mapping configurations in here.
 **/
static int ixgbevf_map_rings_to_vectors(struct ixgbevf_adapter *adapter)
{
	int q_vectors;
	int v_start = 0;
	int rxr_idx = 0, txr_idx = 0;
	int rxr_remaining = adapter->num_rx_queues;
	int txr_remaining = adapter->num_tx_queues;
	int i, j;
	int rqpv, tqpv;
	int err = 0;

	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	/*
	 * The ideal configuration...
	 * We have enough vectors to map one per queue.
	 */
	if (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
		for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
			map_vector_to_rxq(adapter, v_start, rxr_idx);

		for (; txr_idx < txr_remaining; v_start++, txr_idx++)
			map_vector_to_txq(adapter, v_start, txr_idx);
		goto out;
	}

	/*
	 * If we don't have enough vectors for a 1-to-1
	 * mapping, we'll have to group them so there are
	 * multiple queues per vector.
	 */
	/* Re-adjusting *qpv takes care of the remainder. */
	for (i = v_start; i < q_vectors; i++) {
		rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i);
		for (j = 0; j < rqpv; j++) {
			map_vector_to_rxq(adapter, i, rxr_idx);
			rxr_idx++;
			rxr_remaining--;
		}
	}
	for (i = v_start; i < q_vectors; i++) {
		tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i);
		for (j = 0; j < tqpv; j++) {
			map_vector_to_txq(adapter, i, txr_idx);
			txr_idx++;
			txr_remaining--;
		}
	}

out:
	return err;
}
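/* Worked example (illustrative): with 4 Rx queues and only 3 queue vectors,
 * the DIV_ROUND_UP() pass above spreads the remainder evenly:
 *
 *	i = 0: rqpv = DIV_ROUND_UP(4, 3) = 2 -> queues 0, 1
 *	i = 1: rqpv = DIV_ROUND_UP(2, 2) = 1 -> queue 2
 *	i = 2: rqpv = DIV_ROUND_UP(1, 1) = 1 -> queue 3
 */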
/**
 * ixgbevf_request_msix_irqs - Initialize MSI-X interrupts
 * @adapter: board private structure
 *
 * ixgbevf_request_msix_irqs allocates MSI-X vectors and requests
 * interrupts from the kernel.
 **/
static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
	int vector, err;
	int ri = 0, ti = 0;

	for (vector = 0; vector < q_vectors; vector++) {
		struct ixgbevf_q_vector *q_vector = adapter->q_vector[vector];
		struct msix_entry *entry = &adapter->msix_entries[vector];

		if (q_vector->tx.ring && q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", netdev->name, "TxRx", ri++);
			ti++;
		} else if (q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", netdev->name, "rx", ri++);
		} else if (q_vector->tx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", netdev->name, "tx", ti++);
		} else {
			/* skip this unused q_vector */
			continue;
		}
		err = request_irq(entry->vector, &ixgbevf_msix_clean_rings, 0,
				  q_vector->name, q_vector);
		if (err) {
			hw_dbg(&adapter->hw,
			       "request_irq failed for MSIX interrupt "
			       "Error: %d\n", err);
			goto free_queue_irqs;
		}
	}

	err = request_irq(adapter->msix_entries[vector].vector,
			  &ixgbevf_msix_other, 0, netdev->name, adapter);
	if (err) {
		hw_dbg(&adapter->hw,
		       "request_irq for msix_other failed: %d\n", err);
		goto free_queue_irqs;
	}

	return 0;

free_queue_irqs:
	while (vector) {
		vector--;
		free_irq(adapter->msix_entries[vector].vector,
			 adapter->q_vector[vector]);
	}
	/* This failure is non-recoverable - it indicates the system is
	 * out of MSIX vector resources and the VF driver cannot run
	 * without them.  Set the number of msix vectors to zero
	 * indicating that not enough can be allocated.  The error
	 * will be returned to the user indicating device open failed.
	 * Any further attempts to force the driver to open will also
	 * fail.  The only way to recover is to unload the driver and
	 * reload it again.  If the system has recovered some MSIX
	 * vectors then it may succeed.
	 */
	adapter->num_msix_vectors = 0;
	return err;
}
static inline void ixgbevf_reset_q_vectors(struct ixgbevf_adapter *adapter)
{
	int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (i = 0; i < q_vectors; i++) {
		struct ixgbevf_q_vector *q_vector = adapter->q_vector[i];
		q_vector->rx.ring = NULL;
		q_vector->tx.ring = NULL;
		q_vector->rx.count = 0;
		q_vector->tx.count = 0;
	}
}
/**
 * ixgbevf_request_irq - initialize interrupts
 * @adapter: board private structure
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int ixgbevf_request_irq(struct ixgbevf_adapter *adapter)
{
	int err = 0;

	err = ixgbevf_request_msix_irqs(adapter);

	if (err)
		hw_dbg(&adapter->hw,
		       "request_irq failed, Error %d\n", err);

	return err;
}

static void ixgbevf_free_irq(struct ixgbevf_adapter *adapter)
{
	int i, q_vectors;

	q_vectors = adapter->num_msix_vectors;
	i = q_vectors - 1;

	free_irq(adapter->msix_entries[i].vector, adapter);
	i--;

	for (; i >= 0; i--) {
		/* free only the irqs that were actually requested */
		if (!adapter->q_vector[i]->rx.ring &&
		    !adapter->q_vector[i]->tx.ring)
			continue;

		free_irq(adapter->msix_entries[i].vector,
			 adapter->q_vector[i]);
	}

	ixgbevf_reset_q_vectors(adapter);
}
/**
 * ixgbevf_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static inline void ixgbevf_irq_disable(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int i;

	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, 0);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, ~0);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, 0);

	IXGBE_WRITE_FLUSH(hw);

	for (i = 0; i < adapter->num_msix_vectors; i++)
		synchronize_irq(adapter->msix_entries[i].vector);
}

/**
 * ixgbevf_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, adapter->eims_enable_mask);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, adapter->eims_enable_mask);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_enable_mask);
}
/**
 * ixgbevf_configure_tx - Configure 82599 VF Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void ixgbevf_configure_tx(struct ixgbevf_adapter *adapter)
{
	u64 tdba;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 i, j, tdlen, txctrl;

	/* Setup the HW Tx Head and Tail descriptor pointers */
	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct ixgbevf_ring *ring = &adapter->tx_ring[i];
		j = ring->reg_idx;
		tdba = ring->dma;
		tdlen = ring->count * sizeof(union ixgbe_adv_tx_desc);
		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(j),
				(tdba & DMA_BIT_MASK(32)));
		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(j), (tdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(j), tdlen);
		IXGBE_WRITE_REG(hw, IXGBE_VFTDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_VFTDT(j), 0);
		adapter->tx_ring[i].head = IXGBE_VFTDH(j);
		adapter->tx_ring[i].tail = IXGBE_VFTDT(j);
		/* Disable Tx Head Writeback RO bit, since this hoses
		 * bookkeeping if things aren't delivered in order.
		 */
		txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(j));
		txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
		IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(j), txctrl);
	}
}
#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT	2

static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index)
{
	struct ixgbevf_ring *rx_ring;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 srrctl;

	rx_ring = &adapter->rx_ring[index];

	srrctl = IXGBE_SRRCTL_DROP_EN;

	srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;

	srrctl |= ALIGN(rx_ring->rx_buf_len, 1024) >>
		  IXGBE_SRRCTL_BSIZEPKT_SHIFT;

	IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl);
}
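/* Worked example (illustrative): BSIZEPKT is programmed in 1 KB units, so a
 * 2048 byte buffer gives ALIGN(2048, 1024) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT
 * (assumed to be 10) = 2, and a 1522 byte buffer rounds up to the same 2 KB.
 */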
static void ixgbevf_setup_psrtype(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	/* PSRTYPE must be initialized in 82599 */
	u32 psrtype = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR |
		      IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR |
		      IXGBE_PSRTYPE_L2HDR;

	if (adapter->num_rx_queues > 1)
		psrtype |= 1 << 29;

	IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
}
static void ixgbevf_set_rx_buffer_len(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
	int i;
	u16 rx_buf_len;

	/* notify the PF of our intent to use this size of frame */
	ixgbevf_rlpml_set_vf(hw, max_frame);

	/* PF will allow an extra 4 bytes past for vlan tagged frames */
	max_frame += VLAN_HLEN;

	/*
	 * Allocate buffer sizes that fit well into 32K and
	 * take into account max frame size of 9.5K
	 */
	if ((hw->mac.type == ixgbe_mac_X540_vf) &&
	    (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE))
		rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
	else if (max_frame <= IXGBEVF_RXBUFFER_2K)
		rx_buf_len = IXGBEVF_RXBUFFER_2K;
	else if (max_frame <= IXGBEVF_RXBUFFER_4K)
		rx_buf_len = IXGBEVF_RXBUFFER_4K;
	else if (max_frame <= IXGBEVF_RXBUFFER_8K)
		rx_buf_len = IXGBEVF_RXBUFFER_8K;
	else
		rx_buf_len = IXGBEVF_RXBUFFER_10K;

	for (i = 0; i < adapter->num_rx_queues; i++)
		adapter->rx_ring[i].rx_buf_len = rx_buf_len;
}
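/* Worked example (illustrative): with a standard 1500 byte MTU, max_frame is
 * 1500 + 14 + 4 = 1518, plus VLAN_HLEN (4) = 1522.  An X540 VF then keeps
 * the 1522 byte MAXIMUM_ETHERNET_VLAN_SIZE buffer, an 82599 VF falls through
 * to the 2K buffer, and a 9000 byte MTU ends up on the 10K buffer.
 */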
/**
 * ixgbevf_configure_rx - Configure 82599 VF Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
{
	u64 rdba;
	struct ixgbe_hw *hw = &adapter->hw;
	int i, j;
	u32 rdlen;

	ixgbevf_setup_psrtype(adapter);

	/* set_rx_buffer_len must be called before ring initialization */
	ixgbevf_set_rx_buffer_len(adapter);

	rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc);
	/* Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		rdba = adapter->rx_ring[i].dma;
		j = adapter->rx_ring[i].reg_idx;
		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(j),
				(rdba & DMA_BIT_MASK(32)));
		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(j), (rdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(j), rdlen);
		IXGBE_WRITE_REG(hw, IXGBE_VFRDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_VFRDT(j), 0);
		adapter->rx_ring[i].head = IXGBE_VFRDH(j);
		adapter->rx_ring[i].tail = IXGBE_VFRDT(j);

		ixgbevf_configure_srrctl(adapter, j);
	}
}
static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev,
				   __be16 proto, u16 vid)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int err;

	spin_lock_bh(&adapter->mbx_lock);

	/* add VID to filter table */
	err = hw->mac.ops.set_vfta(hw, vid, 0, true);

	spin_unlock_bh(&adapter->mbx_lock);

	/* translate error return types so error makes sense */
	if (err == IXGBE_ERR_MBX)
		return -EIO;

	if (err == IXGBE_ERR_INVALID_ARGUMENT)
		return -EACCES;

	set_bit(vid, adapter->active_vlans);

	return err;
}

static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev,
				    __be16 proto, u16 vid)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int err = -EOPNOTSUPP;

	spin_lock_bh(&adapter->mbx_lock);

	/* remove VID from filter table */
	err = hw->mac.ops.set_vfta(hw, vid, 0, false);

	spin_unlock_bh(&adapter->mbx_lock);

	clear_bit(vid, adapter->active_vlans);

	return err;
}

static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter)
{
	u16 vid;

	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
		ixgbevf_vlan_rx_add_vid(adapter->netdev,
					htons(ETH_P_8021Q), vid);
}
static int ixgbevf_write_uc_addr_list(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int count = 0;

	if ((netdev_uc_count(netdev)) > 10) {
		pr_err("Too many unicast filters - No Space\n");
		return -ENOSPC;
	}

	if (!netdev_uc_empty(netdev)) {
		struct netdev_hw_addr *ha;
		netdev_for_each_uc_addr(ha, netdev) {
			hw->mac.ops.set_uc_addr(hw, ++count, ha->addr);
			udelay(200);
		}
	} else {
		/*
		 * If the list is empty then send message to PF driver to
		 * clear all macvlans on this VF.
		 */
		hw->mac.ops.set_uc_addr(hw, 0, NULL);
	}

	return count;
}
/**
 * ixgbevf_set_rx_mode - Multicast and unicast set
 * @netdev: network interface device structure
 *
 * The set_rx_method entry point is called whenever the multicast address
 * list, unicast address list or the network interface flags are updated.
 * This routine is responsible for configuring the hardware for proper
 * multicast mode and configuring requested unicast filters.
 **/
static void ixgbevf_set_rx_mode(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	spin_lock_bh(&adapter->mbx_lock);

	/* reprogram multicast list */
	hw->mac.ops.update_mc_addr_list(hw, netdev);

	ixgbevf_write_uc_addr_list(netdev);

	spin_unlock_bh(&adapter->mbx_lock);
}
static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
{
	int q_idx;
	struct ixgbevf_q_vector *q_vector;
	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
		q_vector = adapter->q_vector[q_idx];
#ifdef CONFIG_NET_RX_BUSY_POLL
		ixgbevf_qv_init_lock(adapter->q_vector[q_idx]);
#endif
		napi_enable(&q_vector->napi);
	}
}

static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter)
{
	int q_idx;
	struct ixgbevf_q_vector *q_vector;
	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
		q_vector = adapter->q_vector[q_idx];
		napi_disable(&q_vector->napi);
#ifdef CONFIG_NET_RX_BUSY_POLL
		while (!ixgbevf_qv_disable(adapter->q_vector[q_idx])) {
			pr_info("QV %d locked\n", q_idx);
			usleep_range(1000, 20000);
		}
#endif /* CONFIG_NET_RX_BUSY_POLL */
	}
}
static void ixgbevf_configure(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	ixgbevf_set_rx_mode(netdev);

	ixgbevf_restore_vlan(adapter);

	ixgbevf_configure_tx(adapter);
	ixgbevf_configure_rx(adapter);
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct ixgbevf_ring *ring = &adapter->rx_ring[i];
		ixgbevf_alloc_rx_buffers(adapter, ring,
					 IXGBE_DESC_UNUSED(ring));
	}
}
#define IXGBEVF_MAX_RX_DESC_POLL 10
static void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
					 int rxr)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
	u32 rxdctl;
	int j = adapter->rx_ring[rxr].reg_idx;

	do {
		usleep_range(1000, 2000);
		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
	} while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE));

	if (!wait_loop)
		hw_dbg(hw, "RXDCTL.ENABLE queue %d not set while polling\n",
		       rxr);

	ixgbevf_release_rx_desc(&adapter->hw, &adapter->rx_ring[rxr],
				(adapter->rx_ring[rxr].count - 1));
}

static void ixgbevf_disable_rx_queue(struct ixgbevf_adapter *adapter,
				     struct ixgbevf_ring *ring)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
	u32 rxdctl;
	u8 reg_idx = ring->reg_idx;

	rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
	rxdctl &= ~IXGBE_RXDCTL_ENABLE;

	/* write value back with RXDCTL.ENABLE bit cleared */
	IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);

	/* the hardware may take up to 100us to really disable the rx queue */
	do {
		udelay(10);
		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
	} while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE));

	if (!wait_loop)
		hw_dbg(hw, "RXDCTL.ENABLE queue %d not cleared while polling\n",
		       reg_idx);
}
static void ixgbevf_save_reset_stats(struct ixgbevf_adapter *adapter)
{
	/* Only save pre-reset stats if there are some */
	if (adapter->stats.vfgprc || adapter->stats.vfgptc) {
		adapter->stats.saved_reset_vfgprc += adapter->stats.vfgprc -
			adapter->stats.base_vfgprc;
		adapter->stats.saved_reset_vfgptc += adapter->stats.vfgptc -
			adapter->stats.base_vfgptc;
		adapter->stats.saved_reset_vfgorc += adapter->stats.vfgorc -
			adapter->stats.base_vfgorc;
		adapter->stats.saved_reset_vfgotc += adapter->stats.vfgotc -
			adapter->stats.base_vfgotc;
		adapter->stats.saved_reset_vfmprc += adapter->stats.vfmprc -
			adapter->stats.base_vfmprc;
	}
}

static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
	adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
	adapter->stats.last_vfgorc |=
		(((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
	adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
	adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
	adapter->stats.last_vfgotc |=
		(((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
	adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);

	adapter->stats.base_vfgprc = adapter->stats.last_vfgprc;
	adapter->stats.base_vfgorc = adapter->stats.last_vfgorc;
	adapter->stats.base_vfgptc = adapter->stats.last_vfgptc;
	adapter->stats.base_vfgotc = adapter->stats.last_vfgotc;
	adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
}
static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int api[] = { ixgbe_mbox_api_11,
		      ixgbe_mbox_api_10,
		      ixgbe_mbox_api_unknown };
	int err = 0, idx = 0;

	spin_lock_bh(&adapter->mbx_lock);

	while (api[idx] != ixgbe_mbox_api_unknown) {
		err = ixgbevf_negotiate_api_version(hw, api[idx]);
		if (!err)
			break;
		idx++;
	}

	spin_unlock_bh(&adapter->mbx_lock);
}
static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	int i, j = 0;
	int num_rx_rings = adapter->num_rx_queues;
	u32 txdctl, rxdctl;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		j = adapter->tx_ring[i].reg_idx;
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
		/* enable WTHRESH=8 descriptors, to encourage burst writeback */
		txdctl |= (8 << 16);
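		/* Illustrative note: WTHRESH sits at bits 16 and up of
		 * TXDCTL, so (8 << 16) asks the device to accumulate 8
		 * completed descriptors before a writeback, trading a
		 * little completion latency for fewer PCIe writes.
		 */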
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
	}

	for (i = 0; i < adapter->num_tx_queues; i++) {
		j = adapter->tx_ring[i].reg_idx;
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
		txdctl |= IXGBE_TXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
	}

	for (i = 0; i < num_rx_rings; i++) {
		j = adapter->rx_ring[i].reg_idx;
		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
		rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
		if (hw->mac.type == ixgbe_mac_X540_vf) {
			rxdctl &= ~IXGBE_RXDCTL_RLPMLMASK;
			rxdctl |= ((netdev->mtu + ETH_HLEN + ETH_FCS_LEN) |
				   IXGBE_RXDCTL_RLPML_EN);
		}
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
		ixgbevf_rx_desc_queue_enable(adapter, i);
	}

	ixgbevf_configure_msix(adapter);

	spin_lock_bh(&adapter->mbx_lock);

	if (is_valid_ether_addr(hw->mac.addr))
		hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
	else
		hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0);

	spin_unlock_bh(&adapter->mbx_lock);

	clear_bit(__IXGBEVF_DOWN, &adapter->state);
	ixgbevf_napi_enable_all(adapter);

	/* enable transmits */
	netif_tx_start_all_queues(netdev);

	ixgbevf_save_reset_stats(adapter);
	ixgbevf_init_last_counter_stats(adapter);

	hw->mac.get_link_status = 1;
	mod_timer(&adapter->watchdog_timer, jiffies);
}
static int ixgbevf_reset_queues(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbevf_ring *rx_ring;
	unsigned int def_q = 0;
	unsigned int num_tcs = 0;
	unsigned int num_rx_queues = 1;
	int err, i;

	spin_lock_bh(&adapter->mbx_lock);

	/* fetch queue configuration from the PF */
	err = ixgbevf_get_queues(hw, &num_tcs, &def_q);

	spin_unlock_bh(&adapter->mbx_lock);

	if (err)
		return err;

	if (num_tcs > 1) {
		/* update default Tx ring register index */
		adapter->tx_ring[0].reg_idx = def_q;

		/* we need as many queues as traffic classes */
		num_rx_queues = num_tcs;
	}

	/* nothing to do if we have the correct number of queues */
	if (adapter->num_rx_queues == num_rx_queues)
		return 0;

	/* allocate new rings */
	rx_ring = kcalloc(num_rx_queues,
			  sizeof(struct ixgbevf_ring), GFP_KERNEL);
	if (!rx_ring)
		return -ENOMEM;

	/* setup ring fields */
	for (i = 0; i < num_rx_queues; i++) {
		rx_ring[i].count = adapter->rx_ring_count;
		rx_ring[i].queue_index = i;
		rx_ring[i].reg_idx = i;
		rx_ring[i].dev = &adapter->pdev->dev;
		rx_ring[i].netdev = adapter->netdev;

		/* allocate resources on the ring */
		err = ixgbevf_setup_rx_resources(adapter, &rx_ring[i]);
		if (err) {
			while (i) {
				i--;
				ixgbevf_free_rx_resources(adapter,
							  &rx_ring[i]);
			}
			kfree(rx_ring);
			return err;
		}
	}

	/* free the existing rings and queues */
	ixgbevf_free_all_rx_resources(adapter);
	adapter->num_rx_queues = 0;
	kfree(adapter->rx_ring);

	/* move new rings into position on the adapter struct */
	adapter->rx_ring = rx_ring;
	adapter->num_rx_queues = num_rx_queues;

	/* reset ring to vector mapping */
	ixgbevf_reset_q_vectors(adapter);
	ixgbevf_map_rings_to_vectors(adapter);

	return 0;
}
void ixgbevf_up(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	ixgbevf_reset_queues(adapter);

	ixgbevf_configure(adapter);

	ixgbevf_up_complete(adapter);

	/* clear any pending interrupts, may auto mask */
	IXGBE_READ_REG(hw, IXGBE_VTEICR);

	ixgbevf_irq_enable(adapter);
}
/**
 * ixgbevf_clean_rx_ring - Free Rx Buffers per Queue
 * @adapter: board private structure
 * @rx_ring: ring to free buffers from
 **/
static void ixgbevf_clean_rx_ring(struct ixgbevf_adapter *adapter,
				  struct ixgbevf_ring *rx_ring)
{
	struct pci_dev *pdev = adapter->pdev;
	unsigned long size;
	unsigned int i;

	if (!rx_ring->rx_buffer_info)
		return;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		struct ixgbevf_rx_buffer *rx_buffer_info;

		rx_buffer_info = &rx_ring->rx_buffer_info[i];
		if (rx_buffer_info->dma) {
			dma_unmap_single(&pdev->dev, rx_buffer_info->dma,
					 rx_ring->rx_buf_len,
					 DMA_FROM_DEVICE);
			rx_buffer_info->dma = 0;
		}
		if (rx_buffer_info->skb) {
			struct sk_buff *skb = rx_buffer_info->skb;
			rx_buffer_info->skb = NULL;
			do {
				struct sk_buff *this = skb;
				skb = IXGBE_CB(skb)->prev;
				dev_kfree_skb(this);
			} while (skb);
		}
	}

	size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
	memset(rx_ring->rx_buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	if (rx_ring->head)
		writel(0, adapter->hw.hw_addr + rx_ring->head);
	if (rx_ring->tail)
		writel(0, adapter->hw.hw_addr + rx_ring->tail);
}
/**
 * ixgbevf_clean_tx_ring - Free Tx Buffers
 * @adapter: board private structure
 * @tx_ring: ring to be cleaned
 **/
static void ixgbevf_clean_tx_ring(struct ixgbevf_adapter *adapter,
				  struct ixgbevf_ring *tx_ring)
{
	struct ixgbevf_tx_buffer *tx_buffer_info;
	unsigned long size;
	unsigned int i;

	if (!tx_ring->tx_buffer_info)
		return;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->count; i++) {
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
	}

	size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
	memset(tx_ring->tx_buffer_info, 0, size);

	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	if (tx_ring->head)
		writel(0, adapter->hw.hw_addr + tx_ring->head);
	if (tx_ring->tail)
		writel(0, adapter->hw.hw_addr + tx_ring->tail);
}

/**
 * ixgbevf_clean_all_rx_rings - Free Rx Buffers for all queues
 * @adapter: board private structure
 **/
static void ixgbevf_clean_all_rx_rings(struct ixgbevf_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		ixgbevf_clean_rx_ring(adapter, &adapter->rx_ring[i]);
}

/**
 * ixgbevf_clean_all_tx_rings - Free Tx Buffers for all queues
 * @adapter: board private structure
 **/
static void ixgbevf_clean_all_tx_rings(struct ixgbevf_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		ixgbevf_clean_tx_ring(adapter, &adapter->tx_ring[i]);
}
void ixgbevf_down(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 txdctl;
	int i, j;

	/* signal that we are down to the interrupt handler */
	set_bit(__IXGBEVF_DOWN, &adapter->state);

	/* disable all enabled rx queues */
	for (i = 0; i < adapter->num_rx_queues; i++)
		ixgbevf_disable_rx_queue(adapter, &adapter->rx_ring[i]);

	netif_tx_disable(netdev);

	msleep(10);

	netif_tx_stop_all_queues(netdev);

	ixgbevf_irq_disable(adapter);

	ixgbevf_napi_disable_all(adapter);

	del_timer_sync(&adapter->watchdog_timer);
	/* can't call flush scheduled work here because it can deadlock
	 * if linkwatch_event tries to acquire the rtnl_lock which we are
	 * holding */
	while (adapter->flags & IXGBE_FLAG_IN_WATCHDOG_TASK)
		msleep(1);

	/* disable transmits in the hardware now that interrupts are off */
	for (i = 0; i < adapter->num_tx_queues; i++) {
		j = adapter->tx_ring[i].reg_idx;
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j),
				(txdctl & ~IXGBE_TXDCTL_ENABLE));
	}

	netif_carrier_off(netdev);

	if (!pci_channel_offline(adapter->pdev))
		ixgbevf_reset(adapter);

	ixgbevf_clean_all_tx_rings(adapter);
	ixgbevf_clean_all_rx_rings(adapter);
}
void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter)
{
	WARN_ON(in_interrupt());

	while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
		msleep(1);

	ixgbevf_down(adapter);
	ixgbevf_up(adapter);

	clear_bit(__IXGBEVF_RESETTING, &adapter->state);
}

void ixgbevf_reset(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;

	if (hw->mac.ops.reset_hw(hw)) {
		hw_dbg(hw, "PF still resetting\n");
	} else {
		hw->mac.ops.init_hw(hw);
		ixgbevf_negotiate_api(adapter);
	}

	if (is_valid_ether_addr(adapter->hw.mac.addr)) {
		memcpy(netdev->dev_addr, adapter->hw.mac.addr,
		       netdev->addr_len);
		memcpy(netdev->perm_addr, adapter->hw.mac.addr,
		       netdev->addr_len);
	}
}
static int ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
					int vectors)
{
	int err = 0;
	int vector_threshold;

	/* We'll want at least 2 (vector_threshold):
	 * 1) TxQ[0] + RxQ[0] handler
	 * 2) Other (Link Status Change, etc.)
	 */
	vector_threshold = MIN_MSIX_COUNT;

	/* The more we get, the more we will assign to Tx/Rx Cleanup
	 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
	 * Right now, we simply care about how many we'll get; we'll
	 * set them up later while requesting irq's.
	 */
	while (vectors >= vector_threshold) {
		err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
				      vectors);
		if (!err || err < 0) /* Success or a nasty failure. */
			break;
		else /* err == number of vectors we should try again with */
			vectors = err;
	}

	if (vectors < vector_threshold)
		err = -ENOMEM;

	if (err) {
		dev_err(&adapter->pdev->dev,
			"Unable to allocate MSI-X interrupts\n");
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	} else {
		/*
		 * Adjust for only the vectors we'll use, which is minimum
		 * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
		 * vectors we were allocated.
		 */
		adapter->num_msix_vectors = vectors;
	}

	return err;
}
/**
 * ixgbevf_set_num_queues - Allocate queues for device, feature dependent
 * @adapter: board private structure to initialize
 *
 * This is the top level queue allocation routine.  The order here is very
 * important, starting with the "most" number of features turned on at once,
 * and ending with the smallest set of features.  This way large combinations
 * can be allocated if they're turned on, and smaller combinations are the
 * fallthrough conditions.
 *
 **/
static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
{
	/* Start with base case */
	adapter->num_rx_queues = 1;
	adapter->num_tx_queues = 1;
}
/**
 * ixgbevf_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.  The polling_netdev array is
 * intended for Multiqueue, but should work fine with a single queue.
 **/
static int ixgbevf_alloc_queues(struct ixgbevf_adapter *adapter)
{
	int i;

	adapter->tx_ring = kcalloc(adapter->num_tx_queues,
				   sizeof(struct ixgbevf_ring), GFP_KERNEL);
	if (!adapter->tx_ring)
		goto err_tx_ring_allocation;

	adapter->rx_ring = kcalloc(adapter->num_rx_queues,
				   sizeof(struct ixgbevf_ring), GFP_KERNEL);
	if (!adapter->rx_ring)
		goto err_rx_ring_allocation;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		adapter->tx_ring[i].count = adapter->tx_ring_count;
		adapter->tx_ring[i].queue_index = i;
		/* reg_idx may be remapped later by DCB config */
		adapter->tx_ring[i].reg_idx = i;
		adapter->tx_ring[i].dev = &adapter->pdev->dev;
		adapter->tx_ring[i].netdev = adapter->netdev;
	}

	for (i = 0; i < adapter->num_rx_queues; i++) {
		adapter->rx_ring[i].count = adapter->rx_ring_count;
		adapter->rx_ring[i].queue_index = i;
		adapter->rx_ring[i].reg_idx = i;
		adapter->rx_ring[i].dev = &adapter->pdev->dev;
		adapter->rx_ring[i].netdev = adapter->netdev;
	}

	return 0;

err_rx_ring_allocation:
	kfree(adapter->tx_ring);
err_tx_ring_allocation:
	return -ENOMEM;
}
/**
 * ixgbevf_set_interrupt_capability - set MSI-X or FAIL if not supported
 * @adapter: board private structure to initialize
 *
 * Attempt to configure the interrupts using the best available
 * capabilities of the hardware and the kernel.
 **/
static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err = 0;
	int vector, v_budget;

	/*
	 * It's easy to be greedy for MSI-X vectors, but it really
	 * doesn't do us much good if we have a lot more vectors
	 * than CPU's.  So let's be conservative and only ask for
	 * (roughly) the same number of vectors as there are CPU's.
	 * The default is to use pairs of vectors.
	 */
	v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues);
	v_budget = min_t(int, v_budget, num_online_cpus());
	v_budget += NON_Q_VECTORS;

	/* A failure in MSI-X entry allocation isn't fatal, but it does
	 * mean we disable MSI-X capabilities of the adapter. */
	adapter->msix_entries = kcalloc(v_budget,
					sizeof(struct msix_entry), GFP_KERNEL);
	if (!adapter->msix_entries) {
		err = -ENOMEM;
		goto out;
	}

	for (vector = 0; vector < v_budget; vector++)
		adapter->msix_entries[vector].entry = vector;

	err = ixgbevf_acquire_msix_vectors(adapter, v_budget);
	if (err)
		goto out;

	err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues);
	if (err)
		goto out;

	err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues);

out:
	return err;
}
/**
 * ixgbevf_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt.  If allocation fails we
 * return -ENOMEM.
 **/
static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter)
{
	int q_idx, num_q_vectors;
	struct ixgbevf_q_vector *q_vector;

	num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
		q_vector = kzalloc(sizeof(struct ixgbevf_q_vector), GFP_KERNEL);
		if (!q_vector)
			goto err_out;
		q_vector->adapter = adapter;
		q_vector->v_idx = q_idx;
		netif_napi_add(adapter->netdev, &q_vector->napi,
			       ixgbevf_poll, 64);
#ifdef CONFIG_NET_RX_BUSY_POLL
		napi_hash_add(&q_vector->napi);
#endif
		adapter->q_vector[q_idx] = q_vector;
	}

	return 0;

err_out:
	while (q_idx) {
		q_idx--;
		q_vector = adapter->q_vector[q_idx];
#ifdef CONFIG_NET_RX_BUSY_POLL
		napi_hash_del(&q_vector->napi);
#endif
		netif_napi_del(&q_vector->napi);
		kfree(q_vector);
		adapter->q_vector[q_idx] = NULL;
	}
	return -ENOMEM;
}
/**
 * ixgbevf_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void ixgbevf_free_q_vectors(struct ixgbevf_adapter *adapter)
{
	int q_idx, num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
		struct ixgbevf_q_vector *q_vector = adapter->q_vector[q_idx];

		adapter->q_vector[q_idx] = NULL;
#ifdef CONFIG_NET_RX_BUSY_POLL
		napi_hash_del(&q_vector->napi);
#endif
		netif_napi_del(&q_vector->napi);
		kfree(q_vector);
	}
}
/**
 * ixgbevf_reset_interrupt_capability - Reset MSIX setup
 * @adapter: board private structure
 *
 **/
static void ixgbevf_reset_interrupt_capability(struct ixgbevf_adapter *adapter)
{
	pci_disable_msix(adapter->pdev);
	kfree(adapter->msix_entries);
	adapter->msix_entries = NULL;
}
/**
 * ixgbevf_init_interrupt_scheme - Determine if MSIX is supported and init
 * @adapter: board private structure to initialize
 *
 **/
static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter)
{
	int err;

	/* Number of supported queues */
	ixgbevf_set_num_queues(adapter);

	err = ixgbevf_set_interrupt_capability(adapter);
	if (err) {
		hw_dbg(&adapter->hw,
		       "Unable to setup interrupt capabilities\n");
		goto err_set_interrupt;
	}

	err = ixgbevf_alloc_q_vectors(adapter);
	if (err) {
		hw_dbg(&adapter->hw, "Unable to allocate memory for queue "
		       "vectors\n");
		goto err_alloc_q_vectors;
	}

	err = ixgbevf_alloc_queues(adapter);
	if (err) {
		pr_err("Unable to allocate memory for queues\n");
		goto err_alloc_queues;
	}

	hw_dbg(&adapter->hw, "Multiqueue %s: Rx Queue count = %u, "
	       "Tx Queue count = %u\n",
	       (adapter->num_rx_queues > 1) ? "Enabled" :
	       "Disabled", adapter->num_rx_queues, adapter->num_tx_queues);

	set_bit(__IXGBEVF_DOWN, &adapter->state);

	return 0;
err_alloc_queues:
	ixgbevf_free_q_vectors(adapter);
err_alloc_q_vectors:
	ixgbevf_reset_interrupt_capability(adapter);
err_set_interrupt:
	return err;
}
/**
 * ixgbevf_clear_interrupt_scheme - Clear the current interrupt scheme settings
 * @adapter: board private structure to clear interrupt scheme on
 *
 * We go through and clear interrupt specific resources and reset the structure
 * to pre-load conditions
 **/
static void ixgbevf_clear_interrupt_scheme(struct ixgbevf_adapter *adapter)
{
	adapter->num_tx_queues = 0;
	adapter->num_rx_queues = 0;

	ixgbevf_free_q_vectors(adapter);
	ixgbevf_reset_interrupt_capability(adapter);
}
2148 * ixgbevf_sw_init - Initialize general software structures
2149 * (struct ixgbevf_adapter)
2150 * @adapter: board private structure to initialize
2152 * ixgbevf_sw_init initializes the Adapter private data structure.
2153 * Fields are initialized based on PCI device information and
2154 * OS network device settings (MTU size).
2156 static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
2158 struct ixgbe_hw *hw = &adapter->hw;
2159 struct pci_dev *pdev = adapter->pdev;
2160 struct net_device *netdev = adapter->netdev;
2161 int err;
2163 /* PCI config space info */
2165 hw->vendor_id = pdev->vendor;
2166 hw->device_id = pdev->device;
2167 hw->revision_id = pdev->revision;
2168 hw->subsystem_vendor_id = pdev->subsystem_vendor;
2169 hw->subsystem_device_id = pdev->subsystem_device;
2171 hw->mbx.ops.init_params(hw);
2173 /* assume legacy case in which PF would only give VF 2 queues */
2174 hw->mac.max_tx_queues = 2;
2175 hw->mac.max_rx_queues = 2;
2177 /* lock to protect mailbox accesses */
2178 spin_lock_init(&adapter->mbx_lock);
2180 err = hw->mac.ops.reset_hw(hw);
2181 if (err) {
2182 dev_info(&pdev->dev,
2183 "PF still in reset state. Is the PF interface up?\n");
2184 } else {
2185 err = hw->mac.ops.init_hw(hw);
2186 if (err) {
2187 pr_err("init_hw failed: %d\n", err);
2188 goto out;
2189 }
2190 ixgbevf_negotiate_api(adapter);
2191 err = hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
2192 if (err)
2193 dev_info(&pdev->dev, "Error reading MAC address\n");
2194 else if (is_zero_ether_addr(adapter->hw.mac.addr))
2195 dev_info(&pdev->dev,
2196 "MAC address not assigned by administrator.\n");
2197 memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
2198 }
2200 if (!is_valid_ether_addr(netdev->dev_addr)) {
2201 dev_info(&pdev->dev, "Assigning random MAC address\n");
2202 eth_hw_addr_random(netdev);
2203 memcpy(hw->mac.addr, netdev->dev_addr, netdev->addr_len);
2206 /* Enable dynamic interrupt throttling rates */
2207 adapter->rx_itr_setting = 1;
2208 adapter->tx_itr_setting = 1;
2210 /* set default ring sizes */
2211 adapter->tx_ring_count = IXGBEVF_DEFAULT_TXD;
2212 adapter->rx_ring_count = IXGBEVF_DEFAULT_RXD;
2214 set_bit(__IXGBEVF_DOWN, &adapter->state);
2215 return 0;
2217 out:
2218 return err;
2219 }
2221 #define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter) \
2222 { \
2223 u32 current_counter = IXGBE_READ_REG(hw, reg); \
2224 if (current_counter < last_counter) \
2225 counter += 0x100000000LL; \
2226 last_counter = current_counter; \
2227 counter &= 0xFFFFFFFF00000000LL; \
2228 counter |= current_counter; \
2229 }
2231 #define UPDATE_VF_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
2232 { \
2233 u64 current_counter_lsb = IXGBE_READ_REG(hw, reg_lsb); \
2234 u64 current_counter_msb = IXGBE_READ_REG(hw, reg_msb); \
2235 u64 current_counter = (current_counter_msb << 32) | \
2236 current_counter_lsb; \
2237 if (current_counter < last_counter) \
2238 counter += 0x1000000000LL; \
2239 last_counter = current_counter; \
2240 counter &= 0xFFFFFFF000000000LL; \
2241 counter |= current_counter; \
2242 }
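/* Worked example of the 36-bit wraparound handling above (illustrative
 * values, not read from hardware): if last_counter is 0xFFFFFFFF0 and the
 * register pair now reads 0x10, then current_counter < last_counter, so
 * one full 36-bit wrap (0x1000000000 = 2^36) is credited before the low
 * 36 bits are replaced. The same arithmetic outside the macro:
 *
 *	u64 last = 0xFFFFFFFF0ULL, now = 0x10ULL, total = 0xFFFFFFFF0ULL;
 *	if (now < last)
 *		total += 0x1000000000ULL;	(one 36-bit wrap)
 *	total = (total & 0xFFFFFFF000000000ULL) | now;	(total = 0x1000000010)
 */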
2244 * ixgbevf_update_stats - Update the board statistics counters.
2245 * @adapter: board private structure
2247 void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
2249 struct ixgbe_hw *hw = &adapter->hw;
2250 int i;
2252 if (!adapter->link_up)
2253 return;
2255 UPDATE_VF_COUNTER_32bit(IXGBE_VFGPRC, adapter->stats.last_vfgprc,
2256 adapter->stats.vfgprc);
2257 UPDATE_VF_COUNTER_32bit(IXGBE_VFGPTC, adapter->stats.last_vfgptc,
2258 adapter->stats.vfgptc);
2259 UPDATE_VF_COUNTER_36bit(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
2260 adapter->stats.last_vfgorc,
2261 adapter->stats.vfgorc);
2262 UPDATE_VF_COUNTER_36bit(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
2263 adapter->stats.last_vfgotc,
2264 adapter->stats.vfgotc);
2265 UPDATE_VF_COUNTER_32bit(IXGBE_VFMPRC, adapter->stats.last_vfmprc,
2266 adapter->stats.vfmprc);
2268 for (i = 0; i < adapter->num_rx_queues; i++) {
2269 adapter->hw_csum_rx_error +=
2270 adapter->rx_ring[i].hw_csum_rx_error;
2271 adapter->hw_csum_rx_good +=
2272 adapter->rx_ring[i].hw_csum_rx_good;
2273 adapter->rx_ring[i].hw_csum_rx_error = 0;
2274 adapter->rx_ring[i].hw_csum_rx_good = 0;
2279 * ixgbevf_watchdog - Timer Call-back
2280 * @data: pointer to adapter cast into an unsigned long
2282 static void ixgbevf_watchdog(unsigned long data)
2284 struct ixgbevf_adapter *adapter = (struct ixgbevf_adapter *)data;
2285 struct ixgbe_hw *hw = &adapter->hw;
2286 u32 eics = 0;
2287 int i;
2289 /*
2290 * Do the watchdog outside of interrupt context due to the lovely
2291 * delays that some of the newer hardware requires
2292 */
2294 if (test_bit(__IXGBEVF_DOWN, &adapter->state))
2295 goto watchdog_short_circuit;
2297 /* get one bit for every active tx/rx interrupt vector */
2298 for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
2299 struct ixgbevf_q_vector *qv = adapter->q_vector[i];
2300 if (qv->rx.ring || qv->tx.ring)
2301 eics |= 1 << i;
2302 }
2304 IXGBE_WRITE_REG(hw, IXGBE_VTEICS, eics);
2306 watchdog_short_circuit:
2307 schedule_work(&adapter->watchdog_task);
2308 }
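/* Illustrative value for the loop above (assuming the u32 eics accumulator
 * declared at the top of ixgbevf_watchdog()): with two q_vectors that each
 * own at least one ring, eics ends up as (1 << 0) | (1 << 1) = 0x3, so the
 * VTEICS write raises a software interrupt on both vectors.
 */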
2311 * ixgbevf_tx_timeout - Respond to a Tx Hang
2312 * @netdev: network interface device structure
2314 static void ixgbevf_tx_timeout(struct net_device *netdev)
2316 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2318 /* Do the reset outside of interrupt context */
2319 schedule_work(&adapter->reset_task);
2322 static void ixgbevf_reset_task(struct work_struct *work)
2324 struct ixgbevf_adapter *adapter;
2325 adapter = container_of(work, struct ixgbevf_adapter, reset_task);
2327 /* If we're already down or resetting, just bail */
2328 if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
2329 test_bit(__IXGBEVF_RESETTING, &adapter->state))
2330 return;
2332 adapter->tx_timeout_count++;
2334 ixgbevf_reinit_locked(adapter);
2338 * ixgbevf_watchdog_task - worker thread to bring link up
2339 * @work: pointer to work_struct containing our data
2341 static void ixgbevf_watchdog_task(struct work_struct *work)
2343 struct ixgbevf_adapter *adapter = container_of(work,
2344 struct ixgbevf_adapter,
2345 watchdog_task);
2346 struct net_device *netdev = adapter->netdev;
2347 struct ixgbe_hw *hw = &adapter->hw;
2348 u32 link_speed = adapter->link_speed;
2349 bool link_up = adapter->link_up;
2350 s32 need_reset;
2352 adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK;
2354 /*
2355 * Always check the link on the watchdog because we have
2356 * no LSC interrupt
2357 */
2358 spin_lock_bh(&adapter->mbx_lock);
2360 need_reset = hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
2362 spin_unlock_bh(&adapter->mbx_lock);
2364 if (need_reset) {
2365 adapter->link_up = link_up;
2366 adapter->link_speed = link_speed;
2367 netif_carrier_off(netdev);
2368 netif_tx_stop_all_queues(netdev);
2369 schedule_work(&adapter->reset_task);
2370 goto pf_has_reset;
2371 }
2372 adapter->link_up = link_up;
2373 adapter->link_speed = link_speed;
2375 if (link_up) {
2376 if (!netif_carrier_ok(netdev)) {
2377 char *link_speed_string;
2378 switch (link_speed) {
2379 case IXGBE_LINK_SPEED_10GB_FULL:
2380 link_speed_string = "10 Gbps";
2381 break;
2382 case IXGBE_LINK_SPEED_1GB_FULL:
2383 link_speed_string = "1 Gbps";
2384 break;
2385 case IXGBE_LINK_SPEED_100_FULL:
2386 link_speed_string = "100 Mbps";
2387 break;
2388 default:
2389 link_speed_string = "unknown speed";
2390 break;
2391 }
2392 dev_info(&adapter->pdev->dev,
2393 "NIC Link is Up, %s\n", link_speed_string);
2394 netif_carrier_on(netdev);
2395 netif_tx_wake_all_queues(netdev);
2396 }
2397 } else {
2398 adapter->link_up = false;
2399 adapter->link_speed = 0;
2400 if (netif_carrier_ok(netdev)) {
2401 dev_info(&adapter->pdev->dev, "NIC Link is Down\n");
2402 netif_carrier_off(netdev);
2403 netif_tx_stop_all_queues(netdev);
2407 ixgbevf_update_stats(adapter);
2409 pf_has_reset:
2410 /* Reset the timer */
2411 if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
2412 mod_timer(&adapter->watchdog_timer,
2413 round_jiffies(jiffies + (2 * HZ)));
2415 adapter->flags &= ~IXGBE_FLAG_IN_WATCHDOG_TASK;
2419 * ixgbevf_free_tx_resources - Free Tx Resources per Queue
2420 * @adapter: board private structure
2421 * @tx_ring: Tx descriptor ring for a specific queue
2423 * Free all transmit software resources
2425 void ixgbevf_free_tx_resources(struct ixgbevf_adapter *adapter,
2426 struct ixgbevf_ring *tx_ring)
2428 struct pci_dev *pdev = adapter->pdev;
2430 ixgbevf_clean_tx_ring(adapter, tx_ring);
2432 vfree(tx_ring->tx_buffer_info);
2433 tx_ring->tx_buffer_info = NULL;
2435 dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
2436 tx_ring->dma);
2438 tx_ring->desc = NULL;
2442 * ixgbevf_free_all_tx_resources - Free Tx Resources for All Queues
2443 * @adapter: board private structure
2445 * Free all transmit software resources
2447 static void ixgbevf_free_all_tx_resources(struct ixgbevf_adapter *adapter)
2451 for (i = 0; i < adapter->num_tx_queues; i++)
2452 if (adapter->tx_ring[i].desc)
2453 ixgbevf_free_tx_resources(adapter,
2454 &adapter->tx_ring[i]);
2459 * ixgbevf_setup_tx_resources - allocate Tx resources (Descriptors)
2460 * @adapter: board private structure
2461 * @tx_ring: tx descriptor ring (for a specific queue) to setup
2463 * Return 0 on success, negative on failure
2465 int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *adapter,
2466 struct ixgbevf_ring *tx_ring)
2468 struct pci_dev *pdev = adapter->pdev;
2469 int size;
2471 size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
2472 tx_ring->tx_buffer_info = vzalloc(size);
2473 if (!tx_ring->tx_buffer_info)
2474 goto err;
2476 /* round up to nearest 4K */
2477 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
2478 tx_ring->size = ALIGN(tx_ring->size, 4096);
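/* Illustrative ALIGN() arithmetic (descriptor counts assumed): a ring of
 * 1024 descriptors at 16 bytes per union ixgbe_adv_tx_desc is 16384 bytes,
 * already a multiple of 4096; a hypothetical ring of 100 descriptors
 * (1600 bytes) would round up to 4096.
 */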
2480 tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
2481 &tx_ring->dma, GFP_KERNEL);
2482 if (!tx_ring->desc)
2483 goto err;
2485 tx_ring->next_to_use = 0;
2486 tx_ring->next_to_clean = 0;
2488 return 0;
2489 err:
2490 vfree(tx_ring->tx_buffer_info);
2491 tx_ring->tx_buffer_info = NULL;
2492 hw_dbg(&adapter->hw, "Unable to allocate memory for the transmit "
2493 "descriptor ring\n");
2494 return -ENOMEM;
2495 }
2498 * ixgbevf_setup_all_tx_resources - allocate all queues Tx resources
2499 * @adapter: board private structure
2501 * If this function returns with an error, then it's possible one or
2502 * more of the rings is populated (while the rest are not). It is the
2503 * caller's duty to clean those orphaned rings.
2505 * Return 0 on success, negative on failure
2507 static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter)
2511 for (i = 0; i < adapter->num_tx_queues; i++) {
2512 err = ixgbevf_setup_tx_resources(adapter, &adapter->tx_ring[i]);
2513 if (!err)
2514 continue;
2515 hw_dbg(&adapter->hw,
2516 "Allocation for Tx Queue %u failed\n", i);
2517 break;
2518 }
2520 return err;
2521 }
2524 * ixgbevf_setup_rx_resources - allocate Rx resources (Descriptors)
2525 * @adapter: board private structure
2526 * @rx_ring: rx descriptor ring (for a specific queue) to setup
2528 * Returns 0 on success, negative on failure
2530 int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *adapter,
2531 struct ixgbevf_ring *rx_ring)
2533 struct pci_dev *pdev = adapter->pdev;
2534 int size;
2536 size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
2537 rx_ring->rx_buffer_info = vzalloc(size);
2538 if (!rx_ring->rx_buffer_info)
2539 goto alloc_failed;
2541 /* Round up to nearest 4K */
2542 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
2543 rx_ring->size = ALIGN(rx_ring->size, 4096);
2545 rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
2546 &rx_ring->dma, GFP_KERNEL);
2548 if (!rx_ring->desc) {
2549 vfree(rx_ring->rx_buffer_info);
2550 rx_ring->rx_buffer_info = NULL;
2551 goto alloc_failed;
2552 }
2554 rx_ring->next_to_clean = 0;
2555 rx_ring->next_to_use = 0;
2557 return 0;
2559 alloc_failed:
2560 return -ENOMEM;
2561 }
2563 * ixgbevf_setup_all_rx_resources - allocate all queues Rx resources
2564 * @adapter: board private structure
2566 * If this function returns with an error, then it's possible one or
2567 * more of the rings is populated (while the rest are not). It is the
2568 * caller's duty to clean those orphaned rings.
2570 * Return 0 on success, negative on failure
2572 static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter)
2576 for (i = 0; i < adapter->num_rx_queues; i++) {
2577 err = ixgbevf_setup_rx_resources(adapter, &adapter->rx_ring[i]);
2578 if (!err)
2579 continue;
2580 hw_dbg(&adapter->hw,
2581 "Allocation for Rx Queue %u failed\n", i);
2582 break;
2583 }
2585 return err;
2586 }
2588 * ixgbevf_free_rx_resources - Free Rx Resources
2589 * @adapter: board private structure
2590 * @rx_ring: ring to clean the resources from
2592 * Free all receive software resources
2594 void ixgbevf_free_rx_resources(struct ixgbevf_adapter *adapter,
2595 struct ixgbevf_ring *rx_ring)
2597 struct pci_dev *pdev = adapter->pdev;
2599 ixgbevf_clean_rx_ring(adapter, rx_ring);
2601 vfree(rx_ring->rx_buffer_info);
2602 rx_ring->rx_buffer_info = NULL;
2604 dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
2605 rx_ring->dma);
2607 rx_ring->desc = NULL;
2611 * ixgbevf_free_all_rx_resources - Free Rx Resources for All Queues
2612 * @adapter: board private structure
2614 * Free all receive software resources
2616 static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter)
2620 for (i = 0; i < adapter->num_rx_queues; i++)
2621 if (adapter->rx_ring[i].desc)
2622 ixgbevf_free_rx_resources(adapter,
2623 &adapter->rx_ring[i]);
2626 static int ixgbevf_setup_queues(struct ixgbevf_adapter *adapter)
2628 struct ixgbe_hw *hw = &adapter->hw;
2629 struct ixgbevf_ring *rx_ring;
2630 unsigned int def_q = 0;
2631 unsigned int num_tcs = 0;
2632 unsigned int num_rx_queues = 1;
2633 int err, i;
2635 spin_lock_bh(&adapter->mbx_lock);
2637 /* fetch queue configuration from the PF */
2638 err = ixgbevf_get_queues(hw, &num_tcs, &def_q);
2640 spin_unlock_bh(&adapter->mbx_lock);
2642 if (err)
2643 return err;
2645 if (num_tcs > 1) {
2646 /* update default Tx ring register index */
2647 adapter->tx_ring[0].reg_idx = def_q;
2649 /* we need as many queues as traffic classes */
2650 num_rx_queues = num_tcs;
2651 }
2653 /* nothing to do if we have the correct number of queues */
2654 if (adapter->num_rx_queues == num_rx_queues)
2655 return 0;
2657 /* allocate new rings */
2658 rx_ring = kcalloc(num_rx_queues,
2659 sizeof(struct ixgbevf_ring), GFP_KERNEL);
2660 if (!rx_ring)
2661 return -ENOMEM;
2663 /* setup ring fields */
2664 for (i = 0; i < num_rx_queues; i++) {
2665 rx_ring[i].count = adapter->rx_ring_count;
2666 rx_ring[i].queue_index = i;
2667 rx_ring[i].reg_idx = i;
2668 rx_ring[i].dev = &adapter->pdev->dev;
2669 rx_ring[i].netdev = adapter->netdev;
2670 }
2672 /* free the existing ring and queues */
2673 adapter->num_rx_queues = 0;
2674 kfree(adapter->rx_ring);
2676 /* move new rings into position on the adapter struct */
2677 adapter->rx_ring = rx_ring;
2678 adapter->num_rx_queues = num_rx_queues;
2680 return 0;
2681 }
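/* Illustrative DCB outcome (example values, not from a real PF): if the PF
 * reports num_tcs = 4 and def_q = 8, tx_ring[0].reg_idx becomes 8 and
 * rx_ring[] is reallocated as four rings with queue_index/reg_idx 0..3,
 * one Rx queue per traffic class.
 */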
2684 * ixgbevf_open - Called when a network interface is made active
2685 * @netdev: network interface device structure
2687 * Returns 0 on success, negative value on failure
2689 * The open entry point is called when a network interface is made
2690 * active by the system (IFF_UP). At this point all resources needed
2691 * for transmit and receive operations are allocated, the interrupt
2692 * handler is registered with the OS, the watchdog timer is started,
2693 * and the stack is notified that the interface is ready.
2695 static int ixgbevf_open(struct net_device *netdev)
2697 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2698 struct ixgbe_hw *hw = &adapter->hw;
2699 int err;
2701 /* A previous failure to open the device because of a lack of
2702 * available MSIX vector resources may have reset the number
2703 * of msix vectors variable to zero. The only way to recover
2704 * is to unload/reload the driver and hope that the system has
2705 * been able to recover some MSIX vector resources.
2706 */
2707 if (!adapter->num_msix_vectors)
2708 return -ENOMEM;
2710 /* disallow open during test */
2711 if (test_bit(__IXGBEVF_TESTING, &adapter->state))
2712 return -EBUSY;
2714 if (hw->adapter_stopped) {
2715 ixgbevf_reset(adapter);
2716 /* if adapter is still stopped then PF isn't up and
2717 * the vf can't start. */
2718 if (hw->adapter_stopped) {
2719 err = IXGBE_ERR_MBX;
2720 pr_err("Unable to start - perhaps the PF Driver isn't "
2722 goto err_setup_reset;
2726 /* setup queue reg_idx and Rx queue count */
2727 err = ixgbevf_setup_queues(adapter);
2728 if (err)
2729 goto err_setup_queues;
2731 /* allocate transmit descriptors */
2732 err = ixgbevf_setup_all_tx_resources(adapter);
2733 if (err)
2734 goto err_setup_tx;
2736 /* allocate receive descriptors */
2737 err = ixgbevf_setup_all_rx_resources(adapter);
2738 if (err)
2739 goto err_setup_rx;
2741 ixgbevf_configure(adapter);
2744 * Map the Tx/Rx rings to the vectors we were allotted.
2745 * if request_irq will be called in this function map_rings
2746 * must be called *before* up_complete
2747 */
2748 ixgbevf_map_rings_to_vectors(adapter);
2750 ixgbevf_up_complete(adapter);
2752 /* clear any pending interrupts, may auto mask */
2753 IXGBE_READ_REG(hw, IXGBE_VTEICR);
2754 err = ixgbevf_request_irq(adapter);
2755 if (err)
2756 goto err_req_irq;
2758 ixgbevf_irq_enable(adapter);
2760 return 0;
2762 err_req_irq:
2763 ixgbevf_down(adapter);
2764 err_setup_rx:
2765 ixgbevf_free_all_rx_resources(adapter);
2766 err_setup_tx:
2767 ixgbevf_free_all_tx_resources(adapter);
2768 err_setup_queues:
2769 ixgbevf_reset(adapter);
2771 err_setup_reset:
2773 return err;
2774 }
2777 * ixgbevf_close - Disables a network interface
2778 * @netdev: network interface device structure
2780 * Returns 0, this is not allowed to fail
2782 * The close entry point is called when an interface is de-activated
2783 * by the OS. The hardware is still under the drivers control, but
2784 * needs to be disabled. A global MAC reset is issued to stop the
2785 * hardware, and all transmit and receive resources are freed.
2787 static int ixgbevf_close(struct net_device *netdev)
2789 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2791 ixgbevf_down(adapter);
2792 ixgbevf_free_irq(adapter);
2794 ixgbevf_free_all_tx_resources(adapter);
2795 ixgbevf_free_all_rx_resources(adapter);
2797 return 0;
2798 }
2800 static void ixgbevf_tx_ctxtdesc(struct ixgbevf_ring *tx_ring,
2801 u32 vlan_macip_lens, u32 type_tucmd,
2802 u32 mss_l4len_idx)
2803 {
2804 struct ixgbe_adv_tx_context_desc *context_desc;
2805 u16 i = tx_ring->next_to_use;
2807 context_desc = IXGBEVF_TX_CTXTDESC(tx_ring, i);
2809 i++;
2810 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
2812 /* set bits to identify this as an advanced context descriptor */
2813 type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
2815 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
2816 context_desc->seqnum_seed = 0;
2817 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
2818 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
2821 static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
2822 struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
2824 u32 vlan_macip_lens, type_tucmd;
2825 u32 mss_l4len_idx, l4len;
2827 if (!skb_is_gso(skb))
2828 return 0;
2830 if (skb_header_cloned(skb)) {
2831 int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2832 if (err)
2833 return err;
2834 }
2836 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
2837 type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
2839 if (skb->protocol == htons(ETH_P_IP)) {
2840 struct iphdr *iph = ip_hdr(skb);
2841 iph->tot_len = 0;
2842 iph->check = 0;
2843 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2844 iph->daddr, 0,
2845 IPPROTO_TCP,
2846 0);
2847 type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
2848 } else if (skb_is_gso_v6(skb)) {
2849 ipv6_hdr(skb)->payload_len = 0;
2850 tcp_hdr(skb)->check =
2851 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2852 &ipv6_hdr(skb)->daddr,
2853 0, IPPROTO_TCP, 0);
2854 }
2856 /* compute header lengths */
2857 l4len = tcp_hdrlen(skb);
2859 *hdr_len = skb_transport_offset(skb) + l4len;
2861 /* mss_l4len_id: use 1 as index for TSO */
2862 mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT;
2863 mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
2864 mss_l4len_idx |= 1 << IXGBE_ADVTXD_IDX_SHIFT;
2866 /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
2867 vlan_macip_lens = skb_network_header_len(skb);
2868 vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
2869 vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
2871 ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
2872 type_tucmd, mss_l4len_idx);
2874 return 1;
2875 }
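/* Illustrative field packing for the TSO context descriptor built above
 * (assumes the usual advanced-descriptor layout: IP header length in the
 * low bits, MAC header length at IXGBE_ADVTXD_MACLEN_SHIFT): an untagged
 * IPv4 TCP frame with 14-byte Ethernet, 20-byte IP and 32-byte TCP headers
 * and an MSS of 1448 would use
 *
 *	vlan_macip_lens = 20 | (14 << IXGBE_ADVTXD_MACLEN_SHIFT);
 *	mss_l4len_idx = (32 << IXGBE_ADVTXD_L4LEN_SHIFT) |
 *			(1448 << IXGBE_ADVTXD_MSS_SHIFT) |
 *			(1 << IXGBE_ADVTXD_IDX_SHIFT);	(context index 1)
 */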
2877 static bool ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
2878 struct sk_buff *skb, u32 tx_flags)
2880 u32 vlan_macip_lens = 0;
2881 u32 mss_l4len_idx = 0;
2882 u32 type_tucmd = 0;
2884 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2885 u8 l4_hdr = 0;
2886 switch (skb->protocol) {
2887 case __constant_htons(ETH_P_IP):
2888 vlan_macip_lens |= skb_network_header_len(skb);
2889 type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
2890 l4_hdr = ip_hdr(skb)->protocol;
2891 break;
2892 case __constant_htons(ETH_P_IPV6):
2893 vlan_macip_lens |= skb_network_header_len(skb);
2894 l4_hdr = ipv6_hdr(skb)->nexthdr;
2895 break;
2896 default:
2897 if (unlikely(net_ratelimit())) {
2898 dev_warn(tx_ring->dev,
2899 "partial checksum but proto=%x!\n",
2900 skb->protocol);
2901 }
2902 break;
2903 }
2905 switch (l4_hdr) {
2906 case IPPROTO_TCP:
2907 type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
2908 mss_l4len_idx = tcp_hdrlen(skb) <<
2909 IXGBE_ADVTXD_L4LEN_SHIFT;
2910 break;
2911 case IPPROTO_SCTP:
2912 type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
2913 mss_l4len_idx = sizeof(struct sctphdr) <<
2914 IXGBE_ADVTXD_L4LEN_SHIFT;
2915 break;
2916 case IPPROTO_UDP:
2917 mss_l4len_idx = sizeof(struct udphdr) <<
2918 IXGBE_ADVTXD_L4LEN_SHIFT;
2919 break;
2920 default:
2921 if (unlikely(net_ratelimit())) {
2922 dev_warn(tx_ring->dev,
2923 "partial checksum but l4 proto=%x!\n",
2924 l4_hdr);
2925 }
2926 break;
2927 }
2928 }
2930 /* vlan_macip_lens: MACLEN, VLAN tag */
2931 vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
2932 vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
2934 ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
2935 type_tucmd, mss_l4len_idx);
2937 return (skb->ip_summed == CHECKSUM_PARTIAL);
2940 static int ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
2941 struct sk_buff *skb, u32 tx_flags)
2943 struct ixgbevf_tx_buffer *tx_buffer_info;
2944 unsigned int len;
2945 unsigned int total = skb->len;
2946 unsigned int offset = 0, size;
2947 int count = 0;
2948 unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
2949 unsigned int f;
2950 int i;
2952 i = tx_ring->next_to_use;
2954 len = min(skb_headlen(skb), total);
2955 while (len) {
2956 tx_buffer_info = &tx_ring->tx_buffer_info[i];
2957 size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);
2959 tx_buffer_info->length = size;
2960 tx_buffer_info->mapped_as_page = false;
2961 tx_buffer_info->dma = dma_map_single(tx_ring->dev,
2962 skb->data + offset,
2963 size, DMA_TO_DEVICE);
2964 if (dma_mapping_error(tx_ring->dev, tx_buffer_info->dma))
2965 goto dma_error;
2967 len -= size;
2968 total -= size;
2969 offset += size;
2970 count++;
2971 i++;
2972 if (i == tx_ring->count)
2973 i = 0;
2974 }
2976 for (f = 0; f < nr_frags; f++) {
2977 const struct skb_frag_struct *frag;
2979 frag = &skb_shinfo(skb)->frags[f];
2980 len = min((unsigned int)skb_frag_size(frag), total);
2981 offset = 0;
2983 while (len) {
2984 tx_buffer_info = &tx_ring->tx_buffer_info[i];
2985 size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);
2987 tx_buffer_info->length = size;
2988 tx_buffer_info->dma =
2989 skb_frag_dma_map(tx_ring->dev, frag,
2990 offset, size, DMA_TO_DEVICE);
2991 if (dma_mapping_error(tx_ring->dev,
2992 tx_buffer_info->dma))
2993 goto dma_error;
2994 tx_buffer_info->mapped_as_page = true;
2996 len -= size;
2997 total -= size;
2998 offset += size;
2999 count++;
3000 i++;
3001 if (i == tx_ring->count)
3002 i = 0;
3003 }
3004 if (total == 0)
3005 break;
3006 }
3008 if (i == 0)
3009 i = tx_ring->count - 1;
3010 else
3011 i = i - 1;
3012 tx_ring->tx_buffer_info[i].skb = skb;
3014 return count;
3016 dma_error:
3017 dev_err(tx_ring->dev, "TX DMA map failed\n");
3019 /* clear timestamp and dma mappings for failed tx_buffer_info map */
3020 tx_buffer_info->dma = 0;
3021 count--;
3023 /* clear timestamp and dma mappings for remaining portion of packet */
3024 while (count >= 0) {
3025 count--;
3026 i--;
3027 if (i < 0)
3028 i += tx_ring->count;
3029 tx_buffer_info = &tx_ring->tx_buffer_info[i];
3030 ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
3031 }
3033 return count;
3034 }
3036 static void ixgbevf_tx_queue(struct ixgbevf_ring *tx_ring, int tx_flags,
3037 int count, unsigned int first, u32 paylen,
3038 u8 hdr_len)
3039 {
3040 union ixgbe_adv_tx_desc *tx_desc = NULL;
3041 struct ixgbevf_tx_buffer *tx_buffer_info;
3042 u32 olinfo_status = 0, cmd_type_len = 0;
3043 unsigned int i;
3045 u32 txd_cmd = IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS | IXGBE_TXD_CMD_IFCS;
3047 cmd_type_len |= IXGBE_ADVTXD_DTYP_DATA;
3049 cmd_type_len |= IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;
3051 if (tx_flags & IXGBE_TX_FLAGS_VLAN)
3052 cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
3054 if (tx_flags & IXGBE_TX_FLAGS_CSUM)
3055 olinfo_status |= IXGBE_ADVTXD_POPTS_TXSM;
3057 if (tx_flags & IXGBE_TX_FLAGS_TSO) {
3058 cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
3060 /* use index 1 context for tso */
3061 olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
3062 if (tx_flags & IXGBE_TX_FLAGS_IPV4)
3063 olinfo_status |= IXGBE_ADVTXD_POPTS_IXSM;
3066 /*
3067 * Check Context must be set if Tx switch is enabled, which it
3068 * always is for case where virtual functions are running
3069 */
3070 olinfo_status |= IXGBE_ADVTXD_CC;
3072 olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT);
3074 i = tx_ring->next_to_use;
3075 while (count--) {
3076 tx_buffer_info = &tx_ring->tx_buffer_info[i];
3077 tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
3078 tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma);
3079 tx_desc->read.cmd_type_len =
3080 cpu_to_le32(cmd_type_len | tx_buffer_info->length);
3081 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
3083 if (i == tx_ring->count)
3084 i = 0;
3085 }
3087 tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd);
3089 tx_ring->tx_buffer_info[first].time_stamp = jiffies;
3091 /* Force memory writes to complete before letting h/w
3092 * know there are new descriptors to fetch. (Only
3093 * applicable for weak-ordered memory model archs,
3094 * such as IA-64).
3095 */
3096 wmb();
3098 tx_ring->tx_buffer_info[first].next_to_watch = tx_desc;
3099 tx_ring->next_to_use = i;
3102 static int __ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
3104 struct ixgbevf_adapter *adapter = netdev_priv(tx_ring->netdev);
3106 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
3107 /* Herbert's original patch had:
3108 * smp_mb__after_netif_stop_queue();
3109 * but since that doesn't exist yet, just open code it. */
3110 smp_mb();
3112 /* We need to check again in a case another CPU has just
3113 * made room available. */
3114 if (likely(IXGBE_DESC_UNUSED(tx_ring) < size))
3115 return -EBUSY;
3117 /* A reprieve! - use start_queue because it doesn't call schedule */
3118 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
3119 ++adapter->restart_queue;
3120 return 0;
3121 }
3123 static int ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
3125 if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size))
3126 return 0;
3127 return __ixgbevf_maybe_stop_tx(tx_ring, size);
3128 }
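/* Sketch of the race the two helpers above close (commentary, not driver
 * code): the xmit path stops the queue first and only then re-checks ring
 * space after the smp_mb(); the Tx cleanup path frees descriptors and
 * wakes the queue. Without the re-check, a completion landing between the
 * space check and netif_stop_subqueue() could leave the queue stopped
 * even though room had just become available.
 */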
3130 static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
3132 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3133 struct ixgbevf_ring *tx_ring;
3134 unsigned int first;
3135 unsigned int tx_flags = 0;
3136 u8 hdr_len = 0;
3137 int r_idx = 0, tso;
3138 u16 count = TXD_USE_COUNT(skb_headlen(skb));
3139 #if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
3140 unsigned short f;
3141 #endif
3142 u8 *dst_mac = skb_header_pointer(skb, 0, 0, NULL);
3143 if (!dst_mac || is_link_local_ether_addr(dst_mac)) {
3144 dev_kfree_skb(skb);
3145 return NETDEV_TX_OK;
3146 }
3148 tx_ring = &adapter->tx_ring[r_idx];
3150 /*
3151 * need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD,
3152 * + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD,
3153 * + 2 desc gap to keep tail from touching head,
3154 * + 1 desc for context descriptor,
3155 * otherwise try next time
3156 */
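/* Worked descriptor budget (assuming the conventional 16384-byte
 * IXGBE_MAX_DATA_PER_TXD): a TSO skb with a 256-byte linear head and three
 * 21504-byte frags needs TXD_USE_COUNT(256) = 1 descriptor plus
 * DIV_ROUND_UP(21504, 16384) = 2 per frag, so count = 7; the "+ 3" in the
 * check below covers the context descriptor and the 2-descriptor gap.
 */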
3157 #if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
3158 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
3159 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
3160 #else
3161 count += skb_shinfo(skb)->nr_frags;
3162 #endif
3163 if (ixgbevf_maybe_stop_tx(tx_ring, count + 3)) {
3164 adapter->tx_busy++;
3165 return NETDEV_TX_BUSY;
3166 }
3168 if (vlan_tx_tag_present(skb)) {
3169 tx_flags |= vlan_tx_tag_get(skb);
3170 tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
3171 tx_flags |= IXGBE_TX_FLAGS_VLAN;
3174 first = tx_ring->next_to_use;
3176 if (skb->protocol == htons(ETH_P_IP))
3177 tx_flags |= IXGBE_TX_FLAGS_IPV4;
3178 tso = ixgbevf_tso(tx_ring, skb, tx_flags, &hdr_len);
3179 if (tso < 0) {
3180 dev_kfree_skb_any(skb);
3181 return NETDEV_TX_OK;
3182 }
3184 if (tso)
3185 tx_flags |= IXGBE_TX_FLAGS_TSO | IXGBE_TX_FLAGS_CSUM;
3186 else if (ixgbevf_tx_csum(tx_ring, skb, tx_flags))
3187 tx_flags |= IXGBE_TX_FLAGS_CSUM;
3189 ixgbevf_tx_queue(tx_ring, tx_flags,
3190 ixgbevf_tx_map(tx_ring, skb, tx_flags),
3191 first, skb->len, hdr_len);
3193 writel(tx_ring->next_to_use, adapter->hw.hw_addr + tx_ring->tail);
3195 ixgbevf_maybe_stop_tx(tx_ring, DESC_NEEDED);
3197 return NETDEV_TX_OK;
3201 * ixgbevf_set_mac - Change the Ethernet Address of the NIC
3202 * @netdev: network interface device structure
3203 * @p: pointer to an address structure
3205 * Returns 0 on success, negative on failure
3207 static int ixgbevf_set_mac(struct net_device *netdev, void *p)
3209 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3210 struct ixgbe_hw *hw = &adapter->hw;
3211 struct sockaddr *addr = p;
3213 if (!is_valid_ether_addr(addr->sa_data))
3214 return -EADDRNOTAVAIL;
3216 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
3217 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
3219 spin_lock_bh(&adapter->mbx_lock);
3221 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
3223 spin_unlock_bh(&adapter->mbx_lock);
3225 return 0;
3226 }
3229 * ixgbevf_change_mtu - Change the Maximum Transfer Unit
3230 * @netdev: network interface device structure
3231 * @new_mtu: new value for maximum frame size
3233 * Returns 0 on success, negative on failure
3235 static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
3237 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3238 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
3239 int max_possible_frame = MAXIMUM_ETHERNET_VLAN_SIZE;
3241 switch (adapter->hw.api_version) {
3242 case ixgbe_mbox_api_11:
3243 max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
3244 break;
3245 default:
3246 if (adapter->hw.mac.type == ixgbe_mac_X540_vf)
3247 max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
3248 break;
3249 }
3251 /* MTU < 68 is an error and causes problems on some kernels */
3252 if ((new_mtu < 68) || (max_frame > max_possible_frame))
3253 return -EINVAL;
3255 hw_dbg(&adapter->hw, "changing MTU from %d to %d\n",
3256 netdev->mtu, new_mtu);
3257 /* must set new MTU before calling down or up */
3258 netdev->mtu = new_mtu;
3260 if (netif_running(netdev))
3261 ixgbevf_reinit_locked(adapter);
3263 return 0;
3264 }
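/* Illustrative frame-size arithmetic for the check above: new_mtu = 1500
 * gives max_frame = 1500 + ETH_HLEN (14) + ETH_FCS_LEN (4) = 1518, within
 * the MAXIMUM_ETHERNET_VLAN_SIZE default; a 9000-byte MTU yields a
 * 9018-byte frame and is accepted only when IXGBE_MAX_JUMBO_FRAME_SIZE is
 * in effect, i.e. mailbox API 1.1 or an X540 VF.
 */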
3266 static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state)
3268 struct net_device *netdev = pci_get_drvdata(pdev);
3269 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3274 netif_device_detach(netdev);
3276 if (netif_running(netdev)) {
3277 rtnl_lock();
3278 ixgbevf_down(adapter);
3279 ixgbevf_free_irq(adapter);
3280 ixgbevf_free_all_tx_resources(adapter);
3281 ixgbevf_free_all_rx_resources(adapter);
3282 rtnl_unlock();
3283 }
3285 ixgbevf_clear_interrupt_scheme(adapter);
3288 retval = pci_save_state(pdev);
3289 if (retval)
3290 return retval;
3293 pci_disable_device(pdev);
3295 return 0;
3296 }
3299 static int ixgbevf_resume(struct pci_dev *pdev)
3301 struct ixgbevf_adapter *adapter = pci_get_drvdata(pdev);
3302 struct net_device *netdev = adapter->netdev;
3305 pci_set_power_state(pdev, PCI_D0);
3306 pci_restore_state(pdev);
3307 /*
3308 * pci_restore_state clears dev->state_saved so call
3309 * pci_save_state to restore it.
3310 */
3311 pci_save_state(pdev);
3313 err = pci_enable_device_mem(pdev);
3314 if (err) {
3315 dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n");
3316 return err;
3317 }
3318 pci_set_master(pdev);
3320 ixgbevf_reset(adapter);
3322 rtnl_lock();
3323 err = ixgbevf_init_interrupt_scheme(adapter);
3324 rtnl_unlock();
3325 if (err) {
3326 dev_err(&pdev->dev, "Cannot initialize interrupts\n");
3327 return err;
3328 }
3330 if (netif_running(netdev)) {
3331 err = ixgbevf_open(netdev);
3332 if (err)
3333 return err;
3334 }
3336 netif_device_attach(netdev);
3338 return err;
3339 }
3341 #endif /* CONFIG_PM */
3342 static void ixgbevf_shutdown(struct pci_dev *pdev)
3344 ixgbevf_suspend(pdev, PMSG_SUSPEND);
3347 static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev,
3348 struct rtnl_link_stats64 *stats)
3350 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3351 unsigned int start;
3352 u64 bytes, packets;
3353 const struct ixgbevf_ring *ring;
3354 int i;
3356 ixgbevf_update_stats(adapter);
3358 stats->multicast = adapter->stats.vfmprc - adapter->stats.base_vfmprc;
3360 for (i = 0; i < adapter->num_rx_queues; i++) {
3361 ring = &adapter->rx_ring[i];
3362 do {
3363 start = u64_stats_fetch_begin_bh(&ring->syncp);
3364 bytes = ring->total_bytes;
3365 packets = ring->total_packets;
3366 } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
3367 stats->rx_bytes += bytes;
3368 stats->rx_packets += packets;
3371 for (i = 0; i < adapter->num_tx_queues; i++) {
3372 ring = &adapter->tx_ring[i];
3373 do {
3374 start = u64_stats_fetch_begin_bh(&ring->syncp);
3375 bytes = ring->total_bytes;
3376 packets = ring->total_packets;
3377 } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
3378 stats->tx_bytes += bytes;
3379 stats->tx_packets += packets;
3380 }
3382 return stats;
3383 }
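/* The u64_stats retry loops above re-read each ring's bytes/packets pair
 * until the sequence count is stable, so even a 32-bit reader observes a
 * consistent 64-bit snapshot while the hot path updates the counters; on
 * 64-bit builds the begin/retry helpers compile down to nothing.
 */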
3385 static const struct net_device_ops ixgbevf_netdev_ops = {
3386 .ndo_open = ixgbevf_open,
3387 .ndo_stop = ixgbevf_close,
3388 .ndo_start_xmit = ixgbevf_xmit_frame,
3389 .ndo_set_rx_mode = ixgbevf_set_rx_mode,
3390 .ndo_get_stats64 = ixgbevf_get_stats,
3391 .ndo_validate_addr = eth_validate_addr,
3392 .ndo_set_mac_address = ixgbevf_set_mac,
3393 .ndo_change_mtu = ixgbevf_change_mtu,
3394 .ndo_tx_timeout = ixgbevf_tx_timeout,
3395 .ndo_vlan_rx_add_vid = ixgbevf_vlan_rx_add_vid,
3396 .ndo_vlan_rx_kill_vid = ixgbevf_vlan_rx_kill_vid,
3397 #ifdef CONFIG_NET_RX_BUSY_POLL
3398 .ndo_busy_poll = ixgbevf_busy_poll_recv,
3402 static void ixgbevf_assign_netdev_ops(struct net_device *dev)
3404 dev->netdev_ops = &ixgbevf_netdev_ops;
3405 ixgbevf_set_ethtool_ops(dev);
3406 dev->watchdog_timeo = 5 * HZ;
3410 * ixgbevf_probe - Device Initialization Routine
3411 * @pdev: PCI device information struct
3412 * @ent: entry in ixgbevf_pci_tbl
3414 * Returns 0 on success, negative on failure
3416 * ixgbevf_probe initializes an adapter identified by a pci_dev structure.
3417 * The OS initialization, configuring of the adapter private structure,
3418 * and a hardware reset occur.
3420 static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3422 struct net_device *netdev;
3423 struct ixgbevf_adapter *adapter = NULL;
3424 struct ixgbe_hw *hw = NULL;
3425 const struct ixgbevf_info *ii = ixgbevf_info_tbl[ent->driver_data];
3426 static int cards_found;
3427 int err, pci_using_dac;
3429 err = pci_enable_device(pdev);
3430 if (err)
3431 return err;
3433 if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
3434 !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
3435 pci_using_dac = 1;
3436 } else {
3437 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
3438 if (err) {
3439 err = dma_set_coherent_mask(&pdev->dev,
3440 DMA_BIT_MASK(32));
3441 if (err) {
3442 dev_err(&pdev->dev, "No usable DMA "
3443 "configuration, aborting\n");
3444 goto err_dma;
3445 }
3446 }
3447 pci_using_dac = 0;
3448 }
3450 err = pci_request_regions(pdev, ixgbevf_driver_name);
3451 if (err) {
3452 dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err);
3453 goto err_pci_reg;
3454 }
3456 pci_set_master(pdev);
3458 netdev = alloc_etherdev_mq(sizeof(struct ixgbevf_adapter),
3459 MAX_TX_QUEUES);
3460 if (!netdev) {
3461 err = -ENOMEM;
3462 goto err_alloc_etherdev;
3463 }
3465 SET_NETDEV_DEV(netdev, &pdev->dev);
3467 pci_set_drvdata(pdev, netdev);
3468 adapter = netdev_priv(netdev);
3470 adapter->netdev = netdev;
3471 adapter->pdev = pdev;
3474 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
3477 * call save state here in standalone driver because it relies on
3478 * adapter struct to exist, and needs to call netdev_priv
3480 pci_save_state(pdev);
3482 hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
3483 pci_resource_len(pdev, 0));
3484 if (!hw->hw_addr) {
3485 err = -EIO;
3486 goto err_ioremap;
3487 }
3489 ixgbevf_assign_netdev_ops(netdev);
3491 adapter->bd_number = cards_found;
3494 memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
3495 hw->mac.type = ii->mac;
3497 memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops,
3498 sizeof(struct ixgbe_mbx_operations));
3500 /* setup the private structure */
3501 err = ixgbevf_sw_init(adapter);
3502 if (err)
3503 goto err_sw_init;
3505 /* The HW MAC address was set and/or determined in sw_init */
3506 if (!is_valid_ether_addr(netdev->dev_addr)) {
3507 pr_err("invalid MAC address\n");
3508 err = -EIO;
3509 goto err_sw_init;
3510 }
3512 netdev->hw_features = NETIF_F_SG |
3513 NETIF_F_IP_CSUM |
3514 NETIF_F_IPV6_CSUM |
3515 NETIF_F_TSO |
3516 NETIF_F_TSO6 |
3517 NETIF_F_RXCSUM;
3519 netdev->features = netdev->hw_features |
3520 NETIF_F_HW_VLAN_CTAG_TX |
3521 NETIF_F_HW_VLAN_CTAG_RX |
3522 NETIF_F_HW_VLAN_CTAG_FILTER;
3524 netdev->vlan_features |= NETIF_F_TSO;
3525 netdev->vlan_features |= NETIF_F_TSO6;
3526 netdev->vlan_features |= NETIF_F_IP_CSUM;
3527 netdev->vlan_features |= NETIF_F_IPV6_CSUM;
3528 netdev->vlan_features |= NETIF_F_SG;
3530 if (pci_using_dac)
3531 netdev->features |= NETIF_F_HIGHDMA;
3533 netdev->priv_flags |= IFF_UNICAST_FLT;
3535 init_timer(&adapter->watchdog_timer);
3536 adapter->watchdog_timer.function = ixgbevf_watchdog;
3537 adapter->watchdog_timer.data = (unsigned long)adapter;
3539 INIT_WORK(&adapter->reset_task, ixgbevf_reset_task);
3540 INIT_WORK(&adapter->watchdog_task, ixgbevf_watchdog_task);
3542 err = ixgbevf_init_interrupt_scheme(adapter);
3543 if (err)
3544 goto err_sw_init;
3546 strcpy(netdev->name, "eth%d");
3548 err = register_netdev(netdev);
3549 if (err)
3550 goto err_register;
3552 netif_carrier_off(netdev);
3554 ixgbevf_init_last_counter_stats(adapter);
3556 /* print the MAC address */
3557 hw_dbg(hw, "%pM\n", netdev->dev_addr);
3559 hw_dbg(hw, "MAC: %d\n", hw->mac.type);
3561 hw_dbg(hw, "Intel(R) 82599 Virtual Function\n");
3566 ixgbevf_clear_interrupt_scheme(adapter);
3568 ixgbevf_reset_interrupt_capability(adapter);
3569 iounmap(hw->hw_addr);
3571 free_netdev(netdev);
3573 pci_release_regions(pdev);
3576 pci_disable_device(pdev);
3581 * ixgbevf_remove - Device Removal Routine
3582 * @pdev: PCI device information struct
3584 * ixgbevf_remove is called by the PCI subsystem to alert the driver
3585 * that it should release a PCI device. This could be caused by a
3586 * Hot-Plug event, or because the driver is going to be removed from
3587 * memory.
3588 **/
3589 static void ixgbevf_remove(struct pci_dev *pdev)
3591 struct net_device *netdev = pci_get_drvdata(pdev);
3592 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3594 set_bit(__IXGBEVF_DOWN, &adapter->state);
3596 del_timer_sync(&adapter->watchdog_timer);
3598 cancel_work_sync(&adapter->reset_task);
3599 cancel_work_sync(&adapter->watchdog_task);
3601 if (netdev->reg_state == NETREG_REGISTERED)
3602 unregister_netdev(netdev);
3604 ixgbevf_clear_interrupt_scheme(adapter);
3605 ixgbevf_reset_interrupt_capability(adapter);
3607 iounmap(adapter->hw.hw_addr);
3608 pci_release_regions(pdev);
3610 hw_dbg(&adapter->hw, "Remove complete\n");
3612 kfree(adapter->tx_ring);
3613 kfree(adapter->rx_ring);
3615 free_netdev(netdev);
3617 pci_disable_device(pdev);
3621 * ixgbevf_io_error_detected - called when PCI error is detected
3622 * @pdev: Pointer to PCI device
3623 * @state: The current pci connection state
3625 * This function is called after a PCI bus error affecting
3626 * this device has been detected.
3628 static pci_ers_result_t ixgbevf_io_error_detected(struct pci_dev *pdev,
3629 pci_channel_state_t state)
3631 struct net_device *netdev = pci_get_drvdata(pdev);
3632 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3634 netif_device_detach(netdev);
3636 if (state == pci_channel_io_perm_failure)
3637 return PCI_ERS_RESULT_DISCONNECT;
3639 if (netif_running(netdev))
3640 ixgbevf_down(adapter);
3642 pci_disable_device(pdev);
3644 /* Request a slot reset. */
3645 return PCI_ERS_RESULT_NEED_RESET;
3649 * ixgbevf_io_slot_reset - called after the pci bus has been reset.
3650 * @pdev: Pointer to PCI device
3652 * Restart the card from scratch, as if from a cold-boot. Implementation
3653 * resembles the first-half of the ixgbevf_resume routine.
3655 static pci_ers_result_t ixgbevf_io_slot_reset(struct pci_dev *pdev)
3657 struct net_device *netdev = pci_get_drvdata(pdev);
3658 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3660 if (pci_enable_device_mem(pdev)) {
3661 dev_err(&pdev->dev,
3662 "Cannot re-enable PCI device after reset.\n");
3663 return PCI_ERS_RESULT_DISCONNECT;
3664 }
3666 pci_set_master(pdev);
3668 ixgbevf_reset(adapter);
3670 return PCI_ERS_RESULT_RECOVERED;
3674 * ixgbevf_io_resume - called when traffic can start flowing again.
3675 * @pdev: Pointer to PCI device
3677 * This callback is called when the error recovery driver tells us that
3678 * it's OK to resume normal operation. Implementation resembles the
3679 * second-half of the ixgbevf_resume routine.
3681 static void ixgbevf_io_resume(struct pci_dev *pdev)
3683 struct net_device *netdev = pci_get_drvdata(pdev);
3684 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3686 if (netif_running(netdev))
3687 ixgbevf_up(adapter);
3689 netif_device_attach(netdev);
3692 /* PCI Error Recovery (ERS) */
3693 static const struct pci_error_handlers ixgbevf_err_handler = {
3694 .error_detected = ixgbevf_io_error_detected,
3695 .slot_reset = ixgbevf_io_slot_reset,
3696 .resume = ixgbevf_io_resume,
3699 static struct pci_driver ixgbevf_driver = {
3700 .name = ixgbevf_driver_name,
3701 .id_table = ixgbevf_pci_tbl,
3702 .probe = ixgbevf_probe,
3703 .remove = ixgbevf_remove,
3705 /* Power Management Hooks */
3706 .suspend = ixgbevf_suspend,
3707 .resume = ixgbevf_resume,
3709 .shutdown = ixgbevf_shutdown,
3710 .err_handler = &ixgbevf_err_handler
3714 * ixgbevf_init_module - Driver Registration Routine
3716 * ixgbevf_init_module is the first routine called when the driver is
3717 * loaded. All it does is register with the PCI subsystem.
3719 static int __init ixgbevf_init_module(void)
3722 pr_info("%s - version %s\n", ixgbevf_driver_string,
3723 ixgbevf_driver_version);
3725 pr_info("%s\n", ixgbevf_copyright);
3727 ret = pci_register_driver(&ixgbevf_driver);
3728 return ret;
3729 }
3731 module_init(ixgbevf_init_module);
3734 * ixgbevf_exit_module - Driver Exit Cleanup Routine
3736 * ixgbevf_exit_module is called just before the driver is removed
3739 static void __exit ixgbevf_exit_module(void)
3741 pci_unregister_driver(&ixgbevf_driver);
3746 * ixgbevf_get_hw_dev_name - return device name string
3747 * used by hardware layer to print debugging information
3749 char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw)
3751 struct ixgbevf_adapter *adapter = hw->back;
3752 return adapter->netdev->name;
3756 module_exit(ixgbevf_exit_module);
3758 /* ixgbevf_main.c */