drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
1 /*******************************************************************************
2
3   Intel 82599 Virtual Function driver
4   Copyright(c) 1999 - 2012 Intel Corporation.
5
6   This program is free software; you can redistribute it and/or modify it
7   under the terms and conditions of the GNU General Public License,
8   version 2, as published by the Free Software Foundation.
9
10   This program is distributed in the hope it will be useful, but WITHOUT
11   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
13   more details.
14
15   You should have received a copy of the GNU General Public License along with
16   this program; if not, write to the Free Software Foundation, Inc.,
17   51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19   The full GNU General Public License is included in this distribution in
20   the file called "COPYING".
21
22   Contact Information:
23   e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26 *******************************************************************************/
27
28
29 /******************************************************************************
30  Copyright (c) 2006 - 2007 Myricom, Inc. for some LRO specific code
31 ******************************************************************************/
32
33 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
34
35 #include <linux/types.h>
36 #include <linux/bitops.h>
37 #include <linux/module.h>
38 #include <linux/pci.h>
39 #include <linux/netdevice.h>
40 #include <linux/vmalloc.h>
41 #include <linux/string.h>
42 #include <linux/in.h>
43 #include <linux/ip.h>
44 #include <linux/tcp.h>
45 #include <linux/sctp.h>
46 #include <linux/ipv6.h>
47 #include <linux/slab.h>
48 #include <net/checksum.h>
49 #include <net/ip6_checksum.h>
50 #include <linux/ethtool.h>
51 #include <linux/if.h>
52 #include <linux/if_vlan.h>
53 #include <linux/prefetch.h>
54
55 #include "ixgbevf.h"
56
57 const char ixgbevf_driver_name[] = "ixgbevf";
58 static const char ixgbevf_driver_string[] =
59         "Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver";
60
61 #define DRV_VERSION "2.11.3-k"
62 const char ixgbevf_driver_version[] = DRV_VERSION;
63 static char ixgbevf_copyright[] =
64         "Copyright (c) 2009 - 2012 Intel Corporation.";
65
66 static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
67         [board_82599_vf] = &ixgbevf_82599_vf_info,
68         [board_X540_vf]  = &ixgbevf_X540_vf_info,
69 };
70
71 /* ixgbevf_pci_tbl - PCI Device ID Table
72  *
73  * Wildcard entries (PCI_ANY_ID) should come last
74  * Last entry must be all 0s
75  *
76  * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
77  *   Class, Class Mask, private data (not used) }
78  */
79 static DEFINE_PCI_DEVICE_TABLE(ixgbevf_pci_tbl) = {
80         {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF), board_82599_vf },
81         {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF), board_X540_vf },
82         /* required last entry */
83         {0, }
84 };
85 MODULE_DEVICE_TABLE(pci, ixgbevf_pci_tbl);
86
87 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
88 MODULE_DESCRIPTION("Intel(R) 82599 Virtual Function Driver");
89 MODULE_LICENSE("GPL");
90 MODULE_VERSION(DRV_VERSION);
91
92 #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
93 static int debug = -1;
94 module_param(debug, int, 0);
95 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
96
97 /* forward decls */
98 static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter);
99 static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector);
100 static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter);
101
102 static inline void ixgbevf_release_rx_desc(struct ixgbevf_ring *rx_ring,
103                                            u32 val)
104 {
105         rx_ring->next_to_use = val;
106
107         /*
108          * Force memory writes to complete before letting h/w
109          * know there are new descriptors to fetch.  (Only
110          * applicable for weak-ordered memory model archs,
111          * such as IA-64).
112          */
113         wmb();
114         writel(val, rx_ring->tail);
115 }
116
117 /**
118  * ixgbevf_set_ivar - set IVAR registers - maps interrupt causes to vectors
119  * @adapter: pointer to adapter struct
120  * @direction: 0 for Rx, 1 for Tx, -1 for other causes
121  * @queue: queue to map the corresponding interrupt to
122  * @msix_vector: the vector to map to the corresponding queue
123  */
124 static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction,
125                              u8 queue, u8 msix_vector)
126 {
127         u32 ivar, index;
128         struct ixgbe_hw *hw = &adapter->hw;
129         if (direction == -1) {
130                 /* other causes */
131                 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
132                 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
133                 ivar &= ~0xFF;
134                 ivar |= msix_vector;
135                 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
136         } else {
137                 /* tx or rx causes */
138                 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
139                 index = ((16 * (queue & 1)) + (8 * direction));
140                 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1));
141                 ivar &= ~(0xFF << index);
142                 ivar |= (msix_vector << index);
143                 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), ivar);
144         }
145 }
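/* Worked example (illustrative, not part of the original source): mapping Rx
 * queue 3 to MSI-X vector 2 via ixgbevf_set_ivar(adapter, 0, 3, 2) selects
 * register VTIVAR(3 >> 1) = VTIVAR(1) and byte index 16 * (3 & 1) + 8 * 0 = 16,
 * so bits [23:16] of VTIVAR(1) receive (2 | IXGBE_IVAR_ALLOC_VAL).
 */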
146
147 static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_ring *tx_ring,
148                                                struct ixgbevf_tx_buffer
149                                                *tx_buffer_info)
150 {
151         if (tx_buffer_info->dma) {
152                 if (tx_buffer_info->mapped_as_page)
153                         dma_unmap_page(tx_ring->dev,
154                                        tx_buffer_info->dma,
155                                        tx_buffer_info->length,
156                                        DMA_TO_DEVICE);
157                 else
158                         dma_unmap_single(tx_ring->dev,
159                                          tx_buffer_info->dma,
160                                          tx_buffer_info->length,
161                                          DMA_TO_DEVICE);
162                 tx_buffer_info->dma = 0;
163         }
164         if (tx_buffer_info->skb) {
165                 dev_kfree_skb_any(tx_buffer_info->skb);
166                 tx_buffer_info->skb = NULL;
167         }
168         tx_buffer_info->time_stamp = 0;
169         /* tx_buffer_info must be completely set up in the transmit path */
170 }
171
172 #define IXGBE_MAX_TXD_PWR       14
173 #define IXGBE_MAX_DATA_PER_TXD  (1 << IXGBE_MAX_TXD_PWR)
174
175 /* Tx Descriptors needed, worst case */
176 #define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD)
177 #define DESC_NEEDED (MAX_SKB_FRAGS + 4)
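/* Worked example (illustrative): with IXGBE_MAX_DATA_PER_TXD = 16384 bytes, a
 * 60000-byte fragment costs TXD_USE_COUNT(60000) = DIV_ROUND_UP(60000, 16384)
 * = 4 descriptors.  DESC_NEEDED budgets one slot per possible page fragment
 * plus four extra as headroom (e.g. head data and a context descriptor) when
 * deciding whether a queue has room for another transmit.
 */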
178
179 static void ixgbevf_tx_timeout(struct net_device *netdev);
180
181 /**
182  * ixgbevf_clean_tx_irq - Reclaim resources after transmit completes
183  * @q_vector: structure containing interrupt and ring information
184  * @tx_ring: tx ring to clean
185  **/
186 static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
187                                  struct ixgbevf_ring *tx_ring)
188 {
189         struct ixgbevf_adapter *adapter = q_vector->adapter;
190         union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
191         struct ixgbevf_tx_buffer *tx_buffer_info;
192         unsigned int i, count = 0;
193         unsigned int total_bytes = 0, total_packets = 0;
194
195         if (test_bit(__IXGBEVF_DOWN, &adapter->state))
196                 return true;
197
198         i = tx_ring->next_to_clean;
199         tx_buffer_info = &tx_ring->tx_buffer_info[i];
200         eop_desc = tx_buffer_info->next_to_watch;
201
202         do {
203                 bool cleaned = false;
204
205                 /* if next_to_watch is not set then there is no work pending */
206                 if (!eop_desc)
207                         break;
208
209                 /* prevent any other reads prior to eop_desc */
210                 read_barrier_depends();
211
212                 /* if DD is not set pending work has not been completed */
213                 if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
214                         break;
215
216                 /* clear next_to_watch to prevent false hangs */
217                 tx_buffer_info->next_to_watch = NULL;
218
219                 for ( ; !cleaned; count++) {
220                         struct sk_buff *skb;
221                         tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
222                         cleaned = (tx_desc == eop_desc);
223                         skb = tx_buffer_info->skb;
224
225                         if (cleaned && skb) {
226                                 unsigned int segs, bytecount;
227
228                                 /* gso_segs is currently only valid for tcp */
229                                 segs = skb_shinfo(skb)->gso_segs ?: 1;
230                                 /* multiply data chunks by size of headers */
231                                 bytecount = ((segs - 1) * skb_headlen(skb)) +
232                                             skb->len;
233                                 total_packets += segs;
234                                 total_bytes += bytecount;
235                         }
236
237                         ixgbevf_unmap_and_free_tx_resource(tx_ring,
238                                                            tx_buffer_info);
239
240                         tx_desc->wb.status = 0;
241
242                         i++;
243                         if (i == tx_ring->count)
244                                 i = 0;
245
246                         tx_buffer_info = &tx_ring->tx_buffer_info[i];
247                 }
248
249                 eop_desc = tx_buffer_info->next_to_watch;
250         } while (count < tx_ring->count);
251
252         tx_ring->next_to_clean = i;
253
254 #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
255         if (unlikely(count && netif_carrier_ok(tx_ring->netdev) &&
256                      (ixgbevf_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
257                 /* Make sure that anybody stopping the queue after this
258                  * sees the new next_to_clean.
259                  */
260                 smp_mb();
261                 if (__netif_subqueue_stopped(tx_ring->netdev,
262                                              tx_ring->queue_index) &&
263                     !test_bit(__IXGBEVF_DOWN, &adapter->state)) {
264                         netif_wake_subqueue(tx_ring->netdev,
265                                             tx_ring->queue_index);
266                         ++adapter->restart_queue;
267                 }
268         }
269
270         u64_stats_update_begin(&tx_ring->syncp);
271         tx_ring->total_bytes += total_bytes;
272         tx_ring->total_packets += total_packets;
273         u64_stats_update_end(&tx_ring->syncp);
274         q_vector->tx.total_bytes += total_bytes;
275         q_vector->tx.total_packets += total_packets;
276
277         return count < tx_ring->count;
278 }
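/* Worked example (illustrative arithmetic): for a completed TSO skb with
 * skb->len = 7266, skb_headlen() = 66 and gso_segs = 5, the accounting above
 * adds 5 to total_packets and (5 - 1) * 66 + 7266 = 7530 to total_bytes,
 * approximating the on-wire count since each segment repeats the headers.
 */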
279
280 /**
281  * ixgbevf_receive_skb - Send a completed packet up the stack
282  * @q_vector: structure containing interrupt and ring information
283  * @skb: packet to send up
284  * @status: hardware indication of status of receive
285  * @rx_desc: rx descriptor
286  **/
287 static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector,
288                                 struct sk_buff *skb, u8 status,
289                                 union ixgbe_adv_rx_desc *rx_desc)
290 {
291         struct ixgbevf_adapter *adapter = q_vector->adapter;
292         bool is_vlan = (status & IXGBE_RXD_STAT_VP);
293         u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);
294
295         if (is_vlan && test_bit(tag & VLAN_VID_MASK, adapter->active_vlans))
296                 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tag);
297
298         if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL))
299                 napi_gro_receive(&q_vector->napi, skb);
300         else
301                 netif_rx(skb);
302 }
303
304 /**
305  * ixgbevf_rx_skb - Helper function to determine proper Rx method
306  * @q_vector: structure containing interrupt and ring information
307  * @skb: packet to send up
308  * @status: hardware indication of status of receive
309  * @rx_desc: rx descriptor
310  **/
311 static void ixgbevf_rx_skb(struct ixgbevf_q_vector *q_vector,
312                            struct sk_buff *skb, u8 status,
313                            union ixgbe_adv_rx_desc *rx_desc)
314 {
315 #ifdef CONFIG_NET_RX_BUSY_POLL
316         skb_mark_napi_id(skb, &q_vector->napi);
317
318         if (ixgbevf_qv_busy_polling(q_vector)) {
319                 netif_receive_skb(skb);
320                 /* exit early if we busy polled */
321                 return;
322         }
323 #endif /* CONFIG_NET_RX_BUSY_POLL */
324
325         ixgbevf_receive_skb(q_vector, skb, status, rx_desc);
326 }
327
328 /**
329  * ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum
330  * @ring: pointer to Rx descriptor ring structure
331  * @status_err: hardware indication of status of receive
332  * @skb: skb currently being received and modified
333  **/
334 static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring,
335                                        u32 status_err, struct sk_buff *skb)
336 {
337         skb_checksum_none_assert(skb);
338
339         /* Rx csum disabled */
340         if (!(ring->netdev->features & NETIF_F_RXCSUM))
341                 return;
342
343         /* if IP and error */
344         if ((status_err & IXGBE_RXD_STAT_IPCS) &&
345             (status_err & IXGBE_RXDADV_ERR_IPE)) {
346                 ring->hw_csum_rx_error++;
347                 return;
348         }
349
350         if (!(status_err & IXGBE_RXD_STAT_L4CS))
351                 return;
352
353         if (status_err & IXGBE_RXDADV_ERR_TCPE) {
354                 ring->hw_csum_rx_error++;
355                 return;
356         }
357
358         /* It must be a TCP or UDP packet with a valid checksum */
359         skb->ip_summed = CHECKSUM_UNNECESSARY;
360         ring->hw_csum_rx_good++;
361 }
362
363 /**
364  * ixgbevf_alloc_rx_buffers - Replace used receive buffers; packet split
365  * @adapter: address of board private structure
 * @rx_ring: Rx descriptor ring to allocate buffers for
 * @cleaned_count: number of buffers to replace
366  **/
367 static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
368                                      struct ixgbevf_ring *rx_ring,
369                                      int cleaned_count)
370 {
371         struct pci_dev *pdev = adapter->pdev;
372         union ixgbe_adv_rx_desc *rx_desc;
373         struct ixgbevf_rx_buffer *bi;
374         unsigned int i = rx_ring->next_to_use;
375
376         bi = &rx_ring->rx_buffer_info[i];
377
378         while (cleaned_count--) {
379                 rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
380
381                 if (!bi->skb) {
382                         struct sk_buff *skb;
383
384                         skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
385                                                         rx_ring->rx_buf_len);
386                         if (!skb) {
387                                 adapter->alloc_rx_buff_failed++;
388                                 goto no_buffers;
389                         }
390                         bi->skb = skb;
391
392                         bi->dma = dma_map_single(&pdev->dev, skb->data,
393                                                  rx_ring->rx_buf_len,
394                                                  DMA_FROM_DEVICE);
395                         if (dma_mapping_error(&pdev->dev, bi->dma)) {
396                                 dev_kfree_skb(skb);
397                                 bi->skb = NULL;
398                                 dev_err(&pdev->dev, "RX DMA map failed\n");
399                                 break;
400                         }
401                 }
402                 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
403
404                 i++;
405                 if (i == rx_ring->count)
406                         i = 0;
407                 bi = &rx_ring->rx_buffer_info[i];
408         }
409
410 no_buffers:
411         if (rx_ring->next_to_use != i)
412                 ixgbevf_release_rx_desc(rx_ring, i);
413 }
414
415 static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter,
416                                              u32 qmask)
417 {
418         struct ixgbe_hw *hw = &adapter->hw;
419
420         IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, qmask);
421 }
422
423 static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
424                                 struct ixgbevf_ring *rx_ring,
425                                 int budget)
426 {
427         struct ixgbevf_adapter *adapter = q_vector->adapter;
428         struct pci_dev *pdev = adapter->pdev;
429         union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
430         struct ixgbevf_rx_buffer *rx_buffer_info, *next_buffer;
431         struct sk_buff *skb;
432         unsigned int i;
433         u32 len, staterr;
434         int cleaned_count = 0;
435         unsigned int total_rx_bytes = 0, total_rx_packets = 0;
436
437         i = rx_ring->next_to_clean;
438         rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
439         staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
440         rx_buffer_info = &rx_ring->rx_buffer_info[i];
441
442         while (staterr & IXGBE_RXD_STAT_DD) {
443                 if (!budget)
444                         break;
445                 budget--;
446
447                 rmb(); /* read descriptor and rx_buffer_info after status DD */
448                 len = le16_to_cpu(rx_desc->wb.upper.length);
449                 skb = rx_buffer_info->skb;
450                 prefetch(skb->data - NET_IP_ALIGN);
451                 rx_buffer_info->skb = NULL;
452
453                 if (rx_buffer_info->dma) {
454                         dma_unmap_single(&pdev->dev, rx_buffer_info->dma,
455                                          rx_ring->rx_buf_len,
456                                          DMA_FROM_DEVICE);
457                         rx_buffer_info->dma = 0;
458                         skb_put(skb, len);
459                 }
460
461                 i++;
462                 if (i == rx_ring->count)
463                         i = 0;
464
465                 next_rxd = IXGBEVF_RX_DESC(rx_ring, i);
466                 prefetch(next_rxd);
467                 cleaned_count++;
468
469                 next_buffer = &rx_ring->rx_buffer_info[i];
470
471                 if (!(staterr & IXGBE_RXD_STAT_EOP)) {
472                         skb->next = next_buffer->skb;
473                         IXGBE_CB(skb->next)->prev = skb;
474                         adapter->non_eop_descs++;
475                         goto next_desc;
476                 }
477
478                 /* we should not be chaining buffers; if we did, drop the skb */
479                 if (IXGBE_CB(skb)->prev) {
480                         do {
481                                 struct sk_buff *this = skb;
482                                 skb = IXGBE_CB(skb)->prev;
483                                 dev_kfree_skb(this);
484                         } while (skb);
485                         goto next_desc;
486                 }
487
488                 /* ERR_MASK will only have valid bits if EOP set */
489                 if (unlikely(staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK)) {
490                         dev_kfree_skb_irq(skb);
491                         goto next_desc;
492                 }
493
494                 ixgbevf_rx_checksum(rx_ring, staterr, skb);
495
496                 /* probably a little skewed due to removing CRC */
497                 total_rx_bytes += skb->len;
498                 total_rx_packets++;
499
500                 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
501
502                 /* Workaround hardware that can't do proper VEPA multicast
503                  * source pruning.
504                  */
505                 if ((skb->pkt_type & (PACKET_BROADCAST | PACKET_MULTICAST)) &&
506                     ether_addr_equal(adapter->netdev->dev_addr,
507                                      eth_hdr(skb)->h_source)) {
508                         dev_kfree_skb_irq(skb);
509                         goto next_desc;
510                 }
511
512                 ixgbevf_rx_skb(q_vector, skb, staterr, rx_desc);
513
514 next_desc:
515                 rx_desc->wb.upper.status_error = 0;
516
517                 /* return some buffers to hardware, one at a time is too slow */
518                 if (cleaned_count >= IXGBEVF_RX_BUFFER_WRITE) {
519                         ixgbevf_alloc_rx_buffers(adapter, rx_ring,
520                                                  cleaned_count);
521                         cleaned_count = 0;
522                 }
523
524                 /* use prefetched values */
525                 rx_desc = next_rxd;
526                 rx_buffer_info = &rx_ring->rx_buffer_info[i];
527
528                 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
529         }
530
531         rx_ring->next_to_clean = i;
532         cleaned_count = ixgbevf_desc_unused(rx_ring);
533
534         if (cleaned_count)
535                 ixgbevf_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
536
537         u64_stats_update_begin(&rx_ring->syncp);
538         rx_ring->total_packets += total_rx_packets;
539         rx_ring->total_bytes += total_rx_bytes;
540         u64_stats_update_end(&rx_ring->syncp);
541         q_vector->rx.total_packets += total_rx_packets;
542         q_vector->rx.total_bytes += total_rx_bytes;
543
544         return total_rx_packets;
545 }
546
547 /**
548  * ixgbevf_poll - NAPI polling callback
549  * @napi: napi struct with our device's info in it
550  * @budget: amount of work driver is allowed to do this pass, in packets
551  *
552  * This function will clean all rings associated with a
553  * q_vector.
554  **/
555 static int ixgbevf_poll(struct napi_struct *napi, int budget)
556 {
557         struct ixgbevf_q_vector *q_vector =
558                 container_of(napi, struct ixgbevf_q_vector, napi);
559         struct ixgbevf_adapter *adapter = q_vector->adapter;
560         struct ixgbevf_ring *ring;
561         int per_ring_budget;
562         bool clean_complete = true;
563
564         ixgbevf_for_each_ring(ring, q_vector->tx)
565                 clean_complete &= ixgbevf_clean_tx_irq(q_vector, ring);
566
567 #ifdef CONFIG_NET_RX_BUSY_POLL
568         if (!ixgbevf_qv_lock_napi(q_vector))
569                 return budget;
570 #endif
571
572         /* attempt to distribute budget to each queue fairly, but don't allow
573          * the budget to go below 1 because we'll exit polling */
574         if (q_vector->rx.count > 1)
575                 per_ring_budget = max(budget/q_vector->rx.count, 1);
576         else
577                 per_ring_budget = budget;
578
579         adapter->flags |= IXGBE_FLAG_IN_NETPOLL;
580         ixgbevf_for_each_ring(ring, q_vector->rx)
581                 clean_complete &= (ixgbevf_clean_rx_irq(q_vector, ring,
582                                                         per_ring_budget)
583                                    < per_ring_budget);
584         adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL;
585
586 #ifdef CONFIG_NET_RX_BUSY_POLL
587         ixgbevf_qv_unlock_napi(q_vector);
588 #endif
589
590         /* If all work not completed, return budget and keep polling */
591         if (!clean_complete)
592                 return budget;
593         /* all work done, exit the polling mode */
594         napi_complete(napi);
595         if (adapter->rx_itr_setting & 1)
596                 ixgbevf_set_itr(q_vector);
597         if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
598                 ixgbevf_irq_enable_queues(adapter,
599                                           1 << q_vector->v_idx);
600
601         return 0;
602 }
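/* Worked example (illustrative): with a NAPI budget of 64 and
 * q_vector->rx.count = 3, per_ring_budget = max(64 / 3, 1) = 21; each Rx ring
 * may clean up to 21 packets this poll, and clean_complete stays true only if
 * every ring cleaned fewer packets than that share.
 */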
603
604 /**
605  * ixgbevf_write_eitr - write VTEITR register in hardware specific way
606  * @q_vector: structure containing interrupt and ring information
607  */
608 void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector)
609 {
610         struct ixgbevf_adapter *adapter = q_vector->adapter;
611         struct ixgbe_hw *hw = &adapter->hw;
612         int v_idx = q_vector->v_idx;
613         u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR;
614
615         /*
616          * set the WDIS bit to not clear the timer bits and cause an
617          * immediate assertion of the interrupt
618          */
619         itr_reg |= IXGBE_EITR_CNT_WDIS;
620
621         IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg);
622 }
623
624 #ifdef CONFIG_NET_RX_BUSY_POLL
625 /* must be called with local_bh_disable()d */
626 static int ixgbevf_busy_poll_recv(struct napi_struct *napi)
627 {
628         struct ixgbevf_q_vector *q_vector =
629                         container_of(napi, struct ixgbevf_q_vector, napi);
630         struct ixgbevf_adapter *adapter = q_vector->adapter;
631         struct ixgbevf_ring  *ring;
632         int found = 0;
633
634         if (test_bit(__IXGBEVF_DOWN, &adapter->state))
635                 return LL_FLUSH_FAILED;
636
637         if (!ixgbevf_qv_lock_poll(q_vector))
638                 return LL_FLUSH_BUSY;
639
640         ixgbevf_for_each_ring(ring, q_vector->rx) {
641                 found = ixgbevf_clean_rx_irq(q_vector, ring, 4);
642 #ifdef BP_EXTENDED_STATS
643                 if (found)
644                         ring->bp_cleaned += found;
645                 else
646                         ring->bp_misses++;
647 #endif
648                 if (found)
649                         break;
650         }
651
652         ixgbevf_qv_unlock_poll(q_vector);
653
654         return found;
655 }
656 #endif /* CONFIG_NET_RX_BUSY_POLL */
657
658 /**
659  * ixgbevf_configure_msix - Configure MSI-X hardware
660  * @adapter: board private structure
661  *
662  * ixgbevf_configure_msix sets up the hardware to properly generate MSI-X
663  * interrupts.
664  **/
665 static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter)
666 {
667         struct ixgbevf_q_vector *q_vector;
668         int q_vectors, v_idx;
669
670         q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
671         adapter->eims_enable_mask = 0;
672
673         /*
674          * Populate the IVAR table and set the ITR values to the
675          * corresponding register.
676          */
677         for (v_idx = 0; v_idx < q_vectors; v_idx++) {
678                 struct ixgbevf_ring *ring;
679                 q_vector = adapter->q_vector[v_idx];
680
681                 ixgbevf_for_each_ring(ring, q_vector->rx)
682                         ixgbevf_set_ivar(adapter, 0, ring->reg_idx, v_idx);
683
684                 ixgbevf_for_each_ring(ring, q_vector->tx)
685                         ixgbevf_set_ivar(adapter, 1, ring->reg_idx, v_idx);
686
687                 if (q_vector->tx.ring && !q_vector->rx.ring) {
688                         /* tx only vector */
689                         if (adapter->tx_itr_setting == 1)
690                                 q_vector->itr = IXGBE_10K_ITR;
691                         else
692                                 q_vector->itr = adapter->tx_itr_setting;
693                 } else {
694                         /* rx or rx/tx vector */
695                         if (adapter->rx_itr_setting == 1)
696                                 q_vector->itr = IXGBE_20K_ITR;
697                         else
698                                 q_vector->itr = adapter->rx_itr_setting;
699                 }
700
701                 /* add q_vector eims value to global eims_enable_mask */
702                 adapter->eims_enable_mask |= 1 << v_idx;
703
704                 ixgbevf_write_eitr(q_vector);
705         }
706
707         ixgbevf_set_ivar(adapter, -1, 1, v_idx);
708         /* setup eims_other and add value to global eims_enable_mask */
709         adapter->eims_other = 1 << v_idx;
710         adapter->eims_enable_mask |= adapter->eims_other;
711 }
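/* Worked example (illustrative): with two queue vectors (v_idx 0 and 1) plus
 * the non-queue vector, the loop sets eims_enable_mask bits 0 and 1, the final
 * ixgbevf_set_ivar(adapter, -1, 1, 2) steers mailbox/other causes to vector 2,
 * and eims_other = 1 << 2 = 0x4, giving eims_enable_mask = 0x7.
 */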
712
713 enum latency_range {
714         lowest_latency = 0,
715         low_latency = 1,
716         bulk_latency = 2,
717         latency_invalid = 255
718 };
719
720 /**
721  * ixgbevf_update_itr - update the dynamic ITR value based on statistics
722  * @q_vector: structure containing interrupt and ring information
723  * @ring_container: structure containing ring performance data
724  *
725  *      Stores a new ITR value based on packets and byte
726  *      counts during the last interrupt.  The advantage of per interrupt
727  *      computation is faster updates and more accurate ITR for the current
728  *      traffic pattern.  Constants in this function were computed
729  *      based on theoretical maximum wire speed and thresholds were set based
730  *      on testing data as well as attempting to minimize response time
731  *      while increasing bulk throughput.
732  **/
733 static void ixgbevf_update_itr(struct ixgbevf_q_vector *q_vector,
734                                struct ixgbevf_ring_container *ring_container)
735 {
736         int bytes = ring_container->total_bytes;
737         int packets = ring_container->total_packets;
738         u32 timepassed_us;
739         u64 bytes_perint;
740         u8 itr_setting = ring_container->itr;
741
742         if (packets == 0)
743                 return;
744
745         /* simple throttle rate management
746          *    0-20MB/s lowest (100000 ints/s)
747          *   20-100MB/s low   (20000 ints/s)
748          *  100-1249MB/s bulk (8000 ints/s)
749          */
750         /* what was last interrupt timeslice? */
751         timepassed_us = q_vector->itr >> 2;
752         bytes_perint = bytes / timepassed_us; /* bytes/usec */
753
754         switch (itr_setting) {
755         case lowest_latency:
756                 if (bytes_perint > 10)
757                         itr_setting = low_latency;
758                 break;
759         case low_latency:
760                 if (bytes_perint > 20)
761                         itr_setting = bulk_latency;
762                 else if (bytes_perint <= 10)
763                         itr_setting = lowest_latency;
764                 break;
765         case bulk_latency:
766                 if (bytes_perint <= 20)
767                         itr_setting = low_latency;
768                 break;
769         }
770
771         /* clear work counters since we have the values we need */
772         ring_container->total_bytes = 0;
773         ring_container->total_packets = 0;
774
775         /* write updated itr to ring container */
776         ring_container->itr = itr_setting;
777 }
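/* Worked example (illustrative): at the 20000 ints/s setting the timeslice
 * (q_vector->itr >> 2) is roughly 50 usec, so 2500 bytes received in one
 * interrupt gives bytes_perint = 2500 / 50 = 50; that exceeds the low_latency
 * threshold of 20 and the container is promoted to bulk_latency.
 */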
778
779 static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector)
780 {
781         u32 new_itr = q_vector->itr;
782         u8 current_itr;
783
784         ixgbevf_update_itr(q_vector, &q_vector->tx);
785         ixgbevf_update_itr(q_vector, &q_vector->rx);
786
787         current_itr = max(q_vector->rx.itr, q_vector->tx.itr);
788
789         switch (current_itr) {
790         /* counts and packets in update_itr are dependent on these numbers */
791         case lowest_latency:
792                 new_itr = IXGBE_100K_ITR;
793                 break;
794         case low_latency:
795                 new_itr = IXGBE_20K_ITR;
796                 break;
797         case bulk_latency:
798         default:
799                 new_itr = IXGBE_8K_ITR;
800                 break;
801         }
802
803         if (new_itr != q_vector->itr) {
804                 /* do an exponential smoothing */
805                 new_itr = (10 * new_itr * q_vector->itr) /
806                           ((9 * new_itr) + q_vector->itr);
807
808                 /* save the algorithm value here */
809                 q_vector->itr = new_itr;
810
811                 ixgbevf_write_eitr(q_vector);
812         }
813 }
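/* Worked example (illustrative, hypothetical register values): smoothing from
 * a current q_vector->itr of 200 toward a new_itr of 500 gives
 * (10 * 500 * 200) / ((9 * 500) + 200) = 1000000 / 4700 = 212, so the
 * interrupt rate ramps toward the bulk setting instead of jumping to it.
 */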
814
815 static irqreturn_t ixgbevf_msix_other(int irq, void *data)
816 {
817         struct ixgbevf_adapter *adapter = data;
818         struct ixgbe_hw *hw = &adapter->hw;
819
820         hw->mac.get_link_status = 1;
821
822         if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
823                 mod_timer(&adapter->watchdog_timer, jiffies);
824
825         IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_other);
826
827         return IRQ_HANDLED;
828 }
829
830 /**
831  * ixgbevf_msix_clean_rings - MSI-X queue interrupt handler (schedules NAPI)
832  * @irq: unused
833  * @data: pointer to our q_vector struct for this interrupt vector
834  **/
835 static irqreturn_t ixgbevf_msix_clean_rings(int irq, void *data)
836 {
837         struct ixgbevf_q_vector *q_vector = data;
838
839         /* EIAM disabled interrupts (on this vector) for us */
840         if (q_vector->rx.ring || q_vector->tx.ring)
841                 napi_schedule(&q_vector->napi);
842
843         return IRQ_HANDLED;
844 }
845
846 static inline void map_vector_to_rxq(struct ixgbevf_adapter *a, int v_idx,
847                                      int r_idx)
848 {
849         struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];
850
851         a->rx_ring[r_idx].next = q_vector->rx.ring;
852         q_vector->rx.ring = &a->rx_ring[r_idx];
853         q_vector->rx.count++;
854 }
855
856 static inline void map_vector_to_txq(struct ixgbevf_adapter *a, int v_idx,
857                                      int t_idx)
858 {
859         struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];
860
861         a->tx_ring[t_idx].next = q_vector->tx.ring;
862         q_vector->tx.ring = &a->tx_ring[t_idx];
863         q_vector->tx.count++;
864 }
865
866 /**
867  * ixgbevf_map_rings_to_vectors - Maps descriptor rings to vectors
868  * @adapter: board private structure to initialize
869  *
870  * This function maps descriptor rings to the queue-specific vectors
871  * we were allotted through the MSI-X enabling code.  Ideally, we'd have
872  * one vector per ring/queue, but on a constrained vector budget, we
873  * group the rings as "efficiently" as possible.  You would add new
874  * mapping configurations in here.
875  **/
876 static int ixgbevf_map_rings_to_vectors(struct ixgbevf_adapter *adapter)
877 {
878         int q_vectors;
879         int v_start = 0;
880         int rxr_idx = 0, txr_idx = 0;
881         int rxr_remaining = adapter->num_rx_queues;
882         int txr_remaining = adapter->num_tx_queues;
883         int i, j;
884         int rqpv, tqpv;
885         int err = 0;
886
887         q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
888
889         /*
890          * The ideal configuration...
891          * We have enough vectors to map one per queue.
892          */
893         if (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
894                 for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
895                         map_vector_to_rxq(adapter, v_start, rxr_idx);
896
897                 for (; txr_idx < txr_remaining; v_start++, txr_idx++)
898                         map_vector_to_txq(adapter, v_start, txr_idx);
899                 goto out;
900         }
901
902         /*
903          * If we don't have enough vectors for a 1-to-1
904          * mapping, we'll have to group them so there are
905          * multiple queues per vector.
906          */
907         /* Re-adjusting *qpv takes care of the remainder. */
908         for (i = v_start; i < q_vectors; i++) {
909                 rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i);
910                 for (j = 0; j < rqpv; j++) {
911                         map_vector_to_rxq(adapter, i, rxr_idx);
912                         rxr_idx++;
913                         rxr_remaining--;
914                 }
915         }
916         for (i = v_start; i < q_vectors; i++) {
917                 tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i);
918                 for (j = 0; j < tqpv; j++) {
919                         map_vector_to_txq(adapter, i, txr_idx);
920                         txr_idx++;
921                         txr_remaining--;
922                 }
923         }
924
925 out:
926         return err;
927 }
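/* Worked example (illustrative): with 4 Rx and 4 Tx rings but only 2 queue
 * vectors, the fallback path computes rqpv = DIV_ROUND_UP(4, 2) = 2 for
 * vector 0 (rx 0-1) and DIV_ROUND_UP(2, 1) = 2 for vector 1 (rx 2-3), then
 * repeats the same split for the Tx rings.
 */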
928
929 /**
930  * ixgbevf_request_msix_irqs - Initialize MSI-X interrupts
931  * @adapter: board private structure
932  *
933  * ixgbevf_request_msix_irqs allocates MSI-X vectors and requests
934  * interrupts from the kernel.
935  **/
936 static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter)
937 {
938         struct net_device *netdev = adapter->netdev;
939         int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
940         int vector, err;
941         int ri = 0, ti = 0;
942
943         for (vector = 0; vector < q_vectors; vector++) {
944                 struct ixgbevf_q_vector *q_vector = adapter->q_vector[vector];
945                 struct msix_entry *entry = &adapter->msix_entries[vector];
946
947                 if (q_vector->tx.ring && q_vector->rx.ring) {
948                         snprintf(q_vector->name, sizeof(q_vector->name) - 1,
949                                  "%s-%s-%d", netdev->name, "TxRx", ri++);
950                         ti++;
951                 } else if (q_vector->rx.ring) {
952                         snprintf(q_vector->name, sizeof(q_vector->name) - 1,
953                                  "%s-%s-%d", netdev->name, "rx", ri++);
954                 } else if (q_vector->tx.ring) {
955                         snprintf(q_vector->name, sizeof(q_vector->name) - 1,
956                                  "%s-%s-%d", netdev->name, "tx", ti++);
957                 } else {
958                         /* skip this unused q_vector */
959                         continue;
960                 }
961                 err = request_irq(entry->vector, &ixgbevf_msix_clean_rings, 0,
962                                   q_vector->name, q_vector);
963                 if (err) {
964                         hw_dbg(&adapter->hw,
965                                "request_irq failed for MSI-X interrupt, Error: %d\n",
966                                err);
967                         goto free_queue_irqs;
968                 }
969         }
970
971         err = request_irq(adapter->msix_entries[vector].vector,
972                           &ixgbevf_msix_other, 0, netdev->name, adapter);
973         if (err) {
974                 hw_dbg(&adapter->hw,
975                        "request_irq for msix_other failed: %d\n", err);
976                 goto free_queue_irqs;
977         }
978
979         return 0;
980
981 free_queue_irqs:
982         while (vector) {
983                 vector--;
984                 free_irq(adapter->msix_entries[vector].vector,
985                          adapter->q_vector[vector]);
986         }
987         /* This failure is non-recoverable - it indicates the system is
988          * out of MSIX vector resources and the VF driver cannot run
989          * without them.  Set the number of msix vectors to zero
990          * indicating that not enough can be allocated.  The error
991          * will be returned to the user indicating device open failed.
992          * Any further attempts to force the driver to open will also
993          * fail.  The only way to recover is to unload the driver and
994          * reload it again.  If the system has recovered some MSIX
995          * vectors then it may succeed.
996          */
997         adapter->num_msix_vectors = 0;
998         return err;
999 }
1000
1001 static inline void ixgbevf_reset_q_vectors(struct ixgbevf_adapter *adapter)
1002 {
1003         int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1004
1005         for (i = 0; i < q_vectors; i++) {
1006                 struct ixgbevf_q_vector *q_vector = adapter->q_vector[i];
1007                 q_vector->rx.ring = NULL;
1008                 q_vector->tx.ring = NULL;
1009                 q_vector->rx.count = 0;
1010                 q_vector->tx.count = 0;
1011         }
1012 }
1013
1014 /**
1015  * ixgbevf_request_irq - initialize interrupts
1016  * @adapter: board private structure
1017  *
1018  * Attempts to configure interrupts using the best available
1019  * capabilities of the hardware and kernel.
1020  **/
1021 static int ixgbevf_request_irq(struct ixgbevf_adapter *adapter)
1022 {
1023         int err = 0;
1024
1025         err = ixgbevf_request_msix_irqs(adapter);
1026
1027         if (err)
1028                 hw_dbg(&adapter->hw,
1029                        "request_irq failed, Error %d\n", err);
1030
1031         return err;
1032 }
1033
1034 static void ixgbevf_free_irq(struct ixgbevf_adapter *adapter)
1035 {
1036         int i, q_vectors;
1037
1038         q_vectors = adapter->num_msix_vectors;
1039         i = q_vectors - 1;
1040
1041         free_irq(adapter->msix_entries[i].vector, adapter);
1042         i--;
1043
1044         for (; i >= 0; i--) {
1045                 /* free only the irqs that were actually requested */
1046                 if (!adapter->q_vector[i]->rx.ring &&
1047                     !adapter->q_vector[i]->tx.ring)
1048                         continue;
1049
1050                 free_irq(adapter->msix_entries[i].vector,
1051                          adapter->q_vector[i]);
1052         }
1053
1054         ixgbevf_reset_q_vectors(adapter);
1055 }
1056
1057 /**
1058  * ixgbevf_irq_disable - Mask off interrupt generation on the NIC
1059  * @adapter: board private structure
1060  **/
1061 static inline void ixgbevf_irq_disable(struct ixgbevf_adapter *adapter)
1062 {
1063         struct ixgbe_hw *hw = &adapter->hw;
1064         int i;
1065
1066         IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, 0);
1067         IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, ~0);
1068         IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, 0);
1069
1070         IXGBE_WRITE_FLUSH(hw);
1071
1072         for (i = 0; i < adapter->num_msix_vectors; i++)
1073                 synchronize_irq(adapter->msix_entries[i].vector);
1074 }
1075
1076 /**
1077  * ixgbevf_irq_enable - Enable default interrupt generation settings
1078  * @adapter: board private structure
1079  **/
1080 static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter)
1081 {
1082         struct ixgbe_hw *hw = &adapter->hw;
1083
1084         IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, adapter->eims_enable_mask);
1085         IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, adapter->eims_enable_mask);
1086         IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_enable_mask);
1087 }
1088
1089 /**
1090  * ixgbevf_configure_tx - Configure 82599 VF Transmit Unit after Reset
1091  * @adapter: board private structure
1092  *
1093  * Configure the Tx unit of the MAC after a reset.
1094  **/
1095 static void ixgbevf_configure_tx(struct ixgbevf_adapter *adapter)
1096 {
1097         u64 tdba;
1098         struct ixgbe_hw *hw = &adapter->hw;
1099         u32 i, j, tdlen, txctrl;
1100
1101         /* Setup the HW Tx Head and Tail descriptor pointers */
1102         for (i = 0; i < adapter->num_tx_queues; i++) {
1103                 struct ixgbevf_ring *ring = &adapter->tx_ring[i];
1104                 j = ring->reg_idx;
1105                 tdba = ring->dma;
1106                 tdlen = ring->count * sizeof(union ixgbe_adv_tx_desc);
1107                 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(j),
1108                                 (tdba & DMA_BIT_MASK(32)));
1109                 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(j), (tdba >> 32));
1110                 IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(j), tdlen);
1111                 IXGBE_WRITE_REG(hw, IXGBE_VFTDH(j), 0);
1112                 IXGBE_WRITE_REG(hw, IXGBE_VFTDT(j), 0);
1113                 ring->tail = hw->hw_addr + IXGBE_VFTDT(j);
1114                 ring->next_to_clean = 0;
1115                 ring->next_to_use = 0;
1116                 /* Disable Tx Head Writeback RO bit, since this hoses
1117                  * bookkeeping if things aren't delivered in order.
1118                  */
1119                 txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(j));
1120                 txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
1121                 IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(j), txctrl);
1122         }
1123 }
1124
1125 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
1126
1127 static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index)
1128 {
1129         struct ixgbevf_ring *rx_ring;
1130         struct ixgbe_hw *hw = &adapter->hw;
1131         u32 srrctl;
1132
1133         rx_ring = &adapter->rx_ring[index];
1134
1135         srrctl = IXGBE_SRRCTL_DROP_EN;
1136
1137         srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
1138
1139         srrctl |= ALIGN(rx_ring->rx_buf_len, 1024) >>
1140                   IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1141
1142         IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl);
1143 }
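/* Worked example (illustrative, assuming the usual 1 KB BSIZEPKT granularity):
 * with rx_buf_len = 2048, ALIGN(2048, 1024) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT
 * encodes 2 KB receive buffers, combined here with DROP_EN and the advanced
 * one-buffer descriptor format.
 */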
1144
1145 static void ixgbevf_setup_psrtype(struct ixgbevf_adapter *adapter)
1146 {
1147         struct ixgbe_hw *hw = &adapter->hw;
1148
1149         /* PSRTYPE must be initialized in 82599 */
1150         u32 psrtype = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR |
1151                       IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR |
1152                       IXGBE_PSRTYPE_L2HDR;
1153
1154         if (adapter->num_rx_queues > 1)
1155                 psrtype |= 1 << 29;
1156
1157         IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
1158 }
1159
1160 static void ixgbevf_set_rx_buffer_len(struct ixgbevf_adapter *adapter)
1161 {
1162         struct ixgbe_hw *hw = &adapter->hw;
1163         struct net_device *netdev = adapter->netdev;
1164         int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
1165         int i;
1166         u16 rx_buf_len;
1167
1168         /* notify the PF of our intent to use this size of frame */
1169         ixgbevf_rlpml_set_vf(hw, max_frame);
1170
1171         /* PF will allow an extra 4 bytes past this size for VLAN tagged frames */
1172         max_frame += VLAN_HLEN;
1173
1174         /*
1175          * Allocate buffer sizes that fit well into 32K and
1176          * take into account max frame size of 9.5K
1177          */
1178         if ((hw->mac.type == ixgbe_mac_X540_vf) &&
1179             (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE))
1180                 rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
1181         else if (max_frame <= IXGBEVF_RXBUFFER_2K)
1182                 rx_buf_len = IXGBEVF_RXBUFFER_2K;
1183         else if (max_frame <= IXGBEVF_RXBUFFER_4K)
1184                 rx_buf_len = IXGBEVF_RXBUFFER_4K;
1185         else if (max_frame <= IXGBEVF_RXBUFFER_8K)
1186                 rx_buf_len = IXGBEVF_RXBUFFER_8K;
1187         else
1188                 rx_buf_len = IXGBEVF_RXBUFFER_10K;
1189
1190         for (i = 0; i < adapter->num_rx_queues; i++)
1191                 adapter->rx_ring[i].rx_buf_len = rx_buf_len;
1192 }
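/* Worked example (illustrative): for a 1500-byte MTU, max_frame starts at
 * 1500 + ETH_HLEN + ETH_FCS_LEN = 1518 and becomes 1522 after adding
 * VLAN_HLEN; on an X540 VF that still fits MAXIMUM_ETHERNET_VLAN_SIZE, so
 * rx_buf_len stays at the ~1.5 KB size, while an 82599 VF falls through to
 * IXGBEVF_RXBUFFER_2K.
 */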
1193
1194 /**
1195  * ixgbevf_configure_rx - Configure 82599 VF Receive Unit after Reset
1196  * @adapter: board private structure
1197  *
1198  * Configure the Rx unit of the MAC after a reset.
1199  **/
1200 static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
1201 {
1202         u64 rdba;
1203         struct ixgbe_hw *hw = &adapter->hw;
1204         int i, j;
1205         u32 rdlen;
1206
1207         ixgbevf_setup_psrtype(adapter);
1208
1209         /* set_rx_buffer_len must be called before ring initialization */
1210         ixgbevf_set_rx_buffer_len(adapter);
1211
1212         /* Setup the HW Rx Head and Tail Descriptor Pointers and
1213          * the Base and Length of the Rx Descriptor Ring */
1214         for (i = 0; i < adapter->num_rx_queues; i++) {
1215                 struct ixgbevf_ring *ring = &adapter->rx_ring[i];
1216                 rdba = ring->dma;
1217                 j = ring->reg_idx;
1218                 rdlen = ring->count * sizeof(union ixgbe_adv_rx_desc);
1219                 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(j),
1220                                 (rdba & DMA_BIT_MASK(32)));
1221                 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(j), (rdba >> 32));
1222                 IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(j), rdlen);
1223                 IXGBE_WRITE_REG(hw, IXGBE_VFRDH(j), 0);
1224                 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(j), 0);
1225                 ring->tail = hw->hw_addr + IXGBE_VFRDT(j);
1226                 ring->next_to_clean = 0;
1227                 ring->next_to_use = 0;
1228
1229                 ixgbevf_configure_srrctl(adapter, j);
1230         }
1231 }
1232
1233 static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev,
1234                                    __be16 proto, u16 vid)
1235 {
1236         struct ixgbevf_adapter *adapter = netdev_priv(netdev);
1237         struct ixgbe_hw *hw = &adapter->hw;
1238         int err;
1239
1240         spin_lock_bh(&adapter->mbx_lock);
1241
1242         /* add VID to filter table */
1243         err = hw->mac.ops.set_vfta(hw, vid, 0, true);
1244
1245         spin_unlock_bh(&adapter->mbx_lock);
1246
1247         /* translate error return types so error makes sense */
1248         if (err == IXGBE_ERR_MBX)
1249                 return -EIO;
1250
1251         if (err == IXGBE_ERR_INVALID_ARGUMENT)
1252                 return -EACCES;
1253
1254         set_bit(vid, adapter->active_vlans);
1255
1256         return err;
1257 }
1258
1259 static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev,
1260                                     __be16 proto, u16 vid)
1261 {
1262         struct ixgbevf_adapter *adapter = netdev_priv(netdev);
1263         struct ixgbe_hw *hw = &adapter->hw;
1264         int err = -EOPNOTSUPP;
1265
1266         spin_lock_bh(&adapter->mbx_lock);
1267
1268         /* remove VID from filter table */
1269         err = hw->mac.ops.set_vfta(hw, vid, 0, false);
1270
1271         spin_unlock_bh(&adapter->mbx_lock);
1272
1273         clear_bit(vid, adapter->active_vlans);
1274
1275         return err;
1276 }
1277
1278 static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter)
1279 {
1280         u16 vid;
1281
1282         for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
1283                 ixgbevf_vlan_rx_add_vid(adapter->netdev,
1284                                         htons(ETH_P_8021Q), vid);
1285 }
1286
1287 static int ixgbevf_write_uc_addr_list(struct net_device *netdev)
1288 {
1289         struct ixgbevf_adapter *adapter = netdev_priv(netdev);
1290         struct ixgbe_hw *hw = &adapter->hw;
1291         int count = 0;
1292
1293         if ((netdev_uc_count(netdev)) > 10) {
1294                 pr_err("Too many unicast filters - No Space\n");
1295                 return -ENOSPC;
1296         }
1297
1298         if (!netdev_uc_empty(netdev)) {
1299                 struct netdev_hw_addr *ha;
1300                 netdev_for_each_uc_addr(ha, netdev) {
1301                         hw->mac.ops.set_uc_addr(hw, ++count, ha->addr);
1302                         udelay(200);
1303                 }
1304         } else {
1305                 /*
1306                  * If the list is empty then send message to PF driver to
1307                  * clear all macvlans on this VF.
1308                  */
1309                 hw->mac.ops.set_uc_addr(hw, 0, NULL);
1310         }
1311
1312         return count;
1313 }
1314
1315 /**
1316  * ixgbevf_set_rx_mode - Multicast and unicast set
1317  * @netdev: network interface device structure
1318  *
1319  * The set_rx_mode entry point is called whenever the multicast address
1320  * list, unicast address list or the network interface flags are updated.
1321  * This routine is responsible for configuring the hardware for proper
1322  * multicast mode and configuring requested unicast filters.
1323  **/
1324 static void ixgbevf_set_rx_mode(struct net_device *netdev)
1325 {
1326         struct ixgbevf_adapter *adapter = netdev_priv(netdev);
1327         struct ixgbe_hw *hw = &adapter->hw;
1328
1329         spin_lock_bh(&adapter->mbx_lock);
1330
1331         /* reprogram multicast list */
1332         hw->mac.ops.update_mc_addr_list(hw, netdev);
1333
1334         ixgbevf_write_uc_addr_list(netdev);
1335
1336         spin_unlock_bh(&adapter->mbx_lock);
1337 }
1338
1339 static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
1340 {
1341         int q_idx;
1342         struct ixgbevf_q_vector *q_vector;
1343         int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1344
1345         for (q_idx = 0; q_idx < q_vectors; q_idx++) {
1346                 q_vector = adapter->q_vector[q_idx];
1347 #ifdef CONFIG_NET_RX_BUSY_POLL
1348                 ixgbevf_qv_init_lock(adapter->q_vector[q_idx]);
1349 #endif
1350                 napi_enable(&q_vector->napi);
1351         }
1352 }
1353
1354 static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter)
1355 {
1356         int q_idx;
1357         struct ixgbevf_q_vector *q_vector;
1358         int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1359
1360         for (q_idx = 0; q_idx < q_vectors; q_idx++) {
1361                 q_vector = adapter->q_vector[q_idx];
1362                 napi_disable(&q_vector->napi);
1363 #ifdef CONFIG_NET_RX_BUSY_POLL
1364                 while (!ixgbevf_qv_disable(adapter->q_vector[q_idx])) {
1365                         pr_info("QV %d locked\n", q_idx);
1366                         usleep_range(1000, 20000);
1367                 }
1368 #endif /* CONFIG_NET_RX_BUSY_POLL */
1369         }
1370 }
1371
1372 static int ixgbevf_configure_dcb(struct ixgbevf_adapter *adapter)
1373 {
1374         struct ixgbe_hw *hw = &adapter->hw;
1375         unsigned int def_q = 0;
1376         unsigned int num_tcs = 0;
1377         unsigned int num_rx_queues = 1;
1378         int err;
1379
1380         spin_lock_bh(&adapter->mbx_lock);
1381
1382         /* fetch queue configuration from the PF */
1383         err = ixgbevf_get_queues(hw, &num_tcs, &def_q);
1384
1385         spin_unlock_bh(&adapter->mbx_lock);
1386
1387         if (err)
1388                 return err;
1389
1390         if (num_tcs > 1) {
1391                 /* update default Tx ring register index */
1392                 adapter->tx_ring[0].reg_idx = def_q;
1393
1394                 /* we need as many queues as traffic classes */
1395                 num_rx_queues = num_tcs;
1396         }
1397
1398         /* if we have a bad config abort request queue reset */
1399         if (adapter->num_rx_queues != num_rx_queues) {
1400                 /* force mailbox timeout to prevent further messages */
1401                 hw->mbx.timeout = 0;
1402
1403                 /* wait for watchdog to come around and bail us out */
1404                 adapter->flags |= IXGBEVF_FLAG_QUEUE_RESET_REQUESTED;
1405         }
1406
1407         return 0;
1408 }
1409
1410 static void ixgbevf_configure(struct ixgbevf_adapter *adapter)
1411 {
1412         struct net_device *netdev = adapter->netdev;
1413         int i;
1414
1415         ixgbevf_configure_dcb(adapter);
1416
1417         ixgbevf_set_rx_mode(netdev);
1418
1419         ixgbevf_restore_vlan(adapter);
1420
1421         ixgbevf_configure_tx(adapter);
1422         ixgbevf_configure_rx(adapter);
1423         for (i = 0; i < adapter->num_rx_queues; i++) {
1424                 struct ixgbevf_ring *ring = &adapter->rx_ring[i];
1425                 ixgbevf_alloc_rx_buffers(adapter, ring,
1426                                          ixgbevf_desc_unused(ring));
1427         }
1428 }
1429
1430 #define IXGBEVF_MAX_RX_DESC_POLL 10
1431 static void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
1432                                          int rxr)
1433 {
1434         struct ixgbe_hw *hw = &adapter->hw;
1435         int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
1436         u32 rxdctl;
1437         int j = adapter->rx_ring[rxr].reg_idx;
1438
1439         do {
1440                 usleep_range(1000, 2000);
1441                 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
1442         } while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE));
1443
1444         if (!wait_loop)
1445                 hw_dbg(hw, "RXDCTL.ENABLE queue %d not set while polling\n",
1446                        rxr);
1447
1448         ixgbevf_release_rx_desc(&adapter->rx_ring[rxr],
1449                                 (adapter->rx_ring[rxr].count - 1));
1450 }
1451
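/* Clear RXDCTL.ENABLE for the ring and poll in 10us steps until the
 * hardware confirms the Rx queue is disabled.
 */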
1452 static void ixgbevf_disable_rx_queue(struct ixgbevf_adapter *adapter,
1453                                      struct ixgbevf_ring *ring)
1454 {
1455         struct ixgbe_hw *hw = &adapter->hw;
1456         int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
1457         u32 rxdctl;
1458         u8 reg_idx = ring->reg_idx;
1459
1460         rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
1461         rxdctl &= ~IXGBE_RXDCTL_ENABLE;
1462
1463         /* write value back with RXDCTL.ENABLE bit cleared */
1464         IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);
1465
1466         /* the hardware may take up to 100us to really disable the rx queue */
1467         do {
1468                 udelay(10);
1469                 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
1470         } while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE));
1471
1472         if (!wait_loop)
1473                 hw_dbg(hw, "RXDCTL.ENABLE queue %d not cleared while polling\n",
1474                        reg_idx);
1475 }
1476
1477 static void ixgbevf_save_reset_stats(struct ixgbevf_adapter *adapter)
1478 {
1479         /* Only save pre-reset stats if there are some */
1480         if (adapter->stats.vfgprc || adapter->stats.vfgptc) {
1481                 adapter->stats.saved_reset_vfgprc += adapter->stats.vfgprc -
1482                         adapter->stats.base_vfgprc;
1483                 adapter->stats.saved_reset_vfgptc += adapter->stats.vfgptc -
1484                         adapter->stats.base_vfgptc;
1485                 adapter->stats.saved_reset_vfgorc += adapter->stats.vfgorc -
1486                         adapter->stats.base_vfgorc;
1487                 adapter->stats.saved_reset_vfgotc += adapter->stats.vfgotc -
1488                         adapter->stats.base_vfgotc;
1489                 adapter->stats.saved_reset_vfmprc += adapter->stats.vfmprc -
1490                         adapter->stats.base_vfmprc;
1491         }
1492 }
1493
1494 static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter)
1495 {
1496         struct ixgbe_hw *hw = &adapter->hw;
1497
1498         adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
1499         adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
1500         adapter->stats.last_vfgorc |=
1501                 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
1502         adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
1503         adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
1504         adapter->stats.last_vfgotc |=
1505                 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
1506         adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
1507
1508         adapter->stats.base_vfgprc = adapter->stats.last_vfgprc;
1509         adapter->stats.base_vfgorc = adapter->stats.last_vfgorc;
1510         adapter->stats.base_vfgptc = adapter->stats.last_vfgptc;
1511         adapter->stats.base_vfgotc = adapter->stats.last_vfgotc;
1512         adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
1513 }
1514
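/* Negotiate the mailbox API version with the PF, starting with the newest
 * version this driver supports (1.1) and falling back to 1.0 if the PF
 * rejects it.
 */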
1515 static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
1516 {
1517         struct ixgbe_hw *hw = &adapter->hw;
1518         int api[] = { ixgbe_mbox_api_11,
1519                       ixgbe_mbox_api_10,
1520                       ixgbe_mbox_api_unknown };
1521         int err = 0, idx = 0;
1522
1523         spin_lock_bh(&adapter->mbx_lock);
1524
1525         while (api[idx] != ixgbe_mbox_api_unknown) {
1526                 err = ixgbevf_negotiate_api_version(hw, api[idx]);
1527                 if (!err)
1528                         break;
1529                 idx++;
1530         }
1531
1532         spin_unlock_bh(&adapter->mbx_lock);
1533 }
1534
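/* Final stage of bringing the interface up: enable the Tx rings with a
 * write-back threshold of 8 descriptors, enable the Rx rings (programming
 * the per-ring packet size limit on X540 VFs), configure MSI-X, push the
 * station MAC address to the PF, then start NAPI, the Tx queues and the
 * watchdog timer.
 */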
1535 static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
1536 {
1537         struct net_device *netdev = adapter->netdev;
1538         struct ixgbe_hw *hw = &adapter->hw;
1539         int i, j = 0;
1540         int num_rx_rings = adapter->num_rx_queues;
1541         u32 txdctl, rxdctl;
1542
1543         for (i = 0; i < adapter->num_tx_queues; i++) {
1544                 j = adapter->tx_ring[i].reg_idx;
1545                 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
1546                 /* enable WTHRESH=8 descriptors, to encourage burst writeback */
1547                 txdctl |= (8 << 16);
1548                 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
1549         }
1550
1551         for (i = 0; i < adapter->num_tx_queues; i++) {
1552                 j = adapter->tx_ring[i].reg_idx;
1553                 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
1554                 txdctl |= IXGBE_TXDCTL_ENABLE;
1555                 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
1556         }
1557
1558         for (i = 0; i < num_rx_rings; i++) {
1559                 j = adapter->rx_ring[i].reg_idx;
1560                 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
1561                 rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
1562                 if (hw->mac.type == ixgbe_mac_X540_vf) {
1563                         rxdctl &= ~IXGBE_RXDCTL_RLPMLMASK;
1564                         rxdctl |= ((netdev->mtu + ETH_HLEN + ETH_FCS_LEN) |
1565                                    IXGBE_RXDCTL_RLPML_EN);
1566                 }
1567                 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
1568                 ixgbevf_rx_desc_queue_enable(adapter, i);
1569         }
1570
1571         ixgbevf_configure_msix(adapter);
1572
1573         spin_lock_bh(&adapter->mbx_lock);
1574
1575         if (is_valid_ether_addr(hw->mac.addr))
1576                 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
1577         else
1578                 hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0);
1579
1580         spin_unlock_bh(&adapter->mbx_lock);
1581
1582         clear_bit(__IXGBEVF_DOWN, &adapter->state);
1583         ixgbevf_napi_enable_all(adapter);
1584
1585         /* enable transmits */
1586         netif_tx_start_all_queues(netdev);
1587
1588         ixgbevf_save_reset_stats(adapter);
1589         ixgbevf_init_last_counter_stats(adapter);
1590
1591         hw->mac.get_link_status = 1;
1592         mod_timer(&adapter->watchdog_timer, jiffies);
1593 }
1594
1595 void ixgbevf_up(struct ixgbevf_adapter *adapter)
1596 {
1597         struct ixgbe_hw *hw = &adapter->hw;
1598
1599         ixgbevf_configure(adapter);
1600
1601         ixgbevf_up_complete(adapter);
1602
1603         /* clear any pending interrupts, may auto mask */
1604         IXGBE_READ_REG(hw, IXGBE_VTEICR);
1605
1606         ixgbevf_irq_enable(adapter);
1607 }
1608
1609 /**
1610  * ixgbevf_clean_rx_ring - Free Rx Buffers per Queue
1611  * @adapter: board private structure
1612  * @rx_ring: ring to free buffers from
1613  **/
1614 static void ixgbevf_clean_rx_ring(struct ixgbevf_adapter *adapter,
1615                                   struct ixgbevf_ring *rx_ring)
1616 {
1617         struct pci_dev *pdev = adapter->pdev;
1618         unsigned long size;
1619         unsigned int i;
1620
1621         if (!rx_ring->rx_buffer_info)
1622                 return;
1623
1624         /* Free all the Rx ring sk_buffs */
1625         for (i = 0; i < rx_ring->count; i++) {
1626                 struct ixgbevf_rx_buffer *rx_buffer_info;
1627
1628                 rx_buffer_info = &rx_ring->rx_buffer_info[i];
1629                 if (rx_buffer_info->dma) {
1630                         dma_unmap_single(&pdev->dev, rx_buffer_info->dma,
1631                                          rx_ring->rx_buf_len,
1632                                          DMA_FROM_DEVICE);
1633                         rx_buffer_info->dma = 0;
1634                 }
1635                 if (rx_buffer_info->skb) {
1636                         struct sk_buff *skb = rx_buffer_info->skb;
1637                         rx_buffer_info->skb = NULL;
1638                         do {
1639                                 struct sk_buff *this = skb;
1640                                 skb = IXGBE_CB(skb)->prev;
1641                                 dev_kfree_skb(this);
1642                         } while (skb);
1643                 }
1644         }
1645
1646         size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
1647         memset(rx_ring->rx_buffer_info, 0, size);
1648
1649         /* Zero out the descriptor ring */
1650         memset(rx_ring->desc, 0, rx_ring->size);
1651 }
1652
1653 /**
1654  * ixgbevf_clean_tx_ring - Free Tx Buffers
1655  * @adapter: board private structure
1656  * @tx_ring: ring to be cleaned
1657  **/
1658 static void ixgbevf_clean_tx_ring(struct ixgbevf_adapter *adapter,
1659                                   struct ixgbevf_ring *tx_ring)
1660 {
1661         struct ixgbevf_tx_buffer *tx_buffer_info;
1662         unsigned long size;
1663         unsigned int i;
1664
1665         if (!tx_ring->tx_buffer_info)
1666                 return;
1667
1668         /* Free all the Tx ring sk_buffs */
1669         for (i = 0; i < tx_ring->count; i++) {
1670                 tx_buffer_info = &tx_ring->tx_buffer_info[i];
1671                 ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
1672         }
1673
1674         size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
1675         memset(tx_ring->tx_buffer_info, 0, size);
1676
1677         memset(tx_ring->desc, 0, tx_ring->size);
1678 }
1679
1680 /**
1681  * ixgbevf_clean_all_rx_rings - Free Rx Buffers for all queues
1682  * @adapter: board private structure
1683  **/
1684 static void ixgbevf_clean_all_rx_rings(struct ixgbevf_adapter *adapter)
1685 {
1686         int i;
1687
1688         for (i = 0; i < adapter->num_rx_queues; i++)
1689                 ixgbevf_clean_rx_ring(adapter, &adapter->rx_ring[i]);
1690 }
1691
1692 /**
1693  * ixgbevf_clean_all_tx_rings - Free Tx Buffers for all queues
1694  * @adapter: board private structure
1695  **/
1696 static void ixgbevf_clean_all_tx_rings(struct ixgbevf_adapter *adapter)
1697 {
1698         int i;
1699
1700         for (i = 0; i < adapter->num_tx_queues; i++)
1701                 ixgbevf_clean_tx_ring(adapter, &adapter->tx_ring[i]);
1702 }
1703
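/* Quiesce the interface: mark the adapter down, disable the Rx queues,
 * stop the stack's Tx path, mask interrupts, stop NAPI and the watchdog,
 * disable the Tx rings in hardware, reset the VF and release all buffers
 * still held by the rings.
 */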
1704 void ixgbevf_down(struct ixgbevf_adapter *adapter)
1705 {
1706         struct net_device *netdev = adapter->netdev;
1707         struct ixgbe_hw *hw = &adapter->hw;
1708         u32 txdctl;
1709         int i, j;
1710
1711         /* signal that we are down to the interrupt handler */
1712         set_bit(__IXGBEVF_DOWN, &adapter->state);
1713
1714         /* disable all enabled rx queues */
1715         for (i = 0; i < adapter->num_rx_queues; i++)
1716                 ixgbevf_disable_rx_queue(adapter, &adapter->rx_ring[i]);
1717
1718         netif_tx_disable(netdev);
1719
1720         msleep(10);
1721
1722         netif_tx_stop_all_queues(netdev);
1723
1724         ixgbevf_irq_disable(adapter);
1725
1726         ixgbevf_napi_disable_all(adapter);
1727
1728         del_timer_sync(&adapter->watchdog_timer);
1729         /* can't call flush scheduled work here because it can deadlock
1730          * if linkwatch_event tries to acquire the rtnl_lock which we are
1731          * holding */
1732         while (adapter->flags & IXGBE_FLAG_IN_WATCHDOG_TASK)
1733                 msleep(1);
1734
1735         /* disable transmits in the hardware now that interrupts are off */
1736         for (i = 0; i < adapter->num_tx_queues; i++) {
1737                 j = adapter->tx_ring[i].reg_idx;
1738                 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
1739                 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j),
1740                                 (txdctl & ~IXGBE_TXDCTL_ENABLE));
1741         }
1742
1743         netif_carrier_off(netdev);
1744
1745         if (!pci_channel_offline(adapter->pdev))
1746                 ixgbevf_reset(adapter);
1747
1748         ixgbevf_clean_all_tx_rings(adapter);
1749         ixgbevf_clean_all_rx_rings(adapter);
1750 }
1751
1752 void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter)
1753 {
1754         WARN_ON(in_interrupt());
1755
1756         while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
1757                 msleep(1);
1758
1759         ixgbevf_down(adapter);
1760         ixgbevf_up(adapter);
1761
1762         clear_bit(__IXGBEVF_RESETTING, &adapter->state);
1763 }
1764
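/* Reset the VF through the PF mailbox.  If the PF is itself still in
 * reset the attempt is abandoned with a debug message; otherwise the
 * hardware is re-initialized, the mailbox API renegotiated and the
 * (possibly PF-assigned) MAC address copied back into the netdev.
 */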
1765 void ixgbevf_reset(struct ixgbevf_adapter *adapter)
1766 {
1767         struct ixgbe_hw *hw = &adapter->hw;
1768         struct net_device *netdev = adapter->netdev;
1769
1770         if (hw->mac.ops.reset_hw(hw)) {
1771                 hw_dbg(hw, "PF still resetting\n");
1772         } else {
1773                 hw->mac.ops.init_hw(hw);
1774                 ixgbevf_negotiate_api(adapter);
1775         }
1776
1777         if (is_valid_ether_addr(adapter->hw.mac.addr)) {
1778                 memcpy(netdev->dev_addr, adapter->hw.mac.addr,
1779                        netdev->addr_len);
1780                 memcpy(netdev->perm_addr, adapter->hw.mac.addr,
1781                        netdev->addr_len);
1782         }
1783 }
1784
1785 static int ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
1786                                         int vectors)
1787 {
1788         int err = 0;
1789         int vector_threshold;
1790
1791         /* We'll want at least 2 (vector_threshold):
1792          * 1) TxQ[0] + RxQ[0] handler
1793          * 2) Other (Link Status Change, etc.)
1794          */
1795         vector_threshold = MIN_MSIX_COUNT;
1796
1797         /* The more we get, the more we will assign to Tx/Rx Cleanup
1798          * for the separate queues...where Rx Cleanup >= Tx Cleanup.
1799          * Right now, we simply care about how many we'll get; we'll
1800          * set them up later while requesting irq's.
1801          */
1802         while (vectors >= vector_threshold) {
1803                 err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
1804                                       vectors);
1805                 if (!err || err < 0) /* Success or a nasty failure. */
1806                         break;
1807                 else /* err == number of vectors we should try again with */
1808                         vectors = err;
1809         }
1810
1811         if (vectors < vector_threshold)
1812                 err = -ENOMEM;
1813
1814         if (err) {
1815                 dev_err(&adapter->pdev->dev,
1816                         "Unable to allocate MSI-X interrupts\n");
1817                 kfree(adapter->msix_entries);
1818                 adapter->msix_entries = NULL;
1819         } else {
1820                 /*
1821                  * Adjust for only the vectors we'll use, which is minimum
1822                  * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
1823                  * vectors we were allocated.
1824                  */
1825                 adapter->num_msix_vectors = vectors;
1826         }
1827
1828         return err;
1829 }
1830
1831 /**
1832  * ixgbevf_set_num_queues - Allocate queues for device, feature dependent
1833  * @adapter: board private structure to initialize
1834  *
1835  * This is the top level queue allocation routine.  The order here is very
1836  * important, starting with the "most" number of features turned on at once,
1837  * and ending with the smallest set of features.  This way large combinations
1838  * can be allocated if they're turned on, and smaller combinations are the
1839  * fallthrough conditions.
1840  *
1841  **/
1842 static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
1843 {
1844         struct ixgbe_hw *hw = &adapter->hw;
1845         unsigned int def_q = 0;
1846         unsigned int num_tcs = 0;
1847         int err;
1848
1849         /* Start with base case */
1850         adapter->num_rx_queues = 1;
1851         adapter->num_tx_queues = 1;
1852
1853         spin_lock_bh(&adapter->mbx_lock);
1854
1855         /* fetch queue configuration from the PF */
1856         err = ixgbevf_get_queues(hw, &num_tcs, &def_q);
1857
1858         spin_unlock_bh(&adapter->mbx_lock);
1859
1860         if (err)
1861                 return;
1862
1863         /* we need as many queues as traffic classes */
1864         if (num_tcs > 1)
1865                 adapter->num_rx_queues = num_tcs;
1866 }
1867
1868 /**
1869  * ixgbevf_alloc_queues - Allocate memory for all rings
1870  * @adapter: board private structure to initialize
1871  *
1872  * We allocate one ring per queue at run-time since we don't know the
1873  * number of queues at compile-time.  The polling_netdev array is
1874  * intended for Multiqueue, but should work fine with a single queue.
1875  **/
1876 static int ixgbevf_alloc_queues(struct ixgbevf_adapter *adapter)
1877 {
1878         int i;
1879
1880         adapter->tx_ring = kcalloc(adapter->num_tx_queues,
1881                                    sizeof(struct ixgbevf_ring), GFP_KERNEL);
1882         if (!adapter->tx_ring)
1883                 goto err_tx_ring_allocation;
1884
1885         adapter->rx_ring = kcalloc(adapter->num_rx_queues,
1886                                    sizeof(struct ixgbevf_ring), GFP_KERNEL);
1887         if (!adapter->rx_ring)
1888                 goto err_rx_ring_allocation;
1889
1890         for (i = 0; i < adapter->num_tx_queues; i++) {
1891                 adapter->tx_ring[i].count = adapter->tx_ring_count;
1892                 adapter->tx_ring[i].queue_index = i;
1893                 /* reg_idx may be remapped later by DCB config */
1894                 adapter->tx_ring[i].reg_idx = i;
1895                 adapter->tx_ring[i].dev = &adapter->pdev->dev;
1896                 adapter->tx_ring[i].netdev = adapter->netdev;
1897         }
1898
1899         for (i = 0; i < adapter->num_rx_queues; i++) {
1900                 adapter->rx_ring[i].count = adapter->rx_ring_count;
1901                 adapter->rx_ring[i].queue_index = i;
1902                 adapter->rx_ring[i].reg_idx = i;
1903                 adapter->rx_ring[i].dev = &adapter->pdev->dev;
1904                 adapter->rx_ring[i].netdev = adapter->netdev;
1905         }
1906
1907         return 0;
1908
1909 err_rx_ring_allocation:
1910         kfree(adapter->tx_ring);
1911 err_tx_ring_allocation:
1912         return -ENOMEM;
1913 }
1914
1915 /**
1916  * ixgbevf_set_interrupt_capability - set MSI-X or FAIL if not supported
1917  * @adapter: board private structure to initialize
1918  *
1919  * Attempt to configure the interrupts using the best available
1920  * capabilities of the hardware and the kernel.
1921  **/
1922 static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter)
1923 {
1924         struct net_device *netdev = adapter->netdev;
1925         int err = 0;
1926         int vector, v_budget;
1927
1928         /*
1929          * It's easy to be greedy for MSI-X vectors, but it really
1930          * doesn't do us much good if we have a lot more vectors
1931          * than CPUs.  So let's be conservative and only ask for
1932          * (roughly) the same number of vectors as there are CPUs.
1933          * The default is to use pairs of vectors.
1934          */
1935         v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues);
1936         v_budget = min_t(int, v_budget, num_online_cpus());
1937         v_budget += NON_Q_VECTORS;
1938
1939         /* A failure in MSI-X entry allocation isn't fatal, but it does
1940          * mean we disable MSI-X capabilities of the adapter. */
1941         adapter->msix_entries = kcalloc(v_budget,
1942                                         sizeof(struct msix_entry), GFP_KERNEL);
1943         if (!adapter->msix_entries) {
1944                 err = -ENOMEM;
1945                 goto out;
1946         }
1947
1948         for (vector = 0; vector < v_budget; vector++)
1949                 adapter->msix_entries[vector].entry = vector;
1950
1951         err = ixgbevf_acquire_msix_vectors(adapter, v_budget);
1952         if (err)
1953                 goto out;
1954
1955         err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues);
1956         if (err)
1957                 goto out;
1958
1959         err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues);
1960
1961 out:
1962         return err;
1963 }
1964
1965 /**
1966  * ixgbevf_alloc_q_vectors - Allocate memory for interrupt vectors
1967  * @adapter: board private structure to initialize
1968  *
1969  * We allocate one q_vector per queue interrupt.  If allocation fails we
1970  * return -ENOMEM.
1971  **/
1972 static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter)
1973 {
1974         int q_idx, num_q_vectors;
1975         struct ixgbevf_q_vector *q_vector;
1976
1977         num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1978
1979         for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
1980                 q_vector = kzalloc(sizeof(struct ixgbevf_q_vector), GFP_KERNEL);
1981                 if (!q_vector)
1982                         goto err_out;
1983                 q_vector->adapter = adapter;
1984                 q_vector->v_idx = q_idx;
1985                 netif_napi_add(adapter->netdev, &q_vector->napi,
1986                                ixgbevf_poll, 64);
1987 #ifdef CONFIG_NET_RX_BUSY_POLL
1988                 napi_hash_add(&q_vector->napi);
1989 #endif
1990                 adapter->q_vector[q_idx] = q_vector;
1991         }
1992
1993         return 0;
1994
1995 err_out:
1996         while (q_idx) {
1997                 q_idx--;
1998                 q_vector = adapter->q_vector[q_idx];
1999 #ifdef CONFIG_NET_RX_BUSY_POLL
2000                 napi_hash_del(&q_vector->napi);
2001 #endif
2002                 netif_napi_del(&q_vector->napi);
2003                 kfree(q_vector);
2004                 adapter->q_vector[q_idx] = NULL;
2005         }
2006         return -ENOMEM;
2007 }
2008
2009 /**
2010  * ixgbevf_free_q_vectors - Free memory allocated for interrupt vectors
2011  * @adapter: board private structure to initialize
2012  *
2013  * This function frees the memory allocated to the q_vectors.  In addition if
2014  * NAPI is enabled it will delete any references to the NAPI struct prior
2015  * to freeing the q_vector.
2016  **/
2017 static void ixgbevf_free_q_vectors(struct ixgbevf_adapter *adapter)
2018 {
2019         int q_idx, num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
2020
2021         for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
2022                 struct ixgbevf_q_vector *q_vector = adapter->q_vector[q_idx];
2023
2024                 adapter->q_vector[q_idx] = NULL;
2025 #ifdef CONFIG_NET_RX_BUSY_POLL
2026                 napi_hash_del(&q_vector->napi);
2027 #endif
2028                 netif_napi_del(&q_vector->napi);
2029                 kfree(q_vector);
2030         }
2031 }
2032
2033 /**
2034  * ixgbevf_reset_interrupt_capability - Reset MSIX setup
2035  * @adapter: board private structure
2036  *
2037  **/
2038 static void ixgbevf_reset_interrupt_capability(struct ixgbevf_adapter *adapter)
2039 {
2040         pci_disable_msix(adapter->pdev);
2041         kfree(adapter->msix_entries);
2042         adapter->msix_entries = NULL;
2043 }
2044
2045 /**
2046  * ixgbevf_init_interrupt_scheme - Determine if MSIX is supported and init
2047  * @adapter: board private structure to initialize
2048  *
2049  **/
2050 static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter)
2051 {
2052         int err;
2053
2054         /* Number of supported queues */
2055         ixgbevf_set_num_queues(adapter);
2056
2057         err = ixgbevf_set_interrupt_capability(adapter);
2058         if (err) {
2059                 hw_dbg(&adapter->hw,
2060                        "Unable to setup interrupt capabilities\n");
2061                 goto err_set_interrupt;
2062         }
2063
2064         err = ixgbevf_alloc_q_vectors(adapter);
2065         if (err) {
2066                 hw_dbg(&adapter->hw, "Unable to allocate memory for queue "
2067                        "vectors\n");
2068                 goto err_alloc_q_vectors;
2069         }
2070
2071         err = ixgbevf_alloc_queues(adapter);
2072         if (err) {
2073                 pr_err("Unable to allocate memory for queues\n");
2074                 goto err_alloc_queues;
2075         }
2076
2077         hw_dbg(&adapter->hw, "Multiqueue %s: Rx Queue count = %u, "
2078                "Tx Queue count = %u\n",
2079                (adapter->num_rx_queues > 1) ? "Enabled" :
2080                "Disabled", adapter->num_rx_queues, adapter->num_tx_queues);
2081
2082         set_bit(__IXGBEVF_DOWN, &adapter->state);
2083
2084         return 0;
2085 err_alloc_queues:
2086         ixgbevf_free_q_vectors(adapter);
2087 err_alloc_q_vectors:
2088         ixgbevf_reset_interrupt_capability(adapter);
2089 err_set_interrupt:
2090         return err;
2091 }
2092
2093 /**
2094  * ixgbevf_clear_interrupt_scheme - Clear the current interrupt scheme settings
2095  * @adapter: board private structure to clear interrupt scheme on
2096  *
2097  * We go through and clear interrupt specific resources and reset the structure
2098  * to pre-load conditions
2099  **/
2100 static void ixgbevf_clear_interrupt_scheme(struct ixgbevf_adapter *adapter)
2101 {
2102         adapter->num_tx_queues = 0;
2103         adapter->num_rx_queues = 0;
2104
2105         ixgbevf_free_q_vectors(adapter);
2106         ixgbevf_reset_interrupt_capability(adapter);
2107 }
2108
2109 /**
2110  * ixgbevf_sw_init - Initialize general software structures (struct ixgbevf_adapter)
2112  * @adapter: board private structure to initialize
2113  *
2114  * ixgbevf_sw_init initializes the Adapter private data structure.
2115  * Fields are initialized based on PCI device information and
2116  * OS network device settings (MTU size).
2117  **/
2118 static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
2119 {
2120         struct ixgbe_hw *hw = &adapter->hw;
2121         struct pci_dev *pdev = adapter->pdev;
2122         struct net_device *netdev = adapter->netdev;
2123         int err;
2124
2125         /* PCI config space info */
2126
2127         hw->vendor_id = pdev->vendor;
2128         hw->device_id = pdev->device;
2129         hw->revision_id = pdev->revision;
2130         hw->subsystem_vendor_id = pdev->subsystem_vendor;
2131         hw->subsystem_device_id = pdev->subsystem_device;
2132
2133         hw->mbx.ops.init_params(hw);
2134
2135         /* assume legacy case in which PF would only give VF 2 queues */
2136         hw->mac.max_tx_queues = 2;
2137         hw->mac.max_rx_queues = 2;
2138
2139         /* lock to protect mailbox accesses */
2140         spin_lock_init(&adapter->mbx_lock);
2141
2142         err = hw->mac.ops.reset_hw(hw);
2143         if (err) {
2144                 dev_info(&pdev->dev,
2145                          "PF still in reset state.  Is the PF interface up?\n");
2146         } else {
2147                 err = hw->mac.ops.init_hw(hw);
2148                 if (err) {
2149                         pr_err("init_hw failed: %d\n", err);
2150                         goto out;
2151                 }
2152                 ixgbevf_negotiate_api(adapter);
2153                 err = hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
2154                 if (err)
2155                         dev_info(&pdev->dev, "Error reading MAC address\n");
2156                 else if (is_zero_ether_addr(adapter->hw.mac.addr))
2157                         dev_info(&pdev->dev,
2158                                  "MAC address not assigned by administrator.\n");
2159                 memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
2160         }
2161
2162         if (!is_valid_ether_addr(netdev->dev_addr)) {
2163                 dev_info(&pdev->dev, "Assigning random MAC address\n");
2164                 eth_hw_addr_random(netdev);
2165                 memcpy(hw->mac.addr, netdev->dev_addr, netdev->addr_len);
2166         }
2167
2168         /* Enable dynamic interrupt throttling rates */
2169         adapter->rx_itr_setting = 1;
2170         adapter->tx_itr_setting = 1;
2171
2172         /* set default ring sizes */
2173         adapter->tx_ring_count = IXGBEVF_DEFAULT_TXD;
2174         adapter->rx_ring_count = IXGBEVF_DEFAULT_RXD;
2175
2176         set_bit(__IXGBEVF_DOWN, &adapter->state);
2177         return 0;
2178
2179 out:
2180         return err;
2181 }
2182
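/* The VF statistics registers are free-running 32-bit (or 36-bit, split
 * across _LSB/_MSB pairs) counters that the VF cannot clear.  These
 * helpers fold each hardware reading into a 64-bit software counter:
 * when the new reading is smaller than the previous one the register
 * wrapped, so the upper bits are advanced by 2^32 (or 2^36) before the
 * low bits are replaced with the current reading.
 */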
2183 #define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter)     \
2184         {                                                       \
2185                 u32 current_counter = IXGBE_READ_REG(hw, reg);  \
2186                 if (current_counter < last_counter)             \
2187                         counter += 0x100000000LL;               \
2188                 last_counter = current_counter;                 \
2189                 counter &= 0xFFFFFFFF00000000LL;                \
2190                 counter |= current_counter;                     \
2191         }
2192
2193 #define UPDATE_VF_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
2194         {                                                                \
2195                 u64 current_counter_lsb = IXGBE_READ_REG(hw, reg_lsb);   \
2196                 u64 current_counter_msb = IXGBE_READ_REG(hw, reg_msb);   \
2197                 u64 current_counter = (current_counter_msb << 32) |      \
2198                         current_counter_lsb;                             \
2199                 if (current_counter < last_counter)                      \
2200                         counter += 0x1000000000LL;                       \
2201                 last_counter = current_counter;                          \
2202                 counter &= 0xFFFFFFF000000000LL;                         \
2203                 counter |= current_counter;                              \
2204         }
2205 /**
2206  * ixgbevf_update_stats - Update the board statistics counters.
2207  * @adapter: board private structure
2208  **/
2209 void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
2210 {
2211         struct ixgbe_hw *hw = &adapter->hw;
2212         int i;
2213
2214         if (!adapter->link_up)
2215                 return;
2216
2217         UPDATE_VF_COUNTER_32bit(IXGBE_VFGPRC, adapter->stats.last_vfgprc,
2218                                 adapter->stats.vfgprc);
2219         UPDATE_VF_COUNTER_32bit(IXGBE_VFGPTC, adapter->stats.last_vfgptc,
2220                                 adapter->stats.vfgptc);
2221         UPDATE_VF_COUNTER_36bit(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
2222                                 adapter->stats.last_vfgorc,
2223                                 adapter->stats.vfgorc);
2224         UPDATE_VF_COUNTER_36bit(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
2225                                 adapter->stats.last_vfgotc,
2226                                 adapter->stats.vfgotc);
2227         UPDATE_VF_COUNTER_32bit(IXGBE_VFMPRC, adapter->stats.last_vfmprc,
2228                                 adapter->stats.vfmprc);
2229
2230         for (i = 0;  i  < adapter->num_rx_queues;  i++) {
2231                 adapter->hw_csum_rx_error +=
2232                         adapter->rx_ring[i].hw_csum_rx_error;
2233                 adapter->hw_csum_rx_good +=
2234                         adapter->rx_ring[i].hw_csum_rx_good;
2235                 adapter->rx_ring[i].hw_csum_rx_error = 0;
2236                 adapter->rx_ring[i].hw_csum_rx_good = 0;
2237         }
2238 }
2239
2240 /**
2241  * ixgbevf_watchdog - Timer Call-back
2242  * @data: pointer to adapter cast into an unsigned long
2243  **/
2244 static void ixgbevf_watchdog(unsigned long data)
2245 {
2246         struct ixgbevf_adapter *adapter = (struct ixgbevf_adapter *)data;
2247         struct ixgbe_hw *hw = &adapter->hw;
2248         u32 eics = 0;
2249         int i;
2250
2251         /*
2252          * Do the watchdog outside of interrupt context due to the lovely
2253          * delays that some of the newer hardware requires
2254          */
2255
2256         if (test_bit(__IXGBEVF_DOWN, &adapter->state))
2257                 goto watchdog_short_circuit;
2258
2259         /* get one bit for every active tx/rx interrupt vector */
2260         for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
2261                 struct ixgbevf_q_vector *qv = adapter->q_vector[i];
2262                 if (qv->rx.ring || qv->tx.ring)
2263                         eics |= 1 << i;
2264         }
2265
2266         IXGBE_WRITE_REG(hw, IXGBE_VTEICS, eics);
2267
2268 watchdog_short_circuit:
2269         schedule_work(&adapter->watchdog_task);
2270 }
2271
2272 /**
2273  * ixgbevf_tx_timeout - Respond to a Tx Hang
2274  * @netdev: network interface device structure
2275  **/
2276 static void ixgbevf_tx_timeout(struct net_device *netdev)
2277 {
2278         struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2279
2280         /* Do the reset outside of interrupt context */
2281         schedule_work(&adapter->reset_task);
2282 }
2283
2284 static void ixgbevf_reset_task(struct work_struct *work)
2285 {
2286         struct ixgbevf_adapter *adapter;
2287         adapter = container_of(work, struct ixgbevf_adapter, reset_task);
2288
2289         /* If we're already down or resetting, just bail */
2290         if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
2291             test_bit(__IXGBEVF_RESETTING, &adapter->state))
2292                 return;
2293
2294         adapter->tx_timeout_count++;
2295
2296         ixgbevf_reinit_locked(adapter);
2297 }
2298
2299 /**
2300  * ixgbevf_watchdog_task - worker thread to bring link up
2301  * @work: pointer to work_struct containing our data
2302  **/
2303 static void ixgbevf_watchdog_task(struct work_struct *work)
2304 {
2305         struct ixgbevf_adapter *adapter = container_of(work,
2306                                                        struct ixgbevf_adapter,
2307                                                        watchdog_task);
2308         struct net_device *netdev = adapter->netdev;
2309         struct ixgbe_hw *hw = &adapter->hw;
2310         u32 link_speed = adapter->link_speed;
2311         bool link_up = adapter->link_up;
2312         s32 need_reset;
2313
2314         ixgbevf_queue_reset_subtask(adapter);
2315
2316         adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK;
2317
2318         /*
2319          * Always check the link on the watchdog because we have
2320          * no LSC interrupt
2321          */
2322         spin_lock_bh(&adapter->mbx_lock);
2323
2324         need_reset = hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
2325
2326         spin_unlock_bh(&adapter->mbx_lock);
2327
2328         if (need_reset) {
2329                 adapter->link_up = link_up;
2330                 adapter->link_speed = link_speed;
2331                 netif_carrier_off(netdev);
2332                 netif_tx_stop_all_queues(netdev);
2333                 schedule_work(&adapter->reset_task);
2334                 goto pf_has_reset;
2335         }
2336         adapter->link_up = link_up;
2337         adapter->link_speed = link_speed;
2338
2339         if (link_up) {
2340                 if (!netif_carrier_ok(netdev)) {
2341                         char *link_speed_string;
2342                         switch (link_speed) {
2343                         case IXGBE_LINK_SPEED_10GB_FULL:
2344                                 link_speed_string = "10 Gbps";
2345                                 break;
2346                         case IXGBE_LINK_SPEED_1GB_FULL:
2347                                 link_speed_string = "1 Gbps";
2348                                 break;
2349                         case IXGBE_LINK_SPEED_100_FULL:
2350                                 link_speed_string = "100 Mbps";
2351                                 break;
2352                         default:
2353                                 link_speed_string = "unknown speed";
2354                                 break;
2355                         }
2356                         dev_info(&adapter->pdev->dev,
2357                                 "NIC Link is Up, %s\n", link_speed_string);
2358                         netif_carrier_on(netdev);
2359                         netif_tx_wake_all_queues(netdev);
2360                 }
2361         } else {
2362                 adapter->link_up = false;
2363                 adapter->link_speed = 0;
2364                 if (netif_carrier_ok(netdev)) {
2365                         dev_info(&adapter->pdev->dev, "NIC Link is Down\n");
2366                         netif_carrier_off(netdev);
2367                         netif_tx_stop_all_queues(netdev);
2368                 }
2369         }
2370
2371         ixgbevf_update_stats(adapter);
2372
2373 pf_has_reset:
2374         /* Reset the timer */
2375         if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
2376                 mod_timer(&adapter->watchdog_timer,
2377                           round_jiffies(jiffies + (2 * HZ)));
2378
2379         adapter->flags &= ~IXGBE_FLAG_IN_WATCHDOG_TASK;
2380 }
2381
2382 /**
2383  * ixgbevf_free_tx_resources - Free Tx Resources per Queue
2384  * @adapter: board private structure
2385  * @tx_ring: Tx descriptor ring for a specific queue
2386  *
2387  * Free all transmit software resources
2388  **/
2389 void ixgbevf_free_tx_resources(struct ixgbevf_adapter *adapter,
2390                                struct ixgbevf_ring *tx_ring)
2391 {
2392         struct pci_dev *pdev = adapter->pdev;
2393
2394         ixgbevf_clean_tx_ring(adapter, tx_ring);
2395
2396         vfree(tx_ring->tx_buffer_info);
2397         tx_ring->tx_buffer_info = NULL;
2398
2399         dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
2400                           tx_ring->dma);
2401
2402         tx_ring->desc = NULL;
2403 }
2404
2405 /**
2406  * ixgbevf_free_all_tx_resources - Free Tx Resources for All Queues
2407  * @adapter: board private structure
2408  *
2409  * Free all transmit software resources
2410  **/
2411 static void ixgbevf_free_all_tx_resources(struct ixgbevf_adapter *adapter)
2412 {
2413         int i;
2414
2415         for (i = 0; i < adapter->num_tx_queues; i++)
2416                 if (adapter->tx_ring[i].desc)
2417                         ixgbevf_free_tx_resources(adapter,
2418                                                   &adapter->tx_ring[i]);
2419
2420 }
2421
2422 /**
2423  * ixgbevf_setup_tx_resources - allocate Tx resources (Descriptors)
2424  * @adapter: board private structure
2425  * @tx_ring:    tx descriptor ring (for a specific queue) to setup
2426  *
2427  * Return 0 on success, negative on failure
2428  **/
2429 int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *adapter,
2430                                struct ixgbevf_ring *tx_ring)
2431 {
2432         struct pci_dev *pdev = adapter->pdev;
2433         int size;
2434
2435         size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
2436         tx_ring->tx_buffer_info = vzalloc(size);
2437         if (!tx_ring->tx_buffer_info)
2438                 goto err;
2439
2440         /* round up to nearest 4K */
2441         tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
2442         tx_ring->size = ALIGN(tx_ring->size, 4096);
2443
2444         tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
2445                                            &tx_ring->dma, GFP_KERNEL);
2446         if (!tx_ring->desc)
2447                 goto err;
2448
2449         return 0;
2450
2451 err:
2452         vfree(tx_ring->tx_buffer_info);
2453         tx_ring->tx_buffer_info = NULL;
2454         hw_dbg(&adapter->hw, "Unable to allocate memory for the transmit "
2455                "descriptor ring\n");
2456         return -ENOMEM;
2457 }
2458
2459 /**
2460  * ixgbevf_setup_all_tx_resources - allocate all queues Tx resources
2461  * @adapter: board private structure
2462  *
2463  * If this function returns with an error, then it's possible one or
2464  * more of the rings is populated (while the rest are not).  It is the
2465  * callers duty to clean those orphaned rings.
2466  *
2467  * Return 0 on success, negative on failure
2468  **/
2469 static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter)
2470 {
2471         int i, err = 0;
2472
2473         for (i = 0; i < adapter->num_tx_queues; i++) {
2474                 err = ixgbevf_setup_tx_resources(adapter, &adapter->tx_ring[i]);
2475                 if (!err)
2476                         continue;
2477                 hw_dbg(&adapter->hw,
2478                        "Allocation for Tx Queue %u failed\n", i);
2479                 break;
2480         }
2481
2482         return err;
2483 }
2484
2485 /**
2486  * ixgbevf_setup_rx_resources - allocate Rx resources (Descriptors)
2487  * @adapter: board private structure
2488  * @rx_ring:    rx descriptor ring (for a specific queue) to setup
2489  *
2490  * Returns 0 on success, negative on failure
2491  **/
2492 int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *adapter,
2493                                struct ixgbevf_ring *rx_ring)
2494 {
2495         struct pci_dev *pdev = adapter->pdev;
2496         int size;
2497
2498         size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
2499         rx_ring->rx_buffer_info = vzalloc(size);
2500         if (!rx_ring->rx_buffer_info)
2501                 goto alloc_failed;
2502
2503         /* Round up to nearest 4K */
2504         rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
2505         rx_ring->size = ALIGN(rx_ring->size, 4096);
2506
2507         rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
2508                                            &rx_ring->dma, GFP_KERNEL);
2509
2510         if (!rx_ring->desc) {
2511                 vfree(rx_ring->rx_buffer_info);
2512                 rx_ring->rx_buffer_info = NULL;
2513                 goto alloc_failed;
2514         }
2515
2516         return 0;
2517 alloc_failed:
2518         return -ENOMEM;
2519 }
2520
2521 /**
2522  * ixgbevf_setup_all_rx_resources - allocate all queues Rx resources
2523  * @adapter: board private structure
2524  *
2525  * If this function returns with an error, then it's possible one or
2526  * more of the rings is populated (while the rest are not).  It is the
2527  * callers duty to clean those orphaned rings.
2528  *
2529  * Return 0 on success, negative on failure
2530  **/
2531 static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter)
2532 {
2533         int i, err = 0;
2534
2535         for (i = 0; i < adapter->num_rx_queues; i++) {
2536                 err = ixgbevf_setup_rx_resources(adapter, &adapter->rx_ring[i]);
2537                 if (!err)
2538                         continue;
2539                 hw_dbg(&adapter->hw,
2540                        "Allocation for Rx Queue %u failed\n", i);
2541                 break;
2542         }
2543         return err;
2544 }
2545
2546 /**
2547  * ixgbevf_free_rx_resources - Free Rx Resources
2548  * @adapter: board private structure
2549  * @rx_ring: ring to clean the resources from
2550  *
2551  * Free all receive software resources
2552  **/
2553 void ixgbevf_free_rx_resources(struct ixgbevf_adapter *adapter,
2554                                struct ixgbevf_ring *rx_ring)
2555 {
2556         struct pci_dev *pdev = adapter->pdev;
2557
2558         ixgbevf_clean_rx_ring(adapter, rx_ring);
2559
2560         vfree(rx_ring->rx_buffer_info);
2561         rx_ring->rx_buffer_info = NULL;
2562
2563         dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
2564                           rx_ring->dma);
2565
2566         rx_ring->desc = NULL;
2567 }
2568
2569 /**
2570  * ixgbevf_free_all_rx_resources - Free Rx Resources for All Queues
2571  * @adapter: board private structure
2572  *
2573  * Free all receive software resources
2574  **/
2575 static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter)
2576 {
2577         int i;
2578
2579         for (i = 0; i < adapter->num_rx_queues; i++)
2580                 if (adapter->rx_ring[i].desc)
2581                         ixgbevf_free_rx_resources(adapter,
2582                                                   &adapter->rx_ring[i]);
2583 }
2584
2585 /**
2586  * ixgbevf_open - Called when a network interface is made active
2587  * @netdev: network interface device structure
2588  *
2589  * Returns 0 on success, negative value on failure
2590  *
2591  * The open entry point is called when a network interface is made
2592  * active by the system (IFF_UP).  At this point all resources needed
2593  * for transmit and receive operations are allocated, the interrupt
2594  * handler is registered with the OS, the watchdog timer is started,
2595  * and the stack is notified that the interface is ready.
2596  **/
2597 static int ixgbevf_open(struct net_device *netdev)
2598 {
2599         struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2600         struct ixgbe_hw *hw = &adapter->hw;
2601         int err;
2602
2603         /* A previous failure to open the device because of a lack of
2604          * available MSIX vector resources may have reset the number
2605          * of msix vectors variable to zero.  The only way to recover
2606          * is to unload/reload the driver and hope that the system has
2607          * been able to recover some MSIX vector resources.
2608          */
2609         if (!adapter->num_msix_vectors)
2610                 return -ENOMEM;
2611
2612         /* disallow open during test */
2613         if (test_bit(__IXGBEVF_TESTING, &adapter->state))
2614                 return -EBUSY;
2615
2616         if (hw->adapter_stopped) {
2617                 ixgbevf_reset(adapter);
2618                 /* if adapter is still stopped then PF isn't up and
2619                  * the vf can't start. */
2620                 if (hw->adapter_stopped) {
2621                         err = IXGBE_ERR_MBX;
2622                         pr_err("Unable to start - perhaps the PF Driver isn't "
2623                                "up yet\n");
2624                         goto err_setup_reset;
2625                 }
2626         }
2627
2628         /* allocate transmit descriptors */
2629         err = ixgbevf_setup_all_tx_resources(adapter);
2630         if (err)
2631                 goto err_setup_tx;
2632
2633         /* allocate receive descriptors */
2634         err = ixgbevf_setup_all_rx_resources(adapter);
2635         if (err)
2636                 goto err_setup_rx;
2637
2638         ixgbevf_configure(adapter);
2639
2640         /*
2641          * Map the Tx/Rx rings to the vectors we were allotted.
2642          * Since request_irq() is called in this function, the rings
2643          * must be mapped to their vectors *before* up_complete.
2644          */
2645         ixgbevf_map_rings_to_vectors(adapter);
2646
2647         ixgbevf_up_complete(adapter);
2648
2649         /* clear any pending interrupts, may auto mask */
2650         IXGBE_READ_REG(hw, IXGBE_VTEICR);
2651         err = ixgbevf_request_irq(adapter);
2652         if (err)
2653                 goto err_req_irq;
2654
2655         ixgbevf_irq_enable(adapter);
2656
2657         return 0;
2658
2659 err_req_irq:
2660         ixgbevf_down(adapter);
2661 err_setup_rx:
2662         ixgbevf_free_all_rx_resources(adapter);
2663 err_setup_tx:
2664         ixgbevf_free_all_tx_resources(adapter);
2665         ixgbevf_reset(adapter);
2666
2667 err_setup_reset:
2668
2669         return err;
2670 }
2671
2672 /**
2673  * ixgbevf_close - Disables a network interface
2674  * @netdev: network interface device structure
2675  *
2676  * Returns 0, this is not allowed to fail
2677  *
2678  * The close entry point is called when an interface is de-activated
2679  * by the OS.  The hardware is still under the drivers control, but
2680  * needs to be disabled.  A global MAC reset is issued to stop the
2681  * hardware, and all transmit and receive resources are freed.
2682  **/
2683 static int ixgbevf_close(struct net_device *netdev)
2684 {
2685         struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2686
2687         ixgbevf_down(adapter);
2688         ixgbevf_free_irq(adapter);
2689
2690         ixgbevf_free_all_tx_resources(adapter);
2691         ixgbevf_free_all_rx_resources(adapter);
2692
2693         return 0;
2694 }
2695
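/* Called from the watchdog task: if a queue reset was requested (e.g.
 * after the PF changed the DCB queue layout), tear down the interrupt
 * scheme and rings and bring the interface back up with the new
 * configuration.
 */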
2696 static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter)
2697 {
2698         struct net_device *dev = adapter->netdev;
2699
2700         if (!(adapter->flags & IXGBEVF_FLAG_QUEUE_RESET_REQUESTED))
2701                 return;
2702
2703         adapter->flags &= ~IXGBEVF_FLAG_QUEUE_RESET_REQUESTED;
2704
2705         /* if interface is down do nothing */
2706         if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
2707             test_bit(__IXGBEVF_RESETTING, &adapter->state))
2708                 return;
2709
2710         /* Hardware has to reinitialize queues and interrupts to
2711          * match packet buffer alignment. Unfortunately, the
2712          * hardware is not flexible enough to do this dynamically.
2713          */
2714         if (netif_running(dev))
2715                 ixgbevf_close(dev);
2716
2717         ixgbevf_clear_interrupt_scheme(adapter);
2718         ixgbevf_init_interrupt_scheme(adapter);
2719
2720         if (netif_running(dev))
2721                 ixgbevf_open(dev);
2722 }
2723
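/**
 * ixgbevf_tx_ctxtdesc - write an advanced Tx context descriptor
 * @tx_ring: ring to place the descriptor on
 * @vlan_macip_lens: packed VLAN tag and MAC/IP header lengths
 * @type_tucmd: descriptor type and L4 protocol command bits
 * @mss_l4len_idx: MSS, L4 header length and context index
 *
 * A context descriptor carries the offload parameters (TSO, checksum)
 * that apply to the data descriptors following it.
 **/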
2724 static void ixgbevf_tx_ctxtdesc(struct ixgbevf_ring *tx_ring,
2725                                 u32 vlan_macip_lens, u32 type_tucmd,
2726                                 u32 mss_l4len_idx)
2727 {
2728         struct ixgbe_adv_tx_context_desc *context_desc;
2729         u16 i = tx_ring->next_to_use;
2730
2731         context_desc = IXGBEVF_TX_CTXTDESC(tx_ring, i);
2732
2733         i++;
2734         tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
2735
2736         /* set bits to identify this as an advanced context descriptor */
2737         type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
2738
2739         context_desc->vlan_macip_lens   = cpu_to_le32(vlan_macip_lens);
2740         context_desc->seqnum_seed       = 0;
2741         context_desc->type_tucmd_mlhl   = cpu_to_le32(type_tucmd);
2742         context_desc->mss_l4len_idx     = cpu_to_le32(mss_l4len_idx);
2743 }
2744
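/**
 * ixgbevf_tso - set up the context descriptor for TCP segmentation offload
 * @tx_ring: ring the descriptor is written to
 * @skb: packet to be segmented
 * @tx_flags: Tx flags carrying the VLAN information
 * @hdr_len: returns the combined L2/L3/L4 header length
 *
 * Returns 0 if the skb is not GSO, a negative errno if a cloned header
 * could not be expanded, or 1 once the TSO context descriptor is queued.
 **/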
2745 static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
2746                        struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
2747 {
2748         u32 vlan_macip_lens, type_tucmd;
2749         u32 mss_l4len_idx, l4len;
2750
2751         if (!skb_is_gso(skb))
2752                 return 0;
2753
2754         if (skb_header_cloned(skb)) {
2755                 int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2756                 if (err)
2757                         return err;
2758         }
2759
2760         /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
2761         type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
2762
2763         if (skb->protocol == htons(ETH_P_IP)) {
2764                 struct iphdr *iph = ip_hdr(skb);
2765                 iph->tot_len = 0;
2766                 iph->check = 0;
2767                 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2768                                                          iph->daddr, 0,
2769                                                          IPPROTO_TCP,
2770                                                          0);
2771                 type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
2772         } else if (skb_is_gso_v6(skb)) {
2773                 ipv6_hdr(skb)->payload_len = 0;
2774                 tcp_hdr(skb)->check =
2775                     ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2776                                      &ipv6_hdr(skb)->daddr,
2777                                      0, IPPROTO_TCP, 0);
2778         }
2779
2780         /* compute header lengths */
2781         l4len = tcp_hdrlen(skb);
2782         *hdr_len = skb_transport_offset(skb) + l4len;
2784
2785         /* mss_l4len_idx: use 1 as index for TSO */
2786         mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT;
2787         mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
2788         mss_l4len_idx |= 1 << IXGBE_ADVTXD_IDX_SHIFT;
2789
2790         /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
2791         vlan_macip_lens = skb_network_header_len(skb);
2792         vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
2793         vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
2794
2795         ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
2796                             type_tucmd, mss_l4len_idx);
2797
2798         return 1;
2799 }
2800
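/**
 * ixgbevf_tx_csum - set up the context descriptor for checksum offload
 * @tx_ring: ring the descriptor is written to
 * @skb: packet needing checksum insertion
 * @tx_flags: Tx flags carrying the VLAN information
 *
 * Returns true when the hardware has to insert an L4 checksum
 * (CHECKSUM_PARTIAL), false otherwise.
 **/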
2801 static bool ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
2802                             struct sk_buff *skb, u32 tx_flags)
2803 {
2804         u32 vlan_macip_lens = 0;
2805         u32 mss_l4len_idx = 0;
2806         u32 type_tucmd = 0;
2807
2808         if (skb->ip_summed == CHECKSUM_PARTIAL) {
2809                 u8 l4_hdr = 0;
2810                 switch (skb->protocol) {
2811                 case __constant_htons(ETH_P_IP):
2812                         vlan_macip_lens |= skb_network_header_len(skb);
2813                         type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
2814                         l4_hdr = ip_hdr(skb)->protocol;
2815                         break;
2816                 case __constant_htons(ETH_P_IPV6):
2817                         vlan_macip_lens |= skb_network_header_len(skb);
2818                         l4_hdr = ipv6_hdr(skb)->nexthdr;
2819                         break;
2820                 default:
2821                         if (unlikely(net_ratelimit())) {
2822                                 dev_warn(tx_ring->dev,
2823                                  "partial checksum but proto=%x!\n",
2824                                  skb->protocol);
2825                         }
2826                         break;
2827                 }
2828
2829                 switch (l4_hdr) {
2830                 case IPPROTO_TCP:
2831                         type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
2832                         mss_l4len_idx = tcp_hdrlen(skb) <<
2833                                         IXGBE_ADVTXD_L4LEN_SHIFT;
2834                         break;
2835                 case IPPROTO_SCTP:
2836                         type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
2837                         mss_l4len_idx = sizeof(struct sctphdr) <<
2838                                         IXGBE_ADVTXD_L4LEN_SHIFT;
2839                         break;
2840                 case IPPROTO_UDP:
2841                         mss_l4len_idx = sizeof(struct udphdr) <<
2842                                         IXGBE_ADVTXD_L4LEN_SHIFT;
2843                         break;
2844                 default:
2845                         if (unlikely(net_ratelimit())) {
2846                                 dev_warn(tx_ring->dev,
2847                                  "partial checksum but l4 proto=%x!\n",
2848                                  l4_hdr);
2849                         }
2850                         break;
2851                 }
2852         }
2853
2854         /* vlan_macip_lens: MACLEN, VLAN tag */
2855         vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
2856         vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
2857
2858         ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
2859                             type_tucmd, mss_l4len_idx);
2860
2861         return (skb->ip_summed == CHECKSUM_PARTIAL);
2862 }
2863
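/**
 * ixgbevf_tx_map - map the skb head and fragments for DMA
 * @tx_ring: ring the buffers belong to
 * @skb: packet to map
 * @tx_flags: Tx flags for the packet
 *
 * Splits the linear data and each page fragment into chunks of at most
 * IXGBE_MAX_DATA_PER_TXD and records one tx_buffer_info entry per chunk.
 * Returns the number of entries used; on a DMA mapping failure all
 * mappings made so far are unwound and a negative count is returned.
 **/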
2864 static int ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
2865                           struct sk_buff *skb, u32 tx_flags)
2866 {
2867         struct ixgbevf_tx_buffer *tx_buffer_info;
2868         unsigned int len;
2869         unsigned int total = skb->len;
2870         unsigned int offset = 0, size;
2871         int count = 0;
2872         unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
2873         unsigned int f;
2874         int i;
2875
2876         i = tx_ring->next_to_use;
2877
2878         len = min(skb_headlen(skb), total);
2879         while (len) {
2880                 tx_buffer_info = &tx_ring->tx_buffer_info[i];
2881                 size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);
2882
2883                 tx_buffer_info->length = size;
2884                 tx_buffer_info->mapped_as_page = false;
2885                 tx_buffer_info->dma = dma_map_single(tx_ring->dev,
2886                                                      skb->data + offset,
2887                                                      size, DMA_TO_DEVICE);
2888                 if (dma_mapping_error(tx_ring->dev, tx_buffer_info->dma))
2889                         goto dma_error;
2890
2891                 len -= size;
2892                 total -= size;
2893                 offset += size;
2894                 count++;
2895                 i++;
2896                 if (i == tx_ring->count)
2897                         i = 0;
2898         }
2899
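             /* map each paged fragment; fragments larger than
              * IXGBE_MAX_DATA_PER_TXD are split across several descriptors
              */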
2900         for (f = 0; f < nr_frags; f++) {
2901                 const struct skb_frag_struct *frag;
2902
2903                 frag = &skb_shinfo(skb)->frags[f];
2904                 len = min((unsigned int)skb_frag_size(frag), total);
2905                 offset = 0;
2906
2907                 while (len) {
2908                         tx_buffer_info = &tx_ring->tx_buffer_info[i];
2909                         size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);
2910
2911                         tx_buffer_info->length = size;
2912                         tx_buffer_info->dma =
2913                                 skb_frag_dma_map(tx_ring->dev, frag,
2914                                                  offset, size, DMA_TO_DEVICE);
2915                         if (dma_mapping_error(tx_ring->dev,
2916                                               tx_buffer_info->dma))
2917                                 goto dma_error;
2918                         tx_buffer_info->mapped_as_page = true;
2919
2920                         len -= size;
2921                         total -= size;
2922                         offset += size;
2923                         count++;
2924                         i++;
2925                         if (i == tx_ring->count)
2926                                 i = 0;
2927                 }
2928                 if (total == 0)
2929                         break;
2930         }
2931
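             /* store the skb on the last buffer used so the cleanup path can
              * free it once the final descriptor is written back
              */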
2932         if (i == 0)
2933                 i = tx_ring->count - 1;
2934         else
2935                 i = i - 1;
2936         tx_ring->tx_buffer_info[i].skb = skb;
2937
2938         return count;
2939
2940 dma_error:
2941         dev_err(tx_ring->dev, "TX DMA map failed\n");
2942
2943         /* clear timestamp and dma mappings for failed tx_buffer_info map */
2944         tx_buffer_info->dma = 0;
2945         count--;
2946
2947         /* clear timestamp and dma mappings for remaining portion of packet */
2948         while (count >= 0) {
2949                 count--;
2950                 i--;
2951                 if (i < 0)
2952                         i += tx_ring->count;
2953                 tx_buffer_info = &tx_ring->tx_buffer_info[i];
2954                 ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
2955         }
2956
2957         return count;
2958 }
2959
2960 static void ixgbevf_tx_queue(struct ixgbevf_ring *tx_ring, int tx_flags,
2961                              int count, unsigned int first, u32 paylen,
2962                              u8 hdr_len)
2963 {
2964         union ixgbe_adv_tx_desc *tx_desc = NULL;
2965         struct ixgbevf_tx_buffer *tx_buffer_info;
2966         u32 olinfo_status = 0, cmd_type_len = 0;
2967         unsigned int i;
2968
2969         u32 txd_cmd = IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS | IXGBE_TXD_CMD_IFCS;
2970
2971         cmd_type_len |= IXGBE_ADVTXD_DTYP_DATA;
2972
2973         cmd_type_len |= IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;
2974
2975         if (tx_flags & IXGBE_TX_FLAGS_VLAN)
2976                 cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
2977
2978         if (tx_flags & IXGBE_TX_FLAGS_CSUM)
2979                 olinfo_status |= IXGBE_ADVTXD_POPTS_TXSM;
2980
2981         if (tx_flags & IXGBE_TX_FLAGS_TSO) {
2982                 cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
2983
2984                 /* use index 1 context for tso */
2985                 olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
2986                 if (tx_flags & IXGBE_TX_FLAGS_IPV4)
2987                         olinfo_status |= IXGBE_ADVTXD_POPTS_IXSM;
2988         }
2989
2990         /*
2991          * Check Context must be set if Tx switching is enabled, which it
2992          * always is when virtual functions are running.
2993          */
2994         olinfo_status |= IXGBE_ADVTXD_CC;
2995
2996         olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT);
2997
2998         i = tx_ring->next_to_use;
2999         while (count--) {
3000                 tx_buffer_info = &tx_ring->tx_buffer_info[i];
3001                 tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
3002                 tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma);
3003                 tx_desc->read.cmd_type_len =
3004                         cpu_to_le32(cmd_type_len | tx_buffer_info->length);
3005                 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
3006                 i++;
3007                 if (i == tx_ring->count)
3008                         i = 0;
3009         }
3010
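             /* only the last descriptor of the frame carries EOP and RS, so
              * the hardware reports completion once for the whole packet
              */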
3011         tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd);
3012
3013         tx_ring->tx_buffer_info[first].time_stamp = jiffies;
3014
3015         /* Force memory writes to complete before letting h/w
3016          * know there are new descriptors to fetch.  (Only
3017          * applicable for weak-ordered memory model archs,
3018          * such as IA-64).
3019          */
3020         wmb();
3021
3022         tx_ring->tx_buffer_info[first].next_to_watch = tx_desc;
3023         tx_ring->next_to_use = i;
3024 }
3025
3026 static int __ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
3027 {
3028         struct ixgbevf_adapter *adapter = netdev_priv(tx_ring->netdev);
3029
3030         netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
3031         /* Herbert's original patch had:
3032          *  smp_mb__after_netif_stop_queue();
3033          * but since that doesn't exist yet, just open code it. */
3034         smp_mb();
3035
3036         /* We need to check again in a case another CPU has just
3037          * made room available. */
3038         if (likely(ixgbevf_desc_unused(tx_ring) < size))
3039                 return -EBUSY;
3040
3041         /* A reprieve! - use start_queue because it doesn't call schedule */
3042         netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
3043         ++adapter->restart_queue;
3044         return 0;
3045 }
3046
3047 static int ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
3048 {
3049         if (likely(ixgbevf_desc_unused(tx_ring) >= size))
3050                 return 0;
3051         return __ixgbevf_maybe_stop_tx(tx_ring, size);
3052 }
3053
3054 static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
3055 {
3056         struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3057         struct ixgbevf_ring *tx_ring;
3058         unsigned int first;
3059         unsigned int tx_flags = 0;
3060         u8 hdr_len = 0;
3061         int r_idx = 0, tso;
3062         u16 count = TXD_USE_COUNT(skb_headlen(skb));
3063 #if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
3064         unsigned short f;
3065 #endif
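             /* frames addressed to the 01:80:C2:00:00:0x link-local block
              * (pause frames, LLDP, etc.) are dropped rather than transmitted
              */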
3066         u8 *dst_mac = skb_header_pointer(skb, 0, 0, NULL);
3067         if (!dst_mac || is_link_local_ether_addr(dst_mac)) {
3068                 dev_kfree_skb(skb);
3069                 return NETDEV_TX_OK;
3070         }
3071
3072         tx_ring = &adapter->tx_ring[r_idx];
3073
3074         /*
3075          * need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD,
3076          *       + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD,
3077          *       + 2 desc gap to keep tail from touching head,
3078          *       + 1 desc for context descriptor,
3079          * otherwise try next time
3080          */
3081 #if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
3082         for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
3083                 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
3084 #else
3085         count += skb_shinfo(skb)->nr_frags;
3086 #endif
3087         if (ixgbevf_maybe_stop_tx(tx_ring, count + 3)) {
3088                 adapter->tx_busy++;
3089                 return NETDEV_TX_BUSY;
3090         }
3091
3092         if (vlan_tx_tag_present(skb)) {
3093                 tx_flags |= vlan_tx_tag_get(skb);
3094                 tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
3095                 tx_flags |= IXGBE_TX_FLAGS_VLAN;
3096         }
3097
3098         first = tx_ring->next_to_use;
3099
3100         if (skb->protocol == htons(ETH_P_IP))
3101                 tx_flags |= IXGBE_TX_FLAGS_IPV4;
3102         tso = ixgbevf_tso(tx_ring, skb, tx_flags, &hdr_len);
3103         if (tso < 0) {
3104                 dev_kfree_skb_any(skb);
3105                 return NETDEV_TX_OK;
3106         }
3107
3108         if (tso)
3109                 tx_flags |= IXGBE_TX_FLAGS_TSO | IXGBE_TX_FLAGS_CSUM;
3110         else if (ixgbevf_tx_csum(tx_ring, skb, tx_flags))
3111                 tx_flags |= IXGBE_TX_FLAGS_CSUM;
3112
3113         ixgbevf_tx_queue(tx_ring, tx_flags,
3114                          ixgbevf_tx_map(tx_ring, skb, tx_flags),
3115                          first, skb->len, hdr_len);
3116
3117         writel(tx_ring->next_to_use, tx_ring->tail);
3118
3119         ixgbevf_maybe_stop_tx(tx_ring, DESC_NEEDED);
3120
3121         return NETDEV_TX_OK;
3122 }
3123
3124 /**
3125  * ixgbevf_set_mac - Change the Ethernet Address of the NIC
3126  * @netdev: network interface device structure
3127  * @p: pointer to an address structure
3128  *
3129  * Returns 0 on success, negative on failure
3130  **/
3131 static int ixgbevf_set_mac(struct net_device *netdev, void *p)
3132 {
3133         struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3134         struct ixgbe_hw *hw = &adapter->hw;
3135         struct sockaddr *addr = p;
3136
3137         if (!is_valid_ether_addr(addr->sa_data))
3138                 return -EADDRNOTAVAIL;
3139
3140         memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
3141         memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
3142
3143         spin_lock_bh(&adapter->mbx_lock);
3144
3145         hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
3146
3147         spin_unlock_bh(&adapter->mbx_lock);
3148
3149         return 0;
3150 }
3151
3152 /**
3153  * ixgbevf_change_mtu - Change the Maximum Transfer Unit
3154  * @netdev: network interface device structure
3155  * @new_mtu: new value for maximum frame size
3156  *
3157  * Returns 0 on success, negative on failure
3158  **/
3159 static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
3160 {
3161         struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3162         int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
3163         int max_possible_frame = MAXIMUM_ETHERNET_VLAN_SIZE;
3164
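             /* jumbo frames are only allowed when the PF supports mailbox
              * API 1.1 or the device is an X540 VF
              */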
3165         switch (adapter->hw.api_version) {
3166         case ixgbe_mbox_api_11:
3167                 max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
3168                 break;
3169         default:
3170                 if (adapter->hw.mac.type == ixgbe_mac_X540_vf)
3171                         max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
3172                 break;
3173         }
3174
3175         /* MTU < 68 is an error and causes problems on some kernels */
3176         if ((new_mtu < 68) || (max_frame > max_possible_frame))
3177                 return -EINVAL;
3178
3179         hw_dbg(&adapter->hw, "changing MTU from %d to %d\n",
3180                netdev->mtu, new_mtu);
3181         /* must set new MTU before calling down or up */
3182         netdev->mtu = new_mtu;
3183
3184         if (netif_running(netdev))
3185                 ixgbevf_reinit_locked(adapter);
3186
3187         return 0;
3188 }
3189
3190 static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state)
3191 {
3192         struct net_device *netdev = pci_get_drvdata(pdev);
3193         struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3194 #ifdef CONFIG_PM
3195         int retval = 0;
3196 #endif
3197
3198         netif_device_detach(netdev);
3199
3200         if (netif_running(netdev)) {
3201                 rtnl_lock();
3202                 ixgbevf_down(adapter);
3203                 ixgbevf_free_irq(adapter);
3204                 ixgbevf_free_all_tx_resources(adapter);
3205                 ixgbevf_free_all_rx_resources(adapter);
3206                 rtnl_unlock();
3207         }
3208
3209         ixgbevf_clear_interrupt_scheme(adapter);
3210
3211 #ifdef CONFIG_PM
3212         retval = pci_save_state(pdev);
3213         if (retval)
3214                 return retval;
3215
3216 #endif
3217         pci_disable_device(pdev);
3218
3219         return 0;
3220 }
3221
3222 #ifdef CONFIG_PM
3223 static int ixgbevf_resume(struct pci_dev *pdev)
3224 {
3225         struct ixgbevf_adapter *adapter = pci_get_drvdata(pdev);
3226         struct net_device *netdev = adapter->netdev;
3227         int err;
3228
3229         pci_set_power_state(pdev, PCI_D0);
3230         pci_restore_state(pdev);
3231         /*
3232          * pci_restore_state clears dev->state_saved so call
3233          * pci_save_state to restore it.
3234          */
3235         pci_save_state(pdev);
3236
3237         err = pci_enable_device_mem(pdev);
3238         if (err) {
3239                 dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n");
3240                 return err;
3241         }
3242         pci_set_master(pdev);
3243
3244         ixgbevf_reset(adapter);
3245
3246         rtnl_lock();
3247         err = ixgbevf_init_interrupt_scheme(adapter);
3248         rtnl_unlock();
3249         if (err) {
3250                 dev_err(&pdev->dev, "Cannot initialize interrupts\n");
3251                 return err;
3252         }
3253
3254         if (netif_running(netdev)) {
3255                 err = ixgbevf_open(netdev);
3256                 if (err)
3257                         return err;
3258         }
3259
3260         netif_device_attach(netdev);
3261
3262         return err;
3263 }
3264
3265 #endif /* CONFIG_PM */
3266 static void ixgbevf_shutdown(struct pci_dev *pdev)
3267 {
3268         ixgbevf_suspend(pdev, PMSG_SUSPEND);
3269 }
3270
3271 static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev,
3272                                                 struct rtnl_link_stats64 *stats)
3273 {
3274         struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3275         unsigned int start;
3276         u64 bytes, packets;
3277         const struct ixgbevf_ring *ring;
3278         int i;
3279
3280         ixgbevf_update_stats(adapter);
3281
3282         stats->multicast = adapter->stats.vfmprc - adapter->stats.base_vfmprc;
3283
3284         for (i = 0; i < adapter->num_rx_queues; i++) {
3285                 ring = &adapter->rx_ring[i];
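                     /* loop until a consistent snapshot of the 64-bit ring
                      * counters has been read
                      */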
3286                 do {
3287                         start = u64_stats_fetch_begin_bh(&ring->syncp);
3288                         bytes = ring->total_bytes;
3289                         packets = ring->total_packets;
3290                 } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
3291                 stats->rx_bytes += bytes;
3292                 stats->rx_packets += packets;
3293         }
3294
3295         for (i = 0; i < adapter->num_tx_queues; i++) {
3296                 ring = &adapter->tx_ring[i];
3297                 do {
3298                         start = u64_stats_fetch_begin_bh(&ring->syncp);
3299                         bytes = ring->total_bytes;
3300                         packets = ring->total_packets;
3301                 } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
3302                 stats->tx_bytes += bytes;
3303                 stats->tx_packets += packets;
3304         }
3305
3306         return stats;
3307 }
3308
3309 static const struct net_device_ops ixgbevf_netdev_ops = {
3310         .ndo_open               = ixgbevf_open,
3311         .ndo_stop               = ixgbevf_close,
3312         .ndo_start_xmit         = ixgbevf_xmit_frame,
3313         .ndo_set_rx_mode        = ixgbevf_set_rx_mode,
3314         .ndo_get_stats64        = ixgbevf_get_stats,
3315         .ndo_validate_addr      = eth_validate_addr,
3316         .ndo_set_mac_address    = ixgbevf_set_mac,
3317         .ndo_change_mtu         = ixgbevf_change_mtu,
3318         .ndo_tx_timeout         = ixgbevf_tx_timeout,
3319         .ndo_vlan_rx_add_vid    = ixgbevf_vlan_rx_add_vid,
3320         .ndo_vlan_rx_kill_vid   = ixgbevf_vlan_rx_kill_vid,
3321 #ifdef CONFIG_NET_RX_BUSY_POLL
3322         .ndo_busy_poll          = ixgbevf_busy_poll_recv,
3323 #endif
3324 };
3325
3326 static void ixgbevf_assign_netdev_ops(struct net_device *dev)
3327 {
3328         dev->netdev_ops = &ixgbevf_netdev_ops;
3329         ixgbevf_set_ethtool_ops(dev);
3330         dev->watchdog_timeo = 5 * HZ;
3331 }
3332
3333 /**
3334  * ixgbevf_probe - Device Initialization Routine
3335  * @pdev: PCI device information struct
3336  * @ent: entry in ixgbevf_pci_tbl
3337  *
3338  * Returns 0 on success, negative on failure
3339  *
3340  * ixgbevf_probe initializes an adapter identified by a pci_dev structure.
3341  * The OS initialization, configuring of the adapter private structure,
3342  * and a hardware reset occur.
3343  **/
3344 static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3345 {
3346         struct net_device *netdev;
3347         struct ixgbevf_adapter *adapter = NULL;
3348         struct ixgbe_hw *hw = NULL;
3349         const struct ixgbevf_info *ii = ixgbevf_info_tbl[ent->driver_data];
3350         static int cards_found;
3351         int err, pci_using_dac;
3352
3353         err = pci_enable_device(pdev);
3354         if (err)
3355                 return err;
3356
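             /* prefer a 64-bit DMA mask and fall back to 32-bit if the
              * platform cannot provide it
              */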
3357         if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
3358                 pci_using_dac = 1;
3359         } else {
3360                 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
3361                 if (err) {
3362                         dev_err(&pdev->dev,
3363                                 "No usable DMA configuration, aborting\n");
3364                         goto err_dma;
3365                 }
3366                 pci_using_dac = 0;
3367         }
3368
3369         err = pci_request_regions(pdev, ixgbevf_driver_name);
3370         if (err) {
3371                 dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err);
3372                 goto err_pci_reg;
3373         }
3374
3375         pci_set_master(pdev);
3376
3377         netdev = alloc_etherdev_mq(sizeof(struct ixgbevf_adapter),
3378                                    MAX_TX_QUEUES);
3379         if (!netdev) {
3380                 err = -ENOMEM;
3381                 goto err_alloc_etherdev;
3382         }
3383
3384         SET_NETDEV_DEV(netdev, &pdev->dev);
3385
3386         pci_set_drvdata(pdev, netdev);
3387         adapter = netdev_priv(netdev);
3388
3389         adapter->netdev = netdev;
3390         adapter->pdev = pdev;
3391         hw = &adapter->hw;
3392         hw->back = adapter;
3393         adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
3394
3395         /*
3396          * call save state here in standalone driver because it relies on
3397          * adapter struct to exist, and needs to call netdev_priv
3398          */
3399         pci_save_state(pdev);
3400
3401         hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
3402                               pci_resource_len(pdev, 0));
3403         if (!hw->hw_addr) {
3404                 err = -EIO;
3405                 goto err_ioremap;
3406         }
3407
3408         ixgbevf_assign_netdev_ops(netdev);
3409
3410         adapter->bd_number = cards_found;
3411
3412         /* Setup hw api */
3413         memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
3414         hw->mac.type  = ii->mac;
3415
3416         memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops,
3417                sizeof(struct ixgbe_mbx_operations));
3418
3419         /* setup the private structure */
3420         err = ixgbevf_sw_init(adapter);
3421         if (err)
3422                 goto err_sw_init;
3423
3424         /* The HW MAC address was set and/or determined in sw_init */
3425         if (!is_valid_ether_addr(netdev->dev_addr)) {
3426                 pr_err("invalid MAC address\n");
3427                 err = -EIO;
3428                 goto err_sw_init;
3429         }
3430
3431         netdev->hw_features = NETIF_F_SG |
3432                            NETIF_F_IP_CSUM |
3433                            NETIF_F_IPV6_CSUM |
3434                            NETIF_F_TSO |
3435                            NETIF_F_TSO6 |
3436                            NETIF_F_RXCSUM;
3437
3438         netdev->features = netdev->hw_features |
3439                            NETIF_F_HW_VLAN_CTAG_TX |
3440                            NETIF_F_HW_VLAN_CTAG_RX |
3441                            NETIF_F_HW_VLAN_CTAG_FILTER;
3442
3443         netdev->vlan_features |= NETIF_F_TSO;
3444         netdev->vlan_features |= NETIF_F_TSO6;
3445         netdev->vlan_features |= NETIF_F_IP_CSUM;
3446         netdev->vlan_features |= NETIF_F_IPV6_CSUM;
3447         netdev->vlan_features |= NETIF_F_SG;
3448
3449         if (pci_using_dac)
3450                 netdev->features |= NETIF_F_HIGHDMA;
3451
3452         netdev->priv_flags |= IFF_UNICAST_FLT;
3453
3454         init_timer(&adapter->watchdog_timer);
3455         adapter->watchdog_timer.function = ixgbevf_watchdog;
3456         adapter->watchdog_timer.data = (unsigned long)adapter;
3457
3458         INIT_WORK(&adapter->reset_task, ixgbevf_reset_task);
3459         INIT_WORK(&adapter->watchdog_task, ixgbevf_watchdog_task);
3460
3461         err = ixgbevf_init_interrupt_scheme(adapter);
3462         if (err)
3463                 goto err_sw_init;
3464
3465         strcpy(netdev->name, "eth%d");
3466
3467         err = register_netdev(netdev);
3468         if (err)
3469                 goto err_register;
3470
3471         netif_carrier_off(netdev);
3472
3473         ixgbevf_init_last_counter_stats(adapter);
3474
3475         /* print the MAC address */
3476         hw_dbg(hw, "%pM\n", netdev->dev_addr);
3477
3478         hw_dbg(hw, "MAC: %d\n", hw->mac.type);
3479
3480         hw_dbg(hw, "Intel(R) 82599 Virtual Function\n");
3481         cards_found++;
3482         return 0;
3483
3484 err_register:
3485         ixgbevf_clear_interrupt_scheme(adapter);
3486 err_sw_init:
3487         ixgbevf_reset_interrupt_capability(adapter);
3488         iounmap(hw->hw_addr);
3489 err_ioremap:
3490         free_netdev(netdev);
3491 err_alloc_etherdev:
3492         pci_release_regions(pdev);
3493 err_pci_reg:
3494 err_dma:
3495         pci_disable_device(pdev);
3496         return err;
3497 }
3498
3499 /**
3500  * ixgbevf_remove - Device Removal Routine
3501  * @pdev: PCI device information struct
3502  *
3503  * ixgbevf_remove is called by the PCI subsystem to alert the driver
3504  * that it should release a PCI device.  This could be caused by a
3505  * Hot-Plug event, or because the driver is going to be removed from
3506  * memory.
3507  **/
3508 static void ixgbevf_remove(struct pci_dev *pdev)
3509 {
3510         struct net_device *netdev = pci_get_drvdata(pdev);
3511         struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3512
3513         set_bit(__IXGBEVF_DOWN, &adapter->state);
3514
3515         del_timer_sync(&adapter->watchdog_timer);
3516
3517         cancel_work_sync(&adapter->reset_task);
3518         cancel_work_sync(&adapter->watchdog_task);
3519
3520         if (netdev->reg_state == NETREG_REGISTERED)
3521                 unregister_netdev(netdev);
3522
3523         ixgbevf_clear_interrupt_scheme(adapter);
3524         ixgbevf_reset_interrupt_capability(adapter);
3525
3526         iounmap(adapter->hw.hw_addr);
3527         pci_release_regions(pdev);
3528
3529         hw_dbg(&adapter->hw, "Remove complete\n");
3530
3531         kfree(adapter->tx_ring);
3532         kfree(adapter->rx_ring);
3533
3534         free_netdev(netdev);
3535
3536         pci_disable_device(pdev);
3537 }
3538
3539 /**
3540  * ixgbevf_io_error_detected - called when PCI error is detected
3541  * @pdev: Pointer to PCI device
3542  * @state: The current pci connection state
3543  *
3544  * This function is called after a PCI bus error affecting
3545  * this device has been detected.
3546  */
3547 static pci_ers_result_t ixgbevf_io_error_detected(struct pci_dev *pdev,
3548                                                   pci_channel_state_t state)
3549 {
3550         struct net_device *netdev = pci_get_drvdata(pdev);
3551         struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3552
3553         netif_device_detach(netdev);
3554
3555         if (state == pci_channel_io_perm_failure)
3556                 return PCI_ERS_RESULT_DISCONNECT;
3557
3558         if (netif_running(netdev))
3559                 ixgbevf_down(adapter);
3560
3561         pci_disable_device(pdev);
3562
3563         /* Request a slot reset. */
3564         return PCI_ERS_RESULT_NEED_RESET;
3565 }
3566
3567 /**
3568  * ixgbevf_io_slot_reset - called after the pci bus has been reset.
3569  * @pdev: Pointer to PCI device
3570  *
3571  * Restart the card from scratch, as if from a cold-boot. Implementation
3572  * resembles the first-half of the ixgbevf_resume routine.
3573  */
3574 static pci_ers_result_t ixgbevf_io_slot_reset(struct pci_dev *pdev)
3575 {
3576         struct net_device *netdev = pci_get_drvdata(pdev);
3577         struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3578
3579         if (pci_enable_device_mem(pdev)) {
3580                 dev_err(&pdev->dev,
3581                         "Cannot re-enable PCI device after reset.\n");
3582                 return PCI_ERS_RESULT_DISCONNECT;
3583         }
3584
3585         pci_set_master(pdev);
3586
3587         ixgbevf_reset(adapter);
3588
3589         return PCI_ERS_RESULT_RECOVERED;
3590 }
3591
3592 /**
3593  * ixgbevf_io_resume - called when traffic can start flowing again.
3594  * @pdev: Pointer to PCI device
3595  *
3596  * This callback is called when the error recovery driver tells us that
3597  * it's OK to resume normal operation. Implementation resembles the
3598  * second-half of the ixgbevf_resume routine.
3599  */
3600 static void ixgbevf_io_resume(struct pci_dev *pdev)
3601 {
3602         struct net_device *netdev = pci_get_drvdata(pdev);
3603         struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3604
3605         if (netif_running(netdev))
3606                 ixgbevf_up(adapter);
3607
3608         netif_device_attach(netdev);
3609 }
3610
3611 /* PCI Error Recovery (ERS) */
3612 static const struct pci_error_handlers ixgbevf_err_handler = {
3613         .error_detected = ixgbevf_io_error_detected,
3614         .slot_reset = ixgbevf_io_slot_reset,
3615         .resume = ixgbevf_io_resume,
3616 };
3617
3618 static struct pci_driver ixgbevf_driver = {
3619         .name     = ixgbevf_driver_name,
3620         .id_table = ixgbevf_pci_tbl,
3621         .probe    = ixgbevf_probe,
3622         .remove   = ixgbevf_remove,
3623 #ifdef CONFIG_PM
3624         /* Power Management Hooks */
3625         .suspend  = ixgbevf_suspend,
3626         .resume   = ixgbevf_resume,
3627 #endif
3628         .shutdown = ixgbevf_shutdown,
3629         .err_handler = &ixgbevf_err_handler
3630 };
3631
3632 /**
3633  * ixgbevf_init_module - Driver Registration Routine
3634  *
3635  * ixgbevf_init_module is the first routine called when the driver is
3636  * loaded. All it does is register with the PCI subsystem.
3637  **/
3638 static int __init ixgbevf_init_module(void)
3639 {
3640         int ret;
3641         pr_info("%s - version %s\n", ixgbevf_driver_string,
3642                 ixgbevf_driver_version);
3643
3644         pr_info("%s\n", ixgbevf_copyright);
3645
3646         ret = pci_register_driver(&ixgbevf_driver);
3647         return ret;
3648 }
3649
3650 module_init(ixgbevf_init_module);
3651
3652 /**
3653  * ixgbevf_exit_module - Driver Exit Cleanup Routine
3654  *
3655  * ixgbevf_exit_module is called just before the driver is removed
3656  * from memory.
3657  **/
3658 static void __exit ixgbevf_exit_module(void)
3659 {
3660         pci_unregister_driver(&ixgbevf_driver);
3661 }
3662
3663 #ifdef DEBUG
3664 /**
3665  * ixgbevf_get_hw_dev_name - return device name string
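      * @hw: pointer to the hardware structure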
3666  * used by hardware layer to print debugging information
3667  **/
3668 char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw)
3669 {
3670         struct ixgbevf_adapter *adapter = hw->back;
3671         return adapter->netdev->name;
3672 }
3673
3674 #endif
3675 module_exit(ixgbevf_exit_module);
3676
3677 /* ixgbevf_main.c */