/*******************************************************************************

  Intel(R) Gigabit Ethernet Linux driver
  Copyright(c) 2007-2009 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/netdevice.h>
#include <linux/ipv6.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/net_tstamp.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/aer.h>
#include <linux/dca.h>
#include "igb.h"
#define DRV_VERSION "1.3.16-k2"
char igb_driver_name[] = "igb";
char igb_driver_version[] = DRV_VERSION;
static const char igb_driver_string[] =
				"Intel(R) Gigabit Ethernet Network Driver";
static const char igb_copyright[] = "Copyright (c) 2007-2009 Intel Corporation.";

static const struct e1000_info *igb_info_tbl[] = {
	[board_82575] = &e1000_82575_info,
};
static struct pci_device_id igb_pci_tbl[] = {
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER), board_82575 },
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, igb_pci_tbl);
void igb_reset(struct igb_adapter *);
static int igb_setup_all_tx_resources(struct igb_adapter *);
static int igb_setup_all_rx_resources(struct igb_adapter *);
static void igb_free_all_tx_resources(struct igb_adapter *);
static void igb_free_all_rx_resources(struct igb_adapter *);
void igb_update_stats(struct igb_adapter *);
static int igb_probe(struct pci_dev *, const struct pci_device_id *);
static void __devexit igb_remove(struct pci_dev *pdev);
static int igb_sw_init(struct igb_adapter *);
static int igb_open(struct net_device *);
static int igb_close(struct net_device *);
static void igb_configure_tx(struct igb_adapter *);
static void igb_configure_rx(struct igb_adapter *);
static void igb_setup_rctl(struct igb_adapter *);
static void igb_clean_all_tx_rings(struct igb_adapter *);
static void igb_clean_all_rx_rings(struct igb_adapter *);
static void igb_clean_tx_ring(struct igb_ring *);
static void igb_clean_rx_ring(struct igb_ring *);
static void igb_set_multi(struct net_device *);
static void igb_update_phy_info(unsigned long);
static void igb_watchdog(unsigned long);
static void igb_watchdog_task(struct work_struct *);
static int igb_xmit_frame_ring_adv(struct sk_buff *, struct net_device *,
				   struct igb_ring *);
static int igb_xmit_frame_adv(struct sk_buff *skb, struct net_device *);
static struct net_device_stats *igb_get_stats(struct net_device *);
static int igb_change_mtu(struct net_device *, int);
static int igb_set_mac(struct net_device *, void *);
static irqreturn_t igb_intr(int irq, void *);
static irqreturn_t igb_intr_msi(int irq, void *);
static irqreturn_t igb_msix_other(int irq, void *);
static irqreturn_t igb_msix_rx(int irq, void *);
static irqreturn_t igb_msix_tx(int irq, void *);
#ifdef CONFIG_IGB_DCA
static void igb_update_rx_dca(struct igb_ring *);
static void igb_update_tx_dca(struct igb_ring *);
static void igb_setup_dca(struct igb_adapter *);
#endif /* CONFIG_IGB_DCA */
static bool igb_clean_tx_irq(struct igb_ring *);
static int igb_poll(struct napi_struct *, int);
static bool igb_clean_rx_irq_adv(struct igb_ring *, int *, int);
static void igb_alloc_rx_buffers_adv(struct igb_ring *, int);
static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
static void igb_tx_timeout(struct net_device *);
static void igb_reset_task(struct work_struct *);
static void igb_vlan_rx_register(struct net_device *, struct vlan_group *);
static void igb_vlan_rx_add_vid(struct net_device *, u16);
static void igb_vlan_rx_kill_vid(struct net_device *, u16);
static void igb_restore_vlan(struct igb_adapter *);
static void igb_ping_all_vfs(struct igb_adapter *);
static void igb_msg_task(struct igb_adapter *);
static int igb_rcv_msg_from_vf(struct igb_adapter *, u32);
static inline void igb_set_rah_pool(struct e1000_hw *, int, int);
static void igb_set_mc_list_pools(struct igb_adapter *, int, u16);
static void igb_vmm_control(struct igb_adapter *);
static inline void igb_set_vmolr(struct e1000_hw *, int);
static inline int igb_set_vf_rlpml(struct igb_adapter *, int, int);
static int igb_set_vf_mac(struct igb_adapter *adapter, int, unsigned char *);
static void igb_restore_vf_multicasts(struct igb_adapter *adapter);
#ifdef CONFIG_PM
static int igb_suspend(struct pci_dev *, pm_message_t);
static int igb_resume(struct pci_dev *);
#endif
static void igb_shutdown(struct pci_dev *);
#ifdef CONFIG_IGB_DCA
static int igb_notify_dca(struct notifier_block *, unsigned long, void *);
static struct notifier_block dca_notifier = {
	.notifier_call	= igb_notify_dca,
	.next		= NULL,
	.priority	= 0
};
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
/* for netdump / net console */
static void igb_netpoll(struct net_device *);
#endif
#ifdef CONFIG_PCI_IOV
static ssize_t igb_set_num_vfs(struct device *, struct device_attribute *,
                               const char *, size_t);
static ssize_t igb_show_num_vfs(struct device *, struct device_attribute *,
                                char *);
DEVICE_ATTR(num_vfs, S_IRUGO | S_IWUSR, igb_show_num_vfs, igb_set_num_vfs);
#endif
static pci_ers_result_t igb_io_error_detected(struct pci_dev *,
		     pci_channel_state_t);
static pci_ers_result_t igb_io_slot_reset(struct pci_dev *);
static void igb_io_resume(struct pci_dev *);

static struct pci_error_handlers igb_err_handler = {
	.error_detected = igb_io_error_detected,
	.slot_reset = igb_io_slot_reset,
	.resume = igb_io_resume,
};
static struct pci_driver igb_driver = {
	.name     = igb_driver_name,
	.id_table = igb_pci_tbl,
	.probe    = igb_probe,
	.remove   = __devexit_p(igb_remove),
#ifdef CONFIG_PM
	/* Power Management Hooks */
	.suspend  = igb_suspend,
	.resume   = igb_resume,
#endif
	.shutdown = igb_shutdown,
	.err_handler = &igb_err_handler
};
static int global_quad_port_a; /* global quad port a indication */

MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
/**
 * Scale the NIC clock cycle by a large factor so that
 * relatively small clock corrections can be added or
 * subtracted at each clock tick. The drawbacks of a
 * large factor are a) that the clock register overflows
 * more quickly (not such a big deal) and b) that the
 * increment per tick has to fit into 24 bits.
 *
 * Note that
 *   TIMINCA = IGB_TSYNC_CYCLE_TIME_IN_NANOSECONDS *
 *             IGB_TSYNC_SCALE
 *   TIMINCA += TIMINCA * adjustment [ppm] / 1e9
 *
 * The base scale factor is intentionally a power of two
 * so that the division in %struct timecounter can be done with
 * a shift.
 */
#define IGB_TSYNC_SHIFT (19)
#define IGB_TSYNC_SCALE (1<<IGB_TSYNC_SHIFT)
/**
 * The duration of one clock cycle of the NIC.
 *
 * @todo This hard-coded value is part of the specification and might change
 * in future hardware revisions. Add revision check.
 */
#define IGB_TSYNC_CYCLE_TIME_IN_NANOSECONDS 16

#if (IGB_TSYNC_SCALE * IGB_TSYNC_CYCLE_TIME_IN_NANOSECONDS) >= (1<<24)
# error IGB_TSYNC_SCALE and/or IGB_TSYNC_CYCLE_TIME_IN_NANOSECONDS are too large to fit into TIMINCA
#endif
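
/*
 * Worked example with the constants above: TIMINCA =
 * IGB_TSYNC_CYCLE_TIME_IN_NANOSECONDS * IGB_TSYNC_SCALE = 16 * 2^19 =
 * 8388608 = 0x800000, which just fits into the 24-bit TIMINCA increment
 * field (0x800000 < 1<<24). A 1 ppm rate correction would then add or
 * subtract 8388608 / 1e6, roughly 8 scaled units per tick.
 */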
/**
 * igb_read_clock - read raw cycle counter (to be used by time counter)
 */
static cycle_t igb_read_clock(const struct cyclecounter *tc)
{
	struct igb_adapter *adapter =
		container_of(tc, struct igb_adapter, cycles);
	struct e1000_hw *hw = &adapter->hw;
	u64 stamp;

	stamp = rd32(E1000_SYSTIML);
	stamp |= (u64)rd32(E1000_SYSTIMH) << 32ULL;

	return stamp;
}
/**
 * igb_get_hw_dev_name - return device name string
 * used by hardware layer to print debugging information
 **/
char *igb_get_hw_dev_name(struct e1000_hw *hw)
{
	struct igb_adapter *adapter = hw->back;
	return adapter->netdev->name;
}
/**
 * igb_get_time_str - format current NIC and system time as string
 */
static char *igb_get_time_str(struct igb_adapter *adapter,
			      char buffer[160])
{
	cycle_t hw = adapter->cycles.read(&adapter->cycles);
	struct timespec nic = ns_to_timespec(timecounter_read(&adapter->clock));
	struct timespec sys;
	struct timespec delta;
	getnstimeofday(&sys);

	delta = timespec_sub(nic, sys);

	sprintf(buffer,
		"HW %llu, NIC %ld.%09lus, SYS %ld.%09lus, NIC-SYS %lds + %09luns",
		(unsigned long long)hw,
		(long)nic.tv_sec, nic.tv_nsec,
		(long)sys.tv_sec, sys.tv_nsec,
		(long)delta.tv_sec, delta.tv_nsec);

	return buffer;
}
/**
 * igb_desc_unused - calculate if we have unused descriptors
 **/
static int igb_desc_unused(struct igb_ring *ring)
{
	if (ring->next_to_clean > ring->next_to_use)
		return ring->next_to_clean - ring->next_to_use - 1;

	return ring->count + ring->next_to_clean - ring->next_to_use - 1;
}
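
/*
 * Worked example for igb_desc_unused() above: with ring->count = 256,
 * next_to_use = 250 and next_to_clean = 10, the free region wraps past
 * the end of the ring and 256 + 10 - 250 - 1 = 15 descriptors are unused.
 * One slot is always kept empty so that a full ring still satisfies
 * next_to_use != next_to_clean (see the comment in igb_configure()).
 */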
/**
 * igb_init_module - Driver Registration Routine
 *
 * igb_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init igb_init_module(void)
{
	int ret;

	printk(KERN_INFO "%s - version %s\n",
	       igb_driver_string, igb_driver_version);

	printk(KERN_INFO "%s\n", igb_copyright);

	global_quad_port_a = 0;

#ifdef CONFIG_IGB_DCA
	dca_register_notify(&dca_notifier);
#endif

	ret = pci_register_driver(&igb_driver);
	return ret;
}

module_init(igb_init_module);
/**
 * igb_exit_module - Driver Exit Cleanup Routine
 *
 * igb_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit igb_exit_module(void)
{
#ifdef CONFIG_IGB_DCA
	dca_unregister_notify(&dca_notifier);
#endif
	pci_unregister_driver(&igb_driver);
}

module_exit(igb_exit_module);
#define Q_IDX_82576(i) (((i & 0x1) << 3) + (i >> 1))
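
/*
 * Example of the Q_IDX_82576() mapping: i = 0, 1, 2, 3 yields queue
 * indices 0, 8, 1, 9, i.e. even values of i land in queues 0..7 and odd
 * values in queues 8..15, matching the VF 0 -> {0, 8}, VF 1 -> {1, 9}
 * allocation described in igb_cache_ring_register() below.
 */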
/**
 * igb_cache_ring_register - Descriptor ring to register mapping
 * @adapter: board private structure to initialize
 *
 * Once we know the feature-set enabled for the device, we'll cache
 * the register offset the descriptor ring is assigned to.
 **/
static void igb_cache_ring_register(struct igb_adapter *adapter)
{
	int i;
	unsigned int rbase_offset = adapter->vfs_allocated_count;

	switch (adapter->hw.mac.type) {
	case e1000_82576:
		/* The queues are allocated for virtualization such that VF 0
		 * is allocated queues 0 and 8, VF 1 queues 1 and 9, etc.
		 * In order to avoid collision we start at the first free queue
		 * and continue consuming queues in the same sequence
		 */
		for (i = 0; i < adapter->num_rx_queues; i++)
			adapter->rx_ring[i].reg_idx = rbase_offset +
			                              Q_IDX_82576(i);
		for (i = 0; i < adapter->num_tx_queues; i++)
			adapter->tx_ring[i].reg_idx = rbase_offset +
			                              Q_IDX_82576(i);
		break;
	case e1000_82575:
	default:
		for (i = 0; i < adapter->num_rx_queues; i++)
			adapter->rx_ring[i].reg_idx = i;
		for (i = 0; i < adapter->num_tx_queues; i++)
			adapter->tx_ring[i].reg_idx = i;
		break;
	}
}
/**
 * igb_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.
 **/
static int igb_alloc_queues(struct igb_adapter *adapter)
{
	int i;

	adapter->tx_ring = kcalloc(adapter->num_tx_queues,
				   sizeof(struct igb_ring), GFP_KERNEL);
	if (!adapter->tx_ring)
		return -ENOMEM;

	adapter->rx_ring = kcalloc(adapter->num_rx_queues,
				   sizeof(struct igb_ring), GFP_KERNEL);
	if (!adapter->rx_ring) {
		kfree(adapter->tx_ring);
		return -ENOMEM;
	}

	adapter->rx_ring->buddy = adapter->tx_ring;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct igb_ring *ring = &(adapter->tx_ring[i]);
		ring->count = adapter->tx_ring_count;
		ring->adapter = adapter;
		ring->queue_index = i;
	}
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct igb_ring *ring = &(adapter->rx_ring[i]);
		ring->count = adapter->rx_ring_count;
		ring->adapter = adapter;
		ring->queue_index = i;
		ring->itr_register = E1000_ITR;

		/* set a default napi handler for each rx_ring */
		netif_napi_add(adapter->netdev, &ring->napi, igb_poll, 64);
	}

	igb_cache_ring_register(adapter);
	return 0;
}
static void igb_free_queues(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		netif_napi_del(&adapter->rx_ring[i].napi);

	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);
}
#define IGB_N0_QUEUE -1
static void igb_assign_vector(struct igb_adapter *adapter, int rx_queue,
			      int tx_queue, int msix_vector)
{
	u32 msixbm = 0;
	struct e1000_hw *hw = &adapter->hw;
	u32 ivar, index;

	switch (hw->mac.type) {
	case e1000_82575:
		/* The 82575 assigns vectors using a bitmask, which matches the
		   bitmask for the EICR/EIMS/EIMC registers.  To assign one
		   or more queues to a vector, we write the appropriate bits
		   into the MSIXBM register for that vector. */
		if (rx_queue > IGB_N0_QUEUE) {
			msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;
			adapter->rx_ring[rx_queue].eims_value = msixbm;
		}
		if (tx_queue > IGB_N0_QUEUE) {
			msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue;
			adapter->tx_ring[tx_queue].eims_value =
				  E1000_EICR_TX_QUEUE0 << tx_queue;
		}
		array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
		break;
	case e1000_82576:
		/* 82576 uses a table-based method for assigning vectors.
		   Each queue has a single entry in the table to which we write
		   a vector number along with a "valid" bit.  Sadly, the layout
		   of the table is somewhat counterintuitive. */
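		/* Sketch of the IVAR0 layout implied by the code below, for
		 * table entry n = (queue >> 1) + vfs_allocated_count:
		 *   byte 0: vector for RX queue 2n   (even rx_queue)
		 *   byte 1: vector for TX queue 2n   (even tx_queue)
		 *   byte 2: vector for RX queue 2n+1 (odd rx_queue)
		 *   byte 3: vector for TX queue 2n+1 (odd tx_queue)
		 * with E1000_IVAR_VALID set in the top bit of each byte. */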
		if (rx_queue > IGB_N0_QUEUE) {
			index = (rx_queue >> 1) + adapter->vfs_allocated_count;
			ivar = array_rd32(E1000_IVAR0, index);
			if (rx_queue & 0x1) {
				/* vector goes into third byte of register */
				ivar = ivar & 0xFF00FFFF;
				ivar |= (msix_vector | E1000_IVAR_VALID) << 16;
			} else {
				/* vector goes into low byte of register */
				ivar = ivar & 0xFFFFFF00;
				ivar |= msix_vector | E1000_IVAR_VALID;
			}
			adapter->rx_ring[rx_queue].eims_value = 1 << msix_vector;
			array_wr32(E1000_IVAR0, index, ivar);
		}
		if (tx_queue > IGB_N0_QUEUE) {
			index = (tx_queue >> 1) + adapter->vfs_allocated_count;
			ivar = array_rd32(E1000_IVAR0, index);
			if (tx_queue & 0x1) {
				/* vector goes into high byte of register */
				ivar = ivar & 0x00FFFFFF;
				ivar |= (msix_vector | E1000_IVAR_VALID) << 24;
			} else {
				/* vector goes into second byte of register */
				ivar = ivar & 0xFFFF00FF;
				ivar |= (msix_vector | E1000_IVAR_VALID) << 8;
			}
			adapter->tx_ring[tx_queue].eims_value = 1 << msix_vector;
			array_wr32(E1000_IVAR0, index, ivar);
		}
		break;
	default:
		BUG();
		break;
	}
}
/**
 * igb_configure_msix - Configure MSI-X hardware
 *
 * igb_configure_msix sets up the hardware to properly
 * generate MSI-X interrupts.
 **/
static void igb_configure_msix(struct igb_adapter *adapter)
{
	u32 tmp;
	int i, vector = 0;
	struct e1000_hw *hw = &adapter->hw;

	adapter->eims_enable_mask = 0;
	if (hw->mac.type == e1000_82576)
		/* Turn on MSI-X capability first, or our settings
		 * won't stick.  And it will take days to debug. */
		wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
				 E1000_GPIE_PBA | E1000_GPIE_EIAME |
				 E1000_GPIE_NSICR);

	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct igb_ring *tx_ring = &adapter->tx_ring[i];
		igb_assign_vector(adapter, IGB_N0_QUEUE, i, vector++);
		adapter->eims_enable_mask |= tx_ring->eims_value;
		if (tx_ring->itr_val)
			writel(tx_ring->itr_val,
			       hw->hw_addr + tx_ring->itr_register);
		else
			writel(1, hw->hw_addr + tx_ring->itr_register);
	}

	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct igb_ring *rx_ring = &adapter->rx_ring[i];
		rx_ring->buddy = NULL;
		igb_assign_vector(adapter, i, IGB_N0_QUEUE, vector++);
		adapter->eims_enable_mask |= rx_ring->eims_value;
		if (rx_ring->itr_val)
			writel(rx_ring->itr_val,
			       hw->hw_addr + rx_ring->itr_register);
		else
			writel(1, hw->hw_addr + rx_ring->itr_register);
	}

	/* set vector for other causes, i.e. link changes */
	switch (hw->mac.type) {
	case e1000_82575:
		array_wr32(E1000_MSIXBM(0), vector++,
				      E1000_EIMS_OTHER);

		tmp = rd32(E1000_CTRL_EXT);
		/* enable MSI-X PBA support*/
		tmp |= E1000_CTRL_EXT_PBA_CLR;

		/* Auto-Mask interrupts upon ICR read. */
		tmp |= E1000_CTRL_EXT_EIAME;
		tmp |= E1000_CTRL_EXT_IRCA;

		wr32(E1000_CTRL_EXT, tmp);
		adapter->eims_enable_mask |= E1000_EIMS_OTHER;
		adapter->eims_other = E1000_EIMS_OTHER;
		break;

	case e1000_82576:
		tmp = (vector++ | E1000_IVAR_VALID) << 8;
		wr32(E1000_IVAR_MISC, tmp);

		adapter->eims_enable_mask = (1 << (vector)) - 1;
		adapter->eims_other = 1 << (vector - 1);
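		/* Example of the masks computed above: with 4 Tx + 4 Rx queue
		 * vectors, the "other" cause takes vector 8, leaving vector
		 * equal to 9 here, so eims_enable_mask = (1 << 9) - 1 = 0x1FF
		 * (all nine vectors) and eims_other = 1 << 8 = 0x100 (only
		 * the link/other vector). */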
		break;
	default:
		/* do nothing, since nothing else supports MSI-X */
		break;
	} /* switch (hw->mac.type) */
	wrfl();
}

/**
 * igb_request_msix - Initialize MSI-X interrupts
 *
 * igb_request_msix allocates MSI-X vectors and requests interrupts from the
 * kernel.
 **/
static int igb_request_msix(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i, err = 0, vector = 0;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct igb_ring *ring = &(adapter->tx_ring[i]);
		sprintf(ring->name, "%s-tx-%d", netdev->name, i);
		err = request_irq(adapter->msix_entries[vector].vector,
				  &igb_msix_tx, 0, ring->name,
				  &(adapter->tx_ring[i]));
		if (err)
			goto out;
		ring->itr_register = E1000_EITR(0) + (vector << 2);
		ring->itr_val = 976; /* ~4000 ints/sec */
		vector++;
	}
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct igb_ring *ring = &(adapter->rx_ring[i]);
		if (strlen(netdev->name) < (IFNAMSIZ - 5))
			sprintf(ring->name, "%s-rx-%d", netdev->name, i);
		else
			memcpy(ring->name, netdev->name, IFNAMSIZ);
		err = request_irq(adapter->msix_entries[vector].vector,
				  &igb_msix_rx, 0, ring->name,
				  &(adapter->rx_ring[i]));
		if (err)
			goto out;
		ring->itr_register = E1000_EITR(0) + (vector << 2);
		ring->itr_val = adapter->itr;
		vector++;
	}

	err = request_irq(adapter->msix_entries[vector].vector,
			  &igb_msix_other, 0, netdev->name, netdev);
	if (err)
		goto out;

	igb_configure_msix(adapter);
	return 0;
out:
	return err;
}
static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
{
	if (adapter->msix_entries) {
		pci_disable_msix(adapter->pdev);
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	} else if (adapter->flags & IGB_FLAG_HAS_MSI)
		pci_disable_msi(adapter->pdev);
}
/**
 * igb_set_interrupt_capability - set MSI or MSI-X if supported
 *
 * Attempt to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static void igb_set_interrupt_capability(struct igb_adapter *adapter)
{
	int err;
	int numvecs, i;

	/* Number of supported queues. */
	/* Having more queues than CPUs doesn't make sense. */
	adapter->num_rx_queues = min_t(u32, IGB_MAX_RX_QUEUES, num_online_cpus());
	adapter->num_tx_queues = min_t(u32, IGB_MAX_TX_QUEUES, num_online_cpus());

	numvecs = adapter->num_tx_queues + adapter->num_rx_queues + 1;
	adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry),
					GFP_KERNEL);
	if (!adapter->msix_entries)
		goto msi_only;

	for (i = 0; i < numvecs; i++)
		adapter->msix_entries[i].entry = i;

	err = pci_enable_msix(adapter->pdev,
			      adapter->msix_entries,
			      numvecs);
	if (err == 0)
		goto out;

	igb_reset_interrupt_capability(adapter);

	/* If we can't do MSI-X, try MSI */
msi_only:
	adapter->num_rx_queues = 1;
	adapter->num_tx_queues = 1;
	if (!pci_enable_msi(adapter->pdev))
		adapter->flags |= IGB_FLAG_HAS_MSI;
out:
	/* Notify the stack of the (possibly) reduced Tx Queue count. */
	adapter->netdev->real_num_tx_queues = adapter->num_tx_queues;
	return;
}
/**
 * igb_request_irq - initialize interrupts
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int igb_request_irq(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	int err = 0;

	if (adapter->msix_entries) {
		err = igb_request_msix(adapter);
		if (!err)
			goto request_done;
		/* fall back to MSI */
		igb_reset_interrupt_capability(adapter);
		if (!pci_enable_msi(adapter->pdev))
			adapter->flags |= IGB_FLAG_HAS_MSI;
		igb_free_all_tx_resources(adapter);
		igb_free_all_rx_resources(adapter);
		adapter->num_rx_queues = 1;
		igb_alloc_queues(adapter);
	} else {
		switch (hw->mac.type) {
		case e1000_82575:
			wr32(E1000_MSIXBM(0),
			     (E1000_EICR_RX_QUEUE0 | E1000_EIMS_OTHER));
			break;
		case e1000_82576:
			wr32(E1000_IVAR0, E1000_IVAR_VALID);
			break;
		default:
			break;
		}
	}

	if (adapter->flags & IGB_FLAG_HAS_MSI) {
		err = request_irq(adapter->pdev->irq, &igb_intr_msi, 0,
				  netdev->name, netdev);
		if (!err)
			goto request_done;
		/* fall back to legacy interrupts */
		igb_reset_interrupt_capability(adapter);
		adapter->flags &= ~IGB_FLAG_HAS_MSI;
	}

	err = request_irq(adapter->pdev->irq, &igb_intr, IRQF_SHARED,
			  netdev->name, netdev);

	if (err)
		dev_err(&adapter->pdev->dev, "Error %d getting interrupt\n",
			err);

request_done:
	return err;
}
static void igb_free_irq(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	if (adapter->msix_entries) {
		int vector = 0, i;

		for (i = 0; i < adapter->num_tx_queues; i++)
			free_irq(adapter->msix_entries[vector++].vector,
				 &(adapter->tx_ring[i]));
		for (i = 0; i < adapter->num_rx_queues; i++)
			free_irq(adapter->msix_entries[vector++].vector,
				 &(adapter->rx_ring[i]));

		free_irq(adapter->msix_entries[vector++].vector, netdev);
		return;
	}

	free_irq(adapter->pdev->irq, netdev);
}
/**
 * igb_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static void igb_irq_disable(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->msix_entries) {
		wr32(E1000_EIAM, 0);
		wr32(E1000_EIMC, ~0);
		wr32(E1000_EIAC, 0);
	}

	wr32(E1000_IAM, 0);
	wr32(E1000_IMC, ~0);
	wrfl();
	synchronize_irq(adapter->pdev->irq);
}
/**
 * igb_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static void igb_irq_enable(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->msix_entries) {
		wr32(E1000_EIAC, adapter->eims_enable_mask);
		wr32(E1000_EIAM, adapter->eims_enable_mask);
		wr32(E1000_EIMS, adapter->eims_enable_mask);
		if (adapter->vfs_allocated_count)
			wr32(E1000_MBVFIMR, 0xFF);
		wr32(E1000_IMS, (E1000_IMS_LSC | E1000_IMS_VMMB |
				 E1000_IMS_DOUTSYNC));
	} else {
		wr32(E1000_IMS, IMS_ENABLE_MASK);
		wr32(E1000_IAM, IMS_ENABLE_MASK);
	}
}
static void igb_update_mng_vlan(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	u16 vid = adapter->hw.mng_cookie.vlan_id;
	u16 old_vid = adapter->mng_vlan_id;
	if (adapter->vlgrp) {
		if (!vlan_group_get_device(adapter->vlgrp, vid)) {
			if (adapter->hw.mng_cookie.status &
				E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
				igb_vlan_rx_add_vid(netdev, vid);
				adapter->mng_vlan_id = vid;
			} else
				adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;

			if ((old_vid != (u16)IGB_MNG_VLAN_NONE) &&
					(vid != old_vid) &&
			    !vlan_group_get_device(adapter->vlgrp, old_vid))
				igb_vlan_rx_kill_vid(netdev, old_vid);
		} else
			adapter->mng_vlan_id = vid;
	}
}
/**
 * igb_release_hw_control - release control of the h/w to f/w
 * @adapter: address of board private structure
 *
 * igb_release_hw_control resets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that the
 * driver is no longer loaded.
 *
 **/
static void igb_release_hw_control(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl_ext;

	/* Let firmware take over control of h/w */
	ctrl_ext = rd32(E1000_CTRL_EXT);
	wr32(E1000_CTRL_EXT,
			ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
}
/**
 * igb_get_hw_control - get control of the h/w from f/w
 * @adapter: address of board private structure
 *
 * igb_get_hw_control sets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that
 * the driver is loaded.
 *
 **/
static void igb_get_hw_control(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl_ext;

	/* Let firmware know the driver has taken over */
	ctrl_ext = rd32(E1000_CTRL_EXT);
	wr32(E1000_CTRL_EXT,
			ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
}
/**
 * igb_configure - configure the hardware for RX and TX
 * @adapter: private board structure
 **/
static void igb_configure(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	igb_get_hw_control(adapter);
	igb_set_multi(netdev);

	igb_restore_vlan(adapter);

	igb_configure_tx(adapter);
	igb_setup_rctl(adapter);
	igb_configure_rx(adapter);

	igb_rx_fifo_flush_82575(&adapter->hw);

	/* call igb_desc_unused which always leaves
	 * at least 1 descriptor unused to make sure
	 * next_to_use != next_to_clean */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct igb_ring *ring = &adapter->rx_ring[i];
		igb_alloc_rx_buffers_adv(ring, igb_desc_unused(ring));
	}

	adapter->tx_queue_len = netdev->tx_queue_len;
}
/**
 * igb_up - Open the interface and prepare it to handle traffic
 * @adapter: board private structure
 **/
int igb_up(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	int i;

	/* hardware has been reset, we need to reload some things */
	igb_configure(adapter);

	clear_bit(__IGB_DOWN, &adapter->state);

	for (i = 0; i < adapter->num_rx_queues; i++)
		napi_enable(&adapter->rx_ring[i].napi);
	if (adapter->msix_entries)
		igb_configure_msix(adapter);

	igb_vmm_control(adapter);
	igb_set_rah_pool(hw, adapter->vfs_allocated_count, 0);
	igb_set_vmolr(hw, adapter->vfs_allocated_count);

	/* Clear any pending interrupts. */
	rd32(E1000_ICR);
	igb_irq_enable(adapter);

	/* Fire a link change interrupt to start the watchdog. */
	wr32(E1000_ICS, E1000_ICS_LSC);
	return 0;
}
void igb_down(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u32 tctl, rctl;
	int i;

	/* signal that we're down so the interrupt handler does not
	 * reschedule our watchdog timer */
	set_bit(__IGB_DOWN, &adapter->state);

	/* disable receives in the hardware */
	rctl = rd32(E1000_RCTL);
	wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN);
	/* flush and sleep below */

	netif_tx_stop_all_queues(netdev);

	/* disable transmits in the hardware */
	tctl = rd32(E1000_TCTL);
	tctl &= ~E1000_TCTL_EN;
	wr32(E1000_TCTL, tctl);
	/* flush both disables and wait for them to finish */
	wrfl();
	msleep(10);

	for (i = 0; i < adapter->num_rx_queues; i++)
		napi_disable(&adapter->rx_ring[i].napi);

	igb_irq_disable(adapter);

	del_timer_sync(&adapter->watchdog_timer);
	del_timer_sync(&adapter->phy_info_timer);

	netdev->tx_queue_len = adapter->tx_queue_len;
	netif_carrier_off(netdev);

	/* record the stats before reset*/
	igb_update_stats(adapter);

	adapter->link_speed = 0;
	adapter->link_duplex = 0;

	if (!pci_channel_offline(adapter->pdev))
		igb_reset(adapter);
	igb_clean_all_tx_rings(adapter);
	igb_clean_all_rx_rings(adapter);
}
void igb_reinit_locked(struct igb_adapter *adapter)
{
	WARN_ON(in_interrupt());
	while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
		msleep(1);
	igb_down(adapter);
	igb_up(adapter);
	clear_bit(__IGB_RESETTING, &adapter->state);
}
void igb_reset(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_mac_info *mac = &hw->mac;
	struct e1000_fc_info *fc = &hw->fc;
	u32 pba = 0, tx_space, min_tx_space, min_rx_space;
	u16 hwm;

	/* Repartition Pba for greater than 9k mtu
	 * To take effect CTRL.RST is required.
	 */
	switch (mac->type) {
	case e1000_82576:
		pba = E1000_PBA_64K;
		break;
	case e1000_82575:
	default:
		pba = E1000_PBA_34K;
		break;
	}

	if ((adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) &&
	    (mac->type < e1000_82576)) {
		/* adjust PBA for jumbo frames */
		wr32(E1000_PBA, pba);

		/* To maintain wire speed transmits, the Tx FIFO should be
		 * large enough to accommodate two full transmit packets,
		 * rounded up to the next 1KB and expressed in KB.  Likewise,
		 * the Rx FIFO should be large enough to accommodate at least
		 * one full receive packet and is similarly rounded up and
		 * expressed in KB. */
		pba = rd32(E1000_PBA);
		/* upper 16 bits has Tx packet buffer allocation size in KB */
		tx_space = pba >> 16;
		/* lower 16 bits has Rx packet buffer allocation size in KB */
		pba &= 0xffff;
		/* the tx fifo also stores 16 bytes of information about the tx
		 * but don't include ethernet FCS because hardware appends it */
		min_tx_space = (adapter->max_frame_size +
				sizeof(union e1000_adv_tx_desc) -
				ETH_FCS_LEN) * 2;
		min_tx_space = ALIGN(min_tx_space, 1024);
		min_tx_space >>= 10;
		/* software strips receive CRC, so leave room for it */
		min_rx_space = adapter->max_frame_size;
		min_rx_space = ALIGN(min_rx_space, 1024);
		min_rx_space >>= 10;
		/* If current Tx allocation is less than the min Tx FIFO size,
		 * and the min Tx FIFO size is less than the current Rx FIFO
		 * allocation, take space away from current Rx allocation */
		if (tx_space < min_tx_space &&
		    ((min_tx_space - tx_space) < pba)) {
			pba = pba - (min_tx_space - tx_space);

			/* if short on rx space, rx wins and must trump tx
			 * adjustment */
			if (pba < min_rx_space)
				pba = min_rx_space;
		}
		wr32(E1000_PBA, pba);
	}
	/* flow control settings */
	/* The high water mark must be low enough to fit one full frame
	 * (or the size used for early receive) above it in the Rx FIFO.
	 * Set it to the lower of:
	 * - 90% of the Rx FIFO size, or
	 * - the full Rx FIFO size minus one full frame */
	hwm = min(((pba << 10) * 9 / 10),
		  ((pba << 10) - 2 * adapter->max_frame_size));

	if (mac->type < e1000_82576) {
		fc->high_water = hwm & 0xFFF8; /* 8-byte granularity */
		fc->low_water = fc->high_water - 8;
	} else {
		fc->high_water = hwm & 0xFFF0; /* 16-byte granularity */
		fc->low_water = fc->high_water - 16;
	}
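	/* Worked example of the watermark math above, assuming the 34 KB
	 * Rx PBA and a 1522-byte max frame: pba << 10 = 34816, so hwm =
	 * min(34816 * 9 / 10, 34816 - 2 * 1522) = min(31334, 31772) = 31334.
	 * On 82575 that gives high_water = 31334 & 0xFFF8 = 31328 and
	 * low_water = 31320. */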
	fc->pause_time = 0xFFFF;
	fc->send_xon = 1;
	fc->type = fc->original_type;
	/* disable receive for all VFs and wait one second */
	if (adapter->vfs_allocated_count) {
		int i;
		for (i = 0 ; i < adapter->vfs_allocated_count; i++)
			adapter->vf_data[i].clear_to_send = false;

		/* ping all the active vfs to let them know we are going down */
		igb_ping_all_vfs(adapter);

		/* disable transmits and receives */
		wr32(E1000_VFRE, 0);
		wr32(E1000_VFTE, 0);
	}

	/* Allow time for pending master requests to run */
	adapter->hw.mac.ops.reset_hw(&adapter->hw);
	wr32(E1000_WUC, 0);

	if (adapter->hw.mac.ops.init_hw(&adapter->hw))
		dev_err(&adapter->pdev->dev, "Hardware Error\n");

	igb_update_mng_vlan(adapter);

	/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
	wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE);

	igb_reset_adaptive(&adapter->hw);
	igb_get_phy_info(&adapter->hw);
}
static const struct net_device_ops igb_netdev_ops = {
	.ndo_open		= igb_open,
	.ndo_stop		= igb_close,
	.ndo_start_xmit		= igb_xmit_frame_adv,
	.ndo_get_stats		= igb_get_stats,
	.ndo_set_multicast_list	= igb_set_multi,
	.ndo_set_mac_address	= igb_set_mac,
	.ndo_change_mtu		= igb_change_mtu,
	.ndo_do_ioctl		= igb_ioctl,
	.ndo_tx_timeout		= igb_tx_timeout,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_register	= igb_vlan_rx_register,
	.ndo_vlan_rx_add_vid	= igb_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= igb_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= igb_netpoll,
#endif
};
/**
 * igb_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in igb_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * igb_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int __devinit igb_probe(struct pci_dev *pdev,
			       const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct igb_adapter *adapter;
	struct e1000_hw *hw;
	const struct e1000_info *ei = igb_info_tbl[ent->driver_data];
	unsigned long mmio_start, mmio_len;
	int err, pci_using_dac;
	u16 eeprom_data = 0;
	u16 eeprom_apme_mask = IGB_EEPROM_APME;
	u32 part_num;

	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

	pci_using_dac = 0;
	err = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
	if (!err) {
		err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
		if (!err)
			pci_using_dac = 1;
	} else {
		err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (err) {
			err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
			if (err) {
				dev_err(&pdev->dev, "No usable DMA "
					"configuration, aborting\n");
				goto err_dma;
			}
		}
	}

	err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
	                                   IORESOURCE_MEM),
	                                   igb_driver_name);
	if (err)
		goto err_pci_reg;

	err = pci_enable_pcie_error_reporting(pdev);
	if (err) {
		dev_err(&pdev->dev, "pci_enable_pcie_error_reporting failed "
		        "0x%x\n", err);
		/* non-fatal, continue */
	}

	pci_set_master(pdev);
	pci_save_state(pdev);

	err = -ENOMEM;
	netdev = alloc_etherdev_mq(sizeof(struct igb_adapter),
	                           IGB_ABS_MAX_TX_QUEUES);
	if (!netdev)
		goto err_alloc_etherdev;

	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	hw = &adapter->hw;
	hw->back = adapter;
	adapter->msg_enable = NETIF_MSG_DRV | NETIF_MSG_PROBE;

	mmio_start = pci_resource_start(pdev, 0);
	mmio_len = pci_resource_len(pdev, 0);

	err = -EIO;
	hw->hw_addr = ioremap(mmio_start, mmio_len);
	if (!hw->hw_addr)
		goto err_ioremap;
	netdev->netdev_ops = &igb_netdev_ops;
	igb_set_ethtool_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;

	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);

	netdev->mem_start = mmio_start;
	netdev->mem_end = mmio_start + mmio_len;

	/* PCI config space info */
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->revision_id = pdev->revision;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;
	/* setup the private structure */

	/* Copy the default MAC, PHY and NVM function pointers */
	memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
	memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
	memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
	/* Initialize skew-specific constants */
	err = ei->get_invariants(hw);
	if (err)
		goto err_sw_init;

	/* setup the private structure */
	err = igb_sw_init(adapter);
	if (err)
		goto err_sw_init;

	igb_get_bus_info_pcie(hw);

	/* set flags */
	switch (hw->mac.type) {
	case e1000_82575:
		adapter->flags |= IGB_FLAG_NEED_CTX_IDX;
		break;
	default:
		break;
	}

	hw->phy.autoneg_wait_to_complete = false;
	hw->mac.adaptive_ifs = true;

	/* Copper options */
	if (hw->phy.media_type == e1000_media_type_copper) {
		hw->phy.mdix = AUTO_ALL_MODES;
		hw->phy.disable_polarity_correction = false;
		hw->phy.ms_type = e1000_ms_hw_default;
	}

	if (igb_check_reset_block(hw))
		dev_info(&pdev->dev,
			"PHY reset is blocked due to SOL/IDER session.\n");
	netdev->features = NETIF_F_SG |
			   NETIF_F_IP_CSUM |
			   NETIF_F_HW_VLAN_TX |
			   NETIF_F_HW_VLAN_RX |
			   NETIF_F_HW_VLAN_FILTER;

	netdev->features |= NETIF_F_IPV6_CSUM;
	netdev->features |= NETIF_F_TSO;
	netdev->features |= NETIF_F_TSO6;

	netdev->features |= NETIF_F_GRO;

	netdev->vlan_features |= NETIF_F_TSO;
	netdev->vlan_features |= NETIF_F_TSO6;
	netdev->vlan_features |= NETIF_F_IP_CSUM;
	netdev->vlan_features |= NETIF_F_SG;

	if (pci_using_dac)
		netdev->features |= NETIF_F_HIGHDMA;
	adapter->en_mng_pt = igb_enable_mng_pass_thru(&adapter->hw);

	/* before reading the NVM, reset the controller to put the device in a
	 * known good starting state */
	hw->mac.ops.reset_hw(hw);

	/* make sure the NVM is good */
	if (igb_validate_nvm_checksum(hw) < 0) {
		dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
		err = -EIO;
		goto err_eeprom;
	}

	/* copy the MAC address out of the NVM */
	if (hw->mac.ops.read_mac_addr(hw))
		dev_err(&pdev->dev, "NVM Read Error\n");

	memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
	memcpy(netdev->perm_addr, hw->mac.addr, netdev->addr_len);

	if (!is_valid_ether_addr(netdev->perm_addr)) {
		dev_err(&pdev->dev, "Invalid MAC Address\n");
		err = -EIO;
		goto err_eeprom;
	}
	init_timer(&adapter->watchdog_timer);
	adapter->watchdog_timer.function = &igb_watchdog;
	adapter->watchdog_timer.data = (unsigned long) adapter;

	init_timer(&adapter->phy_info_timer);
	adapter->phy_info_timer.function = &igb_update_phy_info;
	adapter->phy_info_timer.data = (unsigned long) adapter;

	INIT_WORK(&adapter->reset_task, igb_reset_task);
	INIT_WORK(&adapter->watchdog_task, igb_watchdog_task);

	/* Initialize link properties that are user-changeable */
	adapter->fc_autoneg = true;
	hw->mac.autoneg = true;
	hw->phy.autoneg_advertised = 0x2f;

	hw->fc.original_type = e1000_fc_default;
	hw->fc.type = e1000_fc_default;

	adapter->itr_setting = IGB_DEFAULT_ITR;
	adapter->itr = IGB_START_ITR;

	igb_validate_mdi_setting(hw);

	adapter->rx_csum = 1;
	/* Initial Wake on LAN setting. If APM wake is enabled in the EEPROM,
	 * enable the ACPI Magic Packet filter
	 */

	if (hw->bus.func == 0)
		hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
	else if (hw->bus.func == 1)
		hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);

	if (eeprom_data & eeprom_apme_mask)
		adapter->eeprom_wol |= E1000_WUFC_MAG;

	/* now that we have the eeprom settings, apply the special cases where
	 * the eeprom may be wrong or the board simply won't support wake on
	 * lan on a particular port */
	switch (pdev->device) {
	case E1000_DEV_ID_82575GB_QUAD_COPPER:
		adapter->eeprom_wol = 0;
		break;
	case E1000_DEV_ID_82575EB_FIBER_SERDES:
	case E1000_DEV_ID_82576_FIBER:
	case E1000_DEV_ID_82576_SERDES:
		/* Wake events only supported on port A for dual fiber
		 * regardless of eeprom setting */
		if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1)
			adapter->eeprom_wol = 0;
		break;
	case E1000_DEV_ID_82576_QUAD_COPPER:
		/* if quad port adapter, disable WoL on all but port A */
		if (global_quad_port_a != 0)
			adapter->eeprom_wol = 0;
		else
			adapter->flags |= IGB_FLAG_QUAD_PORT_A;
		/* Reset for multiple quad port adapters */
		if (++global_quad_port_a == 4)
			global_quad_port_a = 0;
		break;
	}
	/* initialize the wol settings based on the eeprom settings */
	adapter->wol = adapter->eeprom_wol;
	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);

	/* reset the hardware with the new settings */
	igb_reset(adapter);

	/* let the f/w know that the h/w is now under the control of the
	 * driver. */
	igb_get_hw_control(adapter);

	/* tell the stack to leave us alone until igb_open() is called */
	netif_carrier_off(netdev);
	netif_tx_stop_all_queues(netdev);

	strcpy(netdev->name, "eth%d");
	err = register_netdev(netdev);
	if (err)
		goto err_register;
#ifdef CONFIG_PCI_IOV
	/* since iov functionality isn't critical to base device function we
	 * can accept failure.  If it fails we don't allow iov to be enabled */
	if (hw->mac.type == e1000_82576) {
		err = pci_enable_sriov(pdev, 0);
		if (!err)
			err = device_create_file(&netdev->dev,
						 &dev_attr_num_vfs);
		if (err)
			dev_err(&pdev->dev, "Failed to initialize IOV\n");
	}
#endif
#ifdef CONFIG_IGB_DCA
	if (dca_add_requester(&pdev->dev) == 0) {
		adapter->flags |= IGB_FLAG_DCA_ENABLED;
		dev_info(&pdev->dev, "DCA enabled\n");
		/* Always use CB2 mode, difference is masked
		 * in the CB driver. */
		wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2);
		igb_setup_dca(adapter);
	}
#endif
	/*
	 * Initialize hardware timer: we keep it running just in case
	 * that some program needs it later on.
	 */
	memset(&adapter->cycles, 0, sizeof(adapter->cycles));
	adapter->cycles.read = igb_read_clock;
	adapter->cycles.mask = CLOCKSOURCE_MASK(64);
	adapter->cycles.mult = 1;
	adapter->cycles.shift = IGB_TSYNC_SHIFT;
	wr32(E1000_TIMINCA,
	     (1 << 24) |
	     IGB_TSYNC_CYCLE_TIME_IN_NANOSECONDS * IGB_TSYNC_SCALE);

	/*
	 * Avoid rollover while we initialize by resetting the time counter.
	 */
	wr32(E1000_SYSTIML, 0x00000000);
	wr32(E1000_SYSTIMH, 0x00000000);

#if 0
	/*
	 * Set registers so that rollover occurs soon to test this.
	 */
	wr32(E1000_SYSTIML, 0x00000000);
	wr32(E1000_SYSTIMH, 0xFF800000);
#endif

	timecounter_init(&adapter->clock,
			 &adapter->cycles,
			 ktime_to_ns(ktime_get_real()));

	/*
	 * Synchronize our NIC clock against system wall clock. NIC
	 * time stamp reading requires ~3us per sample, each sample
	 * was pretty stable even under load => only require 10
	 * samples for each offset comparison.
	 */
	memset(&adapter->compare, 0, sizeof(adapter->compare));
	adapter->compare.source = &adapter->clock;
	adapter->compare.target = ktime_get_real;
	adapter->compare.num_samples = 10;
	timecompare_update(&adapter->compare, 0);

#ifdef DEBUG
	{
		char buffer[160];
		printk(KERN_DEBUG
			"igb: %s: hw %p initialized timer\n",
			igb_get_time_str(adapter, buffer),
			&adapter->hw);
	}
#endif
	dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
	/* print bus type/speed/width info */
	dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n",
		 netdev->name,
		 ((hw->bus.speed == e1000_bus_speed_2500)
		  ? "2.5Gb/s" : "unknown"),
		 ((hw->bus.width == e1000_bus_width_pcie_x4)
		  ? "Width x4" : (hw->bus.width == e1000_bus_width_pcie_x1)
		  ? "Width x1" : "unknown"),
		 netdev->dev_addr);

	igb_read_part_num(hw, &part_num);
	dev_info(&pdev->dev, "%s: PBA No: %06x-%03x\n", netdev->name,
		(part_num >> 8), (part_num & 0xff));

	dev_info(&pdev->dev,
		"Using %s interrupts. %d rx queue(s), %d tx queue(s)\n",
		adapter->msix_entries ? "MSI-X" :
		(adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy",
		adapter->num_rx_queues, adapter->num_tx_queues);

	return 0;

err_register:
	igb_release_hw_control(adapter);
err_eeprom:
	if (!igb_check_reset_block(hw))
		igb_reset_phy(hw);

	if (hw->flash_address)
		iounmap(hw->flash_address);
err_sw_init:
	igb_free_queues(adapter);
	iounmap(hw->hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_selected_regions(pdev, pci_select_bars(pdev,
	                             IORESOURCE_MEM));
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}
/**
 * igb_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * igb_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void __devexit igb_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int err;

	/* flush_scheduled work may reschedule our watchdog task, so
	 * explicitly disable watchdog tasks from being rescheduled */
	set_bit(__IGB_DOWN, &adapter->state);
	del_timer_sync(&adapter->watchdog_timer);
	del_timer_sync(&adapter->phy_info_timer);

	flush_scheduled_work();
#ifdef CONFIG_IGB_DCA
	if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
		dev_info(&pdev->dev, "DCA disabled\n");
		dca_remove_requester(&pdev->dev);
		adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
		wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
	}
#endif
	/* Release control of h/w to f/w.  If f/w is AMT enabled, this
	 * would have already happened in close and is redundant. */
	igb_release_hw_control(adapter);

	unregister_netdev(netdev);

	if (!igb_check_reset_block(&adapter->hw))
		igb_reset_phy(&adapter->hw);

	igb_reset_interrupt_capability(adapter);

	igb_free_queues(adapter);
#ifdef CONFIG_PCI_IOV
	/* reclaim resources allocated to VFs */
	if (adapter->vf_data) {
		/* disable iov and allow time for transactions to clear */
		pci_disable_sriov(pdev);
		msleep(500);

		kfree(adapter->vf_data);
		adapter->vf_data = NULL;
		wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
		msleep(100);
		dev_info(&pdev->dev, "IOV Disabled\n");
	}
#endif
	iounmap(hw->hw_addr);
	if (hw->flash_address)
		iounmap(hw->flash_address);
	pci_release_selected_regions(pdev, pci_select_bars(pdev,
	                             IORESOURCE_MEM));

	free_netdev(netdev);

	err = pci_disable_pcie_error_reporting(pdev);
	if (err)
		dev_err(&pdev->dev,
		        "pci_disable_pcie_error_reporting failed 0x%x\n", err);

	pci_disable_device(pdev);
}
/**
 * igb_sw_init - Initialize general software structures (struct igb_adapter)
 * @adapter: board private structure to initialize
 *
 * igb_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/
static int __devinit igb_sw_init(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;

	pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);

	adapter->tx_ring_count = IGB_DEFAULT_TXD;
	adapter->rx_ring_count = IGB_DEFAULT_RXD;
	adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
	adapter->rx_ps_hdr_size = 0; /* disable packet split */
	adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
	adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
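	/* e.g. with the default 1500-byte MTU: max_frame_size = 1500 + 14
	 * (Ethernet header) + 4 (FCS) = 1518 bytes, and min_frame_size =
	 * 60 + 4 = 64 bytes, the minimum legal Ethernet frame. */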
	/* This call may decrease the number of queues depending on
	 * interrupt mode. */
	igb_set_interrupt_capability(adapter);

	if (igb_alloc_queues(adapter)) {
		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
		return -ENOMEM;
	}

	/* Explicitly disable IRQ since the NIC can be in any state. */
	igb_irq_disable(adapter);

	set_bit(__IGB_DOWN, &adapter->state);
	return 0;
}
/**
 * igb_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/
static int igb_open(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int err;
	int i;

	/* disallow open during test */
	if (test_bit(__IGB_TESTING, &adapter->state))
		return -EBUSY;

	/* allocate transmit descriptors */
	err = igb_setup_all_tx_resources(adapter);
	if (err)
		goto err_setup_tx;

	/* allocate receive descriptors */
	err = igb_setup_all_rx_resources(adapter);
	if (err)
		goto err_setup_rx;

	/* e1000_power_up_phy(adapter); */

	adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
	if ((adapter->hw.mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN))
		igb_update_mng_vlan(adapter);

	/* before we allocate an interrupt, we must be ready to handle it.
	 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
	 * as soon as we call pci_request_irq, so we have to setup our
	 * clean_rx handler before we do so.  */
	igb_configure(adapter);

	igb_vmm_control(adapter);
	igb_set_rah_pool(hw, adapter->vfs_allocated_count, 0);
	igb_set_vmolr(hw, adapter->vfs_allocated_count);

	err = igb_request_irq(adapter);
	if (err)
		goto err_req_irq;

	/* From here on the code is the same as igb_up() */
	clear_bit(__IGB_DOWN, &adapter->state);

	for (i = 0; i < adapter->num_rx_queues; i++)
		napi_enable(&adapter->rx_ring[i].napi);

	/* Clear any pending interrupts. */
	rd32(E1000_ICR);

	igb_irq_enable(adapter);

	netif_tx_start_all_queues(netdev);

	/* Fire a link status change interrupt to start the watchdog. */
	wr32(E1000_ICS, E1000_ICS_LSC);

	return 0;

err_req_irq:
	igb_release_hw_control(adapter);
	/* e1000_power_down_phy(adapter); */
	igb_free_all_rx_resources(adapter);
err_setup_rx:
	igb_free_all_tx_resources(adapter);
err_setup_tx:
	igb_reset(adapter);

	return err;
}
/**
 * igb_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the driver's control, but
 * needs to be disabled.  A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/
static int igb_close(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);

	WARN_ON(test_bit(__IGB_RESETTING, &adapter->state));
	igb_down(adapter);

	igb_free_irq(adapter);

	igb_free_all_tx_resources(adapter);
	igb_free_all_rx_resources(adapter);

	/* kill manageability vlan ID if supported, but not if a vlan with
	 * the same ID is registered on the host OS (let 8021q kill it) */
	if ((adapter->hw.mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
	     !(adapter->vlgrp &&
	       vlan_group_get_device(adapter->vlgrp, adapter->mng_vlan_id)))
		igb_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);

	return 0;
}
/**
 * igb_setup_tx_resources - allocate Tx resources (Descriptors)
 * @adapter: board private structure
 * @tx_ring: tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 **/
int igb_setup_tx_resources(struct igb_adapter *adapter,
			   struct igb_ring *tx_ring)
{
	struct pci_dev *pdev = adapter->pdev;
	int size;

	size = sizeof(struct igb_buffer) * tx_ring->count;
	tx_ring->buffer_info = vmalloc(size);
	if (!tx_ring->buffer_info)
		goto err;
	memset(tx_ring->buffer_info, 0, size);

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
	tx_ring->size = ALIGN(tx_ring->size, 4096);
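	/* Example, assuming the 16-byte advanced Tx descriptor: 256
	 * descriptors * 16 bytes = 4096 bytes, already 4K aligned, while
	 * 320 descriptors would give 5120 bytes, which ALIGN() rounds up
	 * to 8192. */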
	tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
					     &tx_ring->dma);

	if (!tx_ring->desc)
		goto err;

	tx_ring->adapter = adapter;
	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	return 0;

err:
	vfree(tx_ring->buffer_info);
	dev_err(&adapter->pdev->dev,
		"Unable to allocate memory for the transmit descriptor ring\n");
	return -ENOMEM;
}
/**
 * igb_setup_all_tx_resources - wrapper to allocate Tx resources
 *				  (Descriptors) for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
{
	int i, err = 0;
	int r_idx;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		err = igb_setup_tx_resources(adapter, &adapter->tx_ring[i]);
		if (err) {
			dev_err(&adapter->pdev->dev,
				"Allocation for Tx Queue %u failed\n", i);
			for (i--; i >= 0; i--)
				igb_free_tx_resources(&adapter->tx_ring[i]);
			break;
		}
	}

	for (i = 0; i < IGB_MAX_TX_QUEUES; i++) {
		r_idx = i % adapter->num_tx_queues;
		adapter->multi_tx_table[i] = &adapter->tx_ring[r_idx];
	}
	return err;
}
/**
 * igb_configure_tx - Configure transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void igb_configure_tx(struct igb_adapter *adapter)
{
	u64 tdba;
	struct e1000_hw *hw = &adapter->hw;
	u32 tctl;
	u32 txdctl, txctrl;
	int i, j;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct igb_ring *ring = &adapter->tx_ring[i];
		j = ring->reg_idx;
		wr32(E1000_TDLEN(j),
		     ring->count * sizeof(union e1000_adv_tx_desc));
		tdba = ring->dma;
		wr32(E1000_TDBAL(j),
		     tdba & 0x00000000ffffffffULL);
		wr32(E1000_TDBAH(j), tdba >> 32);

		ring->head = E1000_TDH(j);
		ring->tail = E1000_TDT(j);
		writel(0, hw->hw_addr + ring->tail);
		writel(0, hw->hw_addr + ring->head);
		txdctl = rd32(E1000_TXDCTL(j));
		txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
		wr32(E1000_TXDCTL(j), txdctl);

		/* Turn off Relaxed Ordering on head write-backs.  The
		 * writebacks MUST be delivered in order or it will
		 * completely screw up our bookkeeping.
		 */
		txctrl = rd32(E1000_DCA_TXCTRL(j));
		txctrl &= ~E1000_DCA_TXCTRL_TX_WB_RO_EN;
		wr32(E1000_DCA_TXCTRL(j), txctrl);
	}

	/* disable queue 0 to prevent tail bump w/o re-configuration */
	if (adapter->vfs_allocated_count)
		wr32(E1000_TXDCTL(0), 0);

	/* Program the Transmit Control Register */
	tctl = rd32(E1000_TCTL);
	tctl &= ~E1000_TCTL_CT;
	tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
		(E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);

	igb_config_collision_dist(hw);

	/* Setup Transmit Descriptor Settings for eop descriptor */
	adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS;

	/* Enable transmits */
	tctl |= E1000_TCTL_EN;

	wr32(E1000_TCTL, tctl);
}
/**
 * igb_setup_rx_resources - allocate Rx resources (Descriptors)
 * @adapter: board private structure
 * @rx_ring: rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
int igb_setup_rx_resources(struct igb_adapter *adapter,
			   struct igb_ring *rx_ring)
{
	struct pci_dev *pdev = adapter->pdev;
	int size, desc_len;

	size = sizeof(struct igb_buffer) * rx_ring->count;
	rx_ring->buffer_info = vmalloc(size);
	if (!rx_ring->buffer_info)
		goto err;
	memset(rx_ring->buffer_info, 0, size);

	desc_len = sizeof(union e1000_adv_rx_desc);

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * desc_len;
	rx_ring->size = ALIGN(rx_ring->size, 4096);

	rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size,
					     &rx_ring->dma);

	if (!rx_ring->desc)
		goto err;

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	rx_ring->adapter = adapter;

	return 0;

err:
	vfree(rx_ring->buffer_info);
	dev_err(&adapter->pdev->dev, "Unable to allocate memory for "
		"the receive descriptor ring\n");
	return -ENOMEM;
}
/**
 * igb_setup_all_rx_resources - wrapper to allocate Rx resources
 *				  (Descriptors) for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		err = igb_setup_rx_resources(adapter, &adapter->rx_ring[i]);
		if (err) {
			dev_err(&adapter->pdev->dev,
				"Allocation for Rx Queue %u failed\n", i);
			for (i--; i >= 0; i--)
				igb_free_rx_resources(&adapter->rx_ring[i]);
			break;
		}
	}

	return err;
}
/**
 * igb_setup_rctl - configure the receive control registers
 * @adapter: Board private structure
 **/
static void igb_setup_rctl(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 rctl;
	u32 srrctl = 0;
	int i, j;

	rctl = rd32(E1000_RCTL);

	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
	rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);

	rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF |
		(hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);

	/*
	 * enable stripping of CRC. It's unlikely this will break BMC
	 * redirection as it did with e1000. Newer features require
	 * that the HW strips the CRC.
	 */
	rctl |= E1000_RCTL_SECRC;

	/*
	 * disable store bad packets and clear size bits.
	 */
	rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_SZ_256);

	/* enable LPE so packets up to max_frame_size are accepted */
	rctl |= E1000_RCTL_LPE;

	/* Setup buffer sizes */
	switch (adapter->rx_buffer_len) {
	case IGB_RXBUFFER_256:
		rctl |= E1000_RCTL_SZ_256;
		break;
	case IGB_RXBUFFER_512:
		rctl |= E1000_RCTL_SZ_512;
		break;
	default:
		srrctl = ALIGN(adapter->rx_buffer_len, 1024)
			 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
		break;
	}
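	/* Example of the BSIZEPKT encoding above (assuming the usual
	 * E1000_SRRCTL_BSIZEPKT_SHIFT of 10): a 2048-byte rx_buffer_len is
	 * already 1 KB aligned and encodes as 2048 >> 10 = 2, i.e. SRRCTL
	 * expresses packet buffer space in 1 KB units. */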
	/* 82575 and greater support packet-split where the protocol
	 * header is placed in skb->data and the packet data is
	 * placed in pages hanging off of skb_shinfo(skb)->nr_frags.
	 * In the case of a non-split, skb->data is linearly filled,
	 * followed by the page buffers.  Therefore, skb->data is
	 * sized to hold the largest protocol header.
	 */
	/* allocations using alloc_page take too long for regular MTU
	 * so only enable packet split for jumbo frames */
	if (adapter->netdev->mtu > ETH_DATA_LEN) {
		adapter->rx_ps_hdr_size = IGB_RXBUFFER_128;
		srrctl |= adapter->rx_ps_hdr_size <<
			  E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
		srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
	} else {
		adapter->rx_ps_hdr_size = 0;
		srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
	}
	/* Attention!!!  For SR-IOV PF driver operations you must enable
	 * queue drop for all VF and PF queues to prevent head of line blocking
	 * if an un-trusted VF does not provide descriptors to hardware.
	 */
	if (adapter->vfs_allocated_count) {
		u32 vmolr;

		j = adapter->rx_ring[0].reg_idx;

		/* set all queue drop enable bits */
		wr32(E1000_QDE, ALL_QUEUES);
		srrctl |= E1000_SRRCTL_DROP_EN;

		/* disable queue 0 to prevent tail write w/o re-config */
		wr32(E1000_RXDCTL(0), 0);

		vmolr = rd32(E1000_VMOLR(j));
		if (rctl & E1000_RCTL_LPE)
			vmolr |= E1000_VMOLR_LPE;
		if (adapter->num_rx_queues > 0)
			vmolr |= E1000_VMOLR_RSSE;
		wr32(E1000_VMOLR(j), vmolr);
	}

	for (i = 0; i < adapter->num_rx_queues; i++) {
		j = adapter->rx_ring[i].reg_idx;
		wr32(E1000_SRRCTL(j), srrctl);
	}

	wr32(E1000_RCTL, rctl);
}
2058 * igb_rlpml_set - set maximum receive packet size
2059 * @adapter: board private structure
2061 * Configure maximum receivable packet size.
2063 static void igb_rlpml_set(struct igb_adapter *adapter)
2065 u32 max_frame_size = adapter->max_frame_size;
2066 struct e1000_hw *hw = &adapter->hw;
2067 u16 pf_id = adapter->vfs_allocated_count;
2069 if (adapter->vlgrp)
2070 max_frame_size += VLAN_TAG_SIZE;
2072 /* if vfs are enabled we set RLPML to the largest possible request
2073 * size and set the VMOLR RLPML to the size we need */
2074 if (adapter->vfs_allocated_count) {
2075 igb_set_vf_rlpml(adapter, max_frame_size, pf_id);
2076 max_frame_size = MAX_STD_JUMBO_FRAME_SIZE + VLAN_TAG_SIZE;
2077 }
2079 wr32(E1000_RLPML, max_frame_size);
2083 * igb_configure_vt_default_pool - Configure VT default pool
2084 * @adapter: board private structure
2086 * Configure the default pool
2088 static void igb_configure_vt_default_pool(struct igb_adapter *adapter)
2090 struct e1000_hw *hw = &adapter->hw;
2091 u16 pf_id = adapter->vfs_allocated_count;
2094 /* not in sr-iov mode - do nothing */
2095 if (!adapter->vfs_allocated_count)
2096 return;
2098 vtctl = rd32(E1000_VT_CTL);
2099 vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK |
2100 E1000_VT_CTL_DISABLE_DEF_POOL);
2101 vtctl |= pf_id << E1000_VT_CTL_DEFAULT_POOL_SHIFT;
2102 wr32(E1000_VT_CTL, vtctl);
2106 * igb_configure_rx - Configure receive Unit after Reset
2107 * @adapter: board private structure
2109 * Configure the Rx unit of the MAC after a reset.
2111 static void igb_configure_rx(struct igb_adapter *adapter)
2114 struct e1000_hw *hw = &adapter->hw;
2119 /* disable receives while setting up the descriptors */
2120 rctl = rd32(E1000_RCTL);
2121 wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN);
2125 if (adapter->itr_setting > 3)
2126 wr32(E1000_ITR, adapter->itr);
2128 /* Setup the HW Rx Head and Tail Descriptor Pointers and
2129 * the Base and Length of the Rx Descriptor Ring */
2130 for (i = 0; i < adapter->num_rx_queues; i++) {
2131 struct igb_ring *ring = &adapter->rx_ring[i];
2132 int j = ring->reg_idx;
2134 wr32(E1000_RDBAL(j),
2135 rdba & 0x00000000ffffffffULL);
2136 wr32(E1000_RDBAH(j), rdba >> 32);
2137 wr32(E1000_RDLEN(j),
2138 ring->count * sizeof(union e1000_adv_rx_desc));
2140 ring->head = E1000_RDH(j);
2141 ring->tail = E1000_RDT(j);
2142 writel(0, hw->hw_addr + ring->tail);
2143 writel(0, hw->hw_addr + ring->head);
2145 rxdctl = rd32(E1000_RXDCTL(j));
2146 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
2147 rxdctl &= 0xFFF00000;
2148 rxdctl |= IGB_RX_PTHRESH;
2149 rxdctl |= IGB_RX_HTHRESH << 8;
2150 rxdctl |= IGB_RX_WTHRESH << 16;
2151 wr32(E1000_RXDCTL(j), rxdctl);
2154 if (adapter->num_rx_queues > 1) {
2155 u32 random[10];
2156 u32 mrqc;
2157 u32 j, shift;
2158 union e1000_reta {
2159 u32 dword;
2160 u8 bytes[4];
2161 } reta;
2163 get_random_bytes(&random[0], 40);
2165 if (hw->mac.type >= e1000_82576)
2166 shift = 0;
2167 else
2168 shift = 6;
2169 for (j = 0; j < (32 * 4); j++) {
2170 reta.bytes[j & 3] =
2171 adapter->rx_ring[(j % adapter->num_rx_queues)].reg_idx << shift;
2172 if ((j & 3) == 3)
2173 writel(reta.dword,
2174 hw->hw_addr + E1000_RETA(0) + (j & ~3));
2175 }
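/* The redirection table is 128 one-byte queue entries packed four to
 * a 32-bit register, which is why the loop runs 32 * 4 times but
 * only issues a writel() on every fourth pass, at the dword-aligned
 * offset (j & ~3). */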
2176 if (adapter->vfs_allocated_count)
2177 mrqc = E1000_MRQC_ENABLE_VMDQ_RSS_2Q;
2179 mrqc = E1000_MRQC_ENABLE_RSS_4Q;
2181 /* Fill out hash function seeds */
2182 for (j = 0; j < 10; j++)
2183 array_wr32(E1000_RSSRK(0), j, random[j]);
2185 mrqc |= (E1000_MRQC_RSS_FIELD_IPV4 |
2186 E1000_MRQC_RSS_FIELD_IPV4_TCP);
2187 mrqc |= (E1000_MRQC_RSS_FIELD_IPV6 |
2188 E1000_MRQC_RSS_FIELD_IPV6_TCP);
2189 mrqc |= (E1000_MRQC_RSS_FIELD_IPV4_UDP |
2190 E1000_MRQC_RSS_FIELD_IPV6_UDP);
2191 mrqc |= (E1000_MRQC_RSS_FIELD_IPV6_UDP_EX |
2192 E1000_MRQC_RSS_FIELD_IPV6_TCP_EX);
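/* The RSS_FIELD bits above select which packet fields feed the RSS
 * hash: the IPv4/IPv6 source and destination addresses, with the
 * TCP/UDP port pair mixed in for the L4-aware variants. */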
2195 wr32(E1000_MRQC, mrqc);
2197 /* Multiqueue and raw packet checksumming are mutually
2198 * exclusive. Note that this is not the same as TCP/IP
2199 * checksumming, which works fine. */
2200 rxcsum = rd32(E1000_RXCSUM);
2201 rxcsum |= E1000_RXCSUM_PCSD;
2202 wr32(E1000_RXCSUM, rxcsum);
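/* PCSD must be set whenever RSS is used because (as best I can tell
 * from the datasheet) the write-back field that normally carries the
 * raw packet checksum is re-purposed to return the RSS hash value. */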
2204 /* Enable multi-queue for sr-iov */
2205 if (adapter->vfs_allocated_count)
2206 wr32(E1000_MRQC, E1000_MRQC_ENABLE_VMDQ);
2207 /* Enable Receive Checksum Offload for TCP and UDP */
2208 rxcsum = rd32(E1000_RXCSUM);
2209 if (adapter->rx_csum)
3210 rxcsum |= E1000_RXCSUM_TUOFL | E1000_RXCSUM_IPPCSE;
3211 else
3212 rxcsum &= ~(E1000_RXCSUM_TUOFL | E1000_RXCSUM_IPPCSE);
2214 wr32(E1000_RXCSUM, rxcsum);
2217 /* Set the default pool for the PF's first queue */
2218 igb_configure_vt_default_pool(adapter);
2220 igb_rlpml_set(adapter);
2222 /* Enable Receives */
2223 wr32(E1000_RCTL, rctl);
2227 * igb_free_tx_resources - Free Tx Resources per Queue
2228 * @tx_ring: Tx descriptor ring for a specific queue
2230 * Free all transmit software resources
2232 void igb_free_tx_resources(struct igb_ring *tx_ring)
2234 struct pci_dev *pdev = tx_ring->adapter->pdev;
2236 igb_clean_tx_ring(tx_ring);
2238 vfree(tx_ring->buffer_info);
2239 tx_ring->buffer_info = NULL;
2241 pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);
2243 tx_ring->desc = NULL;
2247 * igb_free_all_tx_resources - Free Tx Resources for All Queues
2248 * @adapter: board private structure
2250 * Free all transmit software resources
2252 static void igb_free_all_tx_resources(struct igb_adapter *adapter)
2256 for (i = 0; i < adapter->num_tx_queues; i++)
2257 igb_free_tx_resources(&adapter->tx_ring[i]);
2260 static void igb_unmap_and_free_tx_resource(struct igb_adapter *adapter,
2261 struct igb_buffer *buffer_info)
2263 if (buffer_info->dma) {
2264 pci_unmap_page(adapter->pdev,
2265 buffer_info->dma,
2266 buffer_info->length,
2267 PCI_DMA_TODEVICE);
2268 buffer_info->dma = 0;
2270 if (buffer_info->skb) {
2271 dev_kfree_skb_any(buffer_info->skb);
2272 buffer_info->skb = NULL;
2274 buffer_info->time_stamp = 0;
2275 buffer_info->next_to_watch = 0;
2276 /* buffer_info must be completely set up in the transmit path */
2280 * igb_clean_tx_ring - Free Tx Buffers
2281 * @tx_ring: ring to be cleaned
2283 static void igb_clean_tx_ring(struct igb_ring *tx_ring)
2285 struct igb_adapter *adapter = tx_ring->adapter;
2286 struct igb_buffer *buffer_info;
2290 if (!tx_ring->buffer_info)
2292 /* Free all the Tx ring sk_buffs */
2294 for (i = 0; i < tx_ring->count; i++) {
2295 buffer_info = &tx_ring->buffer_info[i];
2296 igb_unmap_and_free_tx_resource(adapter, buffer_info);
2299 size = sizeof(struct igb_buffer) * tx_ring->count;
2300 memset(tx_ring->buffer_info, 0, size);
2302 /* Zero out the descriptor ring */
2304 memset(tx_ring->desc, 0, tx_ring->size);
2306 tx_ring->next_to_use = 0;
2307 tx_ring->next_to_clean = 0;
2309 writel(0, adapter->hw.hw_addr + tx_ring->head);
2310 writel(0, adapter->hw.hw_addr + tx_ring->tail);
2314 * igb_clean_all_tx_rings - Free Tx Buffers for all queues
2315 * @adapter: board private structure
2317 static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
2321 for (i = 0; i < adapter->num_tx_queues; i++)
2322 igb_clean_tx_ring(&adapter->tx_ring[i]);
2326 * igb_free_rx_resources - Free Rx Resources
2327 * @rx_ring: ring to clean the resources from
2329 * Free all receive software resources
2331 void igb_free_rx_resources(struct igb_ring *rx_ring)
2333 struct pci_dev *pdev = rx_ring->adapter->pdev;
2335 igb_clean_rx_ring(rx_ring);
2337 vfree(rx_ring->buffer_info);
2338 rx_ring->buffer_info = NULL;
2340 pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
2342 rx_ring->desc = NULL;
2346 * igb_free_all_rx_resources - Free Rx Resources for All Queues
2347 * @adapter: board private structure
2349 * Free all receive software resources
2351 static void igb_free_all_rx_resources(struct igb_adapter *adapter)
2355 for (i = 0; i < adapter->num_rx_queues; i++)
2356 igb_free_rx_resources(&adapter->rx_ring[i]);
2360 * igb_clean_rx_ring - Free Rx Buffers per Queue
2361 * @rx_ring: ring to free buffers from
2363 static void igb_clean_rx_ring(struct igb_ring *rx_ring)
2365 struct igb_adapter *adapter = rx_ring->adapter;
2366 struct igb_buffer *buffer_info;
2367 struct pci_dev *pdev = adapter->pdev;
2371 if (!rx_ring->buffer_info)
2373 /* Free all the Rx ring sk_buffs */
2374 for (i = 0; i < rx_ring->count; i++) {
2375 buffer_info = &rx_ring->buffer_info[i];
2376 if (buffer_info->dma) {
2377 if (adapter->rx_ps_hdr_size)
2378 pci_unmap_single(pdev, buffer_info->dma,
2379 adapter->rx_ps_hdr_size,
2380 PCI_DMA_FROMDEVICE);
2381 else
2382 pci_unmap_single(pdev, buffer_info->dma,
2383 adapter->rx_buffer_len,
2384 PCI_DMA_FROMDEVICE);
2385 buffer_info->dma = 0;
2388 if (buffer_info->skb) {
2389 dev_kfree_skb(buffer_info->skb);
2390 buffer_info->skb = NULL;
2392 if (buffer_info->page) {
2393 if (buffer_info->page_dma)
2394 pci_unmap_page(pdev, buffer_info->page_dma,
2395 PAGE_SIZE / 2,
2396 PCI_DMA_FROMDEVICE);
2397 put_page(buffer_info->page);
2398 buffer_info->page = NULL;
2399 buffer_info->page_dma = 0;
2400 buffer_info->page_offset = 0;
2404 size = sizeof(struct igb_buffer) * rx_ring->count;
2405 memset(rx_ring->buffer_info, 0, size);
2407 /* Zero out the descriptor ring */
2408 memset(rx_ring->desc, 0, rx_ring->size);
2410 rx_ring->next_to_clean = 0;
2411 rx_ring->next_to_use = 0;
2413 writel(0, adapter->hw.hw_addr + rx_ring->head);
2414 writel(0, adapter->hw.hw_addr + rx_ring->tail);
2418 * igb_clean_all_rx_rings - Free Rx Buffers for all queues
2419 * @adapter: board private structure
2421 static void igb_clean_all_rx_rings(struct igb_adapter *adapter)
2425 for (i = 0; i < adapter->num_rx_queues; i++)
2426 igb_clean_rx_ring(&adapter->rx_ring[i]);
2430 * igb_set_mac - Change the Ethernet Address of the NIC
2431 * @netdev: network interface device structure
2432 * @p: pointer to an address structure
2434 * Returns 0 on success, negative on failure
2436 static int igb_set_mac(struct net_device *netdev, void *p)
2438 struct igb_adapter *adapter = netdev_priv(netdev);
2439 struct e1000_hw *hw = &adapter->hw;
2440 struct sockaddr *addr = p;
2442 if (!is_valid_ether_addr(addr->sa_data))
2443 return -EADDRNOTAVAIL;
2445 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2446 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
2448 hw->mac.ops.rar_set(hw, hw->mac.addr, 0);
2450 igb_set_rah_pool(hw, adapter->vfs_allocated_count, 0);
2456 * igb_set_multi - Multicast and Promiscuous mode set
2457 * @netdev: network interface device structure
2459 * The set_multi entry point is called whenever the multicast address
2460 * list or the network interface flags are updated. This routine is
2461 * responsible for configuring the hardware for proper multicast,
2462 * promiscuous mode, and all-multi behavior.
2464 static void igb_set_multi(struct net_device *netdev)
2466 struct igb_adapter *adapter = netdev_priv(netdev);
2467 struct e1000_hw *hw = &adapter->hw;
2468 struct e1000_mac_info *mac = &hw->mac;
2469 struct dev_mc_list *mc_ptr;
2474 /* Check for Promiscuous and All Multicast modes */
2476 rctl = rd32(E1000_RCTL);
2478 if (netdev->flags & IFF_PROMISC) {
2479 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
2480 rctl &= ~E1000_RCTL_VFE;
2481 } else {
2482 if (netdev->flags & IFF_ALLMULTI) {
2483 rctl |= E1000_RCTL_MPE;
2484 rctl &= ~E1000_RCTL_UPE;
2485 } else
2486 rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE);
2487 rctl |= E1000_RCTL_VFE;
2488 }
2489 wr32(E1000_RCTL, rctl);
2491 if (!netdev->mc_count) {
2492 /* nothing to program, so clear mc list */
2493 igb_update_mc_addr_list(hw, NULL, 0, 1,
2494 mac->rar_entry_count);
2495 return;
2496 }
2498 mta_list = kzalloc(netdev->mc_count * 6, GFP_ATOMIC);
2499 if (!mta_list)
2500 return;
2502 /* The shared function expects a packed array of only addresses. */
2503 mc_ptr = netdev->mc_list;
2505 for (i = 0; i < netdev->mc_count; i++) {
2508 memcpy(mta_list + (i*ETH_ALEN), mc_ptr->dmi_addr, ETH_ALEN);
2509 mc_ptr = mc_ptr->next;
2511 igb_update_mc_addr_list(hw, mta_list, i,
2512 adapter->vfs_allocated_count + 1,
2513 mac->rar_entry_count);
2515 igb_set_mc_list_pools(adapter, i, mac->rar_entry_count);
2516 igb_restore_vf_multicasts(adapter);
2517 kfree(mta_list);
2521 /* Need to wait a few seconds after link up to get diagnostic information from
2523 static void igb_update_phy_info(unsigned long data)
2525 struct igb_adapter *adapter = (struct igb_adapter *) data;
2526 igb_get_phy_info(&adapter->hw);
2530 * igb_has_link - check shared code for link and determine up/down
2531 * @adapter: pointer to driver private info
2533 static bool igb_has_link(struct igb_adapter *adapter)
2535 struct e1000_hw *hw = &adapter->hw;
2536 bool link_active = false;
2539 /* get_link_status is set on LSC (link status) interrupt or
2540 * rx sequence error interrupt. get_link_status will stay
2541 * false until the e1000_check_for_link establishes link
2542 * for copper adapters ONLY
2544 switch (hw->phy.media_type) {
2545 case e1000_media_type_copper:
2546 if (hw->mac.get_link_status) {
2547 ret_val = hw->mac.ops.check_for_link(hw);
2548 link_active = !hw->mac.get_link_status;
2549 } else {
2550 link_active = true;
2551 }
2552 break;
2553 case e1000_media_type_fiber:
2554 ret_val = hw->mac.ops.check_for_link(hw);
2555 link_active = !!(rd32(E1000_STATUS) & E1000_STATUS_LU);
2556 break;
2557 case e1000_media_type_internal_serdes:
2558 ret_val = hw->mac.ops.check_for_link(hw);
2559 link_active = hw->mac.serdes_has_link;
2560 break;
2562 case e1000_media_type_unknown:
2563 default:
2564 break;
2565 }
2567 return link_active;
2568 }
2570 * igb_watchdog - Timer Call-back
2571 * @data: pointer to adapter cast into an unsigned long
2573 static void igb_watchdog(unsigned long data)
2575 struct igb_adapter *adapter = (struct igb_adapter *)data;
2576 /* Do the rest outside of interrupt context */
2577 schedule_work(&adapter->watchdog_task);
2580 static void igb_watchdog_task(struct work_struct *work)
2582 struct igb_adapter *adapter = container_of(work,
2583 struct igb_adapter, watchdog_task);
2584 struct e1000_hw *hw = &adapter->hw;
2585 struct net_device *netdev = adapter->netdev;
2586 struct igb_ring *tx_ring = adapter->tx_ring;
2591 link = igb_has_link(adapter);
2592 if ((netif_carrier_ok(netdev)) && link)
2593 goto link_up;
2595 if (link) {
2596 if (!netif_carrier_ok(netdev)) {
2598 hw->mac.ops.get_speed_and_duplex(&adapter->hw,
2599 &adapter->link_speed,
2600 &adapter->link_duplex);
2602 ctrl = rd32(E1000_CTRL);
2603 /* Link status message must follow this format */
2604 printk(KERN_INFO "igb: %s NIC Link is Up %d Mbps %s, "
2605 "Flow Control: %s\n",
2607 adapter->link_speed,
2608 adapter->link_duplex == FULL_DUPLEX ?
2609 "Full Duplex" : "Half Duplex",
2610 ((ctrl & E1000_CTRL_TFCE) && (ctrl &
2611 E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl &
2612 E1000_CTRL_RFCE) ? "RX" : ((ctrl &
2613 E1000_CTRL_TFCE) ? "TX" : "None")));
2615 /* tweak tx_queue_len according to speed/duplex and
2616 * adjust the timeout factor */
2617 netdev->tx_queue_len = adapter->tx_queue_len;
2618 adapter->tx_timeout_factor = 1;
2619 switch (adapter->link_speed) {
2620 case SPEED_10:
2621 netdev->tx_queue_len = 10;
2622 adapter->tx_timeout_factor = 14;
2623 break;
2624 case SPEED_100:
2625 netdev->tx_queue_len = 100;
2626 /* maybe add some timeout factor ? */
2630 netif_carrier_on(netdev);
2631 netif_tx_wake_all_queues(netdev);
2633 igb_ping_all_vfs(adapter);
2635 /* link state has changed, schedule phy info update */
2636 if (!test_bit(__IGB_DOWN, &adapter->state))
2637 mod_timer(&adapter->phy_info_timer,
2638 round_jiffies(jiffies + 2 * HZ));
2640 } else {
2641 if (netif_carrier_ok(netdev)) {
2642 adapter->link_speed = 0;
2643 adapter->link_duplex = 0;
2644 /* Link status message must follow this format */
2645 printk(KERN_INFO "igb: %s NIC Link is Down\n",
2647 netif_carrier_off(netdev);
2648 netif_tx_stop_all_queues(netdev);
2650 igb_ping_all_vfs(adapter);
2652 /* link state has changed, schedule phy info update */
2653 if (!test_bit(__IGB_DOWN, &adapter->state))
2654 mod_timer(&adapter->phy_info_timer,
2655 round_jiffies(jiffies + 2 * HZ));
2659 link_up:
2660 igb_update_stats(adapter);
2662 hw->mac.tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
2663 adapter->tpt_old = adapter->stats.tpt;
2664 hw->mac.collision_delta = adapter->stats.colc - adapter->colc_old;
2665 adapter->colc_old = adapter->stats.colc;
2667 adapter->gorc = adapter->stats.gorc - adapter->gorc_old;
2668 adapter->gorc_old = adapter->stats.gorc;
2669 adapter->gotc = adapter->stats.gotc - adapter->gotc_old;
2670 adapter->gotc_old = adapter->stats.gotc;
2672 igb_update_adaptive(&adapter->hw);
2674 if (!netif_carrier_ok(netdev)) {
2675 if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) {
2676 /* We've lost link, so the controller stops DMA,
2677 * but we've got queued Tx work that's never going
2678 * to get done, so reset controller to flush Tx.
2679 * (Do the reset outside of interrupt context). */
2680 adapter->tx_timeout_count++;
2681 schedule_work(&adapter->reset_task);
2685 /* Cause software interrupt to ensure rx ring is cleaned */
2686 if (adapter->msix_entries) {
2687 for (i = 0; i < adapter->num_rx_queues; i++)
2688 eics |= adapter->rx_ring[i].eims_value;
2689 wr32(E1000_EICS, eics);
2690 } else {
2691 wr32(E1000_ICS, E1000_ICS_RXDMT0);
2692 }
2694 /* Force detection of hung controller every watchdog period */
2695 tx_ring->detect_tx_hung = true;
2697 /* Reset the timer */
2698 if (!test_bit(__IGB_DOWN, &adapter->state))
2699 mod_timer(&adapter->watchdog_timer,
2700 round_jiffies(jiffies + 2 * HZ));
2703 enum latency_range {
2704 lowest_latency = 0,
2705 low_latency = 1,
2706 bulk_latency = 2,
2707 latency_invalid = 255
2708 };
2712 * igb_update_ring_itr - update the dynamic ITR value based on packet size
2714 * Stores a new ITR value based strictly on packet size. This
2715 * algorithm is less sophisticated than that used in igb_update_itr,
2716 * due to the difficulty of synchronizing statistics across multiple
2717 * receive rings. The divisors and thresholds used by this function
2718 * were determined based on theoretical maximum wire speed and testing
2719 * data, in order to minimize response time while increasing bulk
2720 * throughput.
2721 * This functionality is controlled by the InterruptThrottleRate module
2722 * parameter (see igb_param.c)
2723 * NOTE: This function is called only when operating in a multiqueue
2724 * receive environment.
2725 * @rx_ring: pointer to ring
2727 static void igb_update_ring_itr(struct igb_ring *rx_ring)
2729 int new_val = rx_ring->itr_val;
2730 int avg_wire_size = 0;
2731 struct igb_adapter *adapter = rx_ring->adapter;
2733 if (!rx_ring->total_packets)
2734 goto clear_counts; /* no packets, so don't do anything */
2736 /* For non-gigabit speeds, just fix the interrupt rate at 4000
2737 * ints/sec - ITR timer value of 120 ticks.
2739 if (adapter->link_speed != SPEED_1000) {
2740 new_val = 120;
2741 goto set_itr_val;
2742 }
2743 avg_wire_size = rx_ring->total_bytes / rx_ring->total_packets;
2745 /* Add 24 bytes to size to account for CRC, preamble, and gap */
2746 avg_wire_size += 24;
2748 /* Don't starve jumbo frames */
2749 avg_wire_size = min(avg_wire_size, 3000);
2751 /* Give a little boost to mid-size frames */
2752 if ((avg_wire_size > 300) && (avg_wire_size < 1200))
2753 new_val = avg_wire_size / 3;
2755 new_val = avg_wire_size / 2;
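/* Note that new_val is an interval, not a rate, so a larger average
 * frame size means fewer interrupts; e.g. a stream of 1500-byte
 * frames gives avg_wire_size 1524 and hence new_val 762. */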
2757 set_itr_val:
2758 if (new_val != rx_ring->itr_val) {
2759 rx_ring->itr_val = new_val;
2760 rx_ring->set_itr = 1;
2762 clear_counts:
2763 rx_ring->total_bytes = 0;
2764 rx_ring->total_packets = 0;
2768 * igb_update_itr - update the dynamic ITR value based on statistics
2769 * Stores a new ITR value based on packets and byte
2770 * counts during the last interrupt. The advantage of per interrupt
2771 * computation is faster updates and more accurate ITR for the current
2772 * traffic pattern. Constants in this function were computed
2773 * based on theoretical maximum wire speed and thresholds were set based
2774 * on testing data as well as attempting to minimize response time
2775 * while increasing bulk throughput.
2776 * This functionality is controlled by the InterruptThrottleRate module
2777 * parameter (see igb_param.c)
2778 * NOTE: These calculations are only valid when operating in a single-
2779 * queue environment.
2780 * @adapter: pointer to adapter
2781 * @itr_setting: current adapter->itr
2782 * @packets: the number of packets during this measurement interval
2783 * @bytes: the number of bytes during this measurement interval
2785 static unsigned int igb_update_itr(struct igb_adapter *adapter, u16 itr_setting,
2786 int packets, int bytes)
2788 unsigned int retval = itr_setting;
2791 goto update_itr_done;
2793 switch (itr_setting) {
2794 case lowest_latency:
2795 /* handle TSO and jumbo frames */
2796 if (bytes/packets > 8000)
2797 retval = bulk_latency;
2798 else if ((packets < 5) && (bytes > 512))
2799 retval = low_latency;
2801 case low_latency: /* 50 usec aka 20000 ints/s */
2802 if (bytes > 10000) {
2803 /* this if handles the TSO accounting */
2804 if (bytes/packets > 8000) {
2805 retval = bulk_latency;
2806 } else if ((packets < 10) || ((bytes/packets) > 1200)) {
2807 retval = bulk_latency;
2808 } else if ((packets > 35)) {
2809 retval = lowest_latency;
2811 } else if (bytes/packets > 2000) {
2812 retval = bulk_latency;
2813 } else if (packets <= 2 && bytes < 512) {
2814 retval = lowest_latency;
2817 case bulk_latency: /* 250 usec aka 4000 ints/s */
2818 if (bytes > 25000) {
2819 if (packets > 35)
2820 retval = low_latency;
2821 } else if (bytes < 1500) {
2822 retval = low_latency;
2831 static void igb_set_itr(struct igb_adapter *adapter)
2834 u32 new_itr = adapter->itr;
2836 /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
2837 if (adapter->link_speed != SPEED_1000) {
2838 current_itr = 0;
2839 new_itr = 4000;
2840 goto set_itr_now;
2841 }
2843 adapter->rx_itr = igb_update_itr(adapter,
2845 adapter->rx_ring->total_packets,
2846 adapter->rx_ring->total_bytes);
2848 if (adapter->rx_ring->buddy) {
2849 adapter->tx_itr = igb_update_itr(adapter,
2851 adapter->tx_ring->total_packets,
2852 adapter->tx_ring->total_bytes);
2853 current_itr = max(adapter->rx_itr, adapter->tx_itr);
2855 current_itr = adapter->rx_itr;
2858 /* conservative mode (itr 3) eliminates the lowest_latency setting */
2859 if (adapter->itr_setting == 3 && current_itr == lowest_latency)
2860 current_itr = low_latency;
2862 switch (current_itr) {
2863 /* counts and packets in update_itr are dependent on these numbers */
2864 case lowest_latency:
2865 new_itr = 70000;
2866 break;
2867 case low_latency:
2868 new_itr = 20000; /* aka hwitr = ~200 */
2869 break;
2870 case bulk_latency:
2871 new_itr = 4000;
2872 break;
2873 default:
2874 break;
2875 }
2878 adapter->rx_ring->total_bytes = 0;
2879 adapter->rx_ring->total_packets = 0;
2880 if (adapter->rx_ring->buddy) {
2881 adapter->rx_ring->buddy->total_bytes = 0;
2882 adapter->rx_ring->buddy->total_packets = 0;
2884 set_itr_now:
2885 if (new_itr != adapter->itr) {
2886 /* this attempts to bias the interrupt rate towards Bulk
2887 * by adding intermediate steps when interrupt rate is
2888 * increasing */
2889 new_itr = new_itr > adapter->itr ?
2890 min(adapter->itr + (new_itr >> 2), new_itr) :
2891 new_itr;
2892 /* Don't write the value here; it resets the adapter's
2893 * internal timer, and causes us to delay far longer than
2894 * we should between interrupts. Instead, we write the ITR
2895 * value at the beginning of the next interrupt so the timing
2896 * ends up being correct.
2898 adapter->itr = new_itr;
2899 adapter->rx_ring->itr_val = 1000000000 / (new_itr * 256);
2900 adapter->rx_ring->set_itr = 1;
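/* The itr_val expression above converts interrupts/sec into the
 * EITR interval, which the hardware counts in 256 ns units:
 * 10^9 / (rate * 256); e.g. 20000 ints/s becomes roughly 195. */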
2907 #define IGB_TX_FLAGS_CSUM 0x00000001
2908 #define IGB_TX_FLAGS_VLAN 0x00000002
2909 #define IGB_TX_FLAGS_TSO 0x00000004
2910 #define IGB_TX_FLAGS_IPV4 0x00000008
2911 #define IGB_TX_FLAGS_TSTAMP 0x00000010
2912 #define IGB_TX_FLAGS_VLAN_MASK 0xffff0000
2913 #define IGB_TX_FLAGS_VLAN_SHIFT 16
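/* The low bits of tx_flags are simple booleans while the top 16 bits
 * carry the 802.1Q tag, built as, e.g.:
 *   tx_flags |= IGB_TX_FLAGS_VLAN;
 *   tx_flags |= (vlan_tx_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
 * which is how igb_xmit_frame_ring_adv assembles them below. */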
2915 static inline int igb_tso_adv(struct igb_adapter *adapter,
2916 struct igb_ring *tx_ring,
2917 struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
2919 struct e1000_adv_tx_context_desc *context_desc;
2922 struct igb_buffer *buffer_info;
2923 u32 info = 0, tu_cmd = 0;
2924 u32 mss_l4len_idx, l4len;
2927 if (skb_header_cloned(skb)) {
2928 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2929 if (err)
2930 return err;
2931 }
2933 l4len = tcp_hdrlen(skb);
2936 if (skb->protocol == htons(ETH_P_IP)) {
2937 struct iphdr *iph = ip_hdr(skb);
2938 iph->tot_len = 0;
2939 iph->check = 0;
2940 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2941 iph->daddr, 0,
2942 IPPROTO_TCP,
2943 0);
2944 } else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) {
2945 ipv6_hdr(skb)->payload_len = 0;
2946 tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2947 &ipv6_hdr(skb)->daddr,
2948 0, IPPROTO_TCP, 0);
2949 }
2951 i = tx_ring->next_to_use;
2953 buffer_info = &tx_ring->buffer_info[i];
2954 context_desc = E1000_TX_CTXTDESC_ADV(*tx_ring, i);
2955 /* VLAN MACLEN IPLEN */
2956 if (tx_flags & IGB_TX_FLAGS_VLAN)
2957 info |= (tx_flags & IGB_TX_FLAGS_VLAN_MASK);
2958 info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
2959 *hdr_len += skb_network_offset(skb);
2960 info |= skb_network_header_len(skb);
2961 *hdr_len += skb_network_header_len(skb);
2962 context_desc->vlan_macip_lens = cpu_to_le32(info);
2964 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
2965 tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);
2967 if (skb->protocol == htons(ETH_P_IP))
2968 tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
2969 tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
2971 context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);
2974 mss_l4len_idx = (skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT);
2975 mss_l4len_idx |= (l4len << E1000_ADVTXD_L4LEN_SHIFT);
2977 /* For 82575, context index must be unique per ring. */
2978 if (adapter->flags & IGB_FLAG_NEED_CTX_IDX)
2979 mss_l4len_idx |= tx_ring->queue_index << 4;
2981 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
2982 context_desc->seqnum_seed = 0;
2984 buffer_info->time_stamp = jiffies;
2985 buffer_info->next_to_watch = i;
2986 buffer_info->dma = 0;
2987 i++;
2988 if (i == tx_ring->count)
2989 i = 0;
2991 tx_ring->next_to_use = i;
2996 static inline bool igb_tx_csum_adv(struct igb_adapter *adapter,
2997 struct igb_ring *tx_ring,
2998 struct sk_buff *skb, u32 tx_flags)
3000 struct e1000_adv_tx_context_desc *context_desc;
3002 struct igb_buffer *buffer_info;
3003 u32 info = 0, tu_cmd = 0;
3005 if ((skb->ip_summed == CHECKSUM_PARTIAL) ||
3006 (tx_flags & IGB_TX_FLAGS_VLAN)) {
3007 i = tx_ring->next_to_use;
3008 buffer_info = &tx_ring->buffer_info[i];
3009 context_desc = E1000_TX_CTXTDESC_ADV(*tx_ring, i);
3011 if (tx_flags & IGB_TX_FLAGS_VLAN)
3012 info |= (tx_flags & IGB_TX_FLAGS_VLAN_MASK);
3013 info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
3014 if (skb->ip_summed == CHECKSUM_PARTIAL)
3015 info |= skb_network_header_len(skb);
3017 context_desc->vlan_macip_lens = cpu_to_le32(info);
3019 tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);
3021 if (skb->ip_summed == CHECKSUM_PARTIAL) {
3024 if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) {
3025 const struct vlan_ethhdr *vhdr =
3026 (const struct vlan_ethhdr*)skb->data;
3028 protocol = vhdr->h_vlan_encapsulated_proto;
3030 protocol = skb->protocol;
3034 case cpu_to_be16(ETH_P_IP):
3035 tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
3036 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
3037 tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
3039 case cpu_to_be16(ETH_P_IPV6):
3040 /* XXX what about other V6 headers?? */
3041 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
3042 tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
3045 if (unlikely(net_ratelimit()))
3046 dev_warn(&adapter->pdev->dev,
3047 "partial checksum but proto=%x!\n",
3053 context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);
3054 context_desc->seqnum_seed = 0;
3055 if (adapter->flags & IGB_FLAG_NEED_CTX_IDX)
3056 context_desc->mss_l4len_idx =
3057 cpu_to_le32(tx_ring->queue_index << 4);
3059 context_desc->mss_l4len_idx = 0;
3061 buffer_info->time_stamp = jiffies;
3062 buffer_info->next_to_watch = i;
3063 buffer_info->dma = 0;
3065 i++;
3066 if (i == tx_ring->count)
3067 i = 0;
3068 tx_ring->next_to_use = i;
3075 #define IGB_MAX_TXD_PWR 16
3076 #define IGB_MAX_DATA_PER_TXD (1<<IGB_MAX_TXD_PWR)
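/* The length field of an advanced data descriptor is 16 bits wide,
 * so a single descriptor can describe just under 64 KB; the BUG_ON
 * checks below rely on callers never mapping a segment of
 * IGB_MAX_DATA_PER_TXD bytes or more. */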
3078 static inline int igb_tx_map_adv(struct igb_adapter *adapter,
3079 struct igb_ring *tx_ring, struct sk_buff *skb,
3082 struct igb_buffer *buffer_info;
3083 unsigned int len = skb_headlen(skb);
3084 unsigned int count = 0, i;
3087 i = tx_ring->next_to_use;
3089 buffer_info = &tx_ring->buffer_info[i];
3090 BUG_ON(len >= IGB_MAX_DATA_PER_TXD);
3091 buffer_info->length = len;
3092 /* set time_stamp *before* dma to help avoid a possible race */
3093 buffer_info->time_stamp = jiffies;
3094 buffer_info->next_to_watch = i;
3095 buffer_info->dma = pci_map_single(adapter->pdev, skb->data, len,
3096 PCI_DMA_TODEVICE);
3097 count++;
3098 i++;
3099 if (i == tx_ring->count)
3102 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
3103 struct skb_frag_struct *frag;
3105 frag = &skb_shinfo(skb)->frags[f];
3106 len = frag->size;
3108 buffer_info = &tx_ring->buffer_info[i];
3109 BUG_ON(len >= IGB_MAX_DATA_PER_TXD);
3110 buffer_info->length = len;
3111 buffer_info->time_stamp = jiffies;
3112 buffer_info->next_to_watch = i;
3113 buffer_info->dma = pci_map_page(adapter->pdev,
3114 frag->page,
3115 frag->page_offset,
3116 len,
3117 PCI_DMA_TODEVICE);
3119 count++;
3120 i++;
3121 if (i == tx_ring->count)
3125 i = ((i == 0) ? tx_ring->count - 1 : i - 1);
3126 tx_ring->buffer_info[i].skb = skb;
3127 tx_ring->buffer_info[first].next_to_watch = i;
3132 static inline void igb_tx_queue_adv(struct igb_adapter *adapter,
3133 struct igb_ring *tx_ring,
3134 int tx_flags, int count, u32 paylen,
3137 union e1000_adv_tx_desc *tx_desc = NULL;
3138 struct igb_buffer *buffer_info;
3139 u32 olinfo_status = 0, cmd_type_len;
3142 cmd_type_len = (E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_IFCS |
3143 E1000_ADVTXD_DCMD_DEXT);
3145 if (tx_flags & IGB_TX_FLAGS_VLAN)
3146 cmd_type_len |= E1000_ADVTXD_DCMD_VLE;
3148 if (tx_flags & IGB_TX_FLAGS_TSTAMP)
3149 cmd_type_len |= E1000_ADVTXD_MAC_TSTAMP;
3151 if (tx_flags & IGB_TX_FLAGS_TSO) {
3152 cmd_type_len |= E1000_ADVTXD_DCMD_TSE;
3154 /* insert tcp checksum */
3155 olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
3157 /* insert ip checksum */
3158 if (tx_flags & IGB_TX_FLAGS_IPV4)
3159 olinfo_status |= E1000_TXD_POPTS_IXSM << 8;
3161 } else if (tx_flags & IGB_TX_FLAGS_CSUM) {
3162 olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
3165 if ((adapter->flags & IGB_FLAG_NEED_CTX_IDX) &&
3166 (tx_flags & (IGB_TX_FLAGS_CSUM | IGB_TX_FLAGS_TSO |
3167 IGB_TX_FLAGS_VLAN)))
3168 olinfo_status |= tx_ring->queue_index << 4;
3170 olinfo_status |= ((paylen - hdr_len) << E1000_ADVTXD_PAYLEN_SHIFT);
3172 i = tx_ring->next_to_use;
3173 while (count--) {
3174 buffer_info = &tx_ring->buffer_info[i];
3175 tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
3176 tx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
3177 tx_desc->read.cmd_type_len =
3178 cpu_to_le32(cmd_type_len | buffer_info->length);
3179 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
3180 i++;
3181 if (i == tx_ring->count)
3182 i = 0;
3183 }
3185 tx_desc->read.cmd_type_len |= cpu_to_le32(adapter->txd_cmd);
3186 /* Force memory writes to complete before letting h/w
3187 * know there are new descriptors to fetch. (Only
3188 * applicable for weak-ordered memory model archs,
3189 * such as IA-64). */
3192 tx_ring->next_to_use = i;
3193 writel(i, adapter->hw.hw_addr + tx_ring->tail);
3194 /* we need this if more than one processor can write to our tail
3195 * at a time; it synchronizes IO on IA64/Altix systems */
3199 static int __igb_maybe_stop_tx(struct net_device *netdev,
3200 struct igb_ring *tx_ring, int size)
3202 struct igb_adapter *adapter = netdev_priv(netdev);
3204 netif_stop_subqueue(netdev, tx_ring->queue_index);
3206 /* Herbert's original patch had:
3207 * smp_mb__after_netif_stop_queue();
3208 * but since that doesn't exist yet, just open code it. */
3211 /* We need to check again in a case another CPU has just
3212 * made room available. */
3213 if (igb_desc_unused(tx_ring) < size)
3217 netif_wake_subqueue(netdev, tx_ring->queue_index);
3218 ++adapter->restart_queue;
3222 static int igb_maybe_stop_tx(struct net_device *netdev,
3223 struct igb_ring *tx_ring, int size)
3225 if (igb_desc_unused(tx_ring) >= size)
3227 return __igb_maybe_stop_tx(netdev, tx_ring, size);
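/* The two-stage stop above: the inline fast path only reads the
 * free-descriptor count, while the __igb_maybe_stop_tx slow path
 * stops the subqueue, re-checks after the barrier and wakes the
 * queue again if the cleanup path freed descriptors in the meantime,
 * closing the race with igb_clean_tx_irq. */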
3230 static int igb_xmit_frame_ring_adv(struct sk_buff *skb,
3231 struct net_device *netdev,
3232 struct igb_ring *tx_ring)
3234 struct igb_adapter *adapter = netdev_priv(netdev);
3236 unsigned int tx_flags = 0;
3239 union skb_shared_tx *shtx;
3241 if (test_bit(__IGB_DOWN, &adapter->state)) {
3242 dev_kfree_skb_any(skb);
3243 return NETDEV_TX_OK;
3246 if (skb->len <= 0) {
3247 dev_kfree_skb_any(skb);
3248 return NETDEV_TX_OK;
3251 /* need: 1 descriptor per page,
3252 * + 2 desc gap to keep tail from touching head,
3253 * + 1 desc for skb->data,
3254 * + 1 desc for context descriptor,
3255 * otherwise try next time */
3256 if (igb_maybe_stop_tx(netdev, tx_ring, skb_shinfo(skb)->nr_frags + 4)) {
3257 /* this is a hard error */
3258 return NETDEV_TX_BUSY;
3262 * TODO: check that there currently is no other packet with
3263 * time stamping in the queue
3265 * When doing time stamping, keep the connection to the socket
3266 * a while longer: it is still needed by skb_hwtstamp_tx(),
3267 * called either in igb_tx_hwtstamp() or by our caller when
3268 * doing software time stamping.
3269 */
3270 shtx = skb_tx(skb);
3271 if (unlikely(shtx->hardware)) {
3272 shtx->in_progress = 1;
3273 tx_flags |= IGB_TX_FLAGS_TSTAMP;
3276 if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
3277 tx_flags |= IGB_TX_FLAGS_VLAN;
3278 tx_flags |= (vlan_tx_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
3281 if (skb->protocol == htons(ETH_P_IP))
3282 tx_flags |= IGB_TX_FLAGS_IPV4;
3284 first = tx_ring->next_to_use;
3285 tso = skb_is_gso(skb) ? igb_tso_adv(adapter, tx_ring, skb, tx_flags,
3286 &hdr_len) : 0;
3288 if (tso < 0) {
3289 dev_kfree_skb_any(skb);
3290 return NETDEV_TX_OK;
3291 }
3293 if (tso)
3294 tx_flags |= IGB_TX_FLAGS_TSO;
3295 else if (igb_tx_csum_adv(adapter, tx_ring, skb, tx_flags) &&
3296 (skb->ip_summed == CHECKSUM_PARTIAL))
3297 tx_flags |= IGB_TX_FLAGS_CSUM;
3299 igb_tx_queue_adv(adapter, tx_ring, tx_flags,
3300 igb_tx_map_adv(adapter, tx_ring, skb, first),
3303 netdev->trans_start = jiffies;
3305 /* Make sure there is space in the ring for the next send. */
3306 igb_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 4);
3308 return NETDEV_TX_OK;
3311 static int igb_xmit_frame_adv(struct sk_buff *skb, struct net_device *netdev)
3313 struct igb_adapter *adapter = netdev_priv(netdev);
3314 struct igb_ring *tx_ring;
3317 r_idx = skb->queue_mapping & (IGB_ABS_MAX_TX_QUEUES - 1);
3318 tx_ring = adapter->multi_tx_table[r_idx];
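/* queue_mapping was chosen by the stack's queue selection; ANDing
 * with IGB_ABS_MAX_TX_QUEUES - 1 is a cheap modulo, which assumes
 * (as the mask implies) that the table size is a power of two. */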
3320 /* This goes back to the question of how to logically map a tx queue
3321 * to a flow. Right now, performance is impacted slightly negatively
3322 * if using multiple tx queues. If the stack breaks away from a
3323 * single qdisc implementation, we can look at this again. */
3324 return (igb_xmit_frame_ring_adv(skb, netdev, tx_ring));
3328 * igb_tx_timeout - Respond to a Tx Hang
3329 * @netdev: network interface device structure
3331 static void igb_tx_timeout(struct net_device *netdev)
3333 struct igb_adapter *adapter = netdev_priv(netdev);
3334 struct e1000_hw *hw = &adapter->hw;
3336 /* Do the reset outside of interrupt context */
3337 adapter->tx_timeout_count++;
3338 schedule_work(&adapter->reset_task);
3339 wr32(E1000_EICS,
3340 (adapter->eims_enable_mask & ~adapter->eims_other));
3343 static void igb_reset_task(struct work_struct *work)
3345 struct igb_adapter *adapter;
3346 adapter = container_of(work, struct igb_adapter, reset_task);
3348 igb_reinit_locked(adapter);
3352 * igb_get_stats - Get System Network Statistics
3353 * @netdev: network interface device structure
3355 * Returns the address of the device statistics structure.
3356 * The statistics are actually updated from the timer callback.
3358 static struct net_device_stats *igb_get_stats(struct net_device *netdev)
3360 struct igb_adapter *adapter = netdev_priv(netdev);
3362 /* only return the current stats */
3363 return &adapter->net_stats;
3367 * igb_change_mtu - Change the Maximum Transfer Unit
3368 * @netdev: network interface device structure
3369 * @new_mtu: new value for maximum frame size
3371 * Returns 0 on success, negative on failure
3373 static int igb_change_mtu(struct net_device *netdev, int new_mtu)
3375 struct igb_adapter *adapter = netdev_priv(netdev);
3376 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
3378 if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) ||
3379 (max_frame > MAX_JUMBO_FRAME_SIZE)) {
3380 dev_err(&adapter->pdev->dev, "Invalid MTU setting\n");
3384 if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
3385 dev_err(&adapter->pdev->dev, "MTU > 9216 not supported.\n");
3389 while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
3392 /* igb_down has a dependency on max_frame_size */
3393 adapter->max_frame_size = max_frame;
3394 if (netif_running(netdev))
3395 igb_down(adapter);
3397 /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
3398 * means we reserve 2 more; this pushes us to allocate from the next
3399 * larger slab size.
3400 * i.e. RXBUFFER_2048 --> size-4096 slab
3403 if (max_frame <= IGB_RXBUFFER_256)
3404 adapter->rx_buffer_len = IGB_RXBUFFER_256;
3405 else if (max_frame <= IGB_RXBUFFER_512)
3406 adapter->rx_buffer_len = IGB_RXBUFFER_512;
3407 else if (max_frame <= IGB_RXBUFFER_1024)
3408 adapter->rx_buffer_len = IGB_RXBUFFER_1024;
3409 else if (max_frame <= IGB_RXBUFFER_2048)
3410 adapter->rx_buffer_len = IGB_RXBUFFER_2048;
3411 else
3412 #if (PAGE_SIZE / 2) > IGB_RXBUFFER_16384
3413 adapter->rx_buffer_len = IGB_RXBUFFER_16384;
3414 #else
3415 adapter->rx_buffer_len = PAGE_SIZE / 2;
3416 #endif
3418 /* if sr-iov is enabled we need to force buffer size to 1K or larger */
3419 if (adapter->vfs_allocated_count &&
3420 (adapter->rx_buffer_len < IGB_RXBUFFER_1024))
3421 adapter->rx_buffer_len = IGB_RXBUFFER_1024;
3423 /* adjust allocation if LPE protects us, and we aren't using SBP */
3424 if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) ||
3425 (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE))
3426 adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
3428 dev_info(&adapter->pdev->dev, "changing MTU from %d to %d\n",
3429 netdev->mtu, new_mtu);
3430 netdev->mtu = new_mtu;
3432 if (netif_running(netdev))
3433 igb_up(adapter);
3434 else
3435 igb_reset(adapter);
3437 clear_bit(__IGB_RESETTING, &adapter->state);
3443 * igb_update_stats - Update the board statistics counters
3444 * @adapter: board private structure
3447 void igb_update_stats(struct igb_adapter *adapter)
3449 struct e1000_hw *hw = &adapter->hw;
3450 struct pci_dev *pdev = adapter->pdev;
3453 #define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
3456 * Prevent stats update while adapter is being reset, or if the pci
3457 * connection is down.
3459 if (adapter->link_speed == 0)
3461 if (pci_channel_offline(pdev))
3464 adapter->stats.crcerrs += rd32(E1000_CRCERRS);
3465 adapter->stats.gprc += rd32(E1000_GPRC);
3466 adapter->stats.gorc += rd32(E1000_GORCL);
3467 rd32(E1000_GORCH); /* clear GORCL */
3468 adapter->stats.bprc += rd32(E1000_BPRC);
3469 adapter->stats.mprc += rd32(E1000_MPRC);
3470 adapter->stats.roc += rd32(E1000_ROC);
3472 adapter->stats.prc64 += rd32(E1000_PRC64);
3473 adapter->stats.prc127 += rd32(E1000_PRC127);
3474 adapter->stats.prc255 += rd32(E1000_PRC255);
3475 adapter->stats.prc511 += rd32(E1000_PRC511);
3476 adapter->stats.prc1023 += rd32(E1000_PRC1023);
3477 adapter->stats.prc1522 += rd32(E1000_PRC1522);
3478 adapter->stats.symerrs += rd32(E1000_SYMERRS);
3479 adapter->stats.sec += rd32(E1000_SEC);
3481 adapter->stats.mpc += rd32(E1000_MPC);
3482 adapter->stats.scc += rd32(E1000_SCC);
3483 adapter->stats.ecol += rd32(E1000_ECOL);
3484 adapter->stats.mcc += rd32(E1000_MCC);
3485 adapter->stats.latecol += rd32(E1000_LATECOL);
3486 adapter->stats.dc += rd32(E1000_DC);
3487 adapter->stats.rlec += rd32(E1000_RLEC);
3488 adapter->stats.xonrxc += rd32(E1000_XONRXC);
3489 adapter->stats.xontxc += rd32(E1000_XONTXC);
3490 adapter->stats.xoffrxc += rd32(E1000_XOFFRXC);
3491 adapter->stats.xofftxc += rd32(E1000_XOFFTXC);
3492 adapter->stats.fcruc += rd32(E1000_FCRUC);
3493 adapter->stats.gptc += rd32(E1000_GPTC);
3494 adapter->stats.gotc += rd32(E1000_GOTCL);
3495 rd32(E1000_GOTCH); /* clear GOTCL */
3496 adapter->stats.rnbc += rd32(E1000_RNBC);
3497 adapter->stats.ruc += rd32(E1000_RUC);
3498 adapter->stats.rfc += rd32(E1000_RFC);
3499 adapter->stats.rjc += rd32(E1000_RJC);
3500 adapter->stats.tor += rd32(E1000_TORH);
3501 adapter->stats.tot += rd32(E1000_TOTH);
3502 adapter->stats.tpr += rd32(E1000_TPR);
3504 adapter->stats.ptc64 += rd32(E1000_PTC64);
3505 adapter->stats.ptc127 += rd32(E1000_PTC127);
3506 adapter->stats.ptc255 += rd32(E1000_PTC255);
3507 adapter->stats.ptc511 += rd32(E1000_PTC511);
3508 adapter->stats.ptc1023 += rd32(E1000_PTC1023);
3509 adapter->stats.ptc1522 += rd32(E1000_PTC1522);
3511 adapter->stats.mptc += rd32(E1000_MPTC);
3512 adapter->stats.bptc += rd32(E1000_BPTC);
3514 /* used for adaptive IFS */
3516 hw->mac.tx_packet_delta = rd32(E1000_TPT);
3517 adapter->stats.tpt += hw->mac.tx_packet_delta;
3518 hw->mac.collision_delta = rd32(E1000_COLC);
3519 adapter->stats.colc += hw->mac.collision_delta;
3521 adapter->stats.algnerrc += rd32(E1000_ALGNERRC);
3522 adapter->stats.rxerrc += rd32(E1000_RXERRC);
3523 adapter->stats.tncrs += rd32(E1000_TNCRS);
3524 adapter->stats.tsctc += rd32(E1000_TSCTC);
3525 adapter->stats.tsctfc += rd32(E1000_TSCTFC);
3527 adapter->stats.iac += rd32(E1000_IAC);
3528 adapter->stats.icrxoc += rd32(E1000_ICRXOC);
3529 adapter->stats.icrxptc += rd32(E1000_ICRXPTC);
3530 adapter->stats.icrxatc += rd32(E1000_ICRXATC);
3531 adapter->stats.ictxptc += rd32(E1000_ICTXPTC);
3532 adapter->stats.ictxatc += rd32(E1000_ICTXATC);
3533 adapter->stats.ictxqec += rd32(E1000_ICTXQEC);
3534 adapter->stats.ictxqmtc += rd32(E1000_ICTXQMTC);
3535 adapter->stats.icrxdmtc += rd32(E1000_ICRXDMTC);
3537 /* Fill out the OS statistics structure */
3538 adapter->net_stats.multicast = adapter->stats.mprc;
3539 adapter->net_stats.collisions = adapter->stats.colc;
3543 /* RLEC on some newer hardware can be incorrect so build
3544 * our own version based on RUC and ROC */
3545 adapter->net_stats.rx_errors = adapter->stats.rxerrc +
3546 adapter->stats.crcerrs + adapter->stats.algnerrc +
3547 adapter->stats.ruc + adapter->stats.roc +
3548 adapter->stats.cexterr;
3549 adapter->net_stats.rx_length_errors = adapter->stats.ruc +
3551 adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
3552 adapter->net_stats.rx_frame_errors = adapter->stats.algnerrc;
3553 adapter->net_stats.rx_missed_errors = adapter->stats.mpc;
3556 adapter->net_stats.tx_errors = adapter->stats.ecol +
3557 adapter->stats.latecol;
3558 adapter->net_stats.tx_aborted_errors = adapter->stats.ecol;
3559 adapter->net_stats.tx_window_errors = adapter->stats.latecol;
3560 adapter->net_stats.tx_carrier_errors = adapter->stats.tncrs;
3562 /* Tx Dropped needs to be maintained elsewhere */
3565 if (hw->phy.media_type == e1000_media_type_copper) {
3566 if ((adapter->link_speed == SPEED_1000) &&
3567 (!igb_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
3568 phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
3569 adapter->phy_stats.idle_errors += phy_tmp;
3573 /* Management Stats */
3574 adapter->stats.mgptc += rd32(E1000_MGTPTC);
3575 adapter->stats.mgprc += rd32(E1000_MGTPRC);
3576 adapter->stats.mgpdc += rd32(E1000_MGTPDC);
3579 static irqreturn_t igb_msix_other(int irq, void *data)
3581 struct net_device *netdev = data;
3582 struct igb_adapter *adapter = netdev_priv(netdev);
3583 struct e1000_hw *hw = &adapter->hw;
3584 u32 icr = rd32(E1000_ICR);
3586 /* reading ICR causes bit 31 of EICR to be cleared */
3588 if (icr & E1000_ICR_DOUTSYNC) {
3589 /* HW is reporting DMA is out of sync */
3590 adapter->stats.doosync++;
3593 /* Check for a mailbox event */
3594 if (icr & E1000_ICR_VMMB)
3595 igb_msg_task(adapter);
3597 if (icr & E1000_ICR_LSC) {
3598 hw->mac.get_link_status = 1;
3599 /* guard against interrupt when we're going down */
3600 if (!test_bit(__IGB_DOWN, &adapter->state))
3601 mod_timer(&adapter->watchdog_timer, jiffies + 1);
3604 wr32(E1000_IMS, E1000_IMS_LSC | E1000_IMS_DOUTSYNC | E1000_IMS_VMMB);
3605 wr32(E1000_EIMS, adapter->eims_other);
3610 static irqreturn_t igb_msix_tx(int irq, void *data)
3612 struct igb_ring *tx_ring = data;
3613 struct igb_adapter *adapter = tx_ring->adapter;
3614 struct e1000_hw *hw = &adapter->hw;
3616 #ifdef CONFIG_IGB_DCA
3617 if (adapter->flags & IGB_FLAG_DCA_ENABLED)
3618 igb_update_tx_dca(tx_ring);
3621 tx_ring->total_bytes = 0;
3622 tx_ring->total_packets = 0;
3624 /* auto mask will automatically reenable the interrupt when we write
3625 * EICS */
3626 if (!igb_clean_tx_irq(tx_ring))
3627 /* Ring was not completely cleaned, so fire another interrupt */
3628 wr32(E1000_EICS, tx_ring->eims_value);
3630 wr32(E1000_EIMS, tx_ring->eims_value);
3635 static void igb_write_itr(struct igb_ring *ring)
3637 struct e1000_hw *hw = &ring->adapter->hw;
3638 if ((ring->adapter->itr_setting & 3) && ring->set_itr) {
3639 switch (hw->mac.type) {
3641 wr32(ring->itr_register, ring->itr_val |
3645 wr32(ring->itr_register, ring->itr_val |
3646 (ring->itr_val << 16));
3653 static irqreturn_t igb_msix_rx(int irq, void *data)
3655 struct igb_ring *rx_ring = data;
3657 /* Write the ITR value calculated at the end of the
3658 * previous interrupt.
3661 igb_write_itr(rx_ring);
3663 if (napi_schedule_prep(&rx_ring->napi))
3664 __napi_schedule(&rx_ring->napi);
3666 #ifdef CONFIG_IGB_DCA
3667 if (rx_ring->adapter->flags & IGB_FLAG_DCA_ENABLED)
3668 igb_update_rx_dca(rx_ring);
3673 #ifdef CONFIG_IGB_DCA
3674 static void igb_update_rx_dca(struct igb_ring *rx_ring)
3677 struct igb_adapter *adapter = rx_ring->adapter;
3678 struct e1000_hw *hw = &adapter->hw;
3679 int cpu = get_cpu();
3680 int q = rx_ring->reg_idx;
3682 if (rx_ring->cpu != cpu) {
3683 dca_rxctrl = rd32(E1000_DCA_RXCTRL(q));
3684 if (hw->mac.type == e1000_82576) {
3685 dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK_82576;
3686 dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
3687 E1000_DCA_RXCTRL_CPUID_SHIFT;
3689 dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK;
3690 dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
3692 dca_rxctrl |= E1000_DCA_RXCTRL_DESC_DCA_EN;
3693 dca_rxctrl |= E1000_DCA_RXCTRL_HEAD_DCA_EN;
3694 dca_rxctrl |= E1000_DCA_RXCTRL_DATA_DCA_EN;
3695 wr32(E1000_DCA_RXCTRL(q), dca_rxctrl);
3701 static void igb_update_tx_dca(struct igb_ring *tx_ring)
3704 struct igb_adapter *adapter = tx_ring->adapter;
3705 struct e1000_hw *hw = &adapter->hw;
3706 int cpu = get_cpu();
3707 int q = tx_ring->reg_idx;
3709 if (tx_ring->cpu != cpu) {
3710 dca_txctrl = rd32(E1000_DCA_TXCTRL(q));
3711 if (hw->mac.type == e1000_82576) {
3712 dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK_82576;
3713 dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
3714 E1000_DCA_TXCTRL_CPUID_SHIFT;
3716 dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK;
3717 dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
3719 dca_txctrl |= E1000_DCA_TXCTRL_DESC_DCA_EN;
3720 wr32(E1000_DCA_TXCTRL(q), dca_txctrl);
3726 static void igb_setup_dca(struct igb_adapter *adapter)
3730 if (!(adapter->flags & IGB_FLAG_DCA_ENABLED))
3733 for (i = 0; i < adapter->num_tx_queues; i++) {
3734 adapter->tx_ring[i].cpu = -1;
3735 igb_update_tx_dca(&adapter->tx_ring[i]);
3737 for (i = 0; i < adapter->num_rx_queues; i++) {
3738 adapter->rx_ring[i].cpu = -1;
3739 igb_update_rx_dca(&adapter->rx_ring[i]);
3743 static int __igb_notify_dca(struct device *dev, void *data)
3745 struct net_device *netdev = dev_get_drvdata(dev);
3746 struct igb_adapter *adapter = netdev_priv(netdev);
3747 struct e1000_hw *hw = &adapter->hw;
3748 unsigned long event = *(unsigned long *)data;
3751 case DCA_PROVIDER_ADD:
3752 /* if already enabled, don't do it again */
3753 if (adapter->flags & IGB_FLAG_DCA_ENABLED)
3755 /* Always use CB2 mode, difference is masked
3756 * in the CB driver. */
3757 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2);
3758 if (dca_add_requester(dev) == 0) {
3759 adapter->flags |= IGB_FLAG_DCA_ENABLED;
3760 dev_info(&adapter->pdev->dev, "DCA enabled\n");
3761 igb_setup_dca(adapter);
3764 /* Fall Through since DCA is disabled. */
3765 case DCA_PROVIDER_REMOVE:
3766 if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
3767 /* without this a class_device is left
3768 * hanging around in the sysfs model */
3769 dca_remove_requester(dev);
3770 dev_info(&adapter->pdev->dev, "DCA disabled\n");
3771 adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
3772 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
3780 static int igb_notify_dca(struct notifier_block *nb, unsigned long event,
3785 ret_val = driver_for_each_device(&igb_driver.driver, NULL, &event,
3788 return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
3790 #endif /* CONFIG_IGB_DCA */
3792 static void igb_ping_all_vfs(struct igb_adapter *adapter)
3794 struct e1000_hw *hw = &adapter->hw;
3798 for (i = 0 ; i < adapter->vfs_allocated_count; i++) {
3799 ping = E1000_PF_CONTROL_MSG;
3800 if (adapter->vf_data[i].clear_to_send)
3801 ping |= E1000_VT_MSGTYPE_CTS;
3802 igb_write_mbx(hw, &ping, 1, i);
3806 static int igb_set_vf_multicasts(struct igb_adapter *adapter,
3807 u32 *msgbuf, u32 vf)
3809 int n = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
3810 u16 *hash_list = (u16 *)&msgbuf[1];
3811 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
3814 /* only up to 30 hash values supported */
3818 /* salt away the number of multicast addresses assigned
3819 * to this VF for later use to restore when the PF multicast
3820 * list changes */
3822 vf_data->num_vf_mc_hashes = n;
3824 /* VFs are limited to using the MTA hash table for their multicast
3825 * addresses */
3826 for (i = 0; i < n; i++)
3827 vf_data->vf_mc_hashes[i] = hash_list[i];
3829 /* Flush and reset the mta with the new values */
3830 igb_set_multi(adapter->netdev);
3835 static void igb_restore_vf_multicasts(struct igb_adapter *adapter)
3837 struct e1000_hw *hw = &adapter->hw;
3838 struct vf_data_storage *vf_data;
3841 for (i = 0; i < adapter->vfs_allocated_count; i++) {
3842 vf_data = &adapter->vf_data[i];
3843 for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
3844 igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
3848 static void igb_clear_vf_vfta(struct igb_adapter *adapter, u32 vf)
3850 struct e1000_hw *hw = &adapter->hw;
3851 u32 pool_mask, reg, vid;
3854 pool_mask = 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);
3856 /* Find the vlan filter for this id */
3857 for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
3858 reg = rd32(E1000_VLVF(i));
3860 /* remove the vf from the pool */
3861 reg &= ~pool_mask;
3863 /* if pool is empty then remove entry from vfta */
3864 if (!(reg & E1000_VLVF_POOLSEL_MASK) &&
3865 (reg & E1000_VLVF_VLANID_ENABLE)) {
3867 vid = reg & E1000_VLVF_VLANID_MASK;
3868 igb_vfta_set(hw, vid, false);
3871 wr32(E1000_VLVF(i), reg);
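/* The VLVF array is a set of shared VLAN filter entries, each
 * pairing a VLAN ID with a pool-select bitmap of the PF/VF pools
 * that use it; a VLAN is dropped from the VFTA only once no pool
 * references the entry any more. */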
3875 static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
3877 struct e1000_hw *hw = &adapter->hw;
3880 /* It is an error to call this function when VFs are not enabled */
3881 if (!adapter->vfs_allocated_count)
3884 /* Find the vlan filter for this id */
3885 for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
3886 reg = rd32(E1000_VLVF(i));
3887 if ((reg & E1000_VLVF_VLANID_ENABLE) &&
3888 vid == (reg & E1000_VLVF_VLANID_MASK))
3893 if (i == E1000_VLVF_ARRAY_SIZE) {
3894 /* Did not find a matching VLAN ID entry that was
3895 * enabled. Search for a free filter entry, i.e.
3896 * one without the enable bit set
3898 for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
3899 reg = rd32(E1000_VLVF(i));
3900 if (!(reg & E1000_VLVF_VLANID_ENABLE))
3904 if (i < E1000_VLVF_ARRAY_SIZE) {
3905 /* Found an enabled/available entry */
3906 reg |= 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);
3908 /* if !enabled we need to set this up in vfta */
3909 if (!(reg & E1000_VLVF_VLANID_ENABLE)) {
3910 /* add VID to filter table, if bit already set
3911 * PF must have added it outside of table */
3912 if (igb_vfta_set(hw, vid, true))
3913 reg |= 1 << (E1000_VLVF_POOLSEL_SHIFT +
3914 adapter->vfs_allocated_count);
3915 reg |= E1000_VLVF_VLANID_ENABLE;
3917 reg &= ~E1000_VLVF_VLANID_MASK;
3918 reg |= vid;
3920 wr32(E1000_VLVF(i), reg);
3924 if (i < E1000_VLVF_ARRAY_SIZE) {
3925 /* remove vf from the pool */
3926 reg &= ~(1 << (E1000_VLVF_POOLSEL_SHIFT + vf));
3927 /* if pool is empty then remove entry from vfta */
3928 if (!(reg & E1000_VLVF_POOLSEL_MASK)) {
3929 reg = 0;
3930 igb_vfta_set(hw, vid, false);
3931 }
3932 wr32(E1000_VLVF(i), reg);
3939 static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
3941 int add = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
3942 int vid = (msgbuf[1] & E1000_VLVF_VLANID_MASK);
3944 return igb_vlvf_set(adapter, vid, add, vf);
3947 static inline void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf)
3949 struct e1000_hw *hw = &adapter->hw;
3951 /* disable mailbox functionality for vf */
3952 adapter->vf_data[vf].clear_to_send = false;
3954 /* reset offloads to defaults */
3955 igb_set_vmolr(hw, vf);
3957 /* reset vlans for device */
3958 igb_clear_vf_vfta(adapter, vf);
3960 /* reset multicast table array for vf */
3961 adapter->vf_data[vf].num_vf_mc_hashes = 0;
3963 /* Flush and reset the mta with the new values */
3964 igb_set_multi(adapter->netdev);
3967 static inline void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
3969 struct e1000_hw *hw = &adapter->hw;
3970 unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
3971 u32 reg, msgbuf[3];
3972 u8 *addr = (u8 *)(&msgbuf[1]);
3974 /* process all the same items cleared in a function level reset */
3975 igb_vf_reset_event(adapter, vf);
3977 /* set vf mac address */
3978 igb_rar_set(hw, vf_mac, vf + 1);
3979 igb_set_rah_pool(hw, vf, vf + 1);
3981 /* enable transmit and receive for vf */
3982 reg = rd32(E1000_VFTE);
3983 wr32(E1000_VFTE, reg | (1 << vf));
3984 reg = rd32(E1000_VFRE);
3985 wr32(E1000_VFRE, reg | (1 << vf));
3987 /* enable mailbox functionality for vf */
3988 adapter->vf_data[vf].clear_to_send = true;
3990 /* reply to reset with ack and vf mac address */
3991 msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK;
3992 memcpy(addr, vf_mac, 6);
3993 igb_write_mbx(hw, msgbuf, 3, vf);
3996 static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf)
3998 unsigned char *addr = (unsigned char *)&msg[1];
4001 if (is_valid_ether_addr(addr))
4002 err = igb_set_vf_mac(adapter, vf, addr);
4008 static void igb_rcv_ack_from_vf(struct igb_adapter *adapter, u32 vf)
4010 struct e1000_hw *hw = &adapter->hw;
4011 u32 msg = E1000_VT_MSGTYPE_NACK;
4013 /* if device isn't clear to send it shouldn't be reading either */
4014 if (!adapter->vf_data[vf].clear_to_send)
4015 igb_write_mbx(hw, &msg, 1, vf);
4019 static void igb_msg_task(struct igb_adapter *adapter)
4021 struct e1000_hw *hw = &adapter->hw;
4024 for (vf = 0; vf < adapter->vfs_allocated_count; vf++) {
4025 /* process any reset requests */
4026 if (!igb_check_for_rst(hw, vf)) {
4027 adapter->vf_data[vf].clear_to_send = false;
4028 igb_vf_reset_event(adapter, vf);
4031 /* process any messages pending */
4032 if (!igb_check_for_msg(hw, vf))
4033 igb_rcv_msg_from_vf(adapter, vf);
4035 /* process any acks */
4036 if (!igb_check_for_ack(hw, vf))
4037 igb_rcv_ack_from_vf(adapter, vf);
4042 static int igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
4044 u32 mbx_size = E1000_VFMAILBOX_SIZE;
4045 u32 msgbuf[mbx_size];
4046 struct e1000_hw *hw = &adapter->hw;
4049 retval = igb_read_mbx(hw, msgbuf, mbx_size, vf);
4052 dev_err(&adapter->pdev->dev,
4053 "Error receiving message from VF\n");
4055 /* this is a message we already processed, do nothing */
4056 if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK))
4060 * until the vf completes a reset it should not be
4061 * allowed to start any configuration.
4064 if (msgbuf[0] == E1000_VF_RESET) {
4065 igb_vf_reset_msg(adapter, vf);
4070 if (!adapter->vf_data[vf].clear_to_send) {
4071 msgbuf[0] |= E1000_VT_MSGTYPE_NACK;
4072 igb_write_mbx(hw, msgbuf, 1, vf);
4076 switch ((msgbuf[0] & 0xFFFF)) {
4077 case E1000_VF_SET_MAC_ADDR:
4078 retval = igb_set_vf_mac_addr(adapter, msgbuf, vf);
4080 case E1000_VF_SET_MULTICAST:
4081 retval = igb_set_vf_multicasts(adapter, msgbuf, vf);
4083 case E1000_VF_SET_LPE:
4084 retval = igb_set_vf_rlpml(adapter, msgbuf[1], vf);
4086 case E1000_VF_SET_VLAN:
4087 retval = igb_set_vf_vlan(adapter, msgbuf, vf);
4090 dev_err(&adapter->pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]);
4095 /* notify the VF of the results of what it sent us */
4097 msgbuf[0] |= E1000_VT_MSGTYPE_NACK;
4099 msgbuf[0] |= E1000_VT_MSGTYPE_ACK;
4101 msgbuf[0] |= E1000_VT_MSGTYPE_CTS;
4103 igb_write_mbx(hw, msgbuf, 1, vf);
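/* Mailbox reply convention used above: the original message word is
 * echoed back with ACK or NACK ORed in, plus CTS once the VF has
 * completed its reset handshake and is allowed to configure
 * itself. */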
4109 * igb_intr_msi - Interrupt Handler
4110 * @irq: interrupt number
4111 * @data: pointer to a network interface device structure
4113 static irqreturn_t igb_intr_msi(int irq, void *data)
4115 struct net_device *netdev = data;
4116 struct igb_adapter *adapter = netdev_priv(netdev);
4117 struct e1000_hw *hw = &adapter->hw;
4118 /* read ICR disables interrupts using IAM */
4119 u32 icr = rd32(E1000_ICR);
4121 igb_write_itr(adapter->rx_ring);
4123 if (icr & E1000_ICR_DOUTSYNC) {
4124 /* HW is reporting DMA is out of sync */
4125 adapter->stats.doosync++;
4128 if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
4129 hw->mac.get_link_status = 1;
4130 if (!test_bit(__IGB_DOWN, &adapter->state))
4131 mod_timer(&adapter->watchdog_timer, jiffies + 1);
4134 napi_schedule(&adapter->rx_ring[0].napi);
4140 * igb_intr - Legacy Interrupt Handler
4141 * @irq: interrupt number
4142 * @data: pointer to a network interface device structure
4144 static irqreturn_t igb_intr(int irq, void *data)
4146 struct net_device *netdev = data;
4147 struct igb_adapter *adapter = netdev_priv(netdev);
4148 struct e1000_hw *hw = &adapter->hw;
4149 /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No
4150 * need for the IMC write */
4151 u32 icr = rd32(E1000_ICR);
4153 return IRQ_NONE; /* Not our interrupt */
4155 igb_write_itr(adapter->rx_ring);
4157 /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
4158 * not set, then the adapter didn't send an interrupt */
4159 if (!(icr & E1000_ICR_INT_ASSERTED))
4162 if (icr & E1000_ICR_DOUTSYNC) {
4163 /* HW is reporting DMA is out of sync */
4164 adapter->stats.doosync++;
4167 if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
4168 hw->mac.get_link_status = 1;
4169 /* guard against interrupt when we're going down */
4170 if (!test_bit(__IGB_DOWN, &adapter->state))
4171 mod_timer(&adapter->watchdog_timer, jiffies + 1);
4174 napi_schedule(&adapter->rx_ring[0].napi);
static inline void igb_rx_irq_enable(struct igb_ring *rx_ring)
{
	struct igb_adapter *adapter = rx_ring->adapter;
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->itr_setting & 3) {
		if (adapter->num_rx_queues == 1)
			igb_set_itr(adapter);
		else
			igb_update_ring_itr(rx_ring);
	}

	if (!test_bit(__IGB_DOWN, &adapter->state)) {
		if (adapter->msix_entries)
			wr32(E1000_EIMS, rx_ring->eims_value);
		else
			igb_irq_enable(adapter);
	}
}
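
/*
 * Aside (my reading of the logic above, worth verifying against the ITR
 * module parameter handling): "itr_setting & 3" is the driver's test for
 * the adaptive interrupt-throttle modes, so fixed-rate settings skip the
 * per-ring ITR recalculation entirely and only the interrupt re-arm runs.
 */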
/**
 * igb_poll - NAPI Rx polling callback
 * @napi: napi polling structure
 * @budget: count of how many packets we should handle
 **/
static int igb_poll(struct napi_struct *napi, int budget)
{
	struct igb_ring *rx_ring = container_of(napi, struct igb_ring, napi);
	int work_done = 0;

#ifdef CONFIG_IGB_DCA
	if (rx_ring->adapter->flags & IGB_FLAG_DCA_ENABLED)
		igb_update_rx_dca(rx_ring);
#endif
	igb_clean_rx_irq_adv(rx_ring, &work_done, budget);

	if (rx_ring->buddy) {
#ifdef CONFIG_IGB_DCA
		if (rx_ring->adapter->flags & IGB_FLAG_DCA_ENABLED)
			igb_update_tx_dca(rx_ring->buddy);
#endif
		if (!igb_clean_tx_irq(rx_ring->buddy))
			work_done = budget;
	}

	/* If not enough Rx work done, exit the polling mode */
	if (work_done < budget) {
		napi_complete(napi);
		igb_rx_irq_enable(rx_ring);
	}

	return work_done;
}
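
/*
 * NAPI contract recap for the poll routine above: returning a value equal
 * to budget keeps the ring in polled mode (note how an unfinished Tx buddy
 * ring forces work_done = budget), while returning less than budget lets
 * napi_complete() run and interrupts get re-armed via igb_rx_irq_enable().
 */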
/**
 * igb_tx_hwtstamp - utility function which checks for TX time stamp
 * @adapter: board private structure
 * @skb: packet that was just sent
 *
 * If we were asked to do hardware stamping and such a time stamp is
 * available, then it must have been for this skb here because we only
 * allow one such packet into the queue.
 */
static void igb_tx_hwtstamp(struct igb_adapter *adapter, struct sk_buff *skb)
{
	union skb_shared_tx *shtx = skb_tx(skb);
	struct e1000_hw *hw = &adapter->hw;

	if (unlikely(shtx->hardware)) {
		u32 valid = rd32(E1000_TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID;
		if (valid) {
			u64 regval = rd32(E1000_TXSTMPL);
			u64 ns;
			struct skb_shared_hwtstamps shhwtstamps;

			memset(&shhwtstamps, 0, sizeof(shhwtstamps));
			regval |= (u64)rd32(E1000_TXSTMPH) << 32;
			ns = timecounter_cyc2time(&adapter->clock,
						  regval);
			timecompare_update(&adapter->compare, ns);
			shhwtstamps.hwtstamp = ns_to_ktime(ns);
			shhwtstamps.syststamp =
				timecompare_transform(&adapter->compare, ns);
			skb_tstamp_tx(skb, &shhwtstamps);
		}
	}
}
/**
 * igb_clean_tx_irq - Reclaim resources after transmit completes
 * @tx_ring: Tx ring to be cleaned
 * returns true if ring is completely cleaned
 **/
static bool igb_clean_tx_irq(struct igb_ring *tx_ring)
{
	struct igb_adapter *adapter = tx_ring->adapter;
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	struct igb_buffer *buffer_info;
	struct sk_buff *skb;
	union e1000_adv_tx_desc *tx_desc, *eop_desc;
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int i, eop, count = 0;
	bool cleaned = false;

	i = tx_ring->next_to_clean;
	eop = tx_ring->buffer_info[i].next_to_watch;
	eop_desc = E1000_TX_DESC_ADV(*tx_ring, eop);

	while ((eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)) &&
	       (count < tx_ring->count)) {
		for (cleaned = false; !cleaned; count++) {
			tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
			buffer_info = &tx_ring->buffer_info[i];
			cleaned = (i == eop);
			skb = buffer_info->skb;

			if (skb) {
				unsigned int segs, bytecount;
				/* gso_segs is currently only valid for tcp */
				segs = skb_shinfo(skb)->gso_segs ?: 1;
				/* multiply data chunks by size of headers */
				bytecount = ((segs - 1) * skb_headlen(skb)) +
					    skb->len;
				total_packets += segs;
				total_bytes += bytecount;

				igb_tx_hwtstamp(adapter, skb);
			}

			igb_unmap_and_free_tx_resource(adapter, buffer_info);
			tx_desc->wb.status = 0;

			i++;
			if (i == tx_ring->count)
				i = 0;
		}
		eop = tx_ring->buffer_info[i].next_to_watch;
		eop_desc = E1000_TX_DESC_ADV(*tx_ring, eop);
	}

	tx_ring->next_to_clean = i;

	if (unlikely(count &&
		     netif_carrier_ok(netdev) &&
		     igb_desc_unused(tx_ring) >= IGB_TX_QUEUE_WAKE)) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
		    !(test_bit(__IGB_DOWN, &adapter->state))) {
			netif_wake_subqueue(netdev, tx_ring->queue_index);
			++adapter->restart_queue;
		}
	}

	if (tx_ring->detect_tx_hung) {
		/* Detect a transmit hang in hardware, this serializes the
		 * check with the clearing of time_stamp and movement of i */
		tx_ring->detect_tx_hung = false;
		if (tx_ring->buffer_info[i].time_stamp &&
		    time_after(jiffies, tx_ring->buffer_info[i].time_stamp +
			       (adapter->tx_timeout_factor * HZ))
		    && !(rd32(E1000_STATUS) & E1000_STATUS_TXOFF)) {

			/* detected Tx unit hang */
			dev_err(&adapter->pdev->dev,
				"Detected Tx Unit Hang\n"
				"  Tx Queue             <%d>\n"
				"  TDH                  <%x>\n"
				"  TDT                  <%x>\n"
				"  next_to_use          <%x>\n"
				"  next_to_clean        <%x>\n"
				"buffer_info[next_to_clean]\n"
				"  time_stamp           <%lx>\n"
				"  next_to_watch        <%x>\n"
				"  jiffies              <%lx>\n"
				"  desc.status          <%x>\n",
				tx_ring->queue_index,
				readl(adapter->hw.hw_addr + tx_ring->head),
				readl(adapter->hw.hw_addr + tx_ring->tail),
				tx_ring->next_to_use,
				tx_ring->next_to_clean,
				tx_ring->buffer_info[i].time_stamp,
				eop,
				jiffies,
				eop_desc->wb.status);
			netif_stop_subqueue(netdev, tx_ring->queue_index);
		}
	}
	tx_ring->total_bytes += total_bytes;
	tx_ring->total_packets += total_packets;
	tx_ring->tx_stats.bytes += total_bytes;
	tx_ring->tx_stats.packets += total_packets;
	adapter->net_stats.tx_bytes += total_bytes;
	adapter->net_stats.tx_packets += total_packets;
	return (count < tx_ring->count);
}
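
/*
 * Worked example for the byte accounting above: a TSO skb with
 * skb->len = 7240, skb_headlen() = 140 (headers) and gso_segs = 5 is
 * counted as 5 packets and (5 - 1) * 140 + 7240 = 7800 bytes, since the
 * hardware replicates the header for every segment it puts on the wire.
 */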
/**
 * igb_receive_skb - helper function to handle rx indications
 * @ring: pointer to receive ring receiving this packet
 * @status: descriptor status field as written by hardware
 * @rx_desc: receive descriptor containing vlan and type information.
 * @skb: pointer to sk_buff to be indicated to stack
 **/
static void igb_receive_skb(struct igb_ring *ring, u8 status,
                            union e1000_adv_rx_desc *rx_desc,
                            struct sk_buff *skb)
{
	struct igb_adapter *adapter = ring->adapter;
	bool vlan_extracted = (adapter->vlgrp && (status & E1000_RXD_STAT_VP));

	skb_record_rx_queue(skb, ring->queue_index);
	if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
		if (vlan_extracted)
			vlan_gro_receive(&ring->napi, adapter->vlgrp,
			                 le16_to_cpu(rx_desc->wb.upper.vlan),
			                 skb);
		else
			napi_gro_receive(&ring->napi, skb);
	} else {
		if (vlan_extracted)
			vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
			                         le16_to_cpu(rx_desc->wb.upper.vlan));
		else
			netif_receive_skb(skb);
	}
}
static inline void igb_rx_checksum_adv(struct igb_adapter *adapter,
				       u32 status_err, struct sk_buff *skb)
{
	skb->ip_summed = CHECKSUM_NONE;

	/* Ignore Checksum bit is set or checksum is disabled through ethtool */
	if ((status_err & E1000_RXD_STAT_IXSM) || !adapter->rx_csum)
		return;
	/* TCP/UDP checksum error bit is set */
	if (status_err &
	    (E1000_RXDEXT_STATERR_TCPE | E1000_RXDEXT_STATERR_IPE)) {
		/* let the stack verify checksum errors */
		adapter->hw_csum_err++;
		return;
	}
	/* It must be a TCP or UDP packet with a valid checksum */
	if (status_err & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	adapter->hw_csum_good++;
}
static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring,
				 int *work_done, int budget)
{
	struct igb_adapter *adapter = rx_ring->adapter;
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;
	union e1000_adv_rx_desc *rx_desc, *next_rxd;
	struct igb_buffer *buffer_info, *next_buffer;
	struct sk_buff *skb;
	bool cleaned = false;
	int cleaned_count = 0;
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int i;
	u32 length, hlen, staterr;

	i = rx_ring->next_to_clean;
	buffer_info = &rx_ring->buffer_info[i];
	rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	while (staterr & E1000_RXD_STAT_DD) {
		if (*work_done >= budget)
			break;
		(*work_done)++;

		skb = buffer_info->skb;
		prefetch(skb->data - NET_IP_ALIGN);
		buffer_info->skb = NULL;

		i++;
		if (i == rx_ring->count)
			i = 0;
		next_rxd = E1000_RX_DESC_ADV(*rx_ring, i);
		prefetch(next_rxd);
		next_buffer = &rx_ring->buffer_info[i];

		length = le16_to_cpu(rx_desc->wb.upper.length);
		cleaned = true;
		cleaned_count++;

		if (!adapter->rx_ps_hdr_size) {
			pci_unmap_single(pdev, buffer_info->dma,
					 adapter->rx_buffer_len +
					 NET_IP_ALIGN,
					 PCI_DMA_FROMDEVICE);
			skb_put(skb, length);
			goto send_up;
		}

		/* HW will not DMA in data larger than the given buffer, even
		 * if it parses the (NFS, of course) header to be larger. In
		 * that case, it fills the header buffer and spills the rest
		 * into the page.
		 */
		hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hdr_info) &
		        E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT;
		if (hlen > adapter->rx_ps_hdr_size)
			hlen = adapter->rx_ps_hdr_size;

		if (!skb_shinfo(skb)->nr_frags) {
			pci_unmap_single(pdev, buffer_info->dma,
					 adapter->rx_ps_hdr_size + NET_IP_ALIGN,
					 PCI_DMA_FROMDEVICE);
			skb_put(skb, hlen);
		}

		if (length) {
			pci_unmap_page(pdev, buffer_info->page_dma,
				       PAGE_SIZE / 2, PCI_DMA_FROMDEVICE);
			buffer_info->page_dma = 0;

			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags++,
					   buffer_info->page,
					   buffer_info->page_offset,
					   length);

			if ((adapter->rx_buffer_len > (PAGE_SIZE / 2)) ||
			    (page_count(buffer_info->page) != 1))
				buffer_info->page = NULL;
			else
				get_page(buffer_info->page);

			skb->len += length;
			skb->data_len += length;
			skb->truesize += length;
		}

		if (!(staterr & E1000_RXD_STAT_EOP)) {
			buffer_info->skb = next_buffer->skb;
			buffer_info->dma = next_buffer->dma;
			next_buffer->skb = skb;
			next_buffer->dma = 0;
			goto next_desc;
		}
send_up:
		/*
		 * If this bit is set, then the RX registers contain
		 * the time stamp. No other packet will be time
		 * stamped until we read these registers, so read the
		 * registers to make them available again. Because
		 * only one packet can be time stamped at a time, we
		 * know that the register values must belong to this
		 * one here and therefore we don't need to compare
		 * any of the additional attributes stored for it.
		 *
		 * If nothing went wrong, then it should have a
		 * skb_shared_tx that we can turn into a
		 * skb_shared_hwtstamps.
		 *
		 * TODO: can time stamping be triggered (thus locking
		 * the registers) without the packet reaching this point
		 * here? In that case RX time stamping would get stuck.
		 *
		 * TODO: in "time stamp all packets" mode this bit is
		 * not set. Need a global flag for this mode and then
		 * always read the registers. Cannot be done without
		 * a race condition.
		 */
		if (unlikely(staterr & E1000_RXD_STAT_TS)) {
			u64 regval;
			u64 ns;
			struct skb_shared_hwtstamps *shhwtstamps =
				skb_hwtstamps(skb);

			WARN(!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID),
			     "igb: no RX time stamp available for time stamped packet");
			regval = rd32(E1000_RXSTMPL);
			regval |= (u64)rd32(E1000_RXSTMPH) << 32;
			ns = timecounter_cyc2time(&adapter->clock, regval);
			timecompare_update(&adapter->compare, ns);
			memset(shhwtstamps, 0, sizeof(*shhwtstamps));
			shhwtstamps->hwtstamp = ns_to_ktime(ns);
			shhwtstamps->syststamp =
				timecompare_transform(&adapter->compare, ns);
		}
		if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		total_bytes += skb->len;
		total_packets++;

		igb_rx_checksum_adv(adapter, staterr, skb);

		skb->protocol = eth_type_trans(skb, netdev);

		igb_receive_skb(rx_ring, staterr, rx_desc, skb);

next_desc:
		rx_desc->wb.upper.status_error = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
			igb_alloc_rx_buffers_adv(rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;
		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	}

	rx_ring->next_to_clean = i;
	cleaned_count = igb_desc_unused(rx_ring);

	if (cleaned_count)
		igb_alloc_rx_buffers_adv(rx_ring, cleaned_count);

	rx_ring->total_packets += total_packets;
	rx_ring->total_bytes += total_bytes;
	rx_ring->rx_stats.packets += total_packets;
	rx_ring->rx_stats.bytes += total_bytes;
	adapter->net_stats.rx_bytes += total_bytes;
	adapter->net_stats.rx_packets += total_packets;
	return cleaned;
}
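
/*
 * Packet-split recap (a sketch of the flow above, assuming rx_ps_hdr_size
 * is non-zero): the NIC DMAs up to rx_ps_hdr_size bytes of headers into a
 * small buffer that becomes skb->data, and spills the payload into a
 * half-page fragment attached via skb_fill_page_desc(). Non-EOP descriptors
 * chain further fragments onto the same skb until the last one arrives.
 */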
/**
 * igb_alloc_rx_buffers_adv - Replace used receive buffers; packet split
 * @rx_ring: address of ring structure to repopulate
 * @cleaned_count: number of buffers to replace
 **/
static void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring,
				     int cleaned_count)
{
	struct igb_adapter *adapter = rx_ring->adapter;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	union e1000_adv_rx_desc *rx_desc;
	struct igb_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	int bufsz;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	if (adapter->rx_ps_hdr_size)
		bufsz = adapter->rx_ps_hdr_size;
	else
		bufsz = adapter->rx_buffer_len;
	bufsz += NET_IP_ALIGN;

	while (cleaned_count--) {
		rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);

		if (adapter->rx_ps_hdr_size && !buffer_info->page_dma) {
			if (!buffer_info->page) {
				buffer_info->page = alloc_page(GFP_ATOMIC);
				if (!buffer_info->page) {
					adapter->alloc_rx_buff_failed++;
					goto no_buffers;
				}
				buffer_info->page_offset = 0;
			} else {
				buffer_info->page_offset ^= PAGE_SIZE / 2;
			}
			buffer_info->page_dma =
				pci_map_page(pdev, buffer_info->page,
					     buffer_info->page_offset,
					     PAGE_SIZE / 2,
					     PCI_DMA_FROMDEVICE);
		}
		if (!buffer_info->skb) {
			skb = netdev_alloc_skb(netdev, bufsz);
			if (!skb) {
				adapter->alloc_rx_buff_failed++;
				goto no_buffers;
			}

			/* Make buffer alignment 2 beyond a 16 byte boundary
			 * this will result in a 16 byte aligned IP header after
			 * the 14 byte MAC header is removed
			 */
			skb_reserve(skb, NET_IP_ALIGN);

			buffer_info->skb = skb;
			buffer_info->dma = pci_map_single(pdev, skb->data,
							  bufsz,
							  PCI_DMA_FROMDEVICE);
		}
		/* Refresh the desc even if buffer_addrs didn't change because
		 * each write-back erases this info. */
		if (adapter->rx_ps_hdr_size) {
			rx_desc->read.pkt_addr =
				cpu_to_le64(buffer_info->page_dma);
			rx_desc->read.hdr_addr = cpu_to_le64(buffer_info->dma);
		} else {
			rx_desc->read.pkt_addr =
				cpu_to_le64(buffer_info->dma);
			rx_desc->read.hdr_addr = 0;
		}

		i++;
		if (i == rx_ring->count)
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

no_buffers:
	if (rx_ring->next_to_use != i) {
		rx_ring->next_to_use = i;
		if (i == 0)
			i = (rx_ring->count - 1);
		else
			i--;

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch. (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64). */
		wmb();
		writel(i, adapter->hw.hw_addr + rx_ring->tail);
	}
}
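
/*
 * Alignment arithmetic for the skb_reserve(skb, NET_IP_ALIGN) above: on
 * platforms where NET_IP_ALIGN is 2, the buffer starts 2 bytes past a
 * 16-byte boundary, so once the 14-byte Ethernet header is pulled off,
 * the IP header lands on a 16-byte aligned address.
 */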
/**
 * igb_mii_ioctl -
 * @netdev:
 * @ifreq:
 * @cmd:
 **/
static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct mii_ioctl_data *data = if_mii(ifr);

	if (adapter->hw.phy.media_type != e1000_media_type_copper)
		return -EOPNOTSUPP;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = adapter->hw.phy.addr;
		break;
	case SIOCGMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (igb_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
				     &data->val_out))
			return -EIO;
		break;
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}
/**
 * igb_hwtstamp_ioctl - control hardware time stamping
 * @netdev:
 * @ifreq:
 * @cmd:
 *
 * Outgoing time stamping can be enabled and disabled. Play nice and
 * disable it when requested, although it shouldn't cause any overhead
 * when no packet needs it. At most one packet in the queue may be
 * marked for time stamping, otherwise it would be impossible to tell
 * for sure to which packet the hardware time stamp belongs.
 *
 * Incoming time stamping has to be configured via the hardware
 * filters. Not all combinations are supported, in particular event
 * type has to be specified. Matching the kind of event packet is
 * not supported, with the exception of "all V2 events regardless of
 * level 2 or 4".
 **/
static int igb_hwtstamp_ioctl(struct net_device *netdev,
			      struct ifreq *ifr, int cmd)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct hwtstamp_config config;
	u32 tsync_tx_ctl_bit = E1000_TSYNCTXCTL_ENABLED;
	u32 tsync_rx_ctl_bit = E1000_TSYNCRXCTL_ENABLED;
	u32 tsync_rx_ctl_type = 0;
	u32 tsync_rx_cfg = 0;
	int is_l4 = 0;
	int is_l2 = 0;
	short port = 319; /* PTP */
	u32 regval;
	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	/* reserved for future extensions */
	if (config.flags)
		return -EINVAL;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
		tsync_tx_ctl_bit = 0;
		break;
	case HWTSTAMP_TX_ON:
		tsync_tx_ctl_bit = E1000_TSYNCTXCTL_ENABLED;
		break;
	default:
		return -ERANGE;
	}

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		tsync_rx_ctl_bit = 0;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_ALL:
		/*
		 * register TSYNCRXCFG must be set, therefore it is not
		 * possible to time stamp both Sync and Delay_Req messages
		 * => fall back to time stamping all packets
		 */
		tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_ALL;
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
		tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_L4_V1;
		tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE;
		is_l4 = 1;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_L4_V1;
		tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE;
		is_l4 = 1;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
		tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
		tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE;
		is_l2 = 1;
		is_l4 = 1;
		config.rx_filter = HWTSTAMP_FILTER_SOME;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
		tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
		tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE;
		is_l2 = 1;
		is_l4 = 1;
		config.rx_filter = HWTSTAMP_FILTER_SOME;
		break;
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_EVENT_V2;
		config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
		is_l2 = 1;
		break;
	default:
		return -ERANGE;
	}
	/* enable/disable TX */
	regval = rd32(E1000_TSYNCTXCTL);
	regval = (regval & ~E1000_TSYNCTXCTL_ENABLED) | tsync_tx_ctl_bit;
	wr32(E1000_TSYNCTXCTL, regval);

	/* enable/disable RX, define which PTP packets are time stamped */
	regval = rd32(E1000_TSYNCRXCTL);
	regval = (regval & ~E1000_TSYNCRXCTL_ENABLED) | tsync_rx_ctl_bit;
	regval = (regval & ~0xE) | tsync_rx_ctl_type;
	wr32(E1000_TSYNCRXCTL, regval);
	wr32(E1000_TSYNCRXCFG, tsync_rx_cfg);

	/*
	 * Ethertype Filter Queue Filter[0][15:0] = 0x88F7
	 * (Ethertype to filter on)
	 * Ethertype Filter Queue Filter[0][26] = 0x1 (Enable filter)
	 * Ethertype Filter Queue Filter[0][30] = 0x1 (Enable Timestamping)
	 */
	wr32(E1000_ETQF0, is_l2 ? 0x440088f7 : 0);

	/* L4 Queue Filter[0]: only filter by source and destination port */
	wr32(E1000_SPQF0, htons(port));
	wr32(E1000_IMIREXT(0), is_l4 ?
	     ((1<<12) | (1<<19) /* bypass size and control flags */) : 0);
	wr32(E1000_IMIR(0), is_l4 ?
	     (htons(port)
	      | (0<<16) /* immediate interrupt disabled */
	      | 0 /* (1<<17) bit cleared: do not bypass
		     destination port check */)
	     : 0);
	wr32(E1000_FTQF0, is_l4 ?
	     (0x11 /* UDP */
	      | (1<<15) /* VF not compared */
	      | (1<<27) /* Enable Timestamping */
	      | (7<<28) /* only source port filter enabled,
			   source/target address and protocol
			   masked */)
	     : ((1<<15) | (15<<28) /* all mask bits set = filter not
				      enabled */));

	wrfl();

	adapter->hwtstamp_config = config;

	/* clear TX/RX time stamp registers, just to be sure */
	regval = rd32(E1000_TXSTMPH);
	regval = rd32(E1000_RXSTMPH);

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
		-EFAULT : 0;
}

/**
 * igb_ioctl -
 * @netdev:
 * @ifreq:
 * @cmd:
 **/
static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		return igb_mii_ioctl(netdev, ifr, cmd);
	case SIOCSHWTSTAMP:
		return igb_hwtstamp_ioctl(netdev, ifr, cmd);
	default:
		return -EOPNOTSUPP;
	}
}
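
/*
 * Illustrative userspace sketch (not part of this driver) of how a program
 * would exercise the SIOCSHWTSTAMP path handled above. The structures and
 * constants come from linux/net_tstamp.h and linux/sockios.h; error handling
 * is trimmed for brevity, and note the driver may legitimately rewrite
 * cfg.rx_filter to the coarser filter it actually enabled.
 */
#if 0
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

static int enable_hw_timestamping(int sock, const char *ifname)
{
	struct hwtstamp_config cfg;
	struct ifreq ifr;

	memset(&cfg, 0, sizeof(cfg));
	cfg.tx_type = HWTSTAMP_TX_ON;
	cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&cfg;

	/* on success, cfg reports the configuration actually in effect */
	return ioctl(sock, SIOCSHWTSTAMP, &ifr);
}
#endif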
static void igb_vlan_rx_register(struct net_device *netdev,
                                 struct vlan_group *grp)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl, rctl;

	igb_irq_disable(adapter);
	adapter->vlgrp = grp;

	if (grp) {
		/* enable VLAN tag insert/strip */
		ctrl = rd32(E1000_CTRL);
		ctrl |= E1000_CTRL_VME;
		wr32(E1000_CTRL, ctrl);

		/* enable VLAN receive filtering */
		rctl = rd32(E1000_RCTL);
		rctl &= ~E1000_RCTL_CFIEN;
		wr32(E1000_RCTL, rctl);
		igb_update_mng_vlan(adapter);
	} else {
		/* disable VLAN tag insert/strip */
		ctrl = rd32(E1000_CTRL);
		ctrl &= ~E1000_CTRL_VME;
		wr32(E1000_CTRL, ctrl);

		if (adapter->mng_vlan_id != (u16)IGB_MNG_VLAN_NONE) {
			igb_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
			adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
		}
	}

	igb_rlpml_set(adapter);

	if (!test_bit(__IGB_DOWN, &adapter->state))
		igb_irq_enable(adapter);
}
static void igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int pf_id = adapter->vfs_allocated_count;

	if ((hw->mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
	    (vid == adapter->mng_vlan_id))
		return;

	/* add vid to vlvf if sr-iov is enabled,
	 * if that fails add directly to filter table */
	if (igb_vlvf_set(adapter, vid, true, pf_id))
		igb_vfta_set(hw, vid, true);
}
static void igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int pf_id = adapter->vfs_allocated_count;

	igb_irq_disable(adapter);
	vlan_group_set_device(adapter->vlgrp, vid, NULL);

	if (!test_bit(__IGB_DOWN, &adapter->state))
		igb_irq_enable(adapter);

	if ((adapter->hw.mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
	    (vid == adapter->mng_vlan_id)) {
		/* release control to f/w */
		igb_release_hw_control(adapter);
		return;
	}

	/* remove vid from vlvf if sr-iov is enabled,
	 * if not in vlvf remove from vfta */
	if (igb_vlvf_set(adapter, vid, false, pf_id))
		igb_vfta_set(hw, vid, false);
}
static void igb_restore_vlan(struct igb_adapter *adapter)
{
	igb_vlan_rx_register(adapter->netdev, adapter->vlgrp);

	if (adapter->vlgrp) {
		u16 vid;
		for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
			if (!vlan_group_get_device(adapter->vlgrp, vid))
				continue;
			igb_vlan_rx_add_vid(adapter->netdev, vid);
		}
	}
}
int igb_set_spd_dplx(struct igb_adapter *adapter, u16 spddplx)
{
	struct e1000_mac_info *mac = &adapter->hw.mac;

	mac->autoneg = 0;

	/* Fiber NICs only allow 1000 Mbps Full duplex */
	if ((adapter->hw.phy.media_type == e1000_media_type_fiber) &&
	    spddplx != (SPEED_1000 + DUPLEX_FULL)) {
		dev_err(&adapter->pdev->dev,
			"Unsupported Speed/Duplex configuration\n");
		return -EINVAL;
	}

	switch (spddplx) {
	case SPEED_10 + DUPLEX_HALF:
		mac->forced_speed_duplex = ADVERTISE_10_HALF;
		break;
	case SPEED_10 + DUPLEX_FULL:
		mac->forced_speed_duplex = ADVERTISE_10_FULL;
		break;
	case SPEED_100 + DUPLEX_HALF:
		mac->forced_speed_duplex = ADVERTISE_100_HALF;
		break;
	case SPEED_100 + DUPLEX_FULL:
		mac->forced_speed_duplex = ADVERTISE_100_FULL;
		break;
	case SPEED_1000 + DUPLEX_FULL:
		mac->autoneg = 1;
		adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
		break;
	case SPEED_1000 + DUPLEX_HALF: /* not supported */
	default:
		dev_err(&adapter->pdev->dev,
			"Unsupported Speed/Duplex configuration\n");
		return -EINVAL;
	}
	return 0;
}
static int igb_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl, rctl, status;
	u32 wufc = adapter->wol;
#ifdef CONFIG_PM
	int retval = 0;
#endif

	netif_device_detach(netdev);

	if (netif_running(netdev))
		igb_close(netdev);

	igb_reset_interrupt_capability(adapter);

	igb_free_queues(adapter);

#ifdef CONFIG_PM
	retval = pci_save_state(pdev);
	if (retval)
		return retval;
#endif

	status = rd32(E1000_STATUS);
	if (status & E1000_STATUS_LU)
		wufc &= ~E1000_WUFC_LNKC;

	if (wufc) {
		igb_setup_rctl(adapter);
		igb_set_multi(netdev);

		/* turn on all-multi mode if wake on multicast is enabled */
		if (wufc & E1000_WUFC_MC) {
			rctl = rd32(E1000_RCTL);
			rctl |= E1000_RCTL_MPE;
			wr32(E1000_RCTL, rctl);
		}

		ctrl = rd32(E1000_CTRL);
		/* advertise wake from D3Cold */
		#define E1000_CTRL_ADVD3WUC 0x00100000
		/* phy power management enable */
		#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
		ctrl |= E1000_CTRL_ADVD3WUC;
		wr32(E1000_CTRL, ctrl);

		/* Allow time for pending master requests to run */
		igb_disable_pcie_master(&adapter->hw);

		wr32(E1000_WUC, E1000_WUC_PME_EN);
		wr32(E1000_WUFC, wufc);
	} else {
		wr32(E1000_WUC, 0);
		wr32(E1000_WUFC, 0);
	}

	/* make sure adapter isn't asleep if manageability/wol is enabled */
	if (wufc || adapter->en_mng_pt) {
		pci_enable_wake(pdev, PCI_D3hot, 1);
		pci_enable_wake(pdev, PCI_D3cold, 1);
	} else {
		igb_shutdown_fiber_serdes_link_82575(hw);
		pci_enable_wake(pdev, PCI_D3hot, 0);
		pci_enable_wake(pdev, PCI_D3cold, 0);
	}

	/* Release control of h/w to f/w. If f/w is AMT enabled, this
	 * would have already happened in close and is redundant. */
	igb_release_hw_control(adapter);

	pci_disable_device(pdev);

	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}
#ifdef CONFIG_PM
static int igb_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	err = pci_enable_device_mem(pdev);
	if (err) {
		dev_err(&pdev->dev,
			"igb: Cannot enable PCI device from suspend\n");
		return err;
	}
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	igb_set_interrupt_capability(adapter);
	if (igb_alloc_queues(adapter)) {
		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
		return -ENOMEM;
	}

	/* e1000_power_up_phy(adapter); */
	igb_reset(adapter);

	/* let the f/w know that the h/w is now under the control of the
	 * driver. */
	igb_get_hw_control(adapter);

	wr32(E1000_WUS, ~0);

	if (netif_running(netdev)) {
		err = igb_open(netdev);
		if (err)
			return err;
	}

	netif_device_attach(netdev);

	return 0;
}
#endif
static void igb_shutdown(struct pci_dev *pdev)
{
	igb_suspend(pdev, PMSG_SUSPEND);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void igb_netpoll(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int i;

	if (!adapter->msix_entries) {
		igb_irq_disable(adapter);
		napi_schedule(&adapter->rx_ring[0].napi);
		return;
	}

	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct igb_ring *tx_ring = &adapter->tx_ring[i];
		wr32(E1000_EIMC, tx_ring->eims_value);
		igb_clean_tx_irq(tx_ring);
		wr32(E1000_EIMS, tx_ring->eims_value);
	}

	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct igb_ring *rx_ring = &adapter->rx_ring[i];
		wr32(E1000_EIMC, rx_ring->eims_value);
		napi_schedule(&rx_ring->napi);
	}
}
#endif /* CONFIG_NET_POLL_CONTROLLER */
/**
 * igb_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (netif_running(netdev))
		igb_down(adapter);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
/**
 * igb_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first-half of the igb_resume routine.
 */
static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	pci_ers_result_t result;
	int err;

	if (pci_enable_device_mem(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_set_master(pdev);
		pci_restore_state(pdev);

		pci_enable_wake(pdev, PCI_D3hot, 0);
		pci_enable_wake(pdev, PCI_D3cold, 0);

		igb_reset(adapter);
		wr32(E1000_WUS, ~0);
		result = PCI_ERS_RESULT_RECOVERED;
	}

	err = pci_cleanup_aer_uncorrect_error_status(pdev);
	if (err) {
		dev_err(&pdev->dev, "pci_cleanup_aer_uncorrect_error_status "
			"failed 0x%0x\n", err);
		/* non-fatal, continue */
	}

	return result;
}
/**
 * igb_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation. Implementation resembles the
 * second-half of the igb_resume routine.
 */
static void igb_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev)) {
		if (igb_up(adapter)) {
			dev_err(&pdev->dev, "igb_up failed after reset\n");
			return;
		}
	}

	netif_device_attach(netdev);

	/* let the f/w know that the h/w is now under the control of the
	 * driver. */
	igb_get_hw_control(adapter);
}
static inline void igb_set_vmolr(struct e1000_hw *hw, int vfn)
{
	u32 reg_data;

	reg_data = rd32(E1000_VMOLR(vfn));
	reg_data |= E1000_VMOLR_BAM |	 /* Accept broadcast */
		    E1000_VMOLR_ROPE |   /* Accept packets matched in UTA */
		    E1000_VMOLR_ROMPE |  /* Accept packets matched in MTA */
		    E1000_VMOLR_AUPE |   /* Accept untagged packets */
		    E1000_VMOLR_STRVLAN; /* Strip vlan tags */
	wr32(E1000_VMOLR(vfn), reg_data);
}
static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
				   int vfn)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 vmolr;

	vmolr = rd32(E1000_VMOLR(vfn));
	vmolr &= ~E1000_VMOLR_RLPML_MASK;
	vmolr |= size | E1000_VMOLR_LPE;
	wr32(E1000_VMOLR(vfn), vmolr);
	return 0;
}
static inline void igb_set_rah_pool(struct e1000_hw *hw, int pool, int entry)
{
	u32 reg_data;

	reg_data = rd32(E1000_RAH(entry));
	reg_data &= ~E1000_RAH_POOL_MASK;
	reg_data |= E1000_RAH_POOL_1 << pool;
	wr32(E1000_RAH(entry), reg_data);
}
static void igb_set_mc_list_pools(struct igb_adapter *adapter,
				  int entry_count, u16 total_rar_filters)
{
	struct e1000_hw *hw = &adapter->hw;
	int i = adapter->vfs_allocated_count + 1;

	if ((i + entry_count) < total_rar_filters)
		total_rar_filters = i + entry_count;

	for (; i < total_rar_filters; i++)
		igb_set_rah_pool(hw, adapter->vfs_allocated_count, i);
}
static int igb_set_vf_mac(struct igb_adapter *adapter,
			  int vf, unsigned char *mac_addr)
{
	struct e1000_hw *hw = &adapter->hw;
	int rar_entry = vf + 1; /* VF MAC addresses start at entry 1 */

	igb_rar_set(hw, mac_addr, rar_entry);
	memcpy(adapter->vf_data[vf].vf_mac_addresses, mac_addr, ETH_ALEN);
	igb_set_rah_pool(hw, vf, rar_entry);

	return 0;
}
static void igb_vmm_control(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 reg_data;

	if (!adapter->vfs_allocated_count)
		return;

	/* VF's need PF reset indication before they
	 * can send/receive mail */
	reg_data = rd32(E1000_CTRL_EXT);
	reg_data |= E1000_CTRL_EXT_PFRSTD;
	wr32(E1000_CTRL_EXT, reg_data);

	igb_vmdq_set_loopback_pf(hw, true);
	igb_vmdq_set_replication_pf(hw, true);
}
#ifdef CONFIG_PCI_IOV
static ssize_t igb_show_num_vfs(struct device *dev,
                                struct device_attribute *attr, char *buf)
{
	struct igb_adapter *adapter = netdev_priv(to_net_dev(dev));

	return sprintf(buf, "%d\n", adapter->vfs_allocated_count);
}
static ssize_t igb_set_num_vfs(struct device *dev,
                               struct device_attribute *attr,
                               const char *buf, size_t count)
{
	struct net_device *netdev = to_net_dev(dev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;
	unsigned int num_vfs, i;
	unsigned char mac_addr[ETH_ALEN];
	int err;

	sscanf(buf, "%u", &num_vfs);

	if (num_vfs > 7)
		num_vfs = 7;

	/* value unchanged do nothing */
	if (num_vfs == adapter->vfs_allocated_count)
		return count;

	if (netdev->flags & IFF_UP)
		igb_close(netdev);

	igb_reset_interrupt_capability(adapter);
	igb_free_queues(adapter);
	adapter->tx_ring = NULL;
	adapter->rx_ring = NULL;
	adapter->vfs_allocated_count = 0;

	/* reclaim resources allocated to VFs since we are changing count */
	if (adapter->vf_data) {
		/* disable iov and allow time for transactions to clear */
		pci_disable_sriov(pdev);
		msleep(500);

		kfree(adapter->vf_data);
		adapter->vf_data = NULL;
		wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
		msleep(100);
		dev_info(&pdev->dev, "IOV Disabled\n");
	}

	if (num_vfs) {
		adapter->vf_data = kcalloc(num_vfs,
					   sizeof(struct vf_data_storage),
					   GFP_KERNEL);
		if (!adapter->vf_data) {
			dev_err(&pdev->dev, "Could not allocate VF private "
				"data - IOV enable failed\n");
		} else {
			err = pci_enable_sriov(pdev, num_vfs);
			if (!err) {
				adapter->vfs_allocated_count = num_vfs;
				dev_info(&pdev->dev, "%d vfs allocated\n",
					 num_vfs);
				for (i = 0; i < adapter->vfs_allocated_count;
				     i++) {
					random_ether_addr(mac_addr);
					igb_set_vf_mac(adapter, i, mac_addr);
				}
			} else {
				kfree(adapter->vf_data);
				adapter->vf_data = NULL;
			}
		}
	}

	igb_set_interrupt_capability(adapter);
	igb_alloc_queues(adapter);
	igb_reset(adapter);

	if (netdev->flags & IFF_UP)
		igb_open(netdev);

	return count;
}
#endif /* CONFIG_PCI_IOV */
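
/*
 * Usage sketch (assuming the DEVICE_ATTR wiring for these handlers registers
 * an attribute named "num_vfs" on the net_device, as to_net_dev() above
 * suggests): from userspace one would enable seven VFs on eth0 with
 *
 *	echo 7 > /sys/class/net/eth0/num_vfs
 *
 * and tear them down again with "echo 0". The handler closes the interface,
 * re-sizes the queue layout, and reopens it, so the link will bounce.
 */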