1 /************************************************************************
2 * s2io.c: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC
3 * Copyright(c) 2002-2007 Neterion Inc.
5 * This software may be used and distributed according to the terms of
6 * the GNU General Public License (GPL), incorporated herein by reference.
7 * Drivers based on or derived from this code fall under the GPL and must
8 * retain the authorship, copyright and license notice. This file is not
9 * a complete program and may only be used when the entire operating
10 * system is licensed under the GPL.
11 * See the file COPYING in this distribution for more information.
14 * Jeff Garzik : For pointing out the improper error condition
15 * check in the s2io_xmit routine and also some
16 * issues in the Tx watch dog function. Also for
17 * patiently answering all those innumerable
 * questions regarding the 2.6 porting issues.
19 * Stephen Hemminger : Providing proper 2.6 porting mechanism for some
20 * macros available only in 2.6 Kernel.
21 * Francois Romieu : For pointing out all code part that were
22 * deprecated and also styling related comments.
23 * Grant Grundler : For helping me get rid of some Architecture
25 * Christopher Hellwig : Some more 2.6 specific issues in the driver.
27 * The module loadable parameters that are supported by the driver and a brief
 * explanation of all the variables.
30 * rx_ring_num : This can be used to program the number of receive rings used
32 * rx_ring_sz: This defines the number of receive blocks each ring can have.
33 * This is also an array of size 8.
34 * rx_ring_mode: This defines the operation mode of all 8 rings. The valid
 * tx_fifo_num: This defines the number of Tx FIFOs that are used in the driver.
37 * tx_fifo_len: This too is an array of 8. Each element defines the number of
38 * Tx descriptors that can be associated with each corresponding FIFO.
39 * intr_type: This defines the type of interrupt. The values can be 0(INTA),
40 * 2(MSI_X). Default value is '2(MSI_X)'
41 * lro_enable: Specifies whether to enable Large Receive Offload (LRO) or not.
42 * Possible values '1' for enable '0' for disable. Default is '0'
43 * lro_max_pkts: This parameter defines maximum number of packets can be
44 * aggregated as a single large packet
45 * napi: This parameter used to enable/disable NAPI (polling Rx)
46 * Possible values '1' for enable and '0' for disable. Default is '1'
47 * ufo: This parameter used to enable/disable UDP Fragmentation Offload(UFO)
48 * Possible values '1' for enable and '0' for disable. Default is '0'
49 * vlan_tag_strip: This can be used to enable or disable vlan stripping.
50 * Possible values '1' for enable , '0' for disable.
51 * Default is '2' - which means disable in promisc mode
52 * and enable in non-promiscuous mode.
53 * multiq: This parameter used to enable/disable MULTIQUEUE support.
54 * Possible values '1' for enable and '0' for disable. Default is '0'
55 ************************************************************************/
57 #include <linux/module.h>
58 #include <linux/types.h>
59 #include <linux/errno.h>
60 #include <linux/ioport.h>
61 #include <linux/pci.h>
62 #include <linux/dma-mapping.h>
63 #include <linux/kernel.h>
64 #include <linux/netdevice.h>
65 #include <linux/etherdevice.h>
66 #include <linux/skbuff.h>
67 #include <linux/init.h>
68 #include <linux/delay.h>
69 #include <linux/stddef.h>
70 #include <linux/ioctl.h>
71 #include <linux/timex.h>
72 #include <linux/ethtool.h>
73 #include <linux/workqueue.h>
74 #include <linux/if_vlan.h>
76 #include <linux/tcp.h>
79 #include <asm/system.h>
80 #include <asm/uaccess.h>
82 #include <asm/div64.h>
87 #include "s2io-regs.h"
89 #define DRV_VERSION "2.0.26.24"
91 /* S2io Driver name & version. */
92 static char s2io_driver_name[] = "Neterion";
93 static char s2io_driver_version[] = DRV_VERSION;
/* Per-descriptor size in bytes and descriptors-per-block, indexed by
 * nic->rxd_mode (index 0 presumably RXD_MODE_1, index 1 RXD_MODE_3B —
 * NOTE(review): mapping inferred from rxd_size[nic->rxd_mode] usage in
 * init_shared_mem(); confirm against s2io.h). */
static int rxd_size[2] = {32,48};
static int rxd_count[2] = {127,85};
98 static inline int RXD_IS_UP2DT(struct RxD_t *rxdp)
102 ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
103 (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));
109 * Cards with following subsystem_id have a link state indication
110 * problem, 600B, 600C, 600D, 640B, 640C and 640D.
111 * macro below identifies these cards given the subsystem_id.
/* Fully parenthesized (arguments and whole expansion) so the ternary
 * cannot misparse when the macro is used inside a larger expression —
 * the previous form left the expansion unwrapped, so e.g.
 * `CARDS_WITH_FAULTY_LINK_INDICATORS(t, s) == 1` bound `0 == 1` into
 * the false arm of the conditional. */
#define CARDS_WITH_FAULTY_LINK_INDICATORS(dev_type, subid)	\
	(((dev_type) == XFRAME_I_DEVICE) ?			\
	 (((((subid) >= 0x600B) && ((subid) <= 0x600D)) ||	\
	   (((subid) >= 0x640B) && ((subid) <= 0x640D))) ? 1 : 0) : 0)
/* True when neither remote nor local RMAC fault is flagged in the
 * adapter-status value.  @val64 is parenthesized: the old expansion
 * `val64 & (...)` silently mis-evaluated compound arguments such as
 * `a | b` because `&` binds tighter than `|`. */
#define LINK_IS_UP(val64) (!((val64) & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
					ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
121 static inline int is_s2io_card_up(const struct s2io_nic * sp)
123 return test_bit(__S2IO_STATE_CARD_UP, &sp->state);
126 /* Ethtool related variables and Macros. */
/* Names reported to ethtool for the adapter self-tests; the order must
 * match the test dispatch in the ethtool self-test handler (not visible
 * in this chunk — confirm).  NOTE(review): the closing brace of this
 * initializer is elided in this chunk. */
static char s2io_gstrings[][ETH_GSTRING_LEN] = {
	"Register test\t(offline)",
	"Eeprom test\t(offline)",
	"Link test\t(online)",
	"RLDRAM test\t(offline)",
	"BIST Test\t(offline)"
135 static char ethtool_xena_stats_keys[][ETH_GSTRING_LEN] = {
137 {"tmac_data_octets"},
141 {"tmac_pause_ctrl_frms"},
145 {"tmac_any_err_frms"},
146 {"tmac_ttl_less_fb_octets"},
147 {"tmac_vld_ip_octets"},
155 {"rmac_data_octets"},
156 {"rmac_fcs_err_frms"},
158 {"rmac_vld_mcst_frms"},
159 {"rmac_vld_bcst_frms"},
160 {"rmac_in_rng_len_err_frms"},
161 {"rmac_out_rng_len_err_frms"},
163 {"rmac_pause_ctrl_frms"},
164 {"rmac_unsup_ctrl_frms"},
166 {"rmac_accepted_ucst_frms"},
167 {"rmac_accepted_nucst_frms"},
168 {"rmac_discarded_frms"},
169 {"rmac_drop_events"},
170 {"rmac_ttl_less_fb_octets"},
172 {"rmac_usized_frms"},
173 {"rmac_osized_frms"},
175 {"rmac_jabber_frms"},
176 {"rmac_ttl_64_frms"},
177 {"rmac_ttl_65_127_frms"},
178 {"rmac_ttl_128_255_frms"},
179 {"rmac_ttl_256_511_frms"},
180 {"rmac_ttl_512_1023_frms"},
181 {"rmac_ttl_1024_1518_frms"},
189 {"rmac_err_drp_udp"},
190 {"rmac_xgmii_err_sym"},
208 {"rmac_xgmii_data_err_cnt"},
209 {"rmac_xgmii_ctrl_err_cnt"},
210 {"rmac_accepted_ip"},
214 {"new_rd_req_rtry_cnt"},
216 {"wr_rtry_rd_ack_cnt"},
219 {"new_wr_req_rtry_cnt"},
222 {"rd_rtry_wr_ack_cnt"},
232 static char ethtool_enhanced_stats_keys[][ETH_GSTRING_LEN] = {
233 {"rmac_ttl_1519_4095_frms"},
234 {"rmac_ttl_4096_8191_frms"},
235 {"rmac_ttl_8192_max_frms"},
236 {"rmac_ttl_gt_max_frms"},
237 {"rmac_osized_alt_frms"},
238 {"rmac_jabber_alt_frms"},
239 {"rmac_gt_max_alt_frms"},
241 {"rmac_len_discard"},
242 {"rmac_fcs_discard"},
245 {"rmac_red_discard"},
246 {"rmac_rts_discard"},
247 {"rmac_ingm_full_discard"},
251 static char ethtool_driver_stats_keys[][ETH_GSTRING_LEN] = {
252 {"\n DRIVER STATISTICS"},
253 {"single_bit_ecc_errs"},
254 {"double_bit_ecc_errs"},
267 {"alarm_transceiver_temp_high"},
268 {"alarm_transceiver_temp_low"},
269 {"alarm_laser_bias_current_high"},
270 {"alarm_laser_bias_current_low"},
271 {"alarm_laser_output_power_high"},
272 {"alarm_laser_output_power_low"},
273 {"warn_transceiver_temp_high"},
274 {"warn_transceiver_temp_low"},
275 {"warn_laser_bias_current_high"},
276 {"warn_laser_bias_current_low"},
277 {"warn_laser_output_power_high"},
278 {"warn_laser_output_power_low"},
279 {"lro_aggregated_pkts"},
280 {"lro_flush_both_count"},
281 {"lro_out_of_sequence_pkts"},
282 {"lro_flush_due_to_max_pkts"},
283 {"lro_avg_aggr_pkts"},
284 {"mem_alloc_fail_cnt"},
285 {"pci_map_fail_cnt"},
286 {"watchdog_timer_cnt"},
293 {"tx_tcode_buf_abort_cnt"},
294 {"tx_tcode_desc_abort_cnt"},
295 {"tx_tcode_parity_err_cnt"},
296 {"tx_tcode_link_loss_cnt"},
297 {"tx_tcode_list_proc_err_cnt"},
298 {"rx_tcode_parity_err_cnt"},
299 {"rx_tcode_abort_cnt"},
300 {"rx_tcode_parity_abort_cnt"},
301 {"rx_tcode_rda_fail_cnt"},
302 {"rx_tcode_unkn_prot_cnt"},
303 {"rx_tcode_fcs_err_cnt"},
304 {"rx_tcode_buf_size_err_cnt"},
305 {"rx_tcode_rxd_corrupt_cnt"},
306 {"rx_tcode_unkn_err_cnt"},
314 {"mac_tmac_err_cnt"},
315 {"mac_rmac_err_cnt"},
316 {"xgxs_txgxs_err_cnt"},
317 {"xgxs_rxgxs_err_cnt"},
319 {"prc_pcix_err_cnt"},
326 #define S2IO_XENA_STAT_LEN ARRAY_SIZE(ethtool_xena_stats_keys)
327 #define S2IO_ENHANCED_STAT_LEN ARRAY_SIZE(ethtool_enhanced_stats_keys)
328 #define S2IO_DRIVER_STAT_LEN ARRAY_SIZE(ethtool_driver_stats_keys)
330 #define XFRAME_I_STAT_LEN (S2IO_XENA_STAT_LEN + S2IO_DRIVER_STAT_LEN )
331 #define XFRAME_II_STAT_LEN (XFRAME_I_STAT_LEN + S2IO_ENHANCED_STAT_LEN )
333 #define XFRAME_I_STAT_STRINGS_LEN ( XFRAME_I_STAT_LEN * ETH_GSTRING_LEN )
334 #define XFRAME_II_STAT_STRINGS_LEN ( XFRAME_II_STAT_LEN * ETH_GSTRING_LEN )
336 #define S2IO_TEST_LEN ARRAY_SIZE(s2io_gstrings)
337 #define S2IO_STRINGS_LEN S2IO_TEST_LEN * ETH_GSTRING_LEN
/*
 * Arm a kernel timer: initialise @timer, attach handler @handle with
 * argument @arg, and schedule it to fire @exp jiffies from now.
 * Wrapped in do { } while (0) so the multi-statement body behaves as a
 * single statement after an unbraced `if`/`else` (the open-coded
 * original would splice incorrectly there).
 */
#define S2IO_TIMER_CONF(timer, handle, arg, exp)	\
	do {						\
		init_timer(&timer);			\
		timer.function = handle;		\
		timer.data = (unsigned long)arg;	\
		mod_timer(&timer, (jiffies + exp));	\
	} while (0)
345 /* copy mac addr to def_mac_addr array */
346 static void do_s2io_copy_mac_addr(struct s2io_nic *sp, int offset, u64 mac_addr)
348 sp->def_mac_addr[offset].mac_addr[5] = (u8) (mac_addr);
349 sp->def_mac_addr[offset].mac_addr[4] = (u8) (mac_addr >> 8);
350 sp->def_mac_addr[offset].mac_addr[3] = (u8) (mac_addr >> 16);
351 sp->def_mac_addr[offset].mac_addr[2] = (u8) (mac_addr >> 24);
352 sp->def_mac_addr[offset].mac_addr[1] = (u8) (mac_addr >> 32);
353 sp->def_mac_addr[offset].mac_addr[0] = (u8) (mac_addr >> 40);
356 static void s2io_vlan_rx_register(struct net_device *dev,
357 struct vlan_group *grp)
360 struct s2io_nic *nic = dev->priv;
361 unsigned long flags[MAX_TX_FIFOS];
362 struct mac_info *mac_control = &nic->mac_control;
363 struct config_param *config = &nic->config;
365 for (i = 0; i < config->tx_fifo_num; i++)
366 spin_lock_irqsave(&mac_control->fifos[i].tx_lock, flags[i]);
369 for (i = config->tx_fifo_num - 1; i >= 0; i--)
370 spin_unlock_irqrestore(&mac_control->fifos[i].tx_lock,
374 /* A flag indicating whether 'RX_PA_CFG_STRIP_VLAN_TAG' bit is set or not */
375 static int vlan_strip_flag;
377 /* Unregister the vlan */
378 static void s2io_vlan_rx_kill_vid(struct net_device *dev, unsigned long vid)
381 struct s2io_nic *nic = dev->priv;
382 unsigned long flags[MAX_TX_FIFOS];
383 struct mac_info *mac_control = &nic->mac_control;
384 struct config_param *config = &nic->config;
386 for (i = 0; i < config->tx_fifo_num; i++)
387 spin_lock_irqsave(&mac_control->fifos[i].tx_lock, flags[i]);
390 vlan_group_set_device(nic->vlgrp, vid, NULL);
392 for (i = config->tx_fifo_num - 1; i >= 0; i--)
393 spin_unlock_irqrestore(&mac_control->fifos[i].tx_lock,
398 * Constants to be programmed into the Xena's registers, to configure
403 static const u64 herc_act_dtx_cfg[] = {
405 0x8000051536750000ULL, 0x80000515367500E0ULL,
407 0x8000051536750004ULL, 0x80000515367500E4ULL,
409 0x80010515003F0000ULL, 0x80010515003F00E0ULL,
411 0x80010515003F0004ULL, 0x80010515003F00E4ULL,
413 0x801205150D440000ULL, 0x801205150D4400E0ULL,
415 0x801205150D440004ULL, 0x801205150D4400E4ULL,
417 0x80020515F2100000ULL, 0x80020515F21000E0ULL,
419 0x80020515F2100004ULL, 0x80020515F21000E4ULL,
424 static const u64 xena_dtx_cfg[] = {
426 0x8000051500000000ULL, 0x80000515000000E0ULL,
428 0x80000515D9350004ULL, 0x80000515D93500E4ULL,
430 0x8001051500000000ULL, 0x80010515000000E0ULL,
432 0x80010515001E0004ULL, 0x80010515001E00E4ULL,
434 0x8002051500000000ULL, 0x80020515000000E0ULL,
436 0x80020515F2100004ULL, 0x80020515F21000E4ULL,
441 * Constants for Fixing the MacAddress problem seen mostly on
444 static const u64 fix_mac[] = {
445 0x0060000000000000ULL, 0x0060600000000000ULL,
446 0x0040600000000000ULL, 0x0000600000000000ULL,
447 0x0020600000000000ULL, 0x0060600000000000ULL,
448 0x0020600000000000ULL, 0x0060600000000000ULL,
449 0x0020600000000000ULL, 0x0060600000000000ULL,
450 0x0020600000000000ULL, 0x0060600000000000ULL,
451 0x0020600000000000ULL, 0x0060600000000000ULL,
452 0x0020600000000000ULL, 0x0060600000000000ULL,
453 0x0020600000000000ULL, 0x0060600000000000ULL,
454 0x0020600000000000ULL, 0x0060600000000000ULL,
455 0x0020600000000000ULL, 0x0060600000000000ULL,
456 0x0020600000000000ULL, 0x0060600000000000ULL,
457 0x0020600000000000ULL, 0x0000600000000000ULL,
458 0x0040600000000000ULL, 0x0060600000000000ULL,
462 MODULE_LICENSE("GPL");
463 MODULE_VERSION(DRV_VERSION);
466 /* Module Loadable parameters. */
467 S2IO_PARM_INT(tx_fifo_num, FIFO_DEFAULT_NUM);
468 S2IO_PARM_INT(rx_ring_num, 1);
469 S2IO_PARM_INT(multiq, 0);
470 S2IO_PARM_INT(rx_ring_mode, 1);
471 S2IO_PARM_INT(use_continuous_tx_intrs, 1);
472 S2IO_PARM_INT(rmac_pause_time, 0x100);
473 S2IO_PARM_INT(mc_pause_threshold_q0q3, 187);
474 S2IO_PARM_INT(mc_pause_threshold_q4q7, 187);
475 S2IO_PARM_INT(shared_splits, 0);
476 S2IO_PARM_INT(tmac_util_period, 5);
477 S2IO_PARM_INT(rmac_util_period, 5);
478 S2IO_PARM_INT(l3l4hdr_size, 128);
479 /* 0 is no steering, 1 is Priority steering, 2 is Default steering */
480 S2IO_PARM_INT(tx_steering_type, TX_DEFAULT_STEERING);
481 /* Frequency of Rx desc syncs expressed as power of 2 */
482 S2IO_PARM_INT(rxsync_frequency, 3);
483 /* Interrupt type. Values can be 0(INTA), 2(MSI_X) */
484 S2IO_PARM_INT(intr_type, 2);
485 /* Large receive offload feature */
486 static unsigned int lro_enable;
487 module_param_named(lro, lro_enable, uint, 0);
489 /* Max pkts to be aggregated by LRO at one time. If not specified,
490 * aggregation happens until we hit max IP pkt size(64K)
492 S2IO_PARM_INT(lro_max_pkts, 0xFFFF);
493 S2IO_PARM_INT(indicate_max_pkts, 0);
495 S2IO_PARM_INT(napi, 1);
496 S2IO_PARM_INT(ufo, 0);
497 S2IO_PARM_INT(vlan_tag_strip, NO_STRIP_IN_PROMISC);
499 static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
500 {DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN};
501 static unsigned int rx_ring_sz[MAX_RX_RINGS] =
502 {[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT};
503 static unsigned int rts_frm_len[MAX_RX_RINGS] =
504 {[0 ...(MAX_RX_RINGS - 1)] = 0 };
506 module_param_array(tx_fifo_len, uint, NULL, 0);
507 module_param_array(rx_ring_sz, uint, NULL, 0);
508 module_param_array(rts_frm_len, uint, NULL, 0);
512 * This table lists all the devices that this driver supports.
514 static struct pci_device_id s2io_tbl[] __devinitdata = {
515 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
516 PCI_ANY_ID, PCI_ANY_ID},
517 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
518 PCI_ANY_ID, PCI_ANY_ID},
519 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
520 PCI_ANY_ID, PCI_ANY_ID},
521 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
522 PCI_ANY_ID, PCI_ANY_ID},
526 MODULE_DEVICE_TABLE(pci, s2io_tbl);
528 static struct pci_error_handlers s2io_err_handler = {
529 .error_detected = s2io_io_error_detected,
530 .slot_reset = s2io_io_slot_reset,
531 .resume = s2io_io_resume,
534 static struct pci_driver s2io_driver = {
536 .id_table = s2io_tbl,
537 .probe = s2io_init_nic,
538 .remove = __devexit_p(s2io_rem_nic),
539 .err_handler = &s2io_err_handler,
542 /* A simplifier macro used both by init and free shared_mem Fns(). */
543 #define TXD_MEM_PAGE_CNT(len, per_each) ((len+per_each - 1) / per_each)
545 /* netqueue manipulation helper functions */
546 static inline void s2io_stop_all_tx_queue(struct s2io_nic *sp)
549 if (sp->config.multiq) {
550 for (i = 0; i < sp->config.tx_fifo_num; i++)
551 netif_stop_subqueue(sp->dev, i);
553 for (i = 0; i < sp->config.tx_fifo_num; i++)
554 sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_STOP;
555 netif_stop_queue(sp->dev);
559 static inline void s2io_stop_tx_queue(struct s2io_nic *sp, int fifo_no)
561 if (sp->config.multiq)
562 netif_stop_subqueue(sp->dev, fifo_no);
564 sp->mac_control.fifos[fifo_no].queue_state =
566 netif_stop_queue(sp->dev);
570 static inline void s2io_start_all_tx_queue(struct s2io_nic *sp)
573 if (sp->config.multiq) {
574 for (i = 0; i < sp->config.tx_fifo_num; i++)
575 netif_start_subqueue(sp->dev, i);
577 for (i = 0; i < sp->config.tx_fifo_num; i++)
578 sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START;
579 netif_start_queue(sp->dev);
583 static inline void s2io_start_tx_queue(struct s2io_nic *sp, int fifo_no)
585 if (sp->config.multiq)
586 netif_start_subqueue(sp->dev, fifo_no);
588 sp->mac_control.fifos[fifo_no].queue_state =
590 netif_start_queue(sp->dev);
594 static inline void s2io_wake_all_tx_queue(struct s2io_nic *sp)
597 if (sp->config.multiq) {
598 for (i = 0; i < sp->config.tx_fifo_num; i++)
599 netif_wake_subqueue(sp->dev, i);
601 for (i = 0; i < sp->config.tx_fifo_num; i++)
602 sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START;
603 netif_wake_queue(sp->dev);
607 static inline void s2io_wake_tx_queue(
608 struct fifo_info *fifo, int cnt, u8 multiq)
612 if (cnt && __netif_subqueue_stopped(fifo->dev, fifo->fifo_no))
613 netif_wake_subqueue(fifo->dev, fifo->fifo_no);
614 } else if (cnt && (fifo->queue_state == FIFO_QUEUE_STOP)) {
615 if (netif_queue_stopped(fifo->dev)) {
616 fifo->queue_state = FIFO_QUEUE_START;
617 netif_wake_queue(fifo->dev);
/**
 * init_shared_mem - Allocation and Initialization of Memory
 * @nic: Device private variable.
 * Description: The function allocates all the memory areas shared
 * between the NIC and the driver. This includes Tx descriptors,
 * Rx descriptors and the statistics block.
 * NOTE(review): many interior lines of this function (error paths,
 * closing braces, some statements) are elided in this chunk; comments
 * below describe only what the visible code shows.
 */
static int init_shared_mem(struct s2io_nic *nic)
	void *tmp_v_addr, *tmp_v_addr_next;
	dma_addr_t tmp_p_addr, tmp_p_addr_next;
	struct RxD_block *pre_rxd_blk = NULL;
	int lst_size, lst_per_page;
	struct net_device *dev = nic->dev;
	struct mac_info *mac_control;
	struct config_param *config;
	/* Running byte count of everything allocated here; folded into the
	 * driver's sw_stat at the end of the function. */
	unsigned long long mem_allocated = 0;
	mac_control = &nic->mac_control;
	config = &nic->config;
	/* Allocation and initialization of TXDLs in FIFOs */
	/* First pass: the total TxD count across all FIFOs must stay within
	 * the hardware limit. */
	for (i = 0; i < config->tx_fifo_num; i++) {
		size += config->tx_cfg[i].fifo_len;
	if (size > MAX_AVAILABLE_TXDS) {
		DBG_PRINT(ERR_DBG, "s2io: Requested TxDs too high, ");
		DBG_PRINT(ERR_DBG, "Requested: %d, max supported: 8192\n", size);
	/* Second pass: each individual fifo length must be in range. */
	for (i = 0; i < config->tx_fifo_num; i++) {
		size = config->tx_cfg[i].fifo_len;
		/*
		 * Legal values are from 2 to 8192
		 */
			DBG_PRINT(ERR_DBG, "s2io: Invalid fifo len (%d)", size);
			DBG_PRINT(ERR_DBG, "for fifo %d\n", i);
			DBG_PRINT(ERR_DBG, "s2io: Legal values for fifo len"
	/* One TxD list per descriptor; lists are packed lst_per_page to a
	 * DMA-coherent page. */
	lst_size = (sizeof(struct TxD) * config->max_txds);
	lst_per_page = PAGE_SIZE / lst_size;
	for (i = 0; i < config->tx_fifo_num; i++) {
		int fifo_len = config->tx_cfg[i].fifo_len;
		int list_holder_size = fifo_len * sizeof(struct list_info_hold);
		mac_control->fifos[i].list_info = kzalloc(list_holder_size,
		if (!mac_control->fifos[i].list_info) {
				"Malloc failed for list_info\n");
		mem_allocated += list_holder_size;
	/* Per-fifo bookkeeping plus the DMA pages backing the TxD lists. */
	for (i = 0; i < config->tx_fifo_num; i++) {
		int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
		mac_control->fifos[i].tx_curr_put_info.offset = 0;
		mac_control->fifos[i].tx_curr_put_info.fifo_len =
			config->tx_cfg[i].fifo_len - 1;
		mac_control->fifos[i].tx_curr_get_info.offset = 0;
		mac_control->fifos[i].tx_curr_get_info.fifo_len =
			config->tx_cfg[i].fifo_len - 1;
		mac_control->fifos[i].fifo_no = i;
		mac_control->fifos[i].nic = nic;
		mac_control->fifos[i].max_txds = MAX_SKB_FRAGS + 2;
		mac_control->fifos[i].dev = dev;
		for (j = 0; j < page_num; j++) {
			tmp_v = pci_alloc_consistent(nic->pdev,
					"pci_alloc_consistent ");
				DBG_PRINT(INFO_DBG, "failed for TxDL\n");
			/* If we got a zero DMA address(can happen on
			 * certain platforms like PPC), reallocate.
			 * Store virtual address of page we don't want,
			 */
				mac_control->zerodma_virt_addr = tmp_v;
					"%s: Zero DMA address for TxDL. ", dev->name);
					"Virtual address %p\n", tmp_v);
				tmp_v = pci_alloc_consistent(nic->pdev,
						"pci_alloc_consistent ");
					DBG_PRINT(INFO_DBG, "failed for TxDL\n");
				mem_allocated += PAGE_SIZE;
			/* Carve the page into TxD lists and record each
			 * list's virtual/bus address pair. */
			while (k < lst_per_page) {
				int l = (j * lst_per_page) + k;
				if (l == config->tx_cfg[i].fifo_len)
				mac_control->fifos[i].list_info[l].list_virt_addr =
					tmp_v + (k * lst_size);
				mac_control->fifos[i].list_info[l].list_phy_addr =
					tmp_p + (k * lst_size);
	/* Per-fifo scratch array used by the UFO (UDP fragmentation
	 * offload) transmit path. */
	for (i = 0; i < config->tx_fifo_num; i++) {
		size = config->tx_cfg[i].fifo_len;
		mac_control->fifos[i].ufo_in_band_v
			= kcalloc(size, sizeof(u64), GFP_KERNEL);
		if (!mac_control->fifos[i].ufo_in_band_v)
		mem_allocated += (size * sizeof(u64));
	/* Allocation and initialization of RXDs in Rings */
	for (i = 0; i < config->rx_ring_num; i++) {
		/* Ring size must be a whole number of Rx blocks (one extra
		 * descriptor per block is the link descriptor). */
		if (config->rx_cfg[i].num_rxd %
			(rxd_count[nic->rxd_mode] + 1)) {
			DBG_PRINT(ERR_DBG, "%s: RxD count of ", dev->name);
			DBG_PRINT(ERR_DBG, "Ring%d is not a multiple of ",
			DBG_PRINT(ERR_DBG, "RxDs per Block");
		size += config->rx_cfg[i].num_rxd;
		mac_control->rings[i].block_count =
			config->rx_cfg[i].num_rxd /
			(rxd_count[nic->rxd_mode] + 1 );
		mac_control->rings[i].pkt_cnt = config->rx_cfg[i].num_rxd -
			mac_control->rings[i].block_count;
	if (nic->rxd_mode == RXD_MODE_1)
		size = (size * (sizeof(struct RxD1)));
		size = (size * (sizeof(struct RxD3)));
	for (i = 0; i < config->rx_ring_num; i++) {
		mac_control->rings[i].rx_curr_get_info.block_index = 0;
		mac_control->rings[i].rx_curr_get_info.offset = 0;
		mac_control->rings[i].rx_curr_get_info.ring_len =
			config->rx_cfg[i].num_rxd - 1;
		mac_control->rings[i].rx_curr_put_info.block_index = 0;
		mac_control->rings[i].rx_curr_put_info.offset = 0;
		mac_control->rings[i].rx_curr_put_info.ring_len =
			config->rx_cfg[i].num_rxd - 1;
		mac_control->rings[i].nic = nic;
		mac_control->rings[i].ring_no = i;
		mac_control->rings[i].lro = lro_enable;
		blk_cnt = config->rx_cfg[i].num_rxd /
			(rxd_count[nic->rxd_mode] + 1);
		/* Allocating all the Rx blocks */
		for (j = 0; j < blk_cnt; j++) {
			struct rx_block_info *rx_blocks;
			rx_blocks = &mac_control->rings[i].rx_blocks[j];
			size = SIZE_OF_BLOCK; //size is always page size
			tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
			if (tmp_v_addr == NULL) {
				/*
				 * In case of failure, free_shared_mem()
				 * is called, which should free any
				 * memory that was alloced till the
				 */
				rx_blocks->block_virt_addr = tmp_v_addr;
			mem_allocated += size;
			memset(tmp_v_addr, 0, size);
			rx_blocks->block_virt_addr = tmp_v_addr;
			rx_blocks->block_dma_addr = tmp_p_addr;
			/* Side table of per-descriptor virtual/bus addresses
			 * within the block. */
			rx_blocks->rxds = kmalloc(sizeof(struct rxd_info)*
						rxd_count[nic->rxd_mode],
			if (!rx_blocks->rxds)
				(sizeof(struct rxd_info)* rxd_count[nic->rxd_mode]);
			for (l=0; l<rxd_count[nic->rxd_mode];l++) {
				rx_blocks->rxds[l].virt_addr =
					rx_blocks->block_virt_addr +
					(rxd_size[nic->rxd_mode] * l);
				rx_blocks->rxds[l].dma_addr =
					rx_blocks->block_dma_addr +
					(rxd_size[nic->rxd_mode] * l);
		/* Interlinking all Rx Blocks */
		/* Each block's trailing link descriptor points at the next
		 * block (circularly, via the % blk_cnt wrap). */
		for (j = 0; j < blk_cnt; j++) {
				mac_control->rings[i].rx_blocks[j].block_virt_addr;
				mac_control->rings[i].rx_blocks[(j + 1) %
					blk_cnt].block_virt_addr;
				mac_control->rings[i].rx_blocks[j].block_dma_addr;
				mac_control->rings[i].rx_blocks[(j + 1) %
					blk_cnt].block_dma_addr;
			pre_rxd_blk = (struct RxD_block *) tmp_v_addr;
			pre_rxd_blk->reserved_2_pNext_RxD_block =
				(unsigned long) tmp_v_addr_next;
			pre_rxd_blk->pNext_RxD_Blk_physical =
				(u64) tmp_p_addr_next;
	if (nic->rxd_mode == RXD_MODE_3B) {
		/*
		 * Allocation of Storages for buffer addresses in 2BUFF mode
		 * and the buffers as well.
		 */
		for (i = 0; i < config->rx_ring_num; i++) {
			blk_cnt = config->rx_cfg[i].num_rxd /
				(rxd_count[nic->rxd_mode]+ 1);
			mac_control->rings[i].ba =
				kmalloc((sizeof(struct buffAdd *) * blk_cnt),
			if (!mac_control->rings[i].ba)
			mem_allocated +=(sizeof(struct buffAdd *) * blk_cnt);
			for (j = 0; j < blk_cnt; j++) {
				mac_control->rings[i].ba[j] =
					kmalloc((sizeof(struct buffAdd) *
						(rxd_count[nic->rxd_mode] + 1)),
				if (!mac_control->rings[i].ba[j])
				mem_allocated += (sizeof(struct buffAdd) * \
					(rxd_count[nic->rxd_mode] + 1));
				while (k != rxd_count[nic->rxd_mode]) {
					ba = &mac_control->rings[i].ba[j][k];
					/* Over-allocate by ALIGN_SIZE and keep
					 * both the raw pointer (for kfree) and
					 * the aligned pointer handed to HW. */
					ba->ba_0_org = (void *) kmalloc
						(BUF0_LEN + ALIGN_SIZE, GFP_KERNEL);
						(BUF0_LEN + ALIGN_SIZE);
					tmp = (unsigned long)ba->ba_0_org;
					tmp &= ~((unsigned long) ALIGN_SIZE);
					ba->ba_0 = (void *) tmp;
					ba->ba_1_org = (void *) kmalloc
						(BUF1_LEN + ALIGN_SIZE, GFP_KERNEL);
						+= (BUF1_LEN + ALIGN_SIZE);
					tmp = (unsigned long) ba->ba_1_org;
					tmp &= ~((unsigned long) ALIGN_SIZE);
					ba->ba_1 = (void *) tmp;
	/* Allocation and initialization of Statistics block */
	size = sizeof(struct stat_block);
	mac_control->stats_mem = pci_alloc_consistent
		(nic->pdev, size, &mac_control->stats_mem_phy);
	if (!mac_control->stats_mem) {
		/*
		 * In case of failure, free_shared_mem() is called, which
		 * should free any memory that was alloced till the
		 */
	mem_allocated += size;
	mac_control->stats_mem_sz = size;
	tmp_v_addr = mac_control->stats_mem;
	mac_control->stats_info = (struct stat_block *) tmp_v_addr;
	memset(tmp_v_addr, 0, size);
	DBG_PRINT(INIT_DBG, "%s:Ring Mem PHY: 0x%llx\n", dev->name,
		(unsigned long long) tmp_p_addr);
	mac_control->stats_info->sw_stat.mem_allocated += mem_allocated;
/**
 * free_shared_mem - Free the allocated Memory
 * @nic: Device private variable.
 * Description: This function is to free all memory locations allocated by
 * the init_shared_mem() function and return it to the kernel.
 * NOTE(review): interior lines are elided in this chunk; comments below
 * describe only what the visible code shows.
 */
static void free_shared_mem(struct s2io_nic *nic)
	int i, j, blk_cnt, size;
	dma_addr_t tmp_p_addr;
	struct mac_info *mac_control;
	struct config_param *config;
	int lst_size, lst_per_page;
	struct net_device *dev;
	mac_control = &nic->mac_control;
	config = &nic->config;
	/* Tear down the TxD list pages; mirrors the layout computed in
	 * init_shared_mem(). */
	lst_size = (sizeof(struct TxD) * config->max_txds);
	lst_per_page = PAGE_SIZE / lst_size;
	for (i = 0; i < config->tx_fifo_num; i++) {
		page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
		for (j = 0; j < page_num; j++) {
			int mem_blks = (j * lst_per_page);
			/* Allocation may have failed part-way; stop at the
			 * first missing list_info / list page. */
			if (!mac_control->fifos[i].list_info)
			if (!mac_control->fifos[i].list_info[mem_blks].
			pci_free_consistent(nic->pdev, PAGE_SIZE,
					mac_control->fifos[i].
					mac_control->fifos[i].
			nic->mac_control.stats_info->sw_stat.mem_freed
		/* If we got a zero DMA address during allocation,
		 */
		if (mac_control->zerodma_virt_addr) {
			pci_free_consistent(nic->pdev, PAGE_SIZE,
					mac_control->zerodma_virt_addr,
				"%s: Freeing TxDL with zero DMA addr. ",
			DBG_PRINT(INIT_DBG, "Virtual address %p\n",
				mac_control->zerodma_virt_addr);
			nic->mac_control.stats_info->sw_stat.mem_freed
		kfree(mac_control->fifos[i].list_info);
		nic->mac_control.stats_info->sw_stat.mem_freed +=
			(nic->config.tx_cfg[i].fifo_len *sizeof(struct list_info_hold));
	/* Release the Rx blocks and their per-descriptor side tables. */
	size = SIZE_OF_BLOCK;
	for (i = 0; i < config->rx_ring_num; i++) {
		blk_cnt = mac_control->rings[i].block_count;
		for (j = 0; j < blk_cnt; j++) {
			tmp_v_addr = mac_control->rings[i].rx_blocks[j].
			tmp_p_addr = mac_control->rings[i].rx_blocks[j].
			if (tmp_v_addr == NULL)
			pci_free_consistent(nic->pdev, size,
					tmp_v_addr, tmp_p_addr);
			nic->mac_control.stats_info->sw_stat.mem_freed += size;
			kfree(mac_control->rings[i].rx_blocks[j].rxds);
			nic->mac_control.stats_info->sw_stat.mem_freed +=
				( sizeof(struct rxd_info)* rxd_count[nic->rxd_mode]);
	if (nic->rxd_mode == RXD_MODE_3B) {
		/* Freeing buffer storage addresses in 2BUFF mode. */
		for (i = 0; i < config->rx_ring_num; i++) {
			blk_cnt = config->rx_cfg[i].num_rxd /
				(rxd_count[nic->rxd_mode] + 1);
			for (j = 0; j < blk_cnt; j++) {
				if (!mac_control->rings[i].ba[j])
				while (k != rxd_count[nic->rxd_mode]) {
					struct buffAdd *ba =
						&mac_control->rings[i].ba[j][k];
					/* Free the *_org (unaligned) pointers;
					 * ba_0/ba_1 are aligned views into
					 * the same allocations. */
					kfree(ba->ba_0_org);
					nic->mac_control.stats_info->sw_stat.\
						mem_freed += (BUF0_LEN + ALIGN_SIZE);
					kfree(ba->ba_1_org);
					nic->mac_control.stats_info->sw_stat.\
						mem_freed += (BUF1_LEN + ALIGN_SIZE);
				kfree(mac_control->rings[i].ba[j]);
				nic->mac_control.stats_info->sw_stat.mem_freed +=
					(sizeof(struct buffAdd) *
					(rxd_count[nic->rxd_mode] + 1));
			kfree(mac_control->rings[i].ba);
			nic->mac_control.stats_info->sw_stat.mem_freed +=
				(sizeof(struct buffAdd *) * blk_cnt);
	/* Release the per-fifo UFO scratch arrays. */
	for (i = 0; i < nic->config.tx_fifo_num; i++) {
		if (mac_control->fifos[i].ufo_in_band_v) {
			nic->mac_control.stats_info->sw_stat.mem_freed
				+= (config->tx_cfg[i].fifo_len * sizeof(u64));
			kfree(mac_control->fifos[i].ufo_in_band_v);
	/* Finally, the DMA-coherent statistics block. */
	if (mac_control->stats_mem) {
		nic->mac_control.stats_info->sw_stat.mem_freed +=
			mac_control->stats_mem_sz;
		pci_free_consistent(nic->pdev,
				mac_control->stats_mem_sz,
				mac_control->stats_mem,
				mac_control->stats_mem_phy);
1074 * s2io_verify_pci_mode -
1077 static int s2io_verify_pci_mode(struct s2io_nic *nic)
1079 struct XENA_dev_config __iomem *bar0 = nic->bar0;
1080 register u64 val64 = 0;
1083 val64 = readq(&bar0->pci_mode);
1084 mode = (u8)GET_PCI_MODE(val64);
1086 if ( val64 & PCI_MODE_UNKNOWN_MODE)
1087 return -1; /* Unknown PCI mode */
1091 #define NEC_VENID 0x1033
1092 #define NEC_DEVID 0x0125
1093 static int s2io_on_nec_bridge(struct pci_dev *s2io_pdev)
1095 struct pci_dev *tdev = NULL;
1096 while ((tdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, tdev)) != NULL) {
1097 if (tdev->vendor == NEC_VENID && tdev->device == NEC_DEVID) {
1098 if (tdev->bus == s2io_pdev->bus->parent) {
1107 static int bus_speed[8] = {33, 133, 133, 200, 266, 133, 200, 266};
1109 * s2io_print_pci_mode -
1111 static int s2io_print_pci_mode(struct s2io_nic *nic)
1113 struct XENA_dev_config __iomem *bar0 = nic->bar0;
1114 register u64 val64 = 0;
1116 struct config_param *config = &nic->config;
1118 val64 = readq(&bar0->pci_mode);
1119 mode = (u8)GET_PCI_MODE(val64);
1121 if ( val64 & PCI_MODE_UNKNOWN_MODE)
1122 return -1; /* Unknown PCI mode */
1124 config->bus_speed = bus_speed[mode];
1126 if (s2io_on_nec_bridge(nic->pdev)) {
1127 DBG_PRINT(ERR_DBG, "%s: Device is on PCI-E bus\n",
1132 if (val64 & PCI_MODE_32_BITS) {
1133 DBG_PRINT(ERR_DBG, "%s: Device is on 32 bit ", nic->dev->name);
1135 DBG_PRINT(ERR_DBG, "%s: Device is on 64 bit ", nic->dev->name);
1139 case PCI_MODE_PCI_33:
1140 DBG_PRINT(ERR_DBG, "33MHz PCI bus\n");
1142 case PCI_MODE_PCI_66:
1143 DBG_PRINT(ERR_DBG, "66MHz PCI bus\n");
1145 case PCI_MODE_PCIX_M1_66:
1146 DBG_PRINT(ERR_DBG, "66MHz PCIX(M1) bus\n");
1148 case PCI_MODE_PCIX_M1_100:
1149 DBG_PRINT(ERR_DBG, "100MHz PCIX(M1) bus\n");
1151 case PCI_MODE_PCIX_M1_133:
1152 DBG_PRINT(ERR_DBG, "133MHz PCIX(M1) bus\n");
1154 case PCI_MODE_PCIX_M2_66:
1155 DBG_PRINT(ERR_DBG, "133MHz PCIX(M2) bus\n");
1157 case PCI_MODE_PCIX_M2_100:
1158 DBG_PRINT(ERR_DBG, "200MHz PCIX(M2) bus\n");
1160 case PCI_MODE_PCIX_M2_133:
1161 DBG_PRINT(ERR_DBG, "266MHz PCIX(M2) bus\n");
1164 return -1; /* Unsupported bus speed */
/**
 * init_tti - Initialization transmit traffic interrupt scheme
 * @nic: device private variable
 * @link: link status (UP/DOWN) used to enable/disable continuous
 * transmit interrupts
 * Description: The function configures transmit traffic interrupts
 * Return Value: SUCCESS on success and
 * (NOTE(review): the failure value and several interior lines —
 * closing braces, else keywords — are elided in this chunk.)
 */
static int init_tti(struct s2io_nic *nic, int link)
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 val64 = 0;
	struct config_param *config;
	config = &nic->config;

	/* Program one TTI memory entry per Tx FIFO. */
	for (i = 0; i < config->tx_fifo_num; i++) {
		/*
		 * TTI Initialization. Default Tx timer gets us about
		 * 250 interrupts per sec. Continuous interrupts are enabled
		 */
		if (nic->device_type == XFRAME_II_DEVICE) {
			/* Xframe II: scale the timer with the bus clock
			 * cached by s2io_print_pci_mode(). */
			int count = (nic->config.bus_speed * 125)/2;
			val64 = TTI_DATA1_MEM_TX_TIMER_VAL(count);
			val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078);
		val64 |= TTI_DATA1_MEM_TX_URNG_A(0xA) |
			TTI_DATA1_MEM_TX_URNG_B(0x10) |
			TTI_DATA1_MEM_TX_URNG_C(0x30) |
			TTI_DATA1_MEM_TX_TIMER_AC_EN;
		/* Free-running timer while the link is up, so completions
		 * are reaped even without fresh transmissions. */
		if (use_continuous_tx_intrs && (link == LINK_UP))
			val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
		writeq(val64, &bar0->tti_data1_mem);

		if (nic->config.intr_type == MSI_X) {
			val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
				TTI_DATA2_MEM_TX_UFC_B(0x100) |
				TTI_DATA2_MEM_TX_UFC_C(0x200) |
				TTI_DATA2_MEM_TX_UFC_D(0x300);
			/* With default steering, give the UDP fifos lower
			 * utilization thresholds (earlier interrupts). */
			if ((nic->config.tx_steering_type ==
				TX_DEFAULT_STEERING) &&
				(config->tx_fifo_num > 1) &&
				(i >= nic->udp_fifo_idx) &&
				(i < (nic->udp_fifo_idx +
				nic->total_udp_fifos)))
				val64 = TTI_DATA2_MEM_TX_UFC_A(0x50) |
					TTI_DATA2_MEM_TX_UFC_B(0x80) |
					TTI_DATA2_MEM_TX_UFC_C(0x100) |
					TTI_DATA2_MEM_TX_UFC_D(0x120);
				val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
					TTI_DATA2_MEM_TX_UFC_B(0x20) |
					TTI_DATA2_MEM_TX_UFC_C(0x40) |
					TTI_DATA2_MEM_TX_UFC_D(0x80);

		writeq(val64, &bar0->tti_data2_mem);

		/* Latch the entry for FIFO i into the command memory and
		 * wait for the strobe bit to clear. */
		val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD |
			TTI_CMD_MEM_OFFSET(i);
		writeq(val64, &bar0->tti_command_mem);

		if (wait_for_cmd_complete(&bar0->tti_command_mem,
			TTI_CMD_MEM_STROBE_NEW_CMD, S2IO_BIT_RESET) != SUCCESS)
1248 * init_nic - Initialization of hardware
1249 * @nic: device private variable
1250 * Description: The function sequentially configures every block
1251 * of the H/W from their reset values.
1252 * Return Value: SUCCESS on success and
1253 * '-1' on failure (endian settings incorrect).
1256 static int init_nic(struct s2io_nic *nic)
1258 struct XENA_dev_config __iomem *bar0 = nic->bar0;
1259 struct net_device *dev = nic->dev;
1260 register u64 val64 = 0;
1264 struct mac_info *mac_control;
1265 struct config_param *config;
1267 unsigned long long mem_share;
1270 mac_control = &nic->mac_control;
1271 config = &nic->config;
1273 /* to set the swapper controle on the card */
1274 if(s2io_set_swapper(nic)) {
1275 DBG_PRINT(ERR_DBG,"ERROR: Setting Swapper failed\n");
1280 * Herc requires EOI to be removed from reset before XGXS, so..
1282 if (nic->device_type & XFRAME_II_DEVICE) {
1283 val64 = 0xA500000000ULL;
1284 writeq(val64, &bar0->sw_reset);
/* Read back to flush the posted write before the next step. */
1286 val64 = readq(&bar0->sw_reset);
1289 /* Remove XGXS from reset state */
1291 writeq(val64, &bar0->sw_reset);
1293 val64 = readq(&bar0->sw_reset);
1295 /* Ensure that it's safe to access registers by checking
1296 * RIC_RUNNING bit is reset. Check is valid only for XframeII.
1298 if (nic->device_type == XFRAME_II_DEVICE) {
1299 for (i = 0; i < 50; i++) {
1300 val64 = readq(&bar0->adapter_status);
1301 if (!(val64 & ADAPTER_STATUS_RIC_RUNNING))
1309 /* Enable Receiving broadcasts */
1310 add = &bar0->mac_cfg;
1311 val64 = readq(&bar0->mac_cfg);
1312 val64 |= MAC_RMAC_BCAST_ENABLE;
/* mac_cfg is key-protected: unlock with RMAC_CFG_KEY before each
 * 32-bit half-write.
 */
1313 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1314 writel((u32) val64, add);
1315 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1316 writel((u32) (val64 >> 32), (add + 4));
1318 /* Read registers in all blocks */
1319 val64 = readq(&bar0->mac_int_mask);
1320 val64 = readq(&bar0->mc_int_mask);
1321 val64 = readq(&bar0->xgxs_int_mask);
1325 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
/* Drive the device-specific DTX configuration sequence into
 * dtx_control; each table is terminated by END_SIGN.
 */
1327 if (nic->device_type & XFRAME_II_DEVICE) {
1328 while (herc_act_dtx_cfg[dtx_cnt] != END_SIGN) {
1329 SPECIAL_REG_WRITE(herc_act_dtx_cfg[dtx_cnt],
1330 &bar0->dtx_control, UF);
1332 msleep(1); /* Necessary!! */
1336 while (xena_dtx_cfg[dtx_cnt] != END_SIGN) {
1337 SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
1338 &bar0->dtx_control, UF);
1339 val64 = readq(&bar0->dtx_control);
1344 /* Tx DMA Initialization */
1346 writeq(val64, &bar0->tx_fifo_partition_0);
1347 writeq(val64, &bar0->tx_fifo_partition_1);
1348 writeq(val64, &bar0->tx_fifo_partition_2);
1349 writeq(val64, &bar0->tx_fifo_partition_3);
/* Pack each FIFO's length and priority into the partition
 * registers, four FIFO entries per 64-bit register.
 */
1352 for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
1354 vBIT(config->tx_cfg[i].fifo_len - 1, ((j * 32) + 19),
1355 13) | vBIT(config->tx_cfg[i].fifo_priority,
1358 if (i == (config->tx_fifo_num - 1)) {
1365 writeq(val64, &bar0->tx_fifo_partition_0);
1370 writeq(val64, &bar0->tx_fifo_partition_1);
1375 writeq(val64, &bar0->tx_fifo_partition_2);
1380 writeq(val64, &bar0->tx_fifo_partition_3);
1391 * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
1392 * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
1394 if ((nic->device_type == XFRAME_I_DEVICE) &&
1395 (nic->pdev->revision < 4))
1396 writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);
1398 val64 = readq(&bar0->tx_fifo_partition_0);
1399 DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
1400 &bar0->tx_fifo_partition_0, (unsigned long long) val64);
1403 * Initialization of Tx_PA_CONFIG register to ignore packet
1404 * integrity checking.
1406 val64 = readq(&bar0->tx_pa_cfg);
1407 val64 |= TX_PA_CFG_IGNORE_FRM_ERR | TX_PA_CFG_IGNORE_SNAP_OUI |
1408 TX_PA_CFG_IGNORE_LLC_CTRL | TX_PA_CFG_IGNORE_L2_ERR;
1409 writeq(val64, &bar0->tx_pa_cfg);
1411 /* Rx DMA intialization. */
1413 for (i = 0; i < config->rx_ring_num; i++) {
1415 vBIT(config->rx_cfg[i].ring_priority, (5 + (i * 8)),
1418 writeq(val64, &bar0->rx_queue_priority);
1421 * Allocating equal share of memory to all the
1425 if (nic->device_type & XFRAME_II_DEVICE)
/* Queue 0 additionally absorbs the division remainder so the
 * whole memory is accounted for.
 */
1430 for (i = 0; i < config->rx_ring_num; i++) {
1433 mem_share = (mem_size / config->rx_ring_num +
1434 mem_size % config->rx_ring_num);
1435 val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
1438 mem_share = (mem_size / config->rx_ring_num);
1439 val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
1442 mem_share = (mem_size / config->rx_ring_num);
1443 val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
1446 mem_share = (mem_size / config->rx_ring_num);
1447 val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
1450 mem_share = (mem_size / config->rx_ring_num);
1451 val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
1454 mem_share = (mem_size / config->rx_ring_num);
1455 val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
1458 mem_share = (mem_size / config->rx_ring_num);
1459 val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
1462 mem_share = (mem_size / config->rx_ring_num);
1463 val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
1467 writeq(val64, &bar0->rx_queue_cfg);
1470 * Filling Tx round robin registers
1471 * as per the number of FIFOs for equal scheduling priority
/* Each byte of the five round-robin registers names the FIFO that
 * gets the next Tx slot; the patterns below cycle through all
 * configured FIFOs evenly for 1..8 FIFOs.
 */
1473 switch (config->tx_fifo_num) {
1476 writeq(val64, &bar0->tx_w_round_robin_0);
1477 writeq(val64, &bar0->tx_w_round_robin_1);
1478 writeq(val64, &bar0->tx_w_round_robin_2);
1479 writeq(val64, &bar0->tx_w_round_robin_3);
1480 writeq(val64, &bar0->tx_w_round_robin_4);
1483 val64 = 0x0001000100010001ULL;
1484 writeq(val64, &bar0->tx_w_round_robin_0);
1485 writeq(val64, &bar0->tx_w_round_robin_1);
1486 writeq(val64, &bar0->tx_w_round_robin_2);
1487 writeq(val64, &bar0->tx_w_round_robin_3);
1488 val64 = 0x0001000100000000ULL;
1489 writeq(val64, &bar0->tx_w_round_robin_4);
1492 val64 = 0x0001020001020001ULL;
1493 writeq(val64, &bar0->tx_w_round_robin_0);
1494 val64 = 0x0200010200010200ULL;
1495 writeq(val64, &bar0->tx_w_round_robin_1);
1496 val64 = 0x0102000102000102ULL;
1497 writeq(val64, &bar0->tx_w_round_robin_2);
1498 val64 = 0x0001020001020001ULL;
1499 writeq(val64, &bar0->tx_w_round_robin_3);
1500 val64 = 0x0200010200000000ULL;
1501 writeq(val64, &bar0->tx_w_round_robin_4);
1504 val64 = 0x0001020300010203ULL;
1505 writeq(val64, &bar0->tx_w_round_robin_0);
1506 writeq(val64, &bar0->tx_w_round_robin_1);
1507 writeq(val64, &bar0->tx_w_round_robin_2);
1508 writeq(val64, &bar0->tx_w_round_robin_3);
1509 val64 = 0x0001020300000000ULL;
1510 writeq(val64, &bar0->tx_w_round_robin_4);
1513 val64 = 0x0001020304000102ULL;
1514 writeq(val64, &bar0->tx_w_round_robin_0);
1515 val64 = 0x0304000102030400ULL;
1516 writeq(val64, &bar0->tx_w_round_robin_1);
1517 val64 = 0x0102030400010203ULL;
1518 writeq(val64, &bar0->tx_w_round_robin_2);
1519 val64 = 0x0400010203040001ULL;
1520 writeq(val64, &bar0->tx_w_round_robin_3);
1521 val64 = 0x0203040000000000ULL;
1522 writeq(val64, &bar0->tx_w_round_robin_4);
1525 val64 = 0x0001020304050001ULL;
1526 writeq(val64, &bar0->tx_w_round_robin_0);
1527 val64 = 0x0203040500010203ULL;
1528 writeq(val64, &bar0->tx_w_round_robin_1);
1529 val64 = 0x0405000102030405ULL;
1530 writeq(val64, &bar0->tx_w_round_robin_2);
1531 val64 = 0x0001020304050001ULL;
1532 writeq(val64, &bar0->tx_w_round_robin_3);
1533 val64 = 0x0203040500000000ULL;
1534 writeq(val64, &bar0->tx_w_round_robin_4);
1537 val64 = 0x0001020304050600ULL;
1538 writeq(val64, &bar0->tx_w_round_robin_0);
1539 val64 = 0x0102030405060001ULL;
1540 writeq(val64, &bar0->tx_w_round_robin_1);
1541 val64 = 0x0203040506000102ULL;
1542 writeq(val64, &bar0->tx_w_round_robin_2);
1543 val64 = 0x0304050600010203ULL;
1544 writeq(val64, &bar0->tx_w_round_robin_3);
1545 val64 = 0x0405060000000000ULL;
1546 writeq(val64, &bar0->tx_w_round_robin_4);
1549 val64 = 0x0001020304050607ULL;
1550 writeq(val64, &bar0->tx_w_round_robin_0);
1551 writeq(val64, &bar0->tx_w_round_robin_1);
1552 writeq(val64, &bar0->tx_w_round_robin_2);
1553 writeq(val64, &bar0->tx_w_round_robin_3);
1554 val64 = 0x0001020300000000ULL;
1555 writeq(val64, &bar0->tx_w_round_robin_4);
1559 /* Enable all configured Tx FIFO partitions */
1560 val64 = readq(&bar0->tx_fifo_partition_0);
1561 val64 |= (TX_FIFO_PARTITION_EN);
1562 writeq(val64, &bar0->tx_fifo_partition_0);
1564 /* Filling the Rx round robin registers as per the
1565 * number of Rings and steering based on QoS with
/* Same even-rotation idea as the Tx tables above, plus a QoS
 * steering pattern per ring count.
 */
1568 switch (config->rx_ring_num) {
1571 writeq(val64, &bar0->rx_w_round_robin_0);
1572 writeq(val64, &bar0->rx_w_round_robin_1);
1573 writeq(val64, &bar0->rx_w_round_robin_2);
1574 writeq(val64, &bar0->rx_w_round_robin_3);
1575 writeq(val64, &bar0->rx_w_round_robin_4);
1577 val64 = 0x8080808080808080ULL;
1578 writeq(val64, &bar0->rts_qos_steering);
1581 val64 = 0x0001000100010001ULL;
1582 writeq(val64, &bar0->rx_w_round_robin_0);
1583 writeq(val64, &bar0->rx_w_round_robin_1);
1584 writeq(val64, &bar0->rx_w_round_robin_2);
1585 writeq(val64, &bar0->rx_w_round_robin_3);
1586 val64 = 0x0001000100000000ULL;
1587 writeq(val64, &bar0->rx_w_round_robin_4);
1589 val64 = 0x8080808040404040ULL;
1590 writeq(val64, &bar0->rts_qos_steering);
1593 val64 = 0x0001020001020001ULL;
1594 writeq(val64, &bar0->rx_w_round_robin_0);
1595 val64 = 0x0200010200010200ULL;
1596 writeq(val64, &bar0->rx_w_round_robin_1);
1597 val64 = 0x0102000102000102ULL;
1598 writeq(val64, &bar0->rx_w_round_robin_2);
1599 val64 = 0x0001020001020001ULL;
1600 writeq(val64, &bar0->rx_w_round_robin_3);
1601 val64 = 0x0200010200000000ULL;
1602 writeq(val64, &bar0->rx_w_round_robin_4);
1604 val64 = 0x8080804040402020ULL;
1605 writeq(val64, &bar0->rts_qos_steering);
1608 val64 = 0x0001020300010203ULL;
1609 writeq(val64, &bar0->rx_w_round_robin_0);
1610 writeq(val64, &bar0->rx_w_round_robin_1);
1611 writeq(val64, &bar0->rx_w_round_robin_2);
1612 writeq(val64, &bar0->rx_w_round_robin_3);
1613 val64 = 0x0001020300000000ULL;
1614 writeq(val64, &bar0->rx_w_round_robin_4);
1616 val64 = 0x8080404020201010ULL;
1617 writeq(val64, &bar0->rts_qos_steering);
1620 val64 = 0x0001020304000102ULL;
1621 writeq(val64, &bar0->rx_w_round_robin_0);
1622 val64 = 0x0304000102030400ULL;
1623 writeq(val64, &bar0->rx_w_round_robin_1);
1624 val64 = 0x0102030400010203ULL;
1625 writeq(val64, &bar0->rx_w_round_robin_2);
1626 val64 = 0x0400010203040001ULL;
1627 writeq(val64, &bar0->rx_w_round_robin_3);
1628 val64 = 0x0203040000000000ULL;
1629 writeq(val64, &bar0->rx_w_round_robin_4);
1631 val64 = 0x8080404020201008ULL;
1632 writeq(val64, &bar0->rts_qos_steering);
1635 val64 = 0x0001020304050001ULL;
1636 writeq(val64, &bar0->rx_w_round_robin_0);
1637 val64 = 0x0203040500010203ULL;
1638 writeq(val64, &bar0->rx_w_round_robin_1);
1639 val64 = 0x0405000102030405ULL;
1640 writeq(val64, &bar0->rx_w_round_robin_2);
1641 val64 = 0x0001020304050001ULL;
1642 writeq(val64, &bar0->rx_w_round_robin_3);
1643 val64 = 0x0203040500000000ULL;
1644 writeq(val64, &bar0->rx_w_round_robin_4);
1646 val64 = 0x8080404020100804ULL;
1647 writeq(val64, &bar0->rts_qos_steering);
1650 val64 = 0x0001020304050600ULL;
1651 writeq(val64, &bar0->rx_w_round_robin_0);
1652 val64 = 0x0102030405060001ULL;
1653 writeq(val64, &bar0->rx_w_round_robin_1);
1654 val64 = 0x0203040506000102ULL;
1655 writeq(val64, &bar0->rx_w_round_robin_2);
1656 val64 = 0x0304050600010203ULL;
1657 writeq(val64, &bar0->rx_w_round_robin_3);
1658 val64 = 0x0405060000000000ULL;
1659 writeq(val64, &bar0->rx_w_round_robin_4);
1661 val64 = 0x8080402010080402ULL;
1662 writeq(val64, &bar0->rts_qos_steering);
1665 val64 = 0x0001020304050607ULL;
1666 writeq(val64, &bar0->rx_w_round_robin_0);
1667 writeq(val64, &bar0->rx_w_round_robin_1);
1668 writeq(val64, &bar0->rx_w_round_robin_2);
1669 writeq(val64, &bar0->rx_w_round_robin_3);
1670 val64 = 0x0001020300000000ULL;
1671 writeq(val64, &bar0->rx_w_round_robin_4);
1673 val64 = 0x8040201008040201ULL;
1674 writeq(val64, &bar0->rts_qos_steering);
/* Program all eight RTS frame-length slots first ... */
1680 for (i = 0; i < 8; i++)
1681 writeq(val64, &bar0->rts_frm_len_n[i]);
1683 /* Set the default rts frame length for the rings configured */
1684 val64 = MAC_RTS_FRM_LEN_SET(dev->mtu+22);
1685 for (i = 0 ; i < config->rx_ring_num ; i++)
1686 writeq(val64, &bar0->rts_frm_len_n[i]);
1688 /* Set the frame length for the configured rings
1689 * desired by the user
1691 for (i = 0; i < config->rx_ring_num; i++) {
1692 /* If rts_frm_len[i] == 0 then it is assumed that user not
1693 * specified frame length steering.
1694 * If the user provides the frame length then program
1695 * the rts_frm_len register for those values or else
1696 * leave it as it is.
1698 if (rts_frm_len[i] != 0) {
1699 writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
1700 &bar0->rts_frm_len_n[i]);
1704 /* Disable differentiated services steering logic */
1705 for (i = 0; i < 64; i++) {
1706 if (rts_ds_steer(nic, i, 0) == FAILURE) {
1707 DBG_PRINT(ERR_DBG, "%s: failed rts ds steering",
1709 DBG_PRINT(ERR_DBG, "set on codepoint %d\n", i);
1714 /* Program statistics memory */
1715 writeq(mac_control->stats_mem_phy, &bar0->stat_addr);
1717 if (nic->device_type == XFRAME_II_DEVICE) {
1718 val64 = STAT_BC(0x320);
1719 writeq(val64, &bar0->stat_byte_cnt);
1723 * Initializing the sampling rate for the device to calculate the
1724 * bandwidth utilization.
1726 val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
1727 MAC_RX_LINK_UTIL_VAL(rmac_util_period);
1728 writeq(val64, &bar0->mac_link_util);
1731 * Initializing the Transmit and Receive Traffic Interrupt
1735 /* Initialize TTI */
1736 if (SUCCESS != init_tti(nic, nic->last_link_state))
1739 /* RTI Initialization */
1740 if (nic->device_type == XFRAME_II_DEVICE) {
1742 * Programmed to generate Apprx 500 Intrs per
1745 int count = (nic->config.bus_speed * 125)/4;
1746 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(count);
1748 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF);
1749 val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) |
1750 RTI_DATA1_MEM_RX_URNG_B(0x10) |
1751 RTI_DATA1_MEM_RX_URNG_C(0x30) | RTI_DATA1_MEM_RX_TIMER_AC_EN;
1753 writeq(val64, &bar0->rti_data1_mem);
1755 val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
1756 RTI_DATA2_MEM_RX_UFC_B(0x2) ;
/* MSI-X uses smaller Rx UFC C/D thresholds than INTA/MSI. */
1757 if (nic->config.intr_type == MSI_X)
1758 val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x20) | \
1759 RTI_DATA2_MEM_RX_UFC_D(0x40));
1761 val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x40) | \
1762 RTI_DATA2_MEM_RX_UFC_D(0x80));
1763 writeq(val64, &bar0->rti_data2_mem);
/* Latch one RTI entry per Rx ring and poll for completion. */
1765 for (i = 0; i < config->rx_ring_num; i++) {
1766 val64 = RTI_CMD_MEM_WE | RTI_CMD_MEM_STROBE_NEW_CMD
1767 | RTI_CMD_MEM_OFFSET(i);
1768 writeq(val64, &bar0->rti_command_mem);
1771 * Once the operation completes, the Strobe bit of the
1772 * command register will be reset. We poll for this
1773 * particular condition. We wait for a maximum of 500ms
1774 * for the operation to complete, if it's not complete
1775 * by then we return error.
1779 val64 = readq(&bar0->rti_command_mem);
1780 if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD))
1784 DBG_PRINT(ERR_DBG, "%s: RTI init Failed\n",
1794 * Initializing proper values as Pause threshold into all
1795 * the 8 Queues on Rx side.
1797 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
1798 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);
1800 /* Disable RMAC PAD STRIPPING */
1801 add = &bar0->mac_cfg;
1802 val64 = readq(&bar0->mac_cfg);
1803 val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
1804 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1805 writel((u32) (val64), add);
1806 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1807 writel((u32) (val64 >> 32), (add + 4));
1808 val64 = readq(&bar0->mac_cfg);
1810 /* Enable FCS stripping by adapter */
1811 add = &bar0->mac_cfg;
1812 val64 = readq(&bar0->mac_cfg);
1813 val64 |= MAC_CFG_RMAC_STRIP_FCS;
/* Xframe-II allows a direct 64-bit write; Xframe-I needs the
 * key-protected split 32-bit writes.
 */
1814 if (nic->device_type == XFRAME_II_DEVICE)
1815 writeq(val64, &bar0->mac_cfg);
1817 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1818 writel((u32) (val64), add);
1819 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1820 writel((u32) (val64 >> 32), (add + 4));
1824 * Set the time value to be inserted in the pause frame
1825 * generated by xena.
1827 val64 = readq(&bar0->rmac_pause_cfg);
1828 val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
1829 val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
1830 writeq(val64, &bar0->rmac_pause_cfg);
1833 * Set the Threshold Limit for Generating the pause frame
1834 * If the amount of data in any Queue exceeds ratio of
1835 * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256
1836 * pause frame is generated
1839 for (i = 0; i < 4; i++) {
1841 (((u64) 0xFF00 | nic->mac_control.
1842 mc_pause_threshold_q0q3)
1845 writeq(val64, &bar0->mc_pause_thresh_q0q3);
1848 for (i = 0; i < 4; i++) {
1850 (((u64) 0xFF00 | nic->mac_control.
1851 mc_pause_threshold_q4q7)
1854 writeq(val64, &bar0->mc_pause_thresh_q4q7);
1857 * TxDMA will stop Read request if the number of read split has
1858 * exceeded the limit pointed by shared_splits
1860 val64 = readq(&bar0->pic_control);
1861 val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
1862 writeq(val64, &bar0->pic_control);
1864 if (nic->config.bus_speed == 266) {
1865 writeq(TXREQTO_VAL(0x7f) | TXREQTO_EN, &bar0->txreqtimeout);
1866 writeq(0x0, &bar0->read_retry_delay);
1867 writeq(0x0, &bar0->write_retry_delay);
1871 * Programming the Herc to split every write transaction
1872 * that does not start on an ADB to reduce disconnects.
1874 if (nic->device_type == XFRAME_II_DEVICE) {
1875 val64 = FAULT_BEHAVIOUR | EXT_REQ_EN |
1876 MISC_LINK_STABILITY_PRD(3);
1877 writeq(val64, &bar0->misc_control);
1878 val64 = readq(&bar0->pic_control2);
1879 val64 &= ~(s2BIT(13)|s2BIT(14)|s2BIT(15));
1880 writeq(val64, &bar0->pic_control2);
/* CX4 boards: set average inter-packet gap for TMAC. */
1882 if (strstr(nic->product_name, "CX4")) {
1883 val64 = TMAC_AVG_IPG(0x17);
1884 writeq(val64, &bar0->tmac_avg_ipg);
1889 #define LINK_UP_DOWN_INTERRUPT 1
1890 #define MAC_RMAC_ERR_TIMER 2
1892 static int s2io_link_fault_indication(struct s2io_nic *nic)
1894 if (nic->config.intr_type != INTA)
1895 return MAC_RMAC_ERR_TIMER;
1896 if (nic->device_type == XFRAME_II_DEVICE)
1897 return LINK_UP_DOWN_INTERRUPT;
1899 return MAC_RMAC_ERR_TIMER;
1903 * do_s2io_write_bits - update alarm bits in alarm register
1904 * @value: alarm bits
1905 * @flag: interrupt status
1906 * @addr: address value
1907 * Description: update alarm bits in alarm register
1911 static void do_s2io_write_bits(u64 value, int flag, void __iomem *addr)
1915 temp64 = readq(addr);
1917 if(flag == ENABLE_INTRS)
1918 temp64 &= ~((u64) value);
1920 temp64 |= ((u64) value);
1921 writeq(temp64, addr);
/*
 * en_dis_err_alarms - enable or disable hardware error alarms
 * @nic: device private variable
 * @mask: bitmap of alarm groups (TX_DMA_INTR, TX_MAC_INTR, ...) to touch
 * @flag: passed through to do_s2io_write_bits (ENABLE_INTRS clears the
 *        mask bits, otherwise sets them)
 *
 * For each selected group, accumulates the group's top-level bit into
 * gen_int_mask and programs the per-block error mask registers.
 */
1924 static void en_dis_err_alarms(struct s2io_nic *nic, u16 mask, int flag)
1926 struct XENA_dev_config __iomem *bar0 = nic->bar0;
1927 register u64 gen_int_mask = 0;
1929 if (mask & TX_DMA_INTR) {
1931 gen_int_mask |= TXDMA_INT_M;
1933 do_s2io_write_bits(TXDMA_TDA_INT | TXDMA_PFC_INT |
1934 TXDMA_PCC_INT | TXDMA_TTI_INT |
1935 TXDMA_LSO_INT | TXDMA_TPA_INT |
1936 TXDMA_SM_INT, flag, &bar0->txdma_int_mask);
1938 do_s2io_write_bits(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM |
1939 PFC_MISC_0_ERR | PFC_MISC_1_ERR |
1940 PFC_PCIX_ERR | PFC_ECC_SG_ERR, flag,
1941 &bar0->pfc_err_mask);
1943 do_s2io_write_bits(TDA_Fn_ECC_DB_ERR | TDA_SM0_ERR_ALARM |
1944 TDA_SM1_ERR_ALARM | TDA_Fn_ECC_SG_ERR |
1945 TDA_PCIX_ERR, flag, &bar0->tda_err_mask);
1947 do_s2io_write_bits(PCC_FB_ECC_DB_ERR | PCC_TXB_ECC_DB_ERR |
1948 PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM |
1949 PCC_N_SERR | PCC_6_COF_OV_ERR |
1950 PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR |
1951 PCC_7_LSO_OV_ERR | PCC_FB_ECC_SG_ERR |
1952 PCC_TXB_ECC_SG_ERR, flag, &bar0->pcc_err_mask);
1954 do_s2io_write_bits(TTI_SM_ERR_ALARM | TTI_ECC_SG_ERR |
1955 TTI_ECC_DB_ERR, flag, &bar0->tti_err_mask);
1957 do_s2io_write_bits(LSO6_ABORT | LSO7_ABORT |
1958 LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM |
1959 LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
1960 flag, &bar0->lso_err_mask);
1962 do_s2io_write_bits(TPA_SM_ERR_ALARM | TPA_TX_FRM_DROP,
1963 flag, &bar0->tpa_err_mask);
1965 do_s2io_write_bits(SM_SM_ERR_ALARM, flag, &bar0->sm_err_mask);
1969 if (mask & TX_MAC_INTR) {
1970 gen_int_mask |= TXMAC_INT_M;
1971 do_s2io_write_bits(MAC_INT_STATUS_TMAC_INT, flag,
1972 &bar0->mac_int_mask);
1973 do_s2io_write_bits(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR |
1974 TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR |
1975 TMAC_DESC_ECC_SG_ERR | TMAC_DESC_ECC_DB_ERR,
1976 flag, &bar0->mac_tmac_err_mask);
1979 if (mask & TX_XGXS_INTR) {
1980 gen_int_mask |= TXXGXS_INT_M;
1981 do_s2io_write_bits(XGXS_INT_STATUS_TXGXS, flag,
1982 &bar0->xgxs_int_mask);
1983 do_s2io_write_bits(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR |
1984 TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
1985 flag, &bar0->xgxs_txgxs_err_mask);
1988 if (mask & RX_DMA_INTR) {
1989 gen_int_mask |= RXDMA_INT_M;
1990 do_s2io_write_bits(RXDMA_INT_RC_INT_M | RXDMA_INT_RPA_INT_M |
1991 RXDMA_INT_RDA_INT_M | RXDMA_INT_RTI_INT_M,
1992 flag, &bar0->rxdma_int_mask);
1993 do_s2io_write_bits(RC_PRCn_ECC_DB_ERR | RC_FTC_ECC_DB_ERR |
1994 RC_PRCn_SM_ERR_ALARM | RC_FTC_SM_ERR_ALARM |
1995 RC_PRCn_ECC_SG_ERR | RC_FTC_ECC_SG_ERR |
1996 RC_RDA_FAIL_WR_Rn, flag, &bar0->rc_err_mask);
1997 do_s2io_write_bits(PRC_PCI_AB_RD_Rn | PRC_PCI_AB_WR_Rn |
1998 PRC_PCI_AB_F_WR_Rn | PRC_PCI_DP_RD_Rn |
1999 PRC_PCI_DP_WR_Rn | PRC_PCI_DP_F_WR_Rn, flag,
2000 &bar0->prc_pcix_err_mask);
2001 do_s2io_write_bits(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR |
2002 RPA_ECC_SG_ERR | RPA_ECC_DB_ERR, flag,
2003 &bar0->rpa_err_mask);
2004 do_s2io_write_bits(RDA_RXDn_ECC_DB_ERR | RDA_FRM_ECC_DB_N_AERR |
2005 RDA_SM1_ERR_ALARM | RDA_SM0_ERR_ALARM |
2006 RDA_RXD_ECC_DB_SERR | RDA_RXDn_ECC_SG_ERR |
2007 RDA_FRM_ECC_SG_ERR | RDA_MISC_ERR|RDA_PCIX_ERR,
2008 flag, &bar0->rda_err_mask);
2009 do_s2io_write_bits(RTI_SM_ERR_ALARM |
2010 RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
2011 flag, &bar0->rti_err_mask);
2014 if (mask & RX_MAC_INTR) {
2015 gen_int_mask |= RXMAC_INT_M;
2016 do_s2io_write_bits(MAC_INT_STATUS_RMAC_INT, flag,
2017 &bar0->mac_int_mask);
2018 do_s2io_write_bits(RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR |
2019 RMAC_UNUSED_INT | RMAC_SINGLE_ECC_ERR |
2020 RMAC_DOUBLE_ECC_ERR |
2021 RMAC_LINK_STATE_CHANGE_INT,
2022 flag, &bar0->mac_rmac_err_mask);
2025 if (mask & RX_XGXS_INTR)
2027 gen_int_mask |= RXXGXS_INT_M;
2028 do_s2io_write_bits(XGXS_INT_STATUS_RXGXS, flag,
2029 &bar0->xgxs_int_mask);
2030 do_s2io_write_bits(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR, flag,
2031 &bar0->xgxs_rxgxs_err_mask);
2034 if (mask & MC_INTR) {
2035 gen_int_mask |= MC_INT_M;
2036 do_s2io_write_bits(MC_INT_MASK_MC_INT, flag, &bar0->mc_int_mask);
2037 do_s2io_write_bits(MC_ERR_REG_SM_ERR | MC_ERR_REG_ECC_ALL_SNG |
2038 MC_ERR_REG_ECC_ALL_DBL | PLL_LOCK_N, flag,
2039 &bar0->mc_err_mask);
2041 nic->general_int_mask = gen_int_mask;
2043 /* Remove this line when alarm interrupts are enabled */
/* NOTE(review): this deliberately discards the accumulated mask,
 * keeping alarm interrupts off until they are fully supported.
 */
2044 nic->general_int_mask = 0;
2047 * en_dis_able_nic_intrs - Enable or Disable the interrupts
2048 * @nic: device private variable,
2049 * @mask: A mask indicating which Intr block must be modified and,
2050 * @flag: A flag indicating whether to enable or disable the Intrs.
2051 * Description: This function will either disable or enable the interrupts
2052 * depending on the flag argument. The mask argument can be used to
2053 * enable/disable any Intr block.
2054 * Return Value: NONE.
2057 static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
2059 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2060 register u64 temp64 = 0, intr_mask = 0;
/* Start from the alarm mask computed by en_dis_err_alarms(). */
2062 intr_mask = nic->general_int_mask;
2064 /* Top level interrupt classification */
2065 /* PIC Interrupts */
2066 if (mask & TX_PIC_INTR) {
2067 /* Enable PIC Intrs in the general intr mask register */
2068 intr_mask |= TXPIC_INT_M;
2069 if (flag == ENABLE_INTRS) {
2071 * If Hercules adapter enable GPIO otherwise
2072 * disable all PCIX, Flash, MDIO, IIC and GPIO
2073 * interrupts for now.
/* GPIO link interrupts only make sense when the link-fault
 * scheme is interrupt based.
 */
2076 if (s2io_link_fault_indication(nic) ==
2077 LINK_UP_DOWN_INTERRUPT ) {
2078 do_s2io_write_bits(PIC_INT_GPIO, flag,
2079 &bar0->pic_int_mask);
2080 do_s2io_write_bits(GPIO_INT_MASK_LINK_UP, flag,
2081 &bar0->gpio_int_mask);
2083 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
2084 } else if (flag == DISABLE_INTRS) {
2086 * Disable PIC Intrs in the general
2087 * intr mask register
2089 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
2093 /* Tx traffic interrupts */
2094 if (mask & TX_TRAFFIC_INTR) {
2095 intr_mask |= TXTRAFFIC_INT_M;
2096 if (flag == ENABLE_INTRS) {
2098 * Enable all the Tx side interrupts
2099 * writing 0 Enables all 64 TX interrupt levels
2101 writeq(0x0, &bar0->tx_traffic_mask);
2102 } else if (flag == DISABLE_INTRS) {
2104 * Disable Tx Traffic Intrs in the general intr mask
2107 writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
2111 /* Rx traffic interrupts */
2112 if (mask & RX_TRAFFIC_INTR) {
2113 intr_mask |= RXTRAFFIC_INT_M;
2114 if (flag == ENABLE_INTRS) {
2115 /* writing 0 Enables all 8 RX interrupt levels */
2116 writeq(0x0, &bar0->rx_traffic_mask);
2117 } else if (flag == DISABLE_INTRS) {
2119 * Disable Rx Traffic Intrs in the general intr mask
2122 writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
/* Finally apply the accumulated mask to the general interrupt
 * mask register (bits cleared = enabled) and cache the result.
 */
2126 temp64 = readq(&bar0->general_int_mask);
2127 if (flag == ENABLE_INTRS)
2128 temp64 &= ~((u64) intr_mask);
2130 temp64 = DISABLE_ALL_INTRS;
2131 writeq(temp64, &bar0->general_int_mask);
2133 nic->general_int_mask = readq(&bar0->general_int_mask);
2137 * verify_pcc_quiescent- Checks for PCC quiescent state
2138 * Return: 1 If PCC is quiescence
2139 * 0 If PCC is not quiescence
2141 static int verify_pcc_quiescent(struct s2io_nic *sp, int flag)
2144 struct XENA_dev_config __iomem *bar0 = sp->bar0;
2145 u64 val64 = readq(&bar0->adapter_status);
2147 herc = (sp->device_type == XFRAME_II_DEVICE)
/* flag == FALSE: adapter-enable was not written; expect idle bits set. */
2149 if (flag == FALSE) {
/* NOTE(review): "(!herc && rev >= 4) || herc" is equivalent to
 * "herc || rev >= 4"; older Xframe-I revisions (< 4, with the
 * four-PCC workaround) use the FOUR_IDLE status bit instead.
 */
2150 if ((!herc && (sp->pdev->revision >= 4)) || herc) {
2151 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE))
2154 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
/* flag != FALSE: adapter enabled; all relevant idle bits must match. */
2158 if ((!herc && (sp->pdev->revision >= 4)) || herc) {
2159 if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
2160 ADAPTER_STATUS_RMAC_PCC_IDLE))
2163 if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
2164 ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
2172 * verify_xena_quiescence - Checks whether the H/W is ready
2173 * Description: Returns whether the H/W is ready to go or not. Depending
2174 * on whether adapter enable bit was written or not the comparison
2175 * differs and the calling function passes the input argument flag to
2177 * Return: 1 If xena is quiescence
2178 * 0 If Xena is not quiescence
2181 static int verify_xena_quiescence(struct s2io_nic *sp)
2184 struct XENA_dev_config __iomem *bar0 = sp->bar0;
2185 u64 val64 = readq(&bar0->adapter_status);
2186 mode = s2io_verify_pci_mode(sp);
/* Each block of the device must report ready/quiescent; the first
 * failing check logs the offending block.
 */
2188 if (!(val64 & ADAPTER_STATUS_TDMA_READY)) {
2189 DBG_PRINT(ERR_DBG, "%s", "TDMA is not ready!");
2192 if (!(val64 & ADAPTER_STATUS_RDMA_READY)) {
2193 DBG_PRINT(ERR_DBG, "%s", "RDMA is not ready!");
2196 if (!(val64 & ADAPTER_STATUS_PFC_READY)) {
2197 DBG_PRINT(ERR_DBG, "%s", "PFC is not ready!");
2200 if (!(val64 & ADAPTER_STATUS_TMAC_BUF_EMPTY)) {
2201 DBG_PRINT(ERR_DBG, "%s", "TMAC BUF is not empty!");
2204 if (!(val64 & ADAPTER_STATUS_PIC_QUIESCENT)) {
2205 DBG_PRINT(ERR_DBG, "%s", "PIC is not QUIESCENT!");
2208 if (!(val64 & ADAPTER_STATUS_MC_DRAM_READY)) {
2209 DBG_PRINT(ERR_DBG, "%s", "MC_DRAM is not ready!");
2212 if (!(val64 & ADAPTER_STATUS_MC_QUEUES_READY)) {
2213 DBG_PRINT(ERR_DBG, "%s", "MC_QUEUES is not ready!");
2216 if (!(val64 & ADAPTER_STATUS_M_PLL_LOCK)) {
2217 DBG_PRINT(ERR_DBG, "%s", "M_PLL is not locked!");
2222 * In PCI 33 mode, the P_PLL is not used, and therefore,
2223 * the the P_PLL_LOCK bit in the adapter_status register will
2226 if (!(val64 & ADAPTER_STATUS_P_PLL_LOCK) &&
2227 sp->device_type == XFRAME_II_DEVICE && mode !=
2229 DBG_PRINT(ERR_DBG, "%s", "P_PLL is not locked!");
2232 if (!((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
2233 ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
2234 DBG_PRINT(ERR_DBG, "%s", "RC_PRC is not QUIESCENT!");
2241 * fix_mac_address - Fix for Mac addr problem on Alpha platforms
2242 * @sp: Pointer to device specifc structure
2244 * New procedure to clear mac address reading problems on Alpha platforms
2248 static void fix_mac_address(struct s2io_nic * sp)
2250 struct XENA_dev_config __iomem *bar0 = sp->bar0;
/* Replay the END_SIGN-terminated fix_mac[] sequence into
 * gpio_control, reading back after each write to flush it.
 */
2254 while (fix_mac[i] != END_SIGN) {
2255 writeq(fix_mac[i++], &bar0->gpio_control);
2257 val64 = readq(&bar0->gpio_control);
2262 * start_nic - Turns the device on
2263 * @nic : device private variable.
2265 * This function actually turns the device on. Before this function is
2266 * called,all Registers are configured from their reset states
2267 * and shared memory is allocated but the NIC is still quiescent. On
2268 * calling this function, the device interrupts are cleared and the NIC is
2269 * literally switched on by writing into the adapter control register.
2271 * SUCCESS on success and -1 on failure.
2274 static int start_nic(struct s2io_nic *nic)
2276 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2277 struct net_device *dev = nic->dev;
2278 register u64 val64 = 0;
2280 struct mac_info *mac_control;
2281 struct config_param *config;
2283 mac_control = &nic->mac_control;
2284 config = &nic->config;
2286 /* PRC Initialization and configuration */
2287 for (i = 0; i < config->rx_ring_num; i++) {
/* Point each PRC at the first Rx block of its ring. */
2288 writeq((u64) mac_control->rings[i].rx_blocks[0].block_dma_addr,
2289 &bar0->prc_rxd0_n[i]);
2291 val64 = readq(&bar0->prc_ctrl_n[i]);
2292 if (nic->rxd_mode == RXD_MODE_1)
2293 val64 |= PRC_CTRL_RC_ENABLED;
2295 val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
2296 if (nic->device_type == XFRAME_II_DEVICE)
2297 val64 |= PRC_CTRL_GROUP_READS;
2298 val64 &= ~PRC_CTRL_RXD_BACKOFF_INTERVAL(0xFFFFFF);
2299 val64 |= PRC_CTRL_RXD_BACKOFF_INTERVAL(0x1000);
2300 writeq(val64, &bar0->prc_ctrl_n[i]);
2303 if (nic->rxd_mode == RXD_MODE_3B) {
2304 /* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
2305 val64 = readq(&bar0->rx_pa_cfg);
2306 val64 |= RX_PA_CFG_IGNORE_L2_ERR;
2307 writeq(val64, &bar0->rx_pa_cfg);
/* Honor the vlan_tag_strip=0 module parameter. */
2310 if (vlan_tag_strip == 0) {
2311 val64 = readq(&bar0->rx_pa_cfg);
2312 val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
2313 writeq(val64, &bar0->rx_pa_cfg);
2314 vlan_strip_flag = 0;
2318 * Enabling MC-RLDRAM. After enabling the device, we timeout
2319 * for around 100ms, which is approximately the time required
2320 * for the device to be ready for operation.
2322 val64 = readq(&bar0->mc_rldram_mrs);
2323 val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
2324 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
2325 val64 = readq(&bar0->mc_rldram_mrs);
2327 msleep(100); /* Delay by around 100 ms. */
2329 /* Enabling ECC Protection. */
/* NOTE(review): the comment says "enabling" but the code clears
 * ADAPTER_ECC_EN — verify intent against the register spec.
 */
2330 val64 = readq(&bar0->adapter_control);
2331 val64 &= ~ADAPTER_ECC_EN;
2332 writeq(val64, &bar0->adapter_control);
2335 * Verify if the device is ready to be enabled, if so enable
2338 val64 = readq(&bar0->adapter_status);
2339 if (!verify_xena_quiescence(nic)) {
2340 DBG_PRINT(ERR_DBG, "%s: device is not ready, ", dev->name);
2341 DBG_PRINT(ERR_DBG, "Adapter status reads: 0x%llx\n",
2342 (unsigned long long) val64);
2347 * With some switches, link might be already up at this point.
2348 * Because of this weird behavior, when we enable laser,
2349 * we may not get link. We need to handle this. We cannot
2350 * figure out which switch is misbehaving. So we are forced to
2351 * make a global change.
2354 /* Enabling Laser. */
2355 val64 = readq(&bar0->adapter_control);
2356 val64 |= ADAPTER_EOI_TX_ON;
2357 writeq(val64, &bar0->adapter_control);
2359 if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
2361 * Dont see link state interrupts initally on some switches,
2362 * so directly scheduling the link state task here.
2364 schedule_work(&nic->set_link_task);
2366 /* SXE-002: Initialize link and activity LED */
2367 subid = nic->pdev->subsystem_device;
2368 if (((subid & 0xFF) >= 0x07) &&
2369 (nic->device_type == XFRAME_I_DEVICE)) {
2370 val64 = readq(&bar0->gpio_control);
2371 val64 |= 0x0000800000000000ULL;
2372 writeq(val64, &bar0->gpio_control);
2373 val64 = 0x0411040400000000ULL;
/* Magic LED register at BAR0 offset 0x2700 (no named field). */
2374 writeq(val64, (void __iomem *)bar0 + 0x2700);
2380 * s2io_txdl_getskb - Get the skb from txdl, unmap and return skb
2382 static struct sk_buff *s2io_txdl_getskb(struct fifo_info *fifo_data, struct \
2383 TxD *txdlp, int get_off)
2385 struct s2io_nic *nic = fifo_data->nic;
2386 struct sk_buff *skb;
2391 if (txds->Host_Control == (u64)(long)fifo_data->ufo_in_band_v) {
2392 pci_unmap_single(nic->pdev, (dma_addr_t)
2393 txds->Buffer_Pointer, sizeof(u64),
2398 skb = (struct sk_buff *) ((unsigned long)
2399 txds->Host_Control);
2401 memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
2404 pci_unmap_single(nic->pdev, (dma_addr_t)
2405 txds->Buffer_Pointer,
2406 skb->len - skb->data_len,
2408 frg_cnt = skb_shinfo(skb)->nr_frags;
2411 for (j = 0; j < frg_cnt; j++, txds++) {
2412 skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
2413 if (!txds->Buffer_Pointer)
2415 pci_unmap_page(nic->pdev, (dma_addr_t)
2416 txds->Buffer_Pointer,
2417 frag->size, PCI_DMA_TODEVICE);
2420 memset(txdlp,0, (sizeof(struct TxD) * fifo_data->max_txds));
2425 * free_tx_buffers - Free all queued Tx buffers
2426 * @nic : device private variable.
2428 * Free all queued Tx buffers.
2429 * Return Value: void
/* NOTE(review): lines are missing from this extract; fragment only. */
2432 static void free_tx_buffers(struct s2io_nic *nic)
2434 struct net_device *dev = nic->dev;
2435 struct sk_buff *skb;
2438 struct mac_info *mac_control;
2439 struct config_param *config;
2442 mac_control = &nic->mac_control;
2443 config = &nic->config;
/* Walk every Tx FIFO under its tx_lock and free any skb still queued */
2445 for (i = 0; i < config->tx_fifo_num; i++) {
2446 unsigned long flags;
2447 spin_lock_irqsave(&mac_control->fifos[i].tx_lock, flags);
2448 for (j = 0; j < config->tx_cfg[i].fifo_len; j++) {
2449 txdp = (struct TxD *) \
2450 mac_control->fifos[i].list_info[j].list_virt_addr;
/* s2io_txdl_getskb() unmaps DMA and returns the skb (if any) */
2451 skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j);
2453 nic->mac_control.stats_info->sw_stat.mem_freed
2460 "%s:forcibly freeing %d skbs on FIFO%d\n",
/* Reset both get and put offsets so the FIFO starts empty */
2462 mac_control->fifos[i].tx_curr_get_info.offset = 0;
2463 mac_control->fifos[i].tx_curr_put_info.offset = 0;
2464 spin_unlock_irqrestore(&mac_control->fifos[i].tx_lock, flags);
2469 * stop_nic - To stop the nic
2470 * @nic ; device private variable.
2472 * This function does exactly the opposite of what the start_nic()
2473 * function does. This function is called to stop the device.
/* NOTE(review): lines are missing from this extract; fragment only. */
2478 static void stop_nic(struct s2io_nic *nic)
2480 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2481 register u64 val64 = 0;
2483 struct mac_info *mac_control;
2484 struct config_param *config;
2486 mac_control = &nic->mac_control;
2487 config = &nic->config;
2489 /* Disable all interrupts */
2490 en_dis_err_alarms(nic, ENA_ALL_INTRS, DISABLE_INTRS);
2491 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
2492 interruptible |= TX_PIC_INTR;
2493 en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
2495 /* Clearing Adapter_En bit of ADAPTER_CONTROL Register */
2496 val64 = readq(&bar0->adapter_control);
2497 val64 &= ~(ADAPTER_CNTL_EN);
2498 writeq(val64, &bar0->adapter_control);
2502 * fill_rx_buffers - Allocates the Rx side skbs
2503 * @ring_info: per ring structure
2504 * @from_card_up: If this is true, we will map the buffer to get
2505 * the dma address for buf0 and buf1 to give it to the card.
2506 * Else we will sync the already mapped buffer to give it to the card.
2508 * The function allocates Rx side skbs and puts the physical
2509 * address of these buffers into the RxD buffer pointers, so that the NIC
2510 * can DMA the received frame into these locations.
2511 * The NIC supports 3 receive modes, viz
2513 * 2. three buffer and
2514 * 3. Five buffer modes.
2515 * Each mode defines how many fragments the received frame will be split
2516 * up into by the NIC. The frame is split into L3 header, L4 Header,
2517 * L4 payload in three buffer mode and in 5 buffer mode, L4 payload itself
2518 * is split into 3 fragments. As of now only single buffer mode is
2521 * SUCCESS on success or an appropriate -ve value on failure.
/* NOTE(review): many lines are missing from this extract (non-contiguous
 * embedded line numbers); comments below describe only visible logic. */
2524 static int fill_rx_buffers(struct ring_info *ring, int from_card_up)
2526 struct sk_buff *skb;
2528 int off, size, block_no, block_no1;
2533 struct RxD_t *first_rxdp = NULL;
2534 u64 Buffer0_ptr = 0, Buffer1_ptr = 0;
2538 struct swStat *stats = &ring->nic->mac_control.stats_info->sw_stat;
/* Number of buffers to replenish: ring capacity minus what is queued */
2540 alloc_cnt = ring->pkt_cnt - ring->rx_bufs_left;
2542 block_no1 = ring->rx_curr_get_info.block_index;
2543 while (alloc_tab < alloc_cnt) {
2544 block_no = ring->rx_curr_put_info.block_index;
2546 off = ring->rx_curr_put_info.offset;
2548 rxdp = ring->rx_blocks[block_no].rxds[off].virt_addr;
2550 rxd_index = off + 1;
2552 rxd_index += (block_no * ring->rxd_count);
/* Put index caught up with get index while descriptor is still in
 * use (Host_Control set) => ring effectively full, stop filling. */
2554 if ((block_no == block_no1) &&
2555 (off == ring->rx_curr_get_info.offset) &&
2556 (rxdp->Host_Control)) {
2557 DBG_PRINT(INTR_DBG, "%s: Get and Put",
2559 DBG_PRINT(INTR_DBG, " info equated\n");
/* Reached the end of a block: advance to the next block (wrap). */
2562 if (off && (off == ring->rxd_count)) {
2563 ring->rx_curr_put_info.block_index++;
2564 if (ring->rx_curr_put_info.block_index ==
2566 ring->rx_curr_put_info.block_index = 0;
2567 block_no = ring->rx_curr_put_info.block_index;
2569 ring->rx_curr_put_info.offset = off;
2570 rxdp = ring->rx_blocks[block_no].block_virt_addr;
2571 DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
2572 ring->dev->name, rxdp);
/* Descriptor still owned by the adapter — nothing to refill here */
2576 if ((rxdp->Control_1 & RXD_OWN_XENA) &&
2577 ((ring->rxd_mode == RXD_MODE_3B) &&
2578 (rxdp->Control_2 & s2BIT(0)))) {
2579 ring->rx_curr_put_info.offset = off;
2582 /* calculate size of skb based on ring mode */
2583 size = ring->mtu + HEADER_ETHERNET_II_802_3_SIZE +
2584 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
2585 if (ring->rxd_mode == RXD_MODE_1)
2586 size += NET_IP_ALIGN;
2588 size = ring->mtu + ALIGN_SIZE + BUF0_LEN + 4;
2591 skb = dev_alloc_skb(size);
2593 DBG_PRINT(INFO_DBG, "%s: Out of ", ring->dev->name);
2594 DBG_PRINT(INFO_DBG, "memory to allocate SKBs\n");
/* On allocation failure, hand the first filled RxD to the NIC
 * before bailing so already-queued buffers are usable. */
2597 first_rxdp->Control_1 |= RXD_OWN_XENA;
2599 stats->mem_alloc_fail_cnt++;
2603 stats->mem_allocated += skb->truesize;
2605 if (ring->rxd_mode == RXD_MODE_1) {
2606 /* 1 buffer mode - normal operation mode */
2607 rxdp1 = (struct RxD1*)rxdp;
2608 memset(rxdp, 0, sizeof(struct RxD1));
2609 skb_reserve(skb, NET_IP_ALIGN);
2610 rxdp1->Buffer0_ptr = pci_map_single
2611 (ring->pdev, skb->data, size - NET_IP_ALIGN,
2612 PCI_DMA_FROMDEVICE);
2613 if(pci_dma_mapping_error(rxdp1->Buffer0_ptr))
2614 goto pci_map_failed;
2617 SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
2618 rxdp->Host_Control = (unsigned long) (skb);
2619 } else if (ring->rxd_mode == RXD_MODE_3B) {
2622 * 2 buffer mode provides 128
2623 * byte aligned receive buffers.
2626 rxdp3 = (struct RxD3*)rxdp;
2627 /* save buffer pointers to avoid frequent dma mapping */
2628 Buffer0_ptr = rxdp3->Buffer0_ptr;
2629 Buffer1_ptr = rxdp3->Buffer1_ptr;
2630 memset(rxdp, 0, sizeof(struct RxD3));
2631 /* restore the buffer pointers for dma sync*/
2632 rxdp3->Buffer0_ptr = Buffer0_ptr;
2633 rxdp3->Buffer1_ptr = Buffer1_ptr;
2635 ba = &ring->ba[block_no][off];
2636 skb_reserve(skb, BUF0_LEN);
2637 tmp = (u64)(unsigned long) skb->data;
2640 skb->data = (void *) (unsigned long)tmp;
2641 skb_reset_tail_pointer(skb);
/* Buffer0 holds the 128-byte aligned header area (ba_0) */
2644 rxdp3->Buffer0_ptr =
2645 pci_map_single(ring->pdev, ba->ba_0,
2646 BUF0_LEN, PCI_DMA_FROMDEVICE);
2647 if (pci_dma_mapping_error(rxdp3->Buffer0_ptr))
2648 goto pci_map_failed;
/* Already mapped (not from_card_up): just sync for device */
2650 pci_dma_sync_single_for_device(ring->pdev,
2651 (dma_addr_t) rxdp3->Buffer0_ptr,
2652 BUF0_LEN, PCI_DMA_FROMDEVICE);
2654 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
2655 if (ring->rxd_mode == RXD_MODE_3B) {
2656 /* Two buffer mode */
2659 * Buffer2 will have L3/L4 header plus
2662 rxdp3->Buffer2_ptr = pci_map_single
2663 (ring->pdev, skb->data, ring->mtu + 4,
2664 PCI_DMA_FROMDEVICE);
2666 if (pci_dma_mapping_error(rxdp3->Buffer2_ptr))
2667 goto pci_map_failed;
2670 rxdp3->Buffer1_ptr =
2671 pci_map_single(ring->pdev,
2673 PCI_DMA_FROMDEVICE);
/* Buffer1 mapping failed: unmap what was mapped above
 * before jumping to the common failure path. */
2675 if (pci_dma_mapping_error
2676 (rxdp3->Buffer1_ptr)) {
2679 (dma_addr_t)(unsigned long)
2682 PCI_DMA_FROMDEVICE);
2683 goto pci_map_failed;
2686 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
2687 rxdp->Control_2 |= SET_BUFFER2_SIZE_3
2690 rxdp->Control_2 |= s2BIT(0);
2691 rxdp->Host_Control = (unsigned long) (skb);
/* Hand ownership to the adapter except on rxsync boundaries,
 * where the first descriptor is deferred (see below). */
2693 if (alloc_tab & ((1 << rxsync_frequency) - 1))
2694 rxdp->Control_1 |= RXD_OWN_XENA;
2696 if (off == (ring->rxd_count + 1))
2698 ring->rx_curr_put_info.offset = off;
2700 rxdp->Control_2 |= SET_RXD_MARKER;
2701 if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) {
2704 first_rxdp->Control_1 |= RXD_OWN_XENA;
2708 ring->rx_bufs_left += 1;
2713 /* Transfer ownership of first descriptor to adapter just before
2714 * exiting. Before that, use memory barrier so that ownership
2715 * and other fields are seen by adapter correctly.
2719 first_rxdp->Control_1 |= RXD_OWN_XENA;
/* pci_map_failed: count the failure and free the unused skb */
2724 stats->pci_map_fail_cnt++;
2725 stats->mem_freed += skb->truesize;
2726 dev_kfree_skb_irq(skb);
/*
 * free_rxd_blk - free all skbs and DMA mappings of one Rx block.
 * @sp: device private structure; @ring_no: ring index; @blk: block index.
 * NOTE(review): lines are missing from this extract; fragment only.
 */
2730 static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk)
2732 struct net_device *dev = sp->dev;
2734 struct sk_buff *skb;
2736 struct mac_info *mac_control;
2741 mac_control = &sp->mac_control;
2742 for (j = 0 ; j < rxd_count[sp->rxd_mode]; j++) {
2743 rxdp = mac_control->rings[ring_no].
2744 rx_blocks[blk].rxds[j].virt_addr;
/* Host_Control carries the skb pointer placed by fill_rx_buffers() */
2745 skb = (struct sk_buff *)
2746 ((unsigned long) rxdp->Host_Control);
2750 if (sp->rxd_mode == RXD_MODE_1) {
2751 rxdp1 = (struct RxD1*)rxdp;
/* 1-buffer mode: single mapping sized MTU + L2 headers */
2752 pci_unmap_single(sp->pdev, (dma_addr_t)
2755 HEADER_ETHERNET_II_802_3_SIZE
2756 + HEADER_802_2_SIZE +
2758 PCI_DMA_FROMDEVICE);
2759 memset(rxdp, 0, sizeof(struct RxD1));
2760 } else if(sp->rxd_mode == RXD_MODE_3B) {
2761 rxdp3 = (struct RxD3*)rxdp;
2762 ba = &mac_control->rings[ring_no].
/* 3B mode: unmap all three buffer pointers of the RxD3 */
2764 pci_unmap_single(sp->pdev, (dma_addr_t)
2767 PCI_DMA_FROMDEVICE);
2768 pci_unmap_single(sp->pdev, (dma_addr_t)
2771 PCI_DMA_FROMDEVICE);
2772 pci_unmap_single(sp->pdev, (dma_addr_t)
2775 PCI_DMA_FROMDEVICE);
2776 memset(rxdp, 0, sizeof(struct RxD3));
2778 sp->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
2780 mac_control->rings[ring_no].rx_bufs_left -= 1;
2785 * free_rx_buffers - Frees all Rx buffers
2786 * @sp: device private variable.
2788 * This function will free all Rx buffers allocated by host.
/* NOTE(review): lines are missing from this extract; fragment only. */
2793 static void free_rx_buffers(struct s2io_nic *sp)
2795 struct net_device *dev = sp->dev;
2796 int i, blk = 0, buf_cnt = 0;
2797 struct mac_info *mac_control;
2798 struct config_param *config;
2800 mac_control = &sp->mac_control;
2801 config = &sp->config;
/* Free every block of every ring, then reset the ring bookkeeping */
2803 for (i = 0; i < config->rx_ring_num; i++) {
2804 for (blk = 0; blk < rx_ring_sz[i]; blk++)
2805 free_rxd_blk(sp,i,blk);
2807 mac_control->rings[i].rx_curr_put_info.block_index = 0;
2808 mac_control->rings[i].rx_curr_get_info.block_index = 0;
2809 mac_control->rings[i].rx_curr_put_info.offset = 0;
2810 mac_control->rings[i].rx_curr_get_info.offset = 0;
2811 mac_control->rings[i].rx_bufs_left = 0;
2812 DBG_PRINT(INIT_DBG, "%s:Freed 0x%x Rx Buffers on ring%d\n",
2813 dev->name, buf_cnt, i);
/*
 * s2io_chk_rx_buffers - replenish a ring's Rx buffers, logging on OOM.
 * NOTE(review): tail of the function is missing from this extract.
 */
2817 static int s2io_chk_rx_buffers(struct ring_info *ring)
2819 if (fill_rx_buffers(ring, 0) == -ENOMEM) {
2820 DBG_PRINT(INFO_DBG, "%s:Out of memory", ring->dev->name);
2821 DBG_PRINT(INFO_DBG, " in Rx Intr!!\n");
2827 * s2io_poll - Rx interrupt handler for NAPI support
2828 * @napi : pointer to the napi structure.
2829 * @budget : The number of packets that were budgeted to be processed
2830 * during one pass through the 'Poll" function.
2832 * Comes into picture only if NAPI support has been incorporated. It does
2833 * the same thing that rx_intr_handler does, but not in a interrupt context
2834 * also It will process only a given number of packets.
2836 * 0 on success and 1 if there are No Rx packets to be processed.
/* NOTE(review): lines are missing from this extract; fragment only. */
2839 static int s2io_poll_msix(struct napi_struct *napi, int budget)
2841 struct ring_info *ring = container_of(napi, struct ring_info, napi);
2842 struct net_device *dev = ring->dev;
2843 struct config_param *config;
2844 struct mac_info *mac_control;
2845 int pkts_processed = 0;
2846 u8 __iomem *addr = NULL;
2848 struct s2io_nic *nic = dev->priv;
2849 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2850 int budget_org = budget;
2852 config = &nic->config;
2853 mac_control = &nic->mac_control;
2855 if (unlikely(!is_s2io_card_up(nic)))
/* Process up to 'budget' packets on this ring, then refill buffers */
2858 pkts_processed = rx_intr_handler(ring, budget);
2859 s2io_chk_rx_buffers(ring);
/* Under budget => ring drained: complete NAPI and unmask the vector */
2861 if (pkts_processed < budget_org) {
2862 netif_rx_complete(dev, napi);
2863 /*Re Enable MSI-Rx Vector*/
2864 addr = (u8 __iomem *)&bar0->xmsi_mask_reg;
2865 addr += 7 - ring->ring_no;
2866 val8 = (ring->ring_no == 0) ? 0x3f : 0xbf;
2870 return pkts_processed;
/*
 * s2io_poll_inta - NAPI poll handler for INTA (legacy interrupt) mode.
 * Processes all Rx rings under a shared budget, then re-enables Rx
 * interrupts once the budget is not exhausted.
 * NOTE(review): lines are missing from this extract; fragment only.
 */
2872 static int s2io_poll_inta(struct napi_struct *napi, int budget)
2874 struct s2io_nic *nic = container_of(napi, struct s2io_nic, napi);
2875 struct ring_info *ring;
2876 struct net_device *dev = nic->dev;
2877 struct config_param *config;
2878 struct mac_info *mac_control;
2879 int pkts_processed = 0;
2880 int ring_pkts_processed, i;
2881 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2882 int budget_org = budget;
2884 config = &nic->config;
2885 mac_control = &nic->mac_control;
2887 if (unlikely(!is_s2io_card_up(nic)))
/* Shared budget: each ring consumes what it processed */
2890 for (i = 0; i < config->rx_ring_num; i++) {
2891 ring = &mac_control->rings[i];
2892 ring_pkts_processed = rx_intr_handler(ring, budget);
2893 s2io_chk_rx_buffers(ring);
2894 pkts_processed += ring_pkts_processed;
2895 budget -= ring_pkts_processed;
2899 if (pkts_processed < budget_org) {
2900 netif_rx_complete(dev, napi);
2901 /* Re enable the Rx interrupts for the ring */
2902 writeq(0, &bar0->rx_traffic_mask);
2903 readl(&bar0->rx_traffic_mask);
2905 return pkts_processed;
2908 #ifdef CONFIG_NET_POLL_CONTROLLER
2910 * s2io_netpoll - netpoll event handler entry point
2911 * @dev : pointer to the device structure.
2913 * This function will be called by upper layer to check for events on the
2914 * interface in situations where interrupts are disabled. It is used for
2915 * specific in-kernel networking tasks, such as remote consoles and kernel
2916 * debugging over the network (example netdump in RedHat).
/* NOTE(review): lines are missing from this extract; fragment only. */
2918 static void s2io_netpoll(struct net_device *dev)
2920 struct s2io_nic *nic = dev->priv;
2921 struct mac_info *mac_control;
2922 struct config_param *config;
2923 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2924 u64 val64 = 0xFFFFFFFFFFFFFFFFULL;
2927 if (pci_channel_offline(nic->pdev))
/* Runs with the device IRQ disabled for the duration of the poll */
2930 disable_irq(dev->irq);
2932 mac_control = &nic->mac_control;
2933 config = &nic->config;
/* Ack/clear all pending Rx and Tx traffic interrupts */
2935 writeq(val64, &bar0->rx_traffic_int);
2936 writeq(val64, &bar0->tx_traffic_int);
2938 /* we need to free up the transmitted skbufs or else netpoll will
2939 * run out of skbs and will fail and eventually netpoll application such
2940 * as netdump will fail.
2942 for (i = 0; i < config->tx_fifo_num; i++)
2943 tx_intr_handler(&mac_control->fifos[i]);
2945 /* check for received packet and indicate up to network */
2946 for (i = 0; i < config->rx_ring_num; i++)
2947 rx_intr_handler(&mac_control->rings[i], 0);
/* Replenish Rx buffers for every ring; log if allocation fails */
2949 for (i = 0; i < config->rx_ring_num; i++) {
2950 if (fill_rx_buffers(&mac_control->rings[i], 0) == -ENOMEM) {
2951 DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name);
2952 DBG_PRINT(INFO_DBG, " in Rx Netpoll!!\n");
2956 enable_irq(dev->irq);
2962 * rx_intr_handler - Rx interrupt handler
2963 * @ring_info: per ring structure.
2964 * @budget: budget for napi processing.
2966 * If the interrupt is because of a received frame or if the
2967 * receive ring contains fresh as yet un-processed frames,this function is
2968 * called. It picks out the RxD at which place the last Rx processing had
2969 * stopped and sends the skb to the OSM's Rx handler and then increments
2972 * No. of napi packets processed.
/* NOTE(review): many lines are missing from this extract; fragment only. */
2974 static int rx_intr_handler(struct ring_info *ring_data, int budget)
2976 int get_block, put_block;
2977 struct rx_curr_get_info get_info, put_info;
2979 struct sk_buff *skb;
2980 int pkt_cnt = 0, napi_pkts = 0;
2985 get_info = ring_data->rx_curr_get_info;
2986 get_block = get_info.block_index;
2987 memcpy(&put_info, &ring_data->rx_curr_put_info, sizeof(put_info));
2988 put_block = put_info.block_index;
2989 rxdp = ring_data->rx_blocks[get_block].rxds[get_info.offset].virt_addr;
/* Walk descriptors handed back to the host (RXD_IS_UP2DT) */
2991 while (RXD_IS_UP2DT(rxdp)) {
2993 * If your are next to put index then it's
2994 * FIFO full condition
2996 if ((get_block == put_block) &&
2997 (get_info.offset + 1) == put_info.offset) {
2998 DBG_PRINT(INTR_DBG, "%s: Ring Full\n",
2999 ring_data->dev->name);
3002 skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control);
3004 DBG_PRINT(ERR_DBG, "%s: The skb is ",
3005 ring_data->dev->name);
3006 DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
/* Unmap buffers according to the ring's descriptor mode */
3009 if (ring_data->rxd_mode == RXD_MODE_1) {
3010 rxdp1 = (struct RxD1*)rxdp;
3011 pci_unmap_single(ring_data->pdev, (dma_addr_t)
3014 HEADER_ETHERNET_II_802_3_SIZE +
3017 PCI_DMA_FROMDEVICE);
3018 } else if (ring_data->rxd_mode == RXD_MODE_3B) {
3019 rxdp3 = (struct RxD3*)rxdp;
/* Buffer0 stays mapped across refills: sync it for CPU access */
3020 pci_dma_sync_single_for_cpu(ring_data->pdev, (dma_addr_t)
3022 BUF0_LEN, PCI_DMA_FROMDEVICE);
3023 pci_unmap_single(ring_data->pdev, (dma_addr_t)
3026 PCI_DMA_FROMDEVICE);
3028 prefetch(skb->data);
3029 rx_osm_handler(ring_data, rxdp);
/* Advance the get pointer, wrapping offset and block as needed */
3031 ring_data->rx_curr_get_info.offset = get_info.offset;
3032 rxdp = ring_data->rx_blocks[get_block].
3033 rxds[get_info.offset].virt_addr;
3034 if (get_info.offset == rxd_count[ring_data->rxd_mode]) {
3035 get_info.offset = 0;
3036 ring_data->rx_curr_get_info.offset = get_info.offset;
3038 if (get_block == ring_data->block_count)
3040 ring_data->rx_curr_get_info.block_index = get_block;
3041 rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
3044 if (ring_data->nic->config.napi) {
3051 if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
3054 if (ring_data->lro) {
3055 /* Clear all LRO sessions before exiting */
3056 for (i=0; i<MAX_LRO_SESSIONS; i++) {
3057 struct lro *lro = &ring_data->lro0_n[i];
3059 update_L3L4_header(ring_data->nic, lro);
3060 queue_rx_frame(lro->parent, lro->vlan_tag);
3061 clear_lro_session(lro);
3069 * tx_intr_handler - Transmit interrupt handler
3070 * @nic : device private variable
3072 * If an interrupt was raised to indicate DMA complete of the
3073 * Tx packet, this function is called. It identifies the last TxD
3074 * whose buffer was freed and frees all skbs whose data have already
3075 * DMA'ed into the NICs internal memory.
/* NOTE(review): lines are missing from this extract; fragment only. */
3080 static void tx_intr_handler(struct fifo_info *fifo_data)
3082 struct s2io_nic *nic = fifo_data->nic;
3083 struct tx_curr_get_info get_info, put_info;
3084 struct sk_buff *skb = NULL;
3087 unsigned long flags = 0;
/* trylock: skip this pass if the fifo lock is contended (no blocking
 * in interrupt context) */
3090 if (!spin_trylock_irqsave(&fifo_data->tx_lock, flags))
3093 get_info = fifo_data->tx_curr_get_info;
3094 memcpy(&put_info, &fifo_data->tx_curr_put_info, sizeof(put_info));
3095 txdlp = (struct TxD *) fifo_data->list_info[get_info.offset].
/* Walk completed TxDs: not owned by NIC, not caught up to put, and
 * carrying an skb pointer in Host_Control */
3097 while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
3098 (get_info.offset != put_info.offset) &&
3099 (txdlp->Host_Control)) {
3100 /* Check for TxD errors */
3101 if (txdlp->Control_1 & TXD_T_CODE) {
3102 unsigned long long err;
3103 err = txdlp->Control_1 & TXD_T_CODE;
3105 nic->mac_control.stats_info->sw_stat.
3109 /* update t_code statistics */
3110 err_mask = err >> 48;
3113 nic->mac_control.stats_info->sw_stat.
3118 nic->mac_control.stats_info->sw_stat.
3119 tx_desc_abort_cnt++;
3123 nic->mac_control.stats_info->sw_stat.
3124 tx_parity_err_cnt++;
3128 nic->mac_control.stats_info->sw_stat.
3133 nic->mac_control.stats_info->sw_stat.
3134 tx_list_proc_err_cnt++;
/* Unmap the descriptor list and recover the skb for freeing */
3139 skb = s2io_txdl_getskb(fifo_data, txdlp, get_info.offset);
3141 spin_unlock_irqrestore(&fifo_data->tx_lock, flags);
3142 DBG_PRINT(ERR_DBG, "%s: Null skb ",
3144 DBG_PRINT(ERR_DBG, "in Tx Free Intr\n");
3149 /* Updating the statistics block */
3150 nic->stats.tx_bytes += skb->len;
3151 nic->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
3152 dev_kfree_skb_irq(skb);
/* Advance get offset with wrap at fifo_len + 1 */
3155 if (get_info.offset == get_info.fifo_len + 1)
3156 get_info.offset = 0;
3157 txdlp = (struct TxD *) fifo_data->list_info
3158 [get_info.offset].list_virt_addr;
3159 fifo_data->tx_curr_get_info.offset =
/* Wake the (possibly stopped) Tx queue now that space was freed */
3163 s2io_wake_tx_queue(fifo_data, pkt_cnt, nic->config.multiq);
3165 spin_unlock_irqrestore(&fifo_data->tx_lock, flags);
3169 * s2io_mdio_write - Function to write in to MDIO registers
3170 * @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
3171 * @addr : address value
3172 * @value : data value
3173 * @dev : pointer to net_device structure
3175 * This function is used to write values to the MDIO registers
/* NOTE(review): lines are missing from this extract; fragment only.
 * Visible sequence is the clause-45 style address/write/read phases,
 * each started via MDIO_CTRL_START_TRANS(0xE). */
3178 static void s2io_mdio_write(u32 mmd_type, u64 addr, u16 value, struct net_device *dev)
3181 struct s2io_nic *sp = dev->priv;
3182 struct XENA_dev_config __iomem *bar0 = sp->bar0;
3184 //address transaction
3185 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3186 | MDIO_MMD_DEV_ADDR(mmd_type)
3187 | MDIO_MMS_PRT_ADDR(0x0);
3188 writeq(val64, &bar0->mdio_control);
3189 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3190 writeq(val64, &bar0->mdio_control);
/* Data (write) transaction carrying the 16-bit value */
3195 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3196 | MDIO_MMD_DEV_ADDR(mmd_type)
3197 | MDIO_MMS_PRT_ADDR(0x0)
3198 | MDIO_MDIO_DATA(value)
3199 | MDIO_OP(MDIO_OP_WRITE_TRANS);
3200 writeq(val64, &bar0->mdio_control);
3201 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3202 writeq(val64, &bar0->mdio_control);
/* Read-back transaction on the same address */
3206 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3207 | MDIO_MMD_DEV_ADDR(mmd_type)
3208 | MDIO_MMS_PRT_ADDR(0x0)
3209 | MDIO_OP(MDIO_OP_READ_TRANS);
3210 writeq(val64, &bar0->mdio_control);
3211 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3212 writeq(val64, &bar0->mdio_control);
3218 * s2io_mdio_read - Function to write in to MDIO registers
3219 * @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
3220 * @addr : address value
3221 * @dev : pointer to net_device structure
3223 * This function is used to read values to the MDIO registers
/* NOTE(review): lines are missing from this extract; fragment only. */
3226 static u64 s2io_mdio_read(u32 mmd_type, u64 addr, struct net_device *dev)
3230 struct s2io_nic *sp = dev->priv;
3231 struct XENA_dev_config __iomem *bar0 = sp->bar0;
3233 /* address transaction */
3234 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3235 | MDIO_MMD_DEV_ADDR(mmd_type)
3236 | MDIO_MMS_PRT_ADDR(0x0);
3237 writeq(val64, &bar0->mdio_control);
3238 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3239 writeq(val64, &bar0->mdio_control);
3242 /* Data transaction */
3244 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3245 | MDIO_MMD_DEV_ADDR(mmd_type)
3246 | MDIO_MMS_PRT_ADDR(0x0)
3247 | MDIO_OP(MDIO_OP_READ_TRANS);
3248 writeq(val64, &bar0->mdio_control);
3249 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3250 writeq(val64, &bar0->mdio_control);
3253 /* Read the value from regs */
/* The 16-bit data lives in bits 31:16 of mdio_control */
3254 rval64 = readq(&bar0->mdio_control);
3255 rval64 = rval64 & 0xFFFF0000;
3256 rval64 = rval64 >> 16;
3260 * s2io_chk_xpak_counter - Function to check the status of the xpak counters
3261 * @counter : couter value to be updated
3262 * @flag : flag to indicate the status
3263 * @type : counter type
3265 * This function is to check the status of the xpak counters value
/* NOTE(review): lines are missing from this extract; fragment only.
 * Visible logic maintains a 2-bit state per counter index inside
 * *regs_stat and prints a warning per counter type. */
3269 static void s2io_chk_xpak_counter(u64 *counter, u64 * regs_stat, u32 index, u16 flag, u16 type)
3274 for(i = 0; i <index; i++)
3279 *counter = *counter + 1;
3280 val64 = *regs_stat & mask;
3281 val64 = val64 >> (index * 0x2);
3288 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3289 "service. Excessive temperatures may "
3290 "result in premature transceiver "
3294 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3295 "service Excessive bias currents may "
3296 "indicate imminent laser diode "
3300 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3301 "service Excessive laser output "
3302 "power may saturate far-end "
3306 DBG_PRINT(ERR_DBG, "Incorrect XPAK Alarm "
/* Write back the updated 2-bit state at this counter's position */
3311 val64 = val64 << (index * 0x2);
3312 *regs_stat = (*regs_stat & (~mask)) | (val64);
/* Flag not set: clear this counter's state bits */
3315 *regs_stat = *regs_stat & (~mask);
3320 * s2io_updt_xpak_counter - Function to update the xpak counters
3321 * @dev : pointer to net_device struct
3323 * This function is to upate the status of the xpak counters value
/* NOTE(review): lines are missing from this extract; fragment only. */
3326 static void s2io_updt_xpak_counter(struct net_device *dev)
3334 struct s2io_nic *sp = dev->priv;
3335 struct stat_block *stat_info = sp->mac_control.stats_info;
3337 /* Check the communication with the MDIO slave */
3340 val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
/* All-ones or all-zeros read indicates the MDIO slave is unreachable */
3341 if((val64 == 0xFFFF) || (val64 == 0x0000))
3343 DBG_PRINT(ERR_DBG, "ERR: MDIO slave access failed - "
3344 "Returned %llx\n", (unsigned long long)val64);
3348 /* Check for the expecte value of 2040 at PMA address 0x0000 */
3351 DBG_PRINT(ERR_DBG, "Incorrect value at PMA address 0x0000 - ");
3352 DBG_PRINT(ERR_DBG, "Returned: %llx- Expected: 0x2040\n",
3353 (unsigned long long)val64);
3357 /* Loading the DOM register to MDIO register */
3359 s2io_mdio_write(MDIO_MMD_PMA_DEV_ADDR, addr, val16, dev);
3360 val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
3362 /* Reading the Alarm flags */
3365 val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
/* Bit 7: transceiver temperature high alarm (tracked via chk_xpak) */
3367 flag = CHECKBIT(val64, 0x7);
3369 s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_transceiver_temp_high,
3370 &stat_info->xpak_stat.xpak_regs_stat,
3373 if(CHECKBIT(val64, 0x6))
3374 stat_info->xpak_stat.alarm_transceiver_temp_low++;
/* Bit 3: laser bias current high alarm */
3376 flag = CHECKBIT(val64, 0x3);
3378 s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_bias_current_high,
3379 &stat_info->xpak_stat.xpak_regs_stat,
3382 if(CHECKBIT(val64, 0x2))
3383 stat_info->xpak_stat.alarm_laser_bias_current_low++;
/* Bit 1: laser output power high alarm */
3385 flag = CHECKBIT(val64, 0x1);
3387 s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_output_power_high,
3388 &stat_info->xpak_stat.xpak_regs_stat,
3391 if(CHECKBIT(val64, 0x0))
3392 stat_info->xpak_stat.alarm_laser_output_power_low++;
3394 /* Reading the Warning flags */
3397 val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
/* Warnings are simple counters, one per bit of the warning register */
3399 if(CHECKBIT(val64, 0x7))
3400 stat_info->xpak_stat.warn_transceiver_temp_high++;
3402 if(CHECKBIT(val64, 0x6))
3403 stat_info->xpak_stat.warn_transceiver_temp_low++;
3405 if(CHECKBIT(val64, 0x3))
3406 stat_info->xpak_stat.warn_laser_bias_current_high++;
3408 if(CHECKBIT(val64, 0x2))
3409 stat_info->xpak_stat.warn_laser_bias_current_low++;
3411 if(CHECKBIT(val64, 0x1))
3412 stat_info->xpak_stat.warn_laser_output_power_high++;
3414 if(CHECKBIT(val64, 0x0))
3415 stat_info->xpak_stat.warn_laser_output_power_low++;
3419 * wait_for_cmd_complete - waits for a command to complete.
3420 * @sp : private member of the device structure, which is a pointer to the
3421 * s2io_nic structure.
3422 * Description: Function that waits for a command to Write into RMAC
3423 * ADDR DATA registers to be completed and returns either success or
3424 * error depending on whether the command was complete or not.
3426 * SUCCESS on success and FAILURE on failure.
/* NOTE(review): the polling-loop tail of this function is missing from
 * this extract; only the parameter validation and the first reads of
 * the busy bit are visible. */
3429 static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit,
3432 int ret = FAILURE, cnt = 0, delay = 1;
/* Reject any bit_state other than the two supported polarities */
3435 if ((bit_state != S2IO_BIT_RESET) && (bit_state != S2IO_BIT_SET))
3439 val64 = readq(addr);
3440 if (bit_state == S2IO_BIT_RESET) {
3441 if (!(val64 & busy_bit)) {
3446 if (!(val64 & busy_bit)) {
3463 * check_pci_device_id - Checks if the device id is supported
3465 * Description: Function to check if the pci device id is supported by driver.
3466 * Return value: Actual device id if supported else PCI_ANY_ID
/* NOTE(review): the switch header and default case are missing from
 * this extract; visible cases map Herc IDs to XFRAME_II_DEVICE and
 * S2IO (Xena) IDs to XFRAME_I_DEVICE. */
3468 static u16 check_pci_device_id(u16 id)
3471 case PCI_DEVICE_ID_HERC_WIN:
3472 case PCI_DEVICE_ID_HERC_UNI:
3473 return XFRAME_II_DEVICE;
3474 case PCI_DEVICE_ID_S2IO_UNI:
3475 case PCI_DEVICE_ID_S2IO_WIN:
3476 return XFRAME_I_DEVICE;
3483 * s2io_reset - Resets the card.
3484 * @sp : private member of the device structure.
3485 * Description: Function to Reset the card. This function then also
3486 * restores the previously saved PCI configuration space registers as
3487 * the card reset also resets the configuration space.
/* NOTE(review): lines are missing from this extract; fragment only. */
3492 static void s2io_reset(struct s2io_nic * sp)
3494 struct XENA_dev_config __iomem *bar0 = sp->bar0;
3499 unsigned long long up_cnt, down_cnt, up_time, down_time, reset_cnt;
3500 unsigned long long mem_alloc_cnt, mem_free_cnt, watchdog_cnt;
3502 DBG_PRINT(INIT_DBG,"%s - Resetting XFrame card %s\n",
3503 __FUNCTION__, sp->dev->name);
3505 /* Back up the PCI-X CMD reg, dont want to lose MMRBC, OST settings */
3506 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd));
/* Issue the software reset via the sw_reset register */
3508 val64 = SW_RESET_ALL;
3509 writeq(val64, &bar0->sw_reset);
3510 if (strstr(sp->product_name, "CX4")) {
/* Re-read the device id until config space is accessible again */
3514 for (i = 0; i < S2IO_MAX_PCI_CONFIG_SPACE_REINIT; i++) {
3516 /* Restore the PCI state saved during initialization. */
3517 pci_restore_state(sp->pdev);
3518 pci_read_config_word(sp->pdev, 0x2, &val16);
3519 if (check_pci_device_id(val16) != (u16)PCI_ANY_ID)
3524 if (check_pci_device_id(val16) == (u16)PCI_ANY_ID) {
3525 DBG_PRINT(ERR_DBG,"%s SW_Reset failed!\n", __FUNCTION__);
3528 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, pci_cmd);
3532 /* Set swapper to enable I/O register access */
3533 s2io_set_swapper(sp);
3535 /* restore mac_addr entries */
3536 do_s2io_restore_unicast_mc(sp);
3538 /* Restore the MSIX table entries from local variables */
3539 restore_xmsi_data(sp);
3541 /* Clear certain PCI/PCI-X fields after reset */
3542 if (sp->device_type == XFRAME_II_DEVICE) {
3543 /* Clear "detected parity error" bit */
3544 pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000);
3546 /* Clearing PCIX Ecc status register */
3547 pci_write_config_dword(sp->pdev, 0x68, 0x7C);
3549 /* Clearing PCI_STATUS error reflected here */
3550 writeq(s2BIT(62), &bar0->txpic_int_reg);
3553 /* Reset device statistics maintained by OS */
3554 memset(&sp->stats, 0, sizeof (struct net_device_stats));
/* Snapshot the counters that must survive the stats-block wipe */
3556 up_cnt = sp->mac_control.stats_info->sw_stat.link_up_cnt;
3557 down_cnt = sp->mac_control.stats_info->sw_stat.link_down_cnt;
3558 up_time = sp->mac_control.stats_info->sw_stat.link_up_time;
3559 down_time = sp->mac_control.stats_info->sw_stat.link_down_time;
3560 reset_cnt = sp->mac_control.stats_info->sw_stat.soft_reset_cnt;
3561 mem_alloc_cnt = sp->mac_control.stats_info->sw_stat.mem_allocated;
3562 mem_free_cnt = sp->mac_control.stats_info->sw_stat.mem_freed;
3563 watchdog_cnt = sp->mac_control.stats_info->sw_stat.watchdog_timer_cnt;
3564 /* save link up/down time/cnt, reset/memory/watchdog cnt */
3565 memset(sp->mac_control.stats_info, 0, sizeof(struct stat_block));
3566 /* restore link up/down time/cnt, reset/memory/watchdog cnt */
3567 sp->mac_control.stats_info->sw_stat.link_up_cnt = up_cnt;
3568 sp->mac_control.stats_info->sw_stat.link_down_cnt = down_cnt;
3569 sp->mac_control.stats_info->sw_stat.link_up_time = up_time;
3570 sp->mac_control.stats_info->sw_stat.link_down_time = down_time;
3571 sp->mac_control.stats_info->sw_stat.soft_reset_cnt = reset_cnt;
3572 sp->mac_control.stats_info->sw_stat.mem_allocated = mem_alloc_cnt;
3573 sp->mac_control.stats_info->sw_stat.mem_freed = mem_free_cnt;
3574 sp->mac_control.stats_info->sw_stat.watchdog_timer_cnt = watchdog_cnt;
3576 /* SXE-002: Configure link and activity LED to turn it off */
3577 subid = sp->pdev->subsystem_device;
3578 if (((subid & 0xFF) >= 0x07) &&
3579 (sp->device_type == XFRAME_I_DEVICE)) {
3580 val64 = readq(&bar0->gpio_control);
3581 val64 |= 0x0000800000000000ULL;
3582 writeq(val64, &bar0->gpio_control);
3583 val64 = 0x0411040400000000ULL;
3584 writeq(val64, (void __iomem *)bar0 + 0x2700);
3588 * Clear spurious ECC interrupts that would have occured on
3589 * XFRAME II cards after reset.
3591 if (sp->device_type == XFRAME_II_DEVICE) {
3592 val64 = readq(&bar0->pcc_err_reg);
3593 writeq(val64, &bar0->pcc_err_reg);
3596 sp->device_enabled_once = FALSE;
3600 * s2io_set_swapper - to set the swapper control on the card
3601 * @sp : private member of the device structure,
3602 * pointer to the s2io_nic structure.
3603 * Description: Function to set the swapper control on the card
3604 * correctly depending on the 'endianness' of the system.
3606 * SUCCESS on success and FAILURE on failure.
3609 static int s2io_set_swapper(struct s2io_nic * sp)
3611 struct net_device *dev = sp->dev;
3612 struct XENA_dev_config __iomem *bar0 = sp->bar0;
3613 u64 val64, valt, valr;
3616 * Set proper endian settings and verify the same by reading
3617 * the PIF Feed-back register.
3620 val64 = readq(&bar0->pif_rd_swapper_fb);
3621 if (val64 != 0x0123456789ABCDEFULL) {
3623 u64 value[] = { 0xC30000C3C30000C3ULL, /* FE=1, SE=1 */
3624 0x8100008181000081ULL, /* FE=1, SE=0 */
3625 0x4200004242000042ULL, /* FE=0, SE=1 */
3626 0}; /* FE=0, SE=0 */
3629 writeq(value[i], &bar0->swapper_ctrl);
3630 val64 = readq(&bar0->pif_rd_swapper_fb);
3631 if (val64 == 0x0123456789ABCDEFULL)
3636 DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
3638 DBG_PRINT(ERR_DBG, "feedback read %llx\n",
3639 (unsigned long long) val64);
3644 valr = readq(&bar0->swapper_ctrl);
3647 valt = 0x0123456789ABCDEFULL;
3648 writeq(valt, &bar0->xmsi_address);
3649 val64 = readq(&bar0->xmsi_address);
3653 u64 value[] = { 0x00C3C30000C3C300ULL, /* FE=1, SE=1 */
3654 0x0081810000818100ULL, /* FE=1, SE=0 */
3655 0x0042420000424200ULL, /* FE=0, SE=1 */
3656 0}; /* FE=0, SE=0 */
3659 writeq((value[i] | valr), &bar0->swapper_ctrl);
3660 writeq(valt, &bar0->xmsi_address);
3661 val64 = readq(&bar0->xmsi_address);
3667 unsigned long long x = val64;
3668 DBG_PRINT(ERR_DBG, "Write failed, Xmsi_addr ");
3669 DBG_PRINT(ERR_DBG, "reads:0x%llx\n", x);
3673 val64 = readq(&bar0->swapper_ctrl);
3674 val64 &= 0xFFFF000000000000ULL;
3678 * The device by default set to a big endian format, so a
3679 * big endian driver need not set anything.
3681 val64 |= (SWAPPER_CTRL_TXP_FE |
3682 SWAPPER_CTRL_TXP_SE |
3683 SWAPPER_CTRL_TXD_R_FE |
3684 SWAPPER_CTRL_TXD_W_FE |
3685 SWAPPER_CTRL_TXF_R_FE |
3686 SWAPPER_CTRL_RXD_R_FE |
3687 SWAPPER_CTRL_RXD_W_FE |
3688 SWAPPER_CTRL_RXF_W_FE |
3689 SWAPPER_CTRL_XMSI_FE |
3690 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
3691 if (sp->config.intr_type == INTA)
3692 val64 |= SWAPPER_CTRL_XMSI_SE;
3693 writeq(val64, &bar0->swapper_ctrl);
3696 * Initially we enable all bits to make it accessible by the
3697 * driver, then we selectively enable only those bits that
3700 val64 |= (SWAPPER_CTRL_TXP_FE |
3701 SWAPPER_CTRL_TXP_SE |
3702 SWAPPER_CTRL_TXD_R_FE |
3703 SWAPPER_CTRL_TXD_R_SE |
3704 SWAPPER_CTRL_TXD_W_FE |
3705 SWAPPER_CTRL_TXD_W_SE |
3706 SWAPPER_CTRL_TXF_R_FE |
3707 SWAPPER_CTRL_RXD_R_FE |
3708 SWAPPER_CTRL_RXD_R_SE |
3709 SWAPPER_CTRL_RXD_W_FE |
3710 SWAPPER_CTRL_RXD_W_SE |
3711 SWAPPER_CTRL_RXF_W_FE |
3712 SWAPPER_CTRL_XMSI_FE |
3713 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
3714 if (sp->config.intr_type == INTA)
3715 val64 |= SWAPPER_CTRL_XMSI_SE;
3716 writeq(val64, &bar0->swapper_ctrl);
3718 val64 = readq(&bar0->swapper_ctrl);
3721 * Verifying if endian settings are accurate by reading a
3722 * feedback register.
3724 val64 = readq(&bar0->pif_rd_swapper_fb);
3725 if (val64 != 0x0123456789ABCDEFULL) {
3726 /* Endian settings are incorrect, calls for another dekko. */
3727 DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
3729 DBG_PRINT(ERR_DBG, "feedback read %llx\n",
3730 (unsigned long long) val64);
/*
 * Poll the xmsi_access register until the hardware clears the busy bit
 * (s2BIT(15)), indicating the previous XMSI access completed.
 * @i: XMSI index, used only for the failure message.
 * Returns 0 on completion; logs and (presumably — loop tail outside
 * this view) returns non-zero on timeout.
 */
3737 static int wait_for_msix_trans(struct s2io_nic *nic, int i)
3739 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3741 int ret = 0, cnt = 0;
3744 val64 = readq(&bar0->xmsi_access);
3745 if (!(val64 & s2BIT(15)))
3751 DBG_PRINT(ERR_DBG, "XMSI # %d Access failed\n", i);
/*
 * Write the previously saved MSI-X address/data pairs (nic->msix_info[])
 * back into the device through the indirect xmsi_access interface.
 * Used after a reset; Xframe I devices are skipped (no MSI-X restore).
 */
3758 static void restore_xmsi_data(struct s2io_nic *nic)
3760 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3765 if (nic->device_type == XFRAME_I_DEVICE)
3768 for (i=0; i < MAX_REQUESTED_MSI_X; i++) {
/* Entry 0 is the alarm vector; ring vectors are spaced 8 apart. */
3769 msix_index = (i) ? ((i-1) * 8 + 1): 0;
3770 writeq(nic->msix_info[i].addr, &bar0->xmsi_address);
3771 writeq(nic->msix_info[i].data, &bar0->xmsi_data);
/* s2BIT(7) = write command, s2BIT(15) = start/busy strobe. */
3772 val64 = (s2BIT(7) | s2BIT(15) | vBIT(msix_index, 26, 6));
3773 writeq(val64, &bar0->xmsi_access);
3774 if (wait_for_msix_trans(nic, msix_index)) {
3775 DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
/*
 * Read the MSI-X address/data pairs currently programmed into the
 * device (via the indirect xmsi_access interface) and cache them in
 * nic->msix_info[] so restore_xmsi_data() can replay them after reset.
 * Xframe I devices are skipped.
 */
3781 static void store_xmsi_data(struct s2io_nic *nic)
3783 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3784 u64 val64, addr, data;
3787 if (nic->device_type == XFRAME_I_DEVICE)
3790 /* Store and display */
3791 for (i=0; i < MAX_REQUESTED_MSI_X; i++) {
/* Same index layout as restore_xmsi_data(): 0 = alarm, rest 8 apart. */
3792 msix_index = (i) ? ((i-1) * 8 + 1): 0;
/* s2BIT(15) alone = read command + start strobe. */
3793 val64 = (s2BIT(15) | vBIT(msix_index, 26, 6));
3794 writeq(val64, &bar0->xmsi_access);
3795 if (wait_for_msix_trans(nic, msix_index)) {
3796 DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
3799 addr = readq(&bar0->xmsi_address);
3800 data = readq(&bar0->xmsi_data);
3802 nic->msix_info[i].addr = addr;
3803 nic->msix_info[i].data = data;
/*
 * Allocate and populate the MSI-X vector tables (nic->entries for the
 * PCI core, nic->s2io_entries for driver bookkeeping), program the
 * rx_mat steering register, and enable MSI-X via pci_enable_msix().
 * Entry 0 is the alarm/TX vector; entries 1..N map to RX rings.
 * All allocations are accounted in sw_stat mem_allocated/mem_freed.
 */
3808 static int s2io_enable_msi_x(struct s2io_nic *nic)
3810 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3812 u16 msi_control; /* Temp variable */
3813 int ret, i, j, msix_indx = 1;
3815 nic->entries = kmalloc(nic->num_entries * sizeof(struct msix_entry),
3817 if (!nic->entries) {
3818 DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n", \
3820 nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
3823 nic->mac_control.stats_info->sw_stat.mem_allocated
3824 += (nic->num_entries * sizeof(struct msix_entry));
3826 memset(nic->entries, 0, nic->num_entries * sizeof(struct msix_entry));
3829 kmalloc(nic->num_entries * sizeof(struct s2io_msix_entry),
3831 if (!nic->s2io_entries) {
3832 DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n",
3834 nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
/* Second allocation failed: undo the first one before bailing out. */
3835 kfree(nic->entries);
3836 nic->mac_control.stats_info->sw_stat.mem_freed
3837 += (nic->num_entries * sizeof(struct msix_entry));
3840 nic->mac_control.stats_info->sw_stat.mem_allocated
3841 += (nic->num_entries * sizeof(struct s2io_msix_entry));
3842 memset(nic->s2io_entries, 0,
3843 nic->num_entries * sizeof(struct s2io_msix_entry));
/* Vector 0: alarm/TX, argument is the FIFO array. */
3845 nic->entries[0].entry = 0;
3846 nic->s2io_entries[0].entry = 0;
3847 nic->s2io_entries[0].in_use = MSIX_FLG;
3848 nic->s2io_entries[0].type = MSIX_ALARM_TYPE;
3849 nic->s2io_entries[0].arg = &nic->mac_control.fifos;
/* Remaining vectors use hardware indices spaced 8 apart (see
 * store/restore_xmsi_data); arg/type filled in per ring below. */
3851 for (i = 1; i < nic->num_entries; i++) {
3852 nic->entries[i].entry = ((i - 1) * 8) + 1;
3853 nic->s2io_entries[i].entry = ((i - 1) * 8) + 1;
3854 nic->s2io_entries[i].arg = NULL;
3855 nic->s2io_entries[i].in_use = 0;
/* Steer every RX ring's interrupt to MSI-X index 1 via rx_mat. */
3858 rx_mat = readq(&bar0->rx_mat);
3859 for (j = 0; j < nic->config.rx_ring_num; j++) {
3860 rx_mat |= RX_MAT_SET(j, msix_indx);
3861 nic->s2io_entries[j+1].arg = &nic->mac_control.rings[j];
3862 nic->s2io_entries[j+1].type = MSIX_RING_TYPE;
3863 nic->s2io_entries[j+1].in_use = MSIX_FLG;
3866 writeq(rx_mat, &bar0->rx_mat);
3867 readq(&bar0->rx_mat);
3869 ret = pci_enable_msix(nic->pdev, nic->entries, nic->num_entries);
3870 /* We fail init if error or we get less vectors than min required */
3872 DBG_PRINT(ERR_DBG, "%s: Enabling MSIX failed\n", nic->dev->name);
3873 kfree(nic->entries);
3874 nic->mac_control.stats_info->sw_stat.mem_freed
3875 += (nic->num_entries * sizeof(struct msix_entry));
3876 kfree(nic->s2io_entries);
3877 nic->mac_control.stats_info->sw_stat.mem_freed
3878 += (nic->num_entries * sizeof(struct s2io_msix_entry));
3879 nic->entries = NULL;
3880 nic->s2io_entries = NULL;
3885 * To enable MSI-X, MSI also needs to be enabled, due to a bug
3886 * in the herc NIC. (Temp change, needs to be removed later)
/* 0x42 is the MSI control word in this device's config space. */
3888 pci_read_config_word(nic->pdev, 0x42, &msi_control);
3889 msi_control |= 0x1; /* Enable MSI */
3890 pci_write_config_word(nic->pdev, 0x42, msi_control);
3895 /* Handle software interrupt used during MSI(X) test */
/*
 * IRQ handler used only by the MSI(X) self-test (s2io_test_msi):
 * records that the interrupt fired and wakes the waiting tester.
 */
3896 static irqreturn_t s2io_test_intr(int irq, void *dev_id)
3898 struct s2io_nic *sp = dev_id;
3900 sp->msi_detected = 1;
3901 wake_up(&sp->msi_wait);
3906 /* Test interrupt path by forcing a software IRQ */
/*
 * Verify the MSI(X) delivery path: hook s2io_test_intr on vector 1,
 * force a one-shot scheduled interrupt routed to MSI 1, and wait up to
 * HZ/10 for the handler to fire.  On failure the caller falls back to
 * INTx.  The scheduled_int_ctrl register is restored before returning.
 */
3907 static int s2io_test_msi(struct s2io_nic *sp)
3909 struct pci_dev *pdev = sp->pdev;
3910 struct XENA_dev_config __iomem *bar0 = sp->bar0;
3914 err = request_irq(sp->entries[1].vector, s2io_test_intr, 0,
3917 DBG_PRINT(ERR_DBG, "%s: PCI %s: cannot assign irq %d\n",
3918 sp->dev->name, pci_name(pdev), pdev->irq);
3922 init_waitqueue_head (&sp->msi_wait);
3923 sp->msi_detected = 0;
/* Save the register so it can be restored after the test. */
3925 saved64 = val64 = readq(&bar0->scheduled_int_ctrl);
3926 val64 |= SCHED_INT_CTRL_ONE_SHOT;
3927 val64 |= SCHED_INT_CTRL_TIMER_EN;
3928 val64 |= SCHED_INT_CTRL_INT2MSI(1);
3929 writeq(val64, &bar0->scheduled_int_ctrl);
3931 wait_event_timeout(sp->msi_wait, sp->msi_detected, HZ/10);
3933 if (!sp->msi_detected) {
3934 /* MSI(X) test failed, go back to INTx mode */
3935 DBG_PRINT(ERR_DBG, "%s: PCI %s: No interrupt was generated "
3936 "using MSI(X) during test\n", sp->dev->name,
3942 free_irq(sp->entries[1].vector, sp);
3944 writeq(saved64, &bar0->scheduled_int_ctrl);
/*
 * Tear down MSI-X: free every successfully registered vector, release
 * the driver's entry tables, clear the config-space MSI enable bit
 * (mirror of the herc workaround in s2io_enable_msi_x), and disable
 * MSI-X at the PCI layer.
 */
3949 static void remove_msix_isr(struct s2io_nic *sp)
3954 for (i = 0; i < sp->num_entries; i++) {
3955 if (sp->s2io_entries[i].in_use ==
3956 MSIX_REGISTERED_SUCCESS) {
3957 int vector = sp->entries[i].vector;
3958 void *arg = sp->s2io_entries[i].arg;
3959 free_irq(vector, arg);
3964 kfree(sp->s2io_entries);
/* NULL the pointers so a later teardown/open cannot double-free. */
3966 sp->s2io_entries = NULL;
3968 pci_read_config_word(sp->pdev, 0x42, &msi_control);
3969 msi_control &= 0xFFFE; /* Disable MSI */
3970 pci_write_config_word(sp->pdev, 0x42, msi_control);
3972 pci_disable_msix(sp->pdev);
/* Release the legacy INTA interrupt line registered against the netdev. */
3975 static void remove_inta_isr(struct s2io_nic *sp)
3977 struct net_device *dev = sp->dev;
3979 free_irq(sp->pdev->irq, dev);
3982 /* ********************************************************* *
3983 * Functions defined below concern the OS part of the driver *
3984 * ********************************************************* */
3987 * s2io_open - open entry point of the driver
3988 * @dev : pointer to the device structure.
3990 * This function is the open entry point of the driver. It mainly calls a
3991 * function to allocate Rx buffers and inserts them into the buffer
3992 * descriptors and then enables the Rx part of the NIC.
3994 * 0 on success and an appropriate (-)ve integer as defined in errno.h
3998 static int s2io_open(struct net_device *dev)
4000 struct s2io_nic *sp = dev->priv;
4004 * Make sure you have link off by default every time
4005 * Nic is initialized
4007 netif_carrier_off(dev);
4008 sp->last_link_state = 0;
4010 /* Initialize H/W and enable interrupts */
4011 err = s2io_card_up(sp);
4013 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
4015 goto hw_init_failed;
/* Program the unicast MAC address into the hardware; treat failure
 * as fatal for open and unwind through the error path. */
4018 if (do_s2io_prog_unicast(dev, dev->dev_addr) == FAILURE) {
4019 DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
4022 goto hw_init_failed;
4024 s2io_start_all_tx_queue(sp);
/* Error path: for MSI-X, release the vector tables allocated by
 * s2io_enable_msi_x() and account the frees in sw_stat. */
4028 if (sp->config.intr_type == MSI_X) {
4031 sp->mac_control.stats_info->sw_stat.mem_freed
4032 += (sp->num_entries * sizeof(struct msix_entry));
4034 if (sp->s2io_entries) {
4035 kfree(sp->s2io_entries);
4036 sp->mac_control.stats_info->sw_stat.mem_freed
4037 += (sp->num_entries * sizeof(struct s2io_msix_entry));
4044 * s2io_close -close entry point of the driver
4045 * @dev : device pointer.
4047 * This is the stop entry point of the driver. It needs to undo exactly
4048 * whatever was done by the open entry point,thus it's usually referred to
4049 * as the close function.Among other things this function mainly stops the
4050 * Rx side of the NIC and frees all the Rx buffers in the Rx rings.
4052 * 0 on success and an appropriate (-)ve integer as defined in errno.h
4056 static int s2io_close(struct net_device *dev)
4058 struct s2io_nic *sp = dev->priv;
4059 struct config_param *config = &sp->config;
4063 /* Return if the device is already closed *
4064 * Can happen when s2io_card_up failed in change_mtu *
4066 if (!is_s2io_card_up(sp))
4069 s2io_stop_all_tx_queue(sp);
4070 /* delete all populated mac entries */
/* Offset 0 is skipped: it holds the primary MAC address. */
4071 for (offset = 1; offset < config->max_mc_addr; offset++) {
4072 tmp64 = do_s2io_read_unicast_mc(sp, offset);
4073 if (tmp64 != S2IO_DISABLE_MAC_ENTRY)
4074 do_s2io_delete_unicast_mc(sp, tmp64);
4083 * s2io_xmit - Tx entry point of the driver
4084 * @skb : the socket buffer containing the Tx data.
4085 * @dev : device pointer.
4087 * This function is the Tx entry point of the driver. S2IO NIC supports
4088 * certain protocol assist features on Tx side, namely CSO, S/G, LSO.
4089 * NOTE: when device cant queue the pkt,just the trans_start variable will
4092 * 0 on success & 1 on failure.
4095 static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
4097 struct s2io_nic *sp = dev->priv;
4098 u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
4101 struct TxFIFO_element __iomem *tx_fifo;
4102 unsigned long flags = 0;
4104 struct fifo_info *fifo = NULL;
4105 struct mac_info *mac_control;
4106 struct config_param *config;
4107 int do_spin_lock = 1;
4109 int enable_per_list_interrupt = 0;
4110 struct swStat *stats = &sp->mac_control.stats_info->sw_stat;
4112 mac_control = &sp->mac_control;
4113 config = &sp->config;
4115 DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name);
/* Drop zero-length skbs outright. */
4117 if (unlikely(skb->len <= 0)) {
4118 DBG_PRINT(TX_DBG, "%s:Buffer has no data..\n", dev->name);
4119 dev_kfree_skb_any(skb);
/* Card is resetting/down: nothing can be queued. */
4123 if (!is_s2io_card_up(sp)) {
4124 DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
4131 if (sp->vlgrp && vlan_tx_tag_present(skb))
4132 vlan_tag = vlan_tx_tag_get(skb);
/* FIFO (queue) selection: default steering hashes TCP/UDP source
 * port into the configured FIFO range; UDP traffic is offset into
 * its own FIFO window and large UDP frames enable per-list irqs. */
4133 if (sp->config.tx_steering_type == TX_DEFAULT_STEERING) {
4134 if (skb->protocol == htons(ETH_P_IP)) {
4139 if ((ip->frag_off & htons(IP_OFFSET|IP_MF)) == 0) {
4140 th = (struct tcphdr *)(((unsigned char *)ip) +
4143 if (ip->protocol == IPPROTO_TCP) {
4144 queue_len = sp->total_tcp_fifos;
4145 queue = (ntohs(th->source) +
4147 sp->fifo_selector[queue_len - 1];
4148 if (queue >= queue_len)
4149 queue = queue_len - 1;
4150 } else if (ip->protocol == IPPROTO_UDP) {
4151 queue_len = sp->total_udp_fifos;
4152 queue = (ntohs(th->source) +
4154 sp->fifo_selector[queue_len - 1];
4155 if (queue >= queue_len)
4156 queue = queue_len - 1;
4157 queue += sp->udp_fifo_idx;
4158 if (skb->len > 1024)
4159 enable_per_list_interrupt = 1;
4164 } else if (sp->config.tx_steering_type == TX_PRIORITY_STEERING)
4165 /* get fifo number based on skb->priority value */
4166 queue = config->fifo_mapping
4167 [skb->priority & (MAX_TX_FIFOS - 1)];
4168 fifo = &mac_control->fifos[queue];
4171 spin_lock_irqsave(&fifo->tx_lock, flags);
4173 if (unlikely(!spin_trylock_irqsave(&fifo->tx_lock, flags)))
4174 return NETDEV_TX_LOCKED;
4177 if (sp->config.multiq) {
4178 if (__netif_subqueue_stopped(dev, fifo->fifo_no)) {
4179 spin_unlock_irqrestore(&fifo->tx_lock, flags);
4180 return NETDEV_TX_BUSY;
4182 } else if (unlikely(fifo->queue_state == FIFO_QUEUE_STOP)) {
4183 if (netif_queue_stopped(dev)) {
4184 spin_unlock_irqrestore(&fifo->tx_lock, flags);
4185 return NETDEV_TX_BUSY;
4189 put_off = (u16) fifo->tx_curr_put_info.offset;
4190 get_off = (u16) fifo->tx_curr_get_info.offset;
4191 txdp = (struct TxD *) fifo->list_info[put_off].list_virt_addr;
4193 queue_len = fifo->tx_curr_put_info.fifo_len + 1;
4194 /* Avoid "put" pointer going beyond "get" pointer */
4195 if (txdp->Host_Control ||
4196 ((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
4197 DBG_PRINT(TX_DBG, "Error in xmit, No free TXDs.\n");
4198 s2io_stop_tx_queue(sp, fifo->fifo_no);
4200 spin_unlock_irqrestore(&fifo->tx_lock, flags);
/* Build descriptor 0: offload flags, ownership, int routing, VLAN. */
4204 offload_type = s2io_offload_type(skb);
4205 if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
4206 txdp->Control_1 |= TXD_TCP_LSO_EN;
4207 txdp->Control_1 |= TXD_TCP_LSO_MSS(s2io_tcp_mss(skb));
4209 if (skb->ip_summed == CHECKSUM_PARTIAL) {
4211 (TXD_TX_CKO_IPV4_EN | TXD_TX_CKO_TCP_EN |
4214 txdp->Control_1 |= TXD_GATHER_CODE_FIRST;
4215 txdp->Control_1 |= TXD_LIST_OWN_XENA;
4216 txdp->Control_2 |= TXD_INT_NUMBER(fifo->fifo_no);
4217 if (enable_per_list_interrupt)
4218 if (put_off & (queue_len >> 5))
4219 txdp->Control_2 |= TXD_INT_TYPE_PER_LIST;
4221 txdp->Control_2 |= TXD_VLAN_ENABLE;
4222 txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag);
4225 frg_len = skb->len - skb->data_len;
/* UFO: descriptor 0 carries an 8-byte in-band header (the IPv6
 * fragment id) DMA-mapped from fifo->ufo_in_band_v. */
4226 if (offload_type == SKB_GSO_UDP) {
4229 ufo_size = s2io_udp_mss(skb);
4231 txdp->Control_1 |= TXD_UFO_EN;
4232 txdp->Control_1 |= TXD_UFO_MSS(ufo_size);
4233 txdp->Control_1 |= TXD_BUFFER0_SIZE(8);
4235 /* both variants do cpu_to_be64(be32_to_cpu(...)) */
4236 fifo->ufo_in_band_v[put_off] =
4237 (__force u64)skb_shinfo(skb)->ip6_frag_id;
4239 fifo->ufo_in_band_v[put_off] =
4240 (__force u64)skb_shinfo(skb)->ip6_frag_id << 32;
4242 txdp->Host_Control = (unsigned long)fifo->ufo_in_band_v;
4243 txdp->Buffer_Pointer = pci_map_single(sp->pdev,
4244 fifo->ufo_in_band_v,
4245 sizeof(u64), PCI_DMA_TODEVICE);
4246 if (pci_dma_mapping_error(txdp->Buffer_Pointer))
4247 goto pci_map_failed;
/* Map the linear part of the skb. */
4251 txdp->Buffer_Pointer = pci_map_single
4252 (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE);
4253 if (pci_dma_mapping_error(txdp->Buffer_Pointer))
4254 goto pci_map_failed;
4256 txdp->Host_Control = (unsigned long) skb;
4257 txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len);
4258 if (offload_type == SKB_GSO_UDP)
4259 txdp->Control_1 |= TXD_UFO_EN;
4261 frg_cnt = skb_shinfo(skb)->nr_frags;
4262 /* For fragmented SKB. */
4263 for (i = 0; i < frg_cnt; i++) {
4264 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4265 /* A '0' length fragment will be ignored */
4269 txdp->Buffer_Pointer = (u64) pci_map_page
4270 (sp->pdev, frag->page, frag->page_offset,
4271 frag->size, PCI_DMA_TODEVICE);
4272 txdp->Control_1 = TXD_BUFFER0_SIZE(frag->size);
4273 if (offload_type == SKB_GSO_UDP)
4274 txdp->Control_1 |= TXD_UFO_EN;
4276 txdp->Control_1 |= TXD_GATHER_CODE_LAST;
4278 if (offload_type == SKB_GSO_UDP)
4279 frg_cnt++; /* as Txd0 was used for inband header */
/* Kick the hardware: hand the descriptor list to the TX FIFO. */
4281 tx_fifo = mac_control->tx_FIFO_start[queue];
4282 val64 = fifo->list_info[put_off].list_phy_addr;
4283 writeq(val64, &tx_fifo->TxDL_Pointer);
4285 val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
4288 val64 |= TX_FIFO_SPECIAL_FUNC;
4290 writeq(val64, &tx_fifo->List_Control);
/* Advance the put pointer with wrap-around. */
4295 if (put_off == fifo->tx_curr_put_info.fifo_len + 1)
4297 fifo->tx_curr_put_info.offset = put_off;
4299 /* Avoid "put" pointer going beyond "get" pointer */
4300 if (((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
4301 sp->mac_control.stats_info->sw_stat.fifo_full_cnt++;
4303 "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
4305 s2io_stop_tx_queue(sp, fifo->fifo_no);
4307 mac_control->stats_info->sw_stat.mem_allocated += skb->truesize;
4308 dev->trans_start = jiffies;
4309 spin_unlock_irqrestore(&fifo->tx_lock, flags);
/* MSI-X: reclaim completed descriptors inline to cut latency. */
4311 if (sp->config.intr_type == MSI_X)
4312 tx_intr_handler(fifo);
/* pci_map_failed: account the failure and stop the queue. */
4316 stats->pci_map_fail_cnt++;
4317 s2io_stop_tx_queue(sp, fifo->fifo_no);
4318 stats->mem_freed += skb->truesize;
4320 spin_unlock_irqrestore(&fifo->tx_lock, flags);
/*
 * Timer callback: run the error/alarm scan and re-arm itself to fire
 * again in half a second.
 */
4325 s2io_alarm_handle(unsigned long data)
4327 struct s2io_nic *sp = (struct s2io_nic *)data;
4328 struct net_device *dev = sp->dev;
4330 s2io_handle_errors(dev);
4331 mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
/*
 * MSI-X handler for a single RX ring.  In NAPI mode it masks the
 * ring's vector (via the per-ring byte in xmsi_mask_reg) and schedules
 * the ring's NAPI context; otherwise it processes RX inline and
 * replenishes buffers.
 */
4334 static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id)
4336 struct ring_info *ring = (struct ring_info *)dev_id;
4337 struct s2io_nic *sp = ring->nic;
4338 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4339 struct net_device *dev = sp->dev;
4341 if (unlikely(!is_s2io_card_up(sp)))
4344 if (sp->config.napi) {
4345 u8 __iomem *addr = NULL;
/* Byte-address the ring's mask bit: bytes are laid out high-to-low
 * relative to ring number; ring 0 shares its byte with the alarm
 * bit, hence the 0x7f (keep alarm enabled) vs 0xff mask. */
4348 addr = (u8 __iomem *)&bar0->xmsi_mask_reg;
4349 addr += (7 - ring->ring_no);
4350 val8 = (ring->ring_no == 0) ? 0x7f : 0xff;
4353 netif_rx_schedule(dev, &ring->napi);
4355 rx_intr_handler(ring, 0);
4356 s2io_chk_rx_buffers(ring);
/*
 * MSI-X handler for the alarm/TX vector (dev_id is the FIFO array).
 * Masks general interrupts, acknowledges TX traffic, reclaims every
 * TX FIFO, then restores the saved interrupt mask.
 */
4364 static irqreturn_t s2io_msix_fifo_handle(int irq, void *dev_id)
4365 struct fifo_info *fifos = (struct fifo_info *)dev_id;
4366 struct s2io_nic *sp = fifos->nic;
4367 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4368 struct config_param *config = &sp->config;
4371 if (unlikely(!is_s2io_card_up(sp)))
4374 reason = readq(&bar0->general_int_status);
4375 if (unlikely(reason == S2IO_MINUS_ONE))
4376 /* Nothing much can be done. Get out */
/* Mask everything while we service the TX FIFOs. */
4379 writeq(S2IO_MINUS_ONE, &bar0->general_int_mask);
4381 if (reason & GEN_INTR_TXTRAFFIC)
4382 writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);
4384 for (i = 0; i < config->tx_fifo_num; i++)
4385 tx_intr_handler(&fifos[i]);
/* Restore the mask; the readl flushes the posted write. */
4387 writeq(sp->general_int_mask, &bar0->general_int_mask);
4388 readl(&bar0->general_int_status);
/*
 * Service TXPIC interrupts — primarily GPIO link up/down events.
 * Resolves the three cases: both bits set (unstable, clear and let the
 * adapter re-evaluate), link-up (enable adapter + LED, report LINK_UP,
 * then listen only for link-down), link-down (report LINK_DOWN, turn
 * LED off, then listen only for link-up).
 */
4393 static void s2io_txpic_intr_handle(struct s2io_nic *sp)
4395 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4398 val64 = readq(&bar0->pic_int_status);
4399 if (val64 & PIC_INT_GPIO) {
4400 val64 = readq(&bar0->gpio_int_reg);
4401 if ((val64 & GPIO_INT_REG_LINK_DOWN) &&
4402 (val64 & GPIO_INT_REG_LINK_UP)) {
4404 * This is unstable state so clear both up/down
4405 * interrupt and adapter to re-evaluate the link state.
4407 val64 |= GPIO_INT_REG_LINK_DOWN;
4408 val64 |= GPIO_INT_REG_LINK_UP;
4409 writeq(val64, &bar0->gpio_int_reg);
4410 val64 = readq(&bar0->gpio_int_mask);
4411 val64 &= ~(GPIO_INT_MASK_LINK_UP |
4412 GPIO_INT_MASK_LINK_DOWN);
4413 writeq(val64, &bar0->gpio_int_mask);
4415 else if (val64 & GPIO_INT_REG_LINK_UP) {
4416 val64 = readq(&bar0->adapter_status);
4417 /* Enable Adapter */
4418 val64 = readq(&bar0->adapter_control);
4419 val64 |= ADAPTER_CNTL_EN;
4420 writeq(val64, &bar0->adapter_control);
4421 val64 |= ADAPTER_LED_ON;
4422 writeq(val64, &bar0->adapter_control);
4423 if (!sp->device_enabled_once)
4424 sp->device_enabled_once = 1;
4426 s2io_link(sp, LINK_UP);
4428 * unmask link down interrupt and mask link-up
4431 val64 = readq(&bar0->gpio_int_mask);
4432 val64 &= ~GPIO_INT_MASK_LINK_DOWN;
4433 val64 |= GPIO_INT_MASK_LINK_UP;
4434 writeq(val64, &bar0->gpio_int_mask);
4436 }else if (val64 & GPIO_INT_REG_LINK_DOWN) {
4437 val64 = readq(&bar0->adapter_status);
4438 s2io_link(sp, LINK_DOWN);
4439 /* Link is down so unmask link up interrupt */
4440 val64 = readq(&bar0->gpio_int_mask);
4441 val64 &= ~GPIO_INT_MASK_LINK_UP;
4442 val64 |= GPIO_INT_MASK_LINK_DOWN;
4443 writeq(val64, &bar0->gpio_int_mask);
/* Turn off the activity LED now that the link is gone. */
4446 val64 = readq(&bar0->adapter_control);
4447 val64 = val64 &(~ADAPTER_LED_ON);
4448 writeq(val64, &bar0->adapter_control);
4451 val64 = readq(&bar0->gpio_int_mask);
4455 * do_s2io_chk_alarm_bit - Check for alarm and increment the counter
4456 * @value: alarm bits
4457 * @addr: address value
4458 * @cnt: counter variable
4459 * Description: Check for alarm and increment the counter
4461 * 1 - if alarm bit set
4462 * 0 - if alarm bit is not set
4464 static int do_s2io_chk_alarm_bit(u64 value, void __iomem * addr,
4465 unsigned long long *cnt)
4468 val64 = readq(addr);
4469 if ( val64 & value ) {
/* Write-1-to-clear: acknowledge the alarm bits we just observed. */
4470 writeq(val64, addr);
4479 * s2io_handle_errors - Xframe error indication handler
4480 * @nic: device private variable
4481 * Description: Handle alarms such as loss of link, single or
4482 * double ECC errors, critical and serious errors.
4486 static void s2io_handle_errors(void * dev_id)
4488 struct net_device *dev = (struct net_device *) dev_id;
4489 struct s2io_nic *sp = dev->priv;
4490 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4491 u64 temp64 = 0,val64=0;
4494 struct swStat *sw_stat = &sp->mac_control.stats_info->sw_stat;
4495 struct xpakStat *stats = &sp->mac_control.stats_info->xpak_stat;
/* Bail out early if the card is down or has dropped off the bus. */
4497 if (!is_s2io_card_up(sp))
4500 if (pci_channel_offline(sp->pdev))
4503 memset(&sw_stat->ring_full_cnt, 0,
4504 sizeof(sw_stat->ring_full_cnt));
4506 /* Handling the XPAK counters update */
4507 if(stats->xpak_timer_count < 72000) {
4508 /* waiting for an hour */
4509 stats->xpak_timer_count++;
4511 s2io_updt_xpak_counter(dev);
4512 /* reset the count to zero */
4513 stats->xpak_timer_count = 0;
4516 /* Handling link status change error Intr */
4517 if (s2io_link_fault_indication(sp) == MAC_RMAC_ERR_TIMER) {
4518 val64 = readq(&bar0->mac_rmac_err_reg);
4519 writeq(val64, &bar0->mac_rmac_err_reg);
4520 if (val64 & RMAC_LINK_STATE_CHANGE_INT)
4521 schedule_work(&sp->set_link_task);
4524 /* In case of a serious error, the device will be Reset. */
4525 if (do_s2io_chk_alarm_bit(SERR_SOURCE_ANY, &bar0->serr_source,
4526 &sw_stat->serious_err_cnt))
4529 /* Check for data parity error */
4530 if (do_s2io_chk_alarm_bit(GPIO_INT_REG_DP_ERR_INT, &bar0->gpio_int_reg,
4531 &sw_stat->parity_err_cnt))
4534 /* Check for ring full counter */
/* Xframe II exposes 8 ring-bump counters packed 4x16-bit per reg. */
4535 if (sp->device_type == XFRAME_II_DEVICE) {
4536 val64 = readq(&bar0->ring_bump_counter1);
4537 for (i=0; i<4; i++) {
4538 temp64 = ( val64 & vBIT(0xFFFF,(i*16),16));
4539 temp64 >>= 64 - ((i+1)*16);
4540 sw_stat->ring_full_cnt[i] += temp64;
4543 val64 = readq(&bar0->ring_bump_counter2);
4544 for (i=0; i<4; i++) {
4545 temp64 = ( val64 & vBIT(0xFFFF,(i*16),16));
4546 temp64 >>= 64 - ((i+1)*16);
4547 sw_stat->ring_full_cnt[i+4] += temp64;
/* TX DMA block: check each sub-unit; fatal alarms fall through to
 * the reset path, recoverable ones only bump their counter. */
4551 val64 = readq(&bar0->txdma_int_status);
4552 /*check for pfc_err*/
4553 if (val64 & TXDMA_PFC_INT) {
4554 if (do_s2io_chk_alarm_bit(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM|
4555 PFC_MISC_0_ERR | PFC_MISC_1_ERR|
4556 PFC_PCIX_ERR, &bar0->pfc_err_reg,
4557 &sw_stat->pfc_err_cnt))
4559 do_s2io_chk_alarm_bit(PFC_ECC_SG_ERR, &bar0->pfc_err_reg,
4560 &sw_stat->pfc_err_cnt);
4563 /*check for tda_err*/
4564 if (val64 & TXDMA_TDA_INT) {
4565 if(do_s2io_chk_alarm_bit(TDA_Fn_ECC_DB_ERR | TDA_SM0_ERR_ALARM |
4566 TDA_SM1_ERR_ALARM, &bar0->tda_err_reg,
4567 &sw_stat->tda_err_cnt))
4569 do_s2io_chk_alarm_bit(TDA_Fn_ECC_SG_ERR | TDA_PCIX_ERR,
4570 &bar0->tda_err_reg, &sw_stat->tda_err_cnt);
4572 /*check for pcc_err*/
4573 if (val64 & TXDMA_PCC_INT) {
4574 if (do_s2io_chk_alarm_bit(PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM
4575 | PCC_N_SERR | PCC_6_COF_OV_ERR
4576 | PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR
4577 | PCC_7_LSO_OV_ERR | PCC_FB_ECC_DB_ERR
4578 | PCC_TXB_ECC_DB_ERR, &bar0->pcc_err_reg,
4579 &sw_stat->pcc_err_cnt))
4581 do_s2io_chk_alarm_bit(PCC_FB_ECC_SG_ERR | PCC_TXB_ECC_SG_ERR,
4582 &bar0->pcc_err_reg, &sw_stat->pcc_err_cnt);
4585 /*check for tti_err*/
4586 if (val64 & TXDMA_TTI_INT) {
4587 if (do_s2io_chk_alarm_bit(TTI_SM_ERR_ALARM, &bar0->tti_err_reg,
4588 &sw_stat->tti_err_cnt))
4590 do_s2io_chk_alarm_bit(TTI_ECC_SG_ERR | TTI_ECC_DB_ERR,
4591 &bar0->tti_err_reg, &sw_stat->tti_err_cnt);
4594 /*check for lso_err*/
4595 if (val64 & TXDMA_LSO_INT) {
4596 if (do_s2io_chk_alarm_bit(LSO6_ABORT | LSO7_ABORT
4597 | LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM,
4598 &bar0->lso_err_reg, &sw_stat->lso_err_cnt))
4600 do_s2io_chk_alarm_bit(LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
4601 &bar0->lso_err_reg, &sw_stat->lso_err_cnt);
4604 /*check for tpa_err*/
4605 if (val64 & TXDMA_TPA_INT) {
4606 if (do_s2io_chk_alarm_bit(TPA_SM_ERR_ALARM, &bar0->tpa_err_reg,
4607 &sw_stat->tpa_err_cnt))
4609 do_s2io_chk_alarm_bit(TPA_TX_FRM_DROP, &bar0->tpa_err_reg,
4610 &sw_stat->tpa_err_cnt);
4613 /*check for sm_err*/
4614 if (val64 & TXDMA_SM_INT) {
4615 if (do_s2io_chk_alarm_bit(SM_SM_ERR_ALARM, &bar0->sm_err_reg,
4616 &sw_stat->sm_err_cnt))
/* TX MAC errors. */
4620 val64 = readq(&bar0->mac_int_status);
4621 if (val64 & MAC_INT_STATUS_TMAC_INT) {
4622 if (do_s2io_chk_alarm_bit(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR,
4623 &bar0->mac_tmac_err_reg,
4624 &sw_stat->mac_tmac_err_cnt))
4626 do_s2io_chk_alarm_bit(TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR
4627 | TMAC_DESC_ECC_SG_ERR | TMAC_DESC_ECC_DB_ERR,
4628 &bar0->mac_tmac_err_reg,
4629 &sw_stat->mac_tmac_err_cnt);
/* TX XGXS (transceiver interface) errors. */
4632 val64 = readq(&bar0->xgxs_int_status);
4633 if (val64 & XGXS_INT_STATUS_TXGXS) {
4634 if (do_s2io_chk_alarm_bit(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR,
4635 &bar0->xgxs_txgxs_err_reg,
4636 &sw_stat->xgxs_txgxs_err_cnt))
4638 do_s2io_chk_alarm_bit(TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
4639 &bar0->xgxs_txgxs_err_reg,
4640 &sw_stat->xgxs_txgxs_err_cnt);
/* RX DMA block: RC, PRC/PCI-X, RPA, RDA, RTI sub-units. */
4643 val64 = readq(&bar0->rxdma_int_status);
4644 if (val64 & RXDMA_INT_RC_INT_M) {
4645 if (do_s2io_chk_alarm_bit(RC_PRCn_ECC_DB_ERR | RC_FTC_ECC_DB_ERR
4646 | RC_PRCn_SM_ERR_ALARM |RC_FTC_SM_ERR_ALARM,
4647 &bar0->rc_err_reg, &sw_stat->rc_err_cnt))
4649 do_s2io_chk_alarm_bit(RC_PRCn_ECC_SG_ERR | RC_FTC_ECC_SG_ERR
4650 | RC_RDA_FAIL_WR_Rn, &bar0->rc_err_reg,
4651 &sw_stat->rc_err_cnt);
4652 if (do_s2io_chk_alarm_bit(PRC_PCI_AB_RD_Rn | PRC_PCI_AB_WR_Rn
4653 | PRC_PCI_AB_F_WR_Rn, &bar0->prc_pcix_err_reg,
4654 &sw_stat->prc_pcix_err_cnt))
4656 do_s2io_chk_alarm_bit(PRC_PCI_DP_RD_Rn | PRC_PCI_DP_WR_Rn
4657 | PRC_PCI_DP_F_WR_Rn, &bar0->prc_pcix_err_reg,
4658 &sw_stat->prc_pcix_err_cnt);
4661 if (val64 & RXDMA_INT_RPA_INT_M) {
4662 if (do_s2io_chk_alarm_bit(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR,
4663 &bar0->rpa_err_reg, &sw_stat->rpa_err_cnt))
4665 do_s2io_chk_alarm_bit(RPA_ECC_SG_ERR | RPA_ECC_DB_ERR,
4666 &bar0->rpa_err_reg, &sw_stat->rpa_err_cnt);
4669 if (val64 & RXDMA_INT_RDA_INT_M) {
4670 if (do_s2io_chk_alarm_bit(RDA_RXDn_ECC_DB_ERR
4671 | RDA_FRM_ECC_DB_N_AERR | RDA_SM1_ERR_ALARM
4672 | RDA_SM0_ERR_ALARM | RDA_RXD_ECC_DB_SERR,
4673 &bar0->rda_err_reg, &sw_stat->rda_err_cnt))
4675 do_s2io_chk_alarm_bit(RDA_RXDn_ECC_SG_ERR | RDA_FRM_ECC_SG_ERR
4676 | RDA_MISC_ERR | RDA_PCIX_ERR,
4677 &bar0->rda_err_reg, &sw_stat->rda_err_cnt);
4680 if (val64 & RXDMA_INT_RTI_INT_M) {
4681 if (do_s2io_chk_alarm_bit(RTI_SM_ERR_ALARM, &bar0->rti_err_reg,
4682 &sw_stat->rti_err_cnt))
4684 do_s2io_chk_alarm_bit(RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
4685 &bar0->rti_err_reg, &sw_stat->rti_err_cnt);
/* RX MAC errors. */
4688 val64 = readq(&bar0->mac_int_status);
4689 if (val64 & MAC_INT_STATUS_RMAC_INT) {
4690 if (do_s2io_chk_alarm_bit(RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR,
4691 &bar0->mac_rmac_err_reg,
4692 &sw_stat->mac_rmac_err_cnt))
4694 do_s2io_chk_alarm_bit(RMAC_UNUSED_INT|RMAC_SINGLE_ECC_ERR|
4695 RMAC_DOUBLE_ECC_ERR, &bar0->mac_rmac_err_reg,
4696 &sw_stat->mac_rmac_err_cnt);
/* RX XGXS errors. */
4699 val64 = readq(&bar0->xgxs_int_status);
4700 if (val64 & XGXS_INT_STATUS_RXGXS) {
4701 if (do_s2io_chk_alarm_bit(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR,
4702 &bar0->xgxs_rxgxs_err_reg,
4703 &sw_stat->xgxs_rxgxs_err_cnt))
/* Memory controller errors, including ECC. */
4707 val64 = readq(&bar0->mc_int_status);
4708 if(val64 & MC_INT_STATUS_MC_INT) {
4709 if (do_s2io_chk_alarm_bit(MC_ERR_REG_SM_ERR, &bar0->mc_err_reg,
4710 &sw_stat->mc_err_cnt))
4713 /* Handling Ecc errors */
4714 if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) {
4715 writeq(val64, &bar0->mc_err_reg);
4716 if (val64 & MC_ERR_REG_ECC_ALL_DBL) {
4717 sw_stat->double_ecc_errs++;
4718 if (sp->device_type != XFRAME_II_DEVICE) {
4720 * Reset XframeI only if critical error
4723 (MC_ERR_REG_MIRI_ECC_DB_ERR_0 |
4724 MC_ERR_REG_MIRI_ECC_DB_ERR_1))
4728 sw_stat->single_ecc_errs++;
/* Fatal-error path: stop TX and schedule the reset task. */
4734 s2io_stop_all_tx_queue(sp);
4735 schedule_work(&sp->rst_timer_task);
4736 sw_stat->soft_reset_cnt++;
4741 * s2io_isr - ISR handler of the device .
4742 * @irq: the irq of the device.
4743 * @dev_id: a void pointer to the dev structure of the NIC.
4744 * Description: This function is the ISR handler of the device. It
4745 * identifies the reason for the interrupt and calls the relevant
4746 * service routines. As a contingency measure, this ISR allocates the
4747 * recv buffers, if their numbers are below the panic value which is
4748 * presently set to 25% of the original number of rcv buffers allocated.
4750 * IRQ_HANDLED: will be returned if IRQ was handled by this routine
4751 * IRQ_NONE: will be returned if interrupt is not from our device
4753 static irqreturn_t s2io_isr(int irq, void *dev_id)
4755 struct net_device *dev = (struct net_device *) dev_id;
4756 struct s2io_nic *sp = dev->priv;
4757 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4760 struct mac_info *mac_control;
4761 struct config_param *config;
4763 /* Pretend we handled any irq's from a disconnected card */
4764 if (pci_channel_offline(sp->pdev))
4767 if (!is_s2io_card_up(sp))
4770 mac_control = &sp->mac_control;
4771 config = &sp->config;
4774 * Identify the cause for interrupt and call the appropriate
4775 * interrupt handler. Causes for the interrupt could be;
4780 reason = readq(&bar0->general_int_status);
4782 if (unlikely(reason == S2IO_MINUS_ONE) ) {
4783 /* Nothing much can be done. Get out */
/* Mask all interrupts while servicing the recognised causes. */
4787 if (reason & (GEN_INTR_RXTRAFFIC |
4788 GEN_INTR_TXTRAFFIC | GEN_INTR_TXPIC))
4790 writeq(S2IO_MINUS_ONE, &bar0->general_int_mask);
/* NAPI mode: acknowledge RX and defer processing to the poll loop. */
4793 if (reason & GEN_INTR_RXTRAFFIC) {
4794 netif_rx_schedule(dev, &sp->napi);
4795 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_mask);
4796 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
4797 readl(&bar0->rx_traffic_int);
4801 * rx_traffic_int reg is an R1 register, writing all 1's
4802 * will ensure that the actual interrupt causing bit
4803 * get's cleared and hence a read can be avoided.
4805 if (reason & GEN_INTR_RXTRAFFIC)
4806 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
4808 for (i = 0; i < config->rx_ring_num; i++)
4809 rx_intr_handler(&mac_control->rings[i], 0);
4813 * tx_traffic_int reg is an R1 register, writing all 1's
4814 * will ensure that the actual interrupt causing bit get's
4815 * cleared and hence a read can be avoided.
4817 if (reason & GEN_INTR_TXTRAFFIC)
4818 writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);
4820 for (i = 0; i < config->tx_fifo_num; i++)
4821 tx_intr_handler(&mac_control->fifos[i]);
4823 if (reason & GEN_INTR_TXPIC)
4824 s2io_txpic_intr_handle(sp);
4827 * Reallocate the buffers from the interrupt handler itself.
4829 if (!config->napi) {
4830 for (i = 0; i < config->rx_ring_num; i++)
4831 s2io_chk_rx_buffers(&mac_control->rings[i]);
/* Restore the saved mask; readl flushes the posted write. */
4833 writeq(sp->general_int_mask, &bar0->general_int_mask);
4834 readl(&bar0->general_int_status);
4840 /* The interrupt was not raised by us */
/*
 * s2io_updt_stats - trigger an immediate one-shot hardware statistics update.
 * @sp: device private structure.
 * Programs stat_cfg for a single-shot DMA of the stats block and polls
 * until the hardware clears the busy bit (or the poll loop gives up).
 * No-op if the card is not up.
 */
4850 static void s2io_updt_stats(struct s2io_nic *sp)
4852 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4856 if (is_s2io_card_up(sp)) {
4857 /* Apprx 30us on a 133 MHz bus */
4858 val64 = SET_UPDT_CLICKS(10) |
4859 STAT_CFG_ONE_SHOT_EN | STAT_CFG_STAT_EN;
4860 writeq(val64, &bar0->stat_cfg);
/* Poll stat_cfg bit 0: cleared by hardware when the update completes */
4863 val64 = readq(&bar0->stat_cfg);
4864 if (!(val64 & s2BIT(0)))
4868 break; /* Updt failed */
4874 * s2io_get_stats - Updates the device statistics structure.
4875 * @dev : pointer to the device structure.
4877 * This function updates the device statistics structure in the s2io_nic
4878 * structure and returns a pointer to the same.
4880 * pointer to the updated net_device_stats structure.
4883 static struct net_device_stats *s2io_get_stats(struct net_device *dev)
4885 struct s2io_nic *sp = dev->priv;
4886 struct mac_info *mac_control;
4887 struct config_param *config;
4891 mac_control = &sp->mac_control;
4892 config = &sp->config;
4894 /* Configure Stats for immediate updt */
4895 s2io_updt_stats(sp);
/* Hardware stats block is little-endian; note the mixed 32/64-bit widths */
4897 sp->stats.tx_packets =
4898 le32_to_cpu(mac_control->stats_info->tmac_frms);
4899 sp->stats.tx_errors =
4900 le32_to_cpu(mac_control->stats_info->tmac_any_err_frms);
4901 sp->stats.rx_errors =
4902 le64_to_cpu(mac_control->stats_info->rmac_drop_frms);
4903 sp->stats.multicast =
4904 le32_to_cpu(mac_control->stats_info->rmac_vld_mcst_frms);
4905 sp->stats.rx_length_errors =
4906 le64_to_cpu(mac_control->stats_info->rmac_long_frms);
4908 /* collect per-ring rx_packets and rx_bytes */
4909 sp->stats.rx_packets = sp->stats.rx_bytes = 0;
4910 for (i = 0; i < config->rx_ring_num; i++) {
4911 sp->stats.rx_packets += mac_control->rings[i].rx_packets;
4912 sp->stats.rx_bytes += mac_control->rings[i].rx_bytes;
4915 return (&sp->stats);
4919 * s2io_set_multicast - entry point for multicast address enable/disable.
4920 * @dev : pointer to the device structure
4922 * This function is a driver entry point which gets called by the kernel
4923 * whenever multicast addresses must be enabled/disabled. This also gets
4924 * called to set/reset promiscuous mode. Depending on the device flag, we
4925 * determine, if multicast address must be enabled or if promiscuous mode
4926 * is to be disabled etc.
4931 static void s2io_set_multicast(struct net_device *dev)
4934 struct dev_mc_list *mclist;
4935 struct s2io_nic *sp = dev->priv;
4936 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4937 u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
4939 u64 dis_addr = S2IO_DISABLE_MAC_ENTRY, mac_addr = 0;
4941 struct config_param *config = &sp->config;
4943 if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) {
4944 /* Enable all Multicast addresses */
/* CAM write sequence: load data0/data1, then issue a strobed WE command
 * at the last multicast slot and wait for the strobe to clear. */
4945 writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac),
4946 &bar0->rmac_addr_data0_mem);
4947 writeq(RMAC_ADDR_DATA1_MEM_MASK(mask),
4948 &bar0->rmac_addr_data1_mem);
4949 val64 = RMAC_ADDR_CMD_MEM_WE |
4950 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4951 RMAC_ADDR_CMD_MEM_OFFSET(config->max_mc_addr - 1);
4952 writeq(val64, &bar0->rmac_addr_cmd_mem);
4953 /* Wait till command completes */
4954 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4955 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
/* Remember which CAM slot holds the all-multi entry so it can be undone */
4959 sp->all_multi_pos = config->max_mc_addr - 1;
4960 } else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) {
4961 /* Disable all Multicast addresses */
4962 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
4963 &bar0->rmac_addr_data0_mem);
4964 writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0),
4965 &bar0->rmac_addr_data1_mem);
4966 val64 = RMAC_ADDR_CMD_MEM_WE |
4967 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4968 RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
4969 writeq(val64, &bar0->rmac_addr_cmd_mem);
4970 /* Wait till command completes */
4971 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4972 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4976 sp->all_multi_pos = 0;
4979 if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) {
4980 /* Put the NIC into promiscuous mode */
4981 add = &bar0->mac_cfg;
4982 val64 = readq(&bar0->mac_cfg);
4983 val64 |= MAC_CFG_RMAC_PROM_ENABLE;
/* mac_cfg is key-protected: each 32-bit half write must be preceded
 * by writing the unlock key 0x4C0D to rmac_cfg_key */
4985 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4986 writel((u32) val64, add);
4987 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4988 writel((u32) (val64 >> 32), (add + 4));
/* In promiscuous mode also stop stripping VLAN tags (unless forced on) */
4990 if (vlan_tag_strip != 1) {
4991 val64 = readq(&bar0->rx_pa_cfg);
4992 val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
4993 writeq(val64, &bar0->rx_pa_cfg);
4994 vlan_strip_flag = 0;
4997 val64 = readq(&bar0->mac_cfg);
4998 sp->promisc_flg = 1;
4999 DBG_PRINT(INFO_DBG, "%s: entered promiscuous mode\n",
5001 } else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
5002 /* Remove the NIC from promiscuous mode */
5003 add = &bar0->mac_cfg;
5004 val64 = readq(&bar0->mac_cfg);
5005 val64 &= ~MAC_CFG_RMAC_PROM_ENABLE;
5007 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
5008 writel((u32) val64, add);
5009 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
5010 writel((u32) (val64 >> 32), (add + 4));
5012 if (vlan_tag_strip != 0) {
5013 val64 = readq(&bar0->rx_pa_cfg);
5014 val64 |= RX_PA_CFG_STRIP_VLAN_TAG;
5015 writeq(val64, &bar0->rx_pa_cfg);
5016 vlan_strip_flag = 1;
5019 val64 = readq(&bar0->mac_cfg);
5020 sp->promisc_flg = 0;
5021 DBG_PRINT(INFO_DBG, "%s: left promiscuous mode\n",
5025 /* Update individual M_CAST address list */
5026 if ((!sp->m_cast_flg) && dev->mc_count) {
/* Requested list exceeds the multicast region of the CAM */
5028 (config->max_mc_addr - config->max_mac_addr)) {
5029 DBG_PRINT(ERR_DBG, "%s: No more Rx filters ",
5031 DBG_PRINT(ERR_DBG, "can be added, please enable ");
5032 DBG_PRINT(ERR_DBG, "ALL_MULTI instead\n");
5036 prev_cnt = sp->mc_addr_count;
5037 sp->mc_addr_count = dev->mc_count;
5039 /* Clear out the previous list of Mc in the H/W. */
5040 for (i = 0; i < prev_cnt; i++) {
5041 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
5042 &bar0->rmac_addr_data0_mem);
5043 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
5044 &bar0->rmac_addr_data1_mem);
5045 val64 = RMAC_ADDR_CMD_MEM_WE |
5046 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5047 RMAC_ADDR_CMD_MEM_OFFSET
5048 (config->mc_start_offset + i);
5049 writeq(val64, &bar0->rmac_addr_cmd_mem);
5051 /* Wait till command completes */
5052 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5053 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5055 DBG_PRINT(ERR_DBG, "%s: Adding ",
5057 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
5062 /* Create the new Rx filter list and update the same in H/W. */
5063 for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
5064 i++, mclist = mclist->next) {
5065 memcpy(sp->usr_addrs[i].addr, mclist->dmi_addr,
/* Pack the 6-byte address into a u64 for the CAM data register */
5068 for (j = 0; j < ETH_ALEN; j++) {
5069 mac_addr |= mclist->dmi_addr[j];
5073 writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
5074 &bar0->rmac_addr_data0_mem);
5075 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
5076 &bar0->rmac_addr_data1_mem);
5077 val64 = RMAC_ADDR_CMD_MEM_WE |
5078 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5079 RMAC_ADDR_CMD_MEM_OFFSET
5080 (i + config->mc_start_offset);
5081 writeq(val64, &bar0->rmac_addr_cmd_mem);
5083 /* Wait till command completes */
5084 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5085 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5087 DBG_PRINT(ERR_DBG, "%s: Adding ",
5089 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
5096 /* read from CAM unicast & multicast addresses and store it in
5097 * def_mac_addr structure
5099 void do_s2io_store_unicast_mc(struct s2io_nic *sp)
5103 struct config_param *config = &sp->config;
5105 /* store unicast & multicast mac addresses */
/* Walk every CAM slot (unicast region first, then multicast) and
 * mirror each entry into the software copy in def_mac_addr[] */
5106 for (offset = 0; offset < config->max_mc_addr; offset++) {
5107 mac_addr = do_s2io_read_unicast_mc(sp, offset);
5108 /* if read fails disable the entry */
5109 if (mac_addr == FAILURE)
5110 mac_addr = S2IO_DISABLE_MAC_ENTRY;
5111 do_s2io_copy_mac_addr(sp, offset, mac_addr);
5115 /* restore unicast & multicast MAC to CAM from def_mac_addr structure */
5116 static void do_s2io_restore_unicast_mc(struct s2io_nic *sp)
5119 struct config_param *config = &sp->config;
5120 /* restore unicast mac address */
5121 for (offset = 0; offset < config->max_mac_addr; offset++)
5122 do_s2io_prog_unicast(sp->dev,
5123 sp->def_mac_addr[offset].mac_addr);
5125 /* restore multicast mac address */
/* Multicast entries occupy CAM slots [mc_start_offset, max_mc_addr) */
5126 for (offset = config->mc_start_offset;
5127 offset < config->max_mc_addr; offset++)
5128 do_s2io_add_mc(sp, sp->def_mac_addr[offset].mac_addr);
5131 /* add a multicast MAC address to CAM.
 * @sp: device private structure; @addr: 6-byte MAC address.
 * Skips all-zero/disabled addresses, avoids duplicates already in the
 * CAM, and programs the first empty multicast slot found. */
5132 static int do_s2io_add_mc(struct s2io_nic *sp, u8 *addr)
5136 struct config_param *config = &sp->config;
/* Pack the byte array into a u64 for comparison and programming */
5138 for (i = 0; i < ETH_ALEN; i++) {
5140 mac_addr |= addr[i];
5142 if ((0ULL == mac_addr) || (mac_addr == S2IO_DISABLE_MAC_ENTRY))
5145 /* check if the multicast mac already preset in CAM */
5146 for (i = config->mc_start_offset; i < config->max_mc_addr; i++) {
5148 tmp64 = do_s2io_read_unicast_mc(sp, i);
5149 if (tmp64 == S2IO_DISABLE_MAC_ENTRY) /* CAM entry is empty */
5152 if (tmp64 == mac_addr)
5155 if (i == config->max_mc_addr) {
5157 "CAM full no space left for multicast MAC\n");
5160 /* Update the internal structure with this new mac address */
5161 do_s2io_copy_mac_addr(sp, i, mac_addr);
5163 return (do_s2io_add_mac(sp, mac_addr, i));
5166 /* add MAC address to CAM.
 * @sp: device private structure; @addr: packed u64 MAC; @off: CAM slot.
 * Issues a write-enable strobe command to the RMAC address memory and
 * waits for completion. */
5167 static int do_s2io_add_mac(struct s2io_nic *sp, u64 addr, int off)
5170 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5172 writeq(RMAC_ADDR_DATA0_MEM_ADDR(addr),
5173 &bar0->rmac_addr_data0_mem);
5176 RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5177 RMAC_ADDR_CMD_MEM_OFFSET(off);
5178 writeq(val64, &bar0->rmac_addr_cmd_mem);
5180 /* Wait till command completes */
5181 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5182 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5184 DBG_PRINT(INFO_DBG, "do_s2io_add_mac failed\n");
5189 /* deletes a specified unicast/multicast mac entry from CAM.
 * Scans the CAM for @addr; when found, overwrites the slot with the
 * disable pattern and refreshes the software copy of the CAM. */
5190 static int do_s2io_delete_unicast_mc(struct s2io_nic *sp, u64 addr)
5193 u64 dis_addr = S2IO_DISABLE_MAC_ENTRY, tmp64;
5194 struct config_param *config = &sp->config;
5197 offset < config->max_mc_addr; offset++) {
5198 tmp64 = do_s2io_read_unicast_mc(sp, offset);
5199 if (tmp64 == addr) {
5200 /* disable the entry by writing 0xffffffffffffULL */
5201 if (do_s2io_add_mac(sp, dis_addr, offset) == FAILURE)
5203 /* store the new mac list from CAM */
5204 do_s2io_store_unicast_mc(sp);
/* Fell through the scan: address was never programmed */
5208 DBG_PRINT(ERR_DBG, "MAC address 0x%llx not found in CAM\n",
5209 (unsigned long long)addr);
5213 /* read mac entries from CAM.
 * @sp: device private structure; @offset: CAM slot to read.
 * Issues a read strobe command and returns the 48-bit MAC from the
 * upper bytes of rmac_addr_data0_mem (hence the >> 16). */
5214 static u64 do_s2io_read_unicast_mc(struct s2io_nic *sp, int offset)
5216 u64 tmp64 = 0xffffffffffff0000ULL, val64;
5217 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5221 RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5222 RMAC_ADDR_CMD_MEM_OFFSET(offset);
5223 writeq(val64, &bar0->rmac_addr_cmd_mem);
5225 /* Wait till command completes */
5226 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5227 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5229 DBG_PRINT(INFO_DBG, "do_s2io_read_unicast_mc failed\n");
5232 tmp64 = readq(&bar0->rmac_addr_data0_mem);
5233 return (tmp64 >> 16);
5237 * s2io_set_mac_addr driver entry point
 * (net_device set_mac_address hook). Validates the new address,
 * copies it into dev->dev_addr and programs it into the CAM.
5240 static int s2io_set_mac_addr(struct net_device *dev, void *p)
5242 struct sockaddr *addr = p;
5244 if (!is_valid_ether_addr(addr->sa_data))
5247 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5249 /* store the MAC address in CAM */
5250 return (do_s2io_prog_unicast(dev, dev->dev_addr));
5253 * do_s2io_prog_unicast - Programs the Xframe mac address
5254 * @dev : pointer to the device structure.
5255 * @addr: a uchar pointer to the new mac address which is to be set.
5256 * Description : This procedure will program the Xframe to receive
5257 * frames with new Mac Address
5258 * Return value: SUCCESS on success and an appropriate (-)ve integer
5259 * as defined in errno.h file on failure.
5262 static int do_s2io_prog_unicast(struct net_device *dev, u8 *addr)
5264 struct s2io_nic *sp = dev->priv;
5265 register u64 mac_addr = 0, perm_addr = 0;
5268 struct config_param *config = &sp->config;
5271 * Set the new MAC address as the new unicast filter and reflect this
5272 * change on the device address registered with the OS. It will be
5275 for (i = 0; i < ETH_ALEN; i++) {
5277 mac_addr |= addr[i];
/* Build the permanent (factory) address in parallel for comparison */
5279 perm_addr |= sp->def_mac_addr[0].mac_addr[i];
5282 /* check if the dev_addr is different than perm_addr */
5283 if (mac_addr == perm_addr)
5286 /* check if the mac already preset in CAM */
/* Slot 0 is the permanent address, so the scan starts at 1 */
5287 for (i = 1; i < config->max_mac_addr; i++) {
5288 tmp64 = do_s2io_read_unicast_mc(sp, i);
5289 if (tmp64 == S2IO_DISABLE_MAC_ENTRY) /* CAM entry is empty */
5292 if (tmp64 == mac_addr) {
5294 "MAC addr:0x%llx already present in CAM\n",
5295 (unsigned long long)mac_addr);
5299 if (i == config->max_mac_addr) {
5300 DBG_PRINT(ERR_DBG, "CAM full no space left for Unicast MAC\n");
5303 /* Update the internal structure with this new mac address */
5304 do_s2io_copy_mac_addr(sp, i, mac_addr);
5305 return (do_s2io_add_mac(sp, mac_addr, i));
5309 * s2io_ethtool_sset - Sets different link parameters.
5310 * @sp : private member of the device structure, which is a pointer to the * s2io_nic structure.
5311 * @info: pointer to the structure with parameters given by ethtool to set
5314 * The function sets different link parameters provided by the user onto
5320 static int s2io_ethtool_sset(struct net_device *dev,
5321 struct ethtool_cmd *info)
5323 struct s2io_nic *sp = dev->priv;
/* Hardware is fixed at 10G full duplex with no autoneg; reject anything else */
5324 if ((info->autoneg == AUTONEG_ENABLE) ||
5325 (info->speed != SPEED_10000) || (info->duplex != DUPLEX_FULL))
5328 s2io_close(sp->dev);
5336 * s2io_ethtool_gset - Return link specific information.
5337 * @sp : private member of the device structure, pointer to the
5338 * s2io_nic structure.
5339 * @info : pointer to the structure with parameters given by ethtool
5340 * to return link information.
5342 * Returns link specific information like speed, duplex etc.. to ethtool.
5344 * return 0 on success.
5347 static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
5349 struct s2io_nic *sp = dev->priv;
5350 info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
5351 info->advertising = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
5352 info->port = PORT_FIBRE;
5354 /* info->transceiver */
5355 info->transceiver = XCVR_EXTERNAL;
/* Speed/duplex are only meaningful while carrier is up */
5357 if (netif_carrier_ok(sp->dev)) {
5358 info->speed = 10000;
5359 info->duplex = DUPLEX_FULL;
5365 info->autoneg = AUTONEG_DISABLE;
5370 * s2io_ethtool_gdrvinfo - Returns driver specific information.
5371 * @sp : private member of the device structure, which is a pointer to the
5372 * s2io_nic structure.
5373 * @info : pointer to the structure with parameters given by ethtool to
5374 * return driver information.
5376 * Returns driver specific information like name, version etc.. to ethtool.
5381 static void s2io_ethtool_gdrvinfo(struct net_device *dev,
5382 struct ethtool_drvinfo *info)
5384 struct s2io_nic *sp = dev->priv;
5386 strncpy(info->driver, s2io_driver_name, sizeof(info->driver));
5387 strncpy(info->version, s2io_driver_version, sizeof(info->version));
5388 strncpy(info->fw_version, "", sizeof(info->fw_version));
5389 strncpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info));
/* Advertised sizes for ethtool -d (register dump) and -e (EEPROM dump) */
5390 info->regdump_len = XENA_REG_SPACE;
5391 info->eedump_len = XENA_EEPROM_SPACE;
5395 * s2io_ethtool_gregs - dumps the entire space of Xframe into the buffer.
5396 * @sp: private member of the device structure, which is a pointer to the
5397 * s2io_nic structure.
5398 * @regs : pointer to the structure with parameters given by ethtool for
5399 * dumping the registers.
5400 * @reg_space: The input argument into which all the registers are dumped.
5402 * Dumps the entire register space of xFrame NIC into the user given
5408 static void s2io_ethtool_gregs(struct net_device *dev,
5409 struct ethtool_regs *regs, void *space)
5413 u8 *reg_space = (u8 *) space;
5414 struct s2io_nic *sp = dev->priv;
5416 regs->len = XENA_REG_SPACE;
5417 regs->version = sp->pdev->subsystem_device;
/* Copy the BAR0 register space 8 bytes at a time via readq */
5419 for (i = 0; i < regs->len; i += 8) {
5420 reg = readq(sp->bar0 + i);
5421 memcpy((reg_space + i), &reg, 8);
5426 * s2io_phy_id - timer function that alternates adapter LED.
5427 * @data : address of the private member of the device structure, which
5428 * is a pointer to the s2io_nic structure, provided as an u32.
5429 * Description: This is actually the timer function that alternates the
5430 * adapter LED bit of the adapter control bit to set/reset every time on
5431 * invocation. The timer is set for 1/2 a second, hence the NIC blinks
5432 * once every second.
5434 static void s2io_phy_id(unsigned long data)
5436 struct s2io_nic *sp = (struct s2io_nic *) data;
5437 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5441 subid = sp->pdev->subsystem_device;
/* Newer boards (Xframe II, or subsystem id >= 0x07) drive the LED via
 * GPIO 0; older Xframe I boards toggle it in adapter_control instead */
5442 if ((sp->device_type == XFRAME_II_DEVICE) ||
5443 ((subid & 0xFF) >= 0x07)) {
5444 val64 = readq(&bar0->gpio_control);
5445 val64 ^= GPIO_CTRL_GPIO_0;
5446 writeq(val64, &bar0->gpio_control);
5448 val64 = readq(&bar0->adapter_control);
5449 val64 ^= ADAPTER_LED_ON;
5450 writeq(val64, &bar0->adapter_control);
/* Re-arm for the next half-second toggle */
5453 mod_timer(&sp->id_timer, jiffies + HZ / 2);
5457 * s2io_ethtool_idnic - To physically identify the nic on the system.
5458 * @sp : private member of the device structure, which is a pointer to the
5459 * s2io_nic structure.
5460 * @id : pointer to the structure with identification parameters given by
5462 * Description: Used to physically identify the NIC on the system.
5463 * The Link LED will blink for a time specified by the user for
5465 * NOTE: The Link has to be Up to be able to blink the LED. Hence
5466 * identification is possible only if it's link is up.
5468 * int , returns 0 on success
5471 static int s2io_ethtool_idnic(struct net_device *dev, u32 data)
5473 u64 val64 = 0, last_gpio_ctrl_val;
5474 struct s2io_nic *sp = dev->priv;
5475 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5478 subid = sp->pdev->subsystem_device;
/* Preserve GPIO state so it can be restored after the blink */
5479 last_gpio_ctrl_val = readq(&bar0->gpio_control);
5480 if ((sp->device_type == XFRAME_I_DEVICE) &&
5481 ((subid & 0xFF) < 0x07)) {
5482 val64 = readq(&bar0->adapter_control);
5483 if (!(val64 & ADAPTER_CNTL_EN)) {
5485 "Adapter Link down, cannot blink LED\n");
/* Lazily initialize the blink timer on first use */
5489 if (sp->id_timer.function == NULL) {
5490 init_timer(&sp->id_timer);
5491 sp->id_timer.function = s2io_phy_id;
5492 sp->id_timer.data = (unsigned long) sp;
5494 mod_timer(&sp->id_timer, jiffies);
/* data == 0 means "blink for the default maximum time" */
5496 msleep_interruptible(data * HZ);
5498 msleep_interruptible(MAX_FLICKER_TIME);
5499 del_timer_sync(&sp->id_timer);
5501 if (CARDS_WITH_FAULTY_LINK_INDICATORS(sp->device_type, subid)) {
5502 writeq(last_gpio_ctrl_val, &bar0->gpio_control);
5503 last_gpio_ctrl_val = readq(&bar0->gpio_control);
/*
 * s2io_ethtool_gringparam - report Tx/Rx descriptor ring sizes to ethtool.
 * @dev: network device; @ering: output structure filled in for ethtool -g.
 * Maximum Rx descriptor count depends on the receive buffer mode
 * (1-buffer vs 3-buffer); current counts are summed across all
 * configured FIFOs/rings.
 */
5509 static void s2io_ethtool_gringparam(struct net_device *dev,
5510 struct ethtool_ringparam *ering)
5512 struct s2io_nic *sp = dev->priv;
5513 int i,tx_desc_count=0,rx_desc_count=0;
5515 if (sp->rxd_mode == RXD_MODE_1)
5516 ering->rx_max_pending = MAX_RX_DESC_1;
5517 else if (sp->rxd_mode == RXD_MODE_3B)
5518 ering->rx_max_pending = MAX_RX_DESC_2;
5520 ering->tx_max_pending = MAX_TX_DESC;
5521 for (i = 0 ; i < sp->config.tx_fifo_num ; i++)
5522 tx_desc_count += sp->config.tx_cfg[i].fifo_len;
5524 DBG_PRINT(INFO_DBG,"\nmax txds : %d\n",sp->config.max_txds);
5525 ering->tx_pending = tx_desc_count;
5527 for (i = 0 ; i < sp->config.rx_ring_num ; i++)
5528 rx_desc_count += sp->config.rx_cfg[i].num_rxd;
5530 ering->rx_pending = rx_desc_count;
/* No mini ring on this hardware; jumbo shares the normal Rx rings */
5532 ering->rx_mini_max_pending = 0;
5533 ering->rx_mini_pending = 0;
5534 if(sp->rxd_mode == RXD_MODE_1)
5535 ering->rx_jumbo_max_pending = MAX_RX_DESC_1;
5536 else if (sp->rxd_mode == RXD_MODE_3B)
5537 ering->rx_jumbo_max_pending = MAX_RX_DESC_2;
5538 ering->rx_jumbo_pending = rx_desc_count;
5542 * s2io_ethtool_getpause_data - Pause frame generation and reception.
5543 * @sp : private member of the device structure, which is a pointer to the
5544 * s2io_nic structure.
5545 * @ep : pointer to the structure with pause parameters given by ethtool.
5547 * Returns the Pause frame generation and reception capability of the NIC.
5551 static void s2io_ethtool_getpause_data(struct net_device *dev,
5552 struct ethtool_pauseparam *ep)
5555 struct s2io_nic *sp = dev->priv;
5556 struct XENA_dev_config __iomem *bar0 = sp->bar0;
/* Report the current hardware pause configuration from rmac_pause_cfg */
5558 val64 = readq(&bar0->rmac_pause_cfg);
5559 if (val64 & RMAC_PAUSE_GEN_ENABLE)
5560 ep->tx_pause = TRUE;
5561 if (val64 & RMAC_PAUSE_RX_ENABLE)
5562 ep->rx_pause = TRUE;
/* Pause autonegotiation is not supported by this hardware */
5563 ep->autoneg = FALSE;
5567 * s2io_ethtool_setpause_data - set/reset pause frame generation.
5568 * @sp : private member of the device structure, which is a pointer to the
5569 * s2io_nic structure.
5570 * @ep : pointer to the structure with pause parameters given by ethtool.
5572 * It can be used to set or reset Pause frame generation or reception
5573 * support of the NIC.
5575 * int, returns 0 on Success
5578 static int s2io_ethtool_setpause_data(struct net_device *dev,
5579 struct ethtool_pauseparam *ep)
5582 struct s2io_nic *sp = dev->priv;
5583 struct XENA_dev_config __iomem *bar0 = sp->bar0;
/* Read-modify-write: only the two pause enable bits are touched */
5585 val64 = readq(&bar0->rmac_pause_cfg);
5587 val64 |= RMAC_PAUSE_GEN_ENABLE;
5589 val64 &= ~RMAC_PAUSE_GEN_ENABLE;
5591 val64 |= RMAC_PAUSE_RX_ENABLE;
5593 val64 &= ~RMAC_PAUSE_RX_ENABLE;
5594 writeq(val64, &bar0->rmac_pause_cfg);
5599 * read_eeprom - reads 4 bytes of data from user given offset.
5600 * @sp : private member of the device structure, which is a pointer to the
5601 * s2io_nic structure.
5602 * @off : offset at which the data must be written
5603 * @data : Its an output parameter where the data read at the given
5606 * Will read 4 bytes of data from the user given offset and return the
5608 * NOTE: Will allow to read only part of the EEPROM visible through the
5611 * -1 on failure and 0 on success.
5614 #define S2IO_DEV_ID 5
5615 static int read_eeprom(struct s2io_nic * sp, int off, u64 * data)
5620 struct XENA_dev_config __iomem *bar0 = sp->bar0;
/* Xframe I accesses the EEPROM over I2C */
5622 if (sp->device_type == XFRAME_I_DEVICE) {
5623 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
5624 I2C_CONTROL_BYTE_CNT(0x3) | I2C_CONTROL_READ |
5625 I2C_CONTROL_CNTL_START;
5626 SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
/* Poll up to 5 times for the I2C transaction to finish */
5628 while (exit_cnt < 5) {
5629 val64 = readq(&bar0->i2c_control);
5630 if (I2C_CONTROL_CNTL_END(val64)) {
5631 *data = I2C_CONTROL_GET_DATA(val64);
/* Xframe II accesses the EEPROM over SPI instead */
5640 if (sp->device_type == XFRAME_II_DEVICE) {
5641 val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
5642 SPI_CONTROL_BYTECNT(0x3) |
5643 SPI_CONTROL_CMD(0x3) | SPI_CONTROL_ADDR(off);
5644 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
/* REQ must be raised in a second write, after the command is set up */
5645 val64 |= SPI_CONTROL_REQ;
5646 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5647 while (exit_cnt < 5) {
5648 val64 = readq(&bar0->spi_control);
5649 if (val64 & SPI_CONTROL_NACK) {
5652 } else if (val64 & SPI_CONTROL_DONE) {
5653 *data = readq(&bar0->spi_data);
5666 * write_eeprom - actually writes the relevant part of the data value.
5667 * @sp : private member of the device structure, which is a pointer to the
5668 * s2io_nic structure.
5669 * @off : offset at which the data must be written
5670 * @data : The data that is to be written
5671 * @cnt : Number of bytes of the data that are actually to be written into
5672 * the Eeprom. (max of 3)
5674 * Actually writes the relevant part of the data value into the Eeprom
5675 * through the I2C bus.
5677 * 0 on success, -1 on failure.
5680 static int write_eeprom(struct s2io_nic * sp, int off, u64 data, int cnt)
5682 int exit_cnt = 0, ret = -1;
5684 struct XENA_dev_config __iomem *bar0 = sp->bar0;
/* Xframe I: write via the I2C controller */
5686 if (sp->device_type == XFRAME_I_DEVICE) {
5687 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
5688 I2C_CONTROL_BYTE_CNT(cnt) | I2C_CONTROL_SET_DATA((u32)data) |
5689 I2C_CONTROL_CNTL_START;
5690 SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
5692 while (exit_cnt < 5) {
5693 val64 = readq(&bar0->i2c_control);
5694 if (I2C_CONTROL_CNTL_END(val64)) {
/* NACK set means the device rejected the write */
5695 if (!(val64 & I2C_CONTROL_NACK))
/* Xframe II: write via the SPI controller */
5704 if (sp->device_type == XFRAME_II_DEVICE) {
/* Byte-count field encodes 8 bytes as 0 */
5705 int write_cnt = (cnt == 8) ? 0 : cnt;
5706 writeq(SPI_DATA_WRITE(data,(cnt<<3)), &bar0->spi_data);
5708 val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
5709 SPI_CONTROL_BYTECNT(write_cnt) |
5710 SPI_CONTROL_CMD(0x2) | SPI_CONTROL_ADDR(off);
5711 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5712 val64 |= SPI_CONTROL_REQ;
5713 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5714 while (exit_cnt < 5) {
5715 val64 = readq(&bar0->spi_control);
5716 if (val64 & SPI_CONTROL_NACK) {
5719 } else if (val64 & SPI_CONTROL_DONE) {
/*
 * s2io_vpd_read - read the PCI Vital Product Data to get product name
 * and serial number.
 * @nic: device private structure.
 * Sets default name/serial strings first, then reads the 256-byte VPD
 * area through the PCI VPD capability (address/data register pair at
 * vpd_addr) and parses out the "SN" serial-number field.
 */
5729 static void s2io_vpd_read(struct s2io_nic *nic)
5733 int i=0, cnt, fail = 0;
5734 int vpd_addr = 0x80;
5736 if (nic->device_type == XFRAME_II_DEVICE) {
5737 strcpy(nic->product_name, "Xframe II 10GbE network adapter");
5741 strcpy(nic->product_name, "Xframe I 10GbE network adapter");
/* Fallback serial in case the VPD read below fails */
5744 strcpy(nic->serial_num, "NOT AVAILABLE");
5746 vpd_data = kmalloc(256, GFP_KERNEL);
5748 nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
5751 nic->mac_control.stats_info->sw_stat.mem_allocated += 256;
/* VPD protocol: write the address with flag=0, poll the flag byte,
 * then read 4 data bytes -- repeated for the whole 256-byte area */
5753 for (i = 0; i < 256; i +=4 ) {
5754 pci_write_config_byte(nic->pdev, (vpd_addr + 2), i);
5755 pci_read_config_byte(nic->pdev, (vpd_addr + 2), &data);
5756 pci_write_config_byte(nic->pdev, (vpd_addr + 3), 0);
5757 for (cnt = 0; cnt <5; cnt++) {
5759 pci_read_config_byte(nic->pdev, (vpd_addr + 3), &data);
5764 DBG_PRINT(ERR_DBG, "Read of VPD data failed\n");
5768 pci_read_config_dword(nic->pdev, (vpd_addr + 4),
5769 (u32 *)&vpd_data[i]);
5773 /* read serial number of adapter */
5774 for (cnt = 0; cnt < 256; cnt++) {
5775 if ((vpd_data[cnt] == 'S') &&
5776 (vpd_data[cnt+1] == 'N') &&
5777 (vpd_data[cnt+2] < VPD_STRING_LEN)) {
5778 memset(nic->serial_num, 0, VPD_STRING_LEN);
5779 memcpy(nic->serial_num, &vpd_data[cnt + 3],
/* vpd_data[1] is the product-name string length; [3] its start */
5786 if ((!fail) && (vpd_data[1] < VPD_STRING_LEN)) {
5787 memset(nic->product_name, 0, vpd_data[1]);
5788 memcpy(nic->product_name, &vpd_data[3], vpd_data[1]);
5791 nic->mac_control.stats_info->sw_stat.mem_freed += 256;
5795 * s2io_ethtool_geeprom - reads the value stored in the Eeprom.
5796 * @sp : private member of the device structure, which is a pointer to the * s2io_nic structure.
5797 * @eeprom : pointer to the user level structure provided by ethtool,
5798 * containing all relevant information.
5799 * @data_buf : user defined value to be written into Eeprom.
5800 * Description: Reads the values stored in the Eeprom at given offset
5801 * for a given length. Stores these values into the input argument data
5802 * buffer 'data_buf' and returns these to the caller (ethtool.)
5807 static int s2io_ethtool_geeprom(struct net_device *dev,
5808 struct ethtool_eeprom *eeprom, u8 * data_buf)
5812 struct s2io_nic *sp = dev->priv;
/* ethtool convention: magic identifies the device the dump came from */
5814 eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);
/* Clamp the request to the visible EEPROM window */
5816 if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE))
5817 eeprom->len = XENA_EEPROM_SPACE - eeprom->offset;
5819 for (i = 0; i < eeprom->len; i += 4) {
5820 if (read_eeprom(sp, (eeprom->offset + i), &data)) {
5821 DBG_PRINT(ERR_DBG, "Read of EEPROM failed\n");
5825 memcpy((data_buf + i), &valid, 4);
5831 * s2io_ethtool_seeprom - tries to write the user provided value in Eeprom
5832 * @sp : private member of the device structure, which is a pointer to the
5833 * s2io_nic structure.
5834 * @eeprom : pointer to the user level structure provided by ethtool,
5835 * containing all relevant information.
5836 * @data_buf ; user defined value to be written into Eeprom.
5838 * Tries to write the user provided value in the Eeprom, at the offset
5839 * given by the user.
5841 * 0 on success, -EFAULT on failure.
5844 static int s2io_ethtool_seeprom(struct net_device *dev,
5845 struct ethtool_eeprom *eeprom,
5848 int len = eeprom->len, cnt = 0;
5849 u64 valid = 0, data;
5850 struct s2io_nic *sp = dev->priv;
/* Refuse a write whose magic does not match this device */
5852 if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
5854 "ETHTOOL_WRITE_EEPROM Err: Magic value ");
5855 DBG_PRINT(ERR_DBG, "is wrong, Its not 0x%x\n",
/* Write the buffer one byte at a time, positioned in the high byte */
5861 data = (u32) data_buf[cnt] & 0x000000FF;
5863 valid = (u32) (data << 24);
5867 if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) {
5869 "ETHTOOL_WRITE_EEPROM Err: Cannot ");
5871 "write into the specified offset\n");
5882 * s2io_register_test - reads and writes into all clock domains.
5883 * @sp : private member of the device structure, which is a pointer to the
5884 * s2io_nic structure.
5885 * @data : variable that returns the result of each of the test conducted b
5888 * Read and write into all clock domains. The NIC has 3 clock domains,
5889 * see that registers in all the three regions are accessible.
5894 static int s2io_register_test(struct s2io_nic * sp, uint64_t * data)
5896 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5897 u64 val64 = 0, exp_val;
/* Read tests: compare fixed-value registers against known constants */
5900 val64 = readq(&bar0->pif_rd_swapper_fb);
5901 if (val64 != 0x123456789abcdefULL) {
5903 DBG_PRINT(INFO_DBG, "Read Test level 1 fails\n");
5906 val64 = readq(&bar0->rmac_pause_cfg);
5907 if (val64 != 0xc000ffff00000000ULL) {
5909 DBG_PRINT(INFO_DBG, "Read Test level 2 fails\n");
5912 val64 = readq(&bar0->rx_queue_cfg);
/* Default rx queue configuration differs between Xframe I and II */
5913 if (sp->device_type == XFRAME_II_DEVICE)
5914 exp_val = 0x0404040404040404ULL;
5916 exp_val = 0x0808080808080808ULL;
5917 if (val64 != exp_val) {
5919 DBG_PRINT(INFO_DBG, "Read Test level 3 fails\n");
5922 val64 = readq(&bar0->xgxs_efifo_cfg);
5923 if (val64 != 0x000000001923141EULL) {
5925 DBG_PRINT(INFO_DBG, "Read Test level 4 fails\n");
/* Write tests: round-trip two complementary patterns through xmsi_data */
5928 val64 = 0x5A5A5A5A5A5A5A5AULL;
5929 writeq(val64, &bar0->xmsi_data);
5930 val64 = readq(&bar0->xmsi_data);
5931 if (val64 != 0x5A5A5A5A5A5A5A5AULL) {
5933 DBG_PRINT(ERR_DBG, "Write Test level 1 fails\n");
5936 val64 = 0xA5A5A5A5A5A5A5A5ULL;
5937 writeq(val64, &bar0->xmsi_data);
5938 val64 = readq(&bar0->xmsi_data);
5939 if (val64 != 0xA5A5A5A5A5A5A5A5ULL) {
5941 DBG_PRINT(ERR_DBG, "Write Test level 2 fails\n");
5949 * s2io_eeprom_test - to verify that EEprom in the xena can be programmed.
5950 * @sp : private member of the device structure, which is a pointer to the
5951 * s2io_nic structure.
5952 * @data:variable that returns the result of each of the test conducted by
5955 * Verify that EEPROM in the xena can be programmed using I2C_CONTROL
5961 static int s2io_eeprom_test(struct s2io_nic * sp, uint64_t * data)
5964 u64 ret_data, org_4F0, org_7F0;
5965 u8 saved_4F0 = 0, saved_7F0 = 0;
5966 struct net_device *dev = sp->dev;
5968 /* Test Write Error at offset 0 */
5969 /* Note that SPI interface allows write access to all areas
5970 * of EEPROM. Hence doing all negative testing only for Xframe I.
/* Negative test: a write to a protected offset is expected to FAIL;
 * a successful write (!write_eeprom) therefore marks the test failed */
5972 if (sp->device_type == XFRAME_I_DEVICE)
5973 if (!write_eeprom(sp, 0, 0, 3))
5976 /* Save current values at offsets 0x4F0 and 0x7F0 */
5977 if (!read_eeprom(sp, 0x4F0, &org_4F0))
5979 if (!read_eeprom(sp, 0x7F0, &org_7F0))
5982 /* Test Write at offset 4f0 */
5983 if (write_eeprom(sp, 0x4F0, 0x012345, 3))
5985 if (read_eeprom(sp, 0x4F0, &ret_data))
5988 if (ret_data != 0x012345) {
5989 DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x4F0. "
5990 "Data written %llx Data read %llx\n",
5991 dev->name, (unsigned long long)0x12345,
5992 (unsigned long long)ret_data);
5996 /* Reset the EEPROM data go FFFF */
5997 write_eeprom(sp, 0x4F0, 0xFFFFFF, 3);
5999 /* Test Write Request Error at offset 0x7c */
6000 if (sp->device_type == XFRAME_I_DEVICE)
6001 if (!write_eeprom(sp, 0x07C, 0, 3))
6004 /* Test Write Request at offset 0x7f0 */
6005 if (write_eeprom(sp, 0x7F0, 0x012345, 3))
6007 if (read_eeprom(sp, 0x7F0, &ret_data))
6010 if (ret_data != 0x012345) {
6011 DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x7F0. "
6012 "Data written %llx Data read %llx\n",
6013 dev->name, (unsigned long long)0x12345,
6014 (unsigned long long)ret_data);
6018 /* Reset the EEPROM data go FFFF */
6019 write_eeprom(sp, 0x7F0, 0xFFFFFF, 3);
6021 if (sp->device_type == XFRAME_I_DEVICE) {
6022 /* Test Write Error at offset 0x80 */
6023 if (!write_eeprom(sp, 0x080, 0, 3))
6026 /* Test Write Error at offset 0xfc */
6027 if (!write_eeprom(sp, 0x0FC, 0, 3))
6030 /* Test Write Error at offset 0x100 */
6031 if (!write_eeprom(sp, 0x100, 0, 3))
6034 /* Test Write Error at offset 4ec */
6035 if (!write_eeprom(sp, 0x4EC, 0, 3))
6039 /* Restore values at offsets 0x4F0 and 0x7F0 */
6041 write_eeprom(sp, 0x4F0, org_4F0, 3);
6043 write_eeprom(sp, 0x7F0, org_7F0, 3);
6050 * s2io_bist_test - invokes the MemBist test of the card .
6051 * @sp : private member of the device structure, which is a pointer to the
6052 * s2io_nic structure.
6053 * @data:variable that returns the result of each of the test conducted by
6056 * This invokes the MemBist test of the card. We give around
6057 * 2 secs time for the Test to complete. If it's still not complete
6058 * within this period, we consider that the test failed.
6060 * 0 on success and -1 on failure.
6063 static int s2io_bist_test(struct s2io_nic * sp, uint64_t * data)
6066 int cnt = 0, ret = -1;
/* Kick off the PCI built-in self test by setting PCI_BIST_START. */
6068 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
6069 bist |= PCI_BIST_START;
/* NOTE(review): PCI_BIST is a one-byte config register (read above
 * with pci_read_config_byte) but written back with
 * pci_write_config_word() -- verify this word-sized write is
 * intentional. */
6070 pci_write_config_word(sp->pdev, PCI_BIST, bist);
/* Poll until the device clears PCI_BIST_START, i.e. BIST completed. */
6073 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
6074 if (!(bist & PCI_BIST_START)) {
/* Report the BIST completion code (0 means all tests passed). */
6075 *data = (bist & PCI_BIST_CODE_MASK);
6087 * s2io_link_test - verifies the link state of the nic
6088 * @sp : private member of the device structure, which is a pointer to the
6089 * s2io_nic structure.
6090 * @data: variable that returns the result of each of the test conducted by
6093 * The function verifies the link state of the NIC and updates the input
6094 * argument 'data' appropriately.
6099 static int s2io_link_test(struct s2io_nic * sp, uint64_t * data)
6101 struct XENA_dev_config __iomem *bar0 = sp->bar0;
/* Sample the adapter status register and report link state via *data. */
6104 val64 = readq(&bar0->adapter_status);
6105 if(!(LINK_IS_UP(val64)))
6114 * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
6115 * @sp - private member of the device structure, which is a pointer to the
6116 * s2io_nic structure.
6117 * @data - variable that returns the result of each of the test
6118 * conducted by the driver.
6120 * This is one of the offline test that tests the read and write
6121 * access to the RldRam chip on the NIC.
6126 static int s2io_rldram_test(struct s2io_nic * sp, uint64_t * data)
6128 struct XENA_dev_config __iomem *bar0 = sp->bar0;
6130 int cnt, iteration = 0, test_fail = 0;
/* Disable ECC reporting while the RLDRAM is exercised in test mode. */
6132 val64 = readq(&bar0->adapter_control);
6133 val64 &= ~ADAPTER_ECC_EN;
6134 writeq(val64, &bar0->adapter_control);
/* Put the memory controller into RLDRAM test mode. */
6136 val64 = readq(&bar0->mc_rldram_test_ctrl);
6137 val64 |= MC_RLDRAM_TEST_MODE;
6138 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
6140 val64 = readq(&bar0->mc_rldram_mrs);
6141 val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE;
6142 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
6144 val64 |= MC_RLDRAM_MRS_ENABLE;
6145 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
/* Two passes: pass 1 repeats pass 0 with the upper 48 bits of each
 * data pattern complemented (XOR with 0xFFFFFFFFFFFF0000). */
6147 while (iteration < 2) {
6148 val64 = 0x55555555aaaa0000ULL;
6149 if (iteration == 1) {
6150 val64 ^= 0xFFFFFFFFFFFF0000ULL;
6152 writeq(val64, &bar0->mc_rldram_test_d0);
6154 val64 = 0xaaaa5a5555550000ULL;
6155 if (iteration == 1) {
6156 val64 ^= 0xFFFFFFFFFFFF0000ULL;
6158 writeq(val64, &bar0->mc_rldram_test_d1);
6160 val64 = 0x55aaaaaaaa5a0000ULL;
6161 if (iteration == 1) {
6162 val64 ^= 0xFFFFFFFFFFFF0000ULL;
6164 writeq(val64, &bar0->mc_rldram_test_d2);
6166 val64 = (u64) (0x0000003ffffe0100ULL);
6167 writeq(val64, &bar0->mc_rldram_test_add);
/* Trigger the write phase and poll (bounded) for DONE. */
6169 val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_WRITE |
6171 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
6173 for (cnt = 0; cnt < 5; cnt++) {
6174 val64 = readq(&bar0->mc_rldram_test_ctrl);
6175 if (val64 & MC_RLDRAM_TEST_DONE)
/* Trigger the read-back/compare phase and poll for DONE again. */
6183 val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
6184 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
6186 for (cnt = 0; cnt < 5; cnt++) {
6187 val64 = readq(&bar0->mc_rldram_test_ctrl);
6188 if (val64 & MC_RLDRAM_TEST_DONE)
/* PASS bit clear after a completed pass means the compare failed. */
6196 val64 = readq(&bar0->mc_rldram_test_ctrl);
6197 if (!(val64 & MC_RLDRAM_TEST_PASS))
6205 /* Bring the adapter out of test mode */
6206 SPECIAL_REG_WRITE(0, &bar0->mc_rldram_test_ctrl, LF);
6212 * s2io_ethtool_test - conducts 6 tests to determine the health of card.
6213 * @sp : private member of the device structure, which is a pointer to the
6214 * s2io_nic structure.
6215 * @ethtest : pointer to a ethtool command specific structure that will be
6216 * returned to the user.
6217 * @data : variable that returns the result of each of the test
6218 * conducted by the driver.
6220 * This function conducts 6 tests ( 4 offline and 2 online) to determine
6221 * the health of the card.
6226 static void s2io_ethtool_test(struct net_device *dev,
6227 struct ethtool_test *ethtest,
6230 struct s2io_nic *sp = dev->priv;
6231 int orig_state = netif_running(sp->dev);
6233 if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
6234 /* Offline Tests. */
/* Offline tests need the interface down; close it first (it is
 * restored afterwards based on orig_state). */
6236 s2io_close(sp->dev);
6238 if (s2io_register_test(sp, &data[0]))
6239 ethtest->flags |= ETH_TEST_FL_FAILED;
6243 if (s2io_rldram_test(sp, &data[3]))
6244 ethtest->flags |= ETH_TEST_FL_FAILED;
6248 if (s2io_eeprom_test(sp, &data[1]))
6249 ethtest->flags |= ETH_TEST_FL_FAILED;
6251 if (s2io_bist_test(sp, &data[4]))
6252 ethtest->flags |= ETH_TEST_FL_FAILED;
6262 "%s: is not up, cannot run test\n",
/* Online test: only the link check, which needs the device up. */
6271 if (s2io_link_test(sp, &data[2]))
6272 ethtest->flags |= ETH_TEST_FL_FAILED;
/* ethtool get_ethtool_stats hook: refreshes the DMA'd hardware stats
 * block and copies every counter, in the same order as the key tables
 * used by s2io_ethtool_get_strings(), into tmp_stats[]. 32-bit hardware
 * counters with a separate overflow word are combined into one u64. */
6281 static void s2io_get_ethtool_stats(struct net_device *dev,
6282 struct ethtool_stats *estats,
6286 struct s2io_nic *sp = dev->priv;
6287 struct stat_block *stat_info = sp->mac_control.stats_info;
/* Refresh the hardware statistics block before copying it out. */
6289 s2io_updt_stats(sp);
6291 (u64)le32_to_cpu(stat_info->tmac_frms_oflow) << 32 |
6292 le32_to_cpu(stat_info->tmac_frms);
6294 (u64)le32_to_cpu(stat_info->tmac_data_octets_oflow) << 32 |
6295 le32_to_cpu(stat_info->tmac_data_octets);
6296 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_drop_frms);
6298 (u64)le32_to_cpu(stat_info->tmac_mcst_frms_oflow) << 32 |
6299 le32_to_cpu(stat_info->tmac_mcst_frms);
6301 (u64)le32_to_cpu(stat_info->tmac_bcst_frms_oflow) << 32 |
6302 le32_to_cpu(stat_info->tmac_bcst_frms);
6303 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_pause_ctrl_frms);
6305 (u64)le32_to_cpu(stat_info->tmac_ttl_octets_oflow) << 32 |
6306 le32_to_cpu(stat_info->tmac_ttl_octets);
6308 (u64)le32_to_cpu(stat_info->tmac_ucst_frms_oflow) << 32 |
6309 le32_to_cpu(stat_info->tmac_ucst_frms);
6311 (u64)le32_to_cpu(stat_info->tmac_nucst_frms_oflow) << 32 |
6312 le32_to_cpu(stat_info->tmac_nucst_frms);
6314 (u64)le32_to_cpu(stat_info->tmac_any_err_frms_oflow) << 32 |
6315 le32_to_cpu(stat_info->tmac_any_err_frms);
6316 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_ttl_less_fb_octets);
6317 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_vld_ip_octets);
6319 (u64)le32_to_cpu(stat_info->tmac_vld_ip_oflow) << 32 |
6320 le32_to_cpu(stat_info->tmac_vld_ip);
6322 (u64)le32_to_cpu(stat_info->tmac_drop_ip_oflow) << 32 |
6323 le32_to_cpu(stat_info->tmac_drop_ip);
6325 (u64)le32_to_cpu(stat_info->tmac_icmp_oflow) << 32 |
6326 le32_to_cpu(stat_info->tmac_icmp);
6328 (u64)le32_to_cpu(stat_info->tmac_rst_tcp_oflow) << 32 |
6329 le32_to_cpu(stat_info->tmac_rst_tcp);
6330 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_tcp);
6331 tmp_stats[i++] = (u64)le32_to_cpu(stat_info->tmac_udp_oflow) << 32 |
6332 le32_to_cpu(stat_info->tmac_udp);
6334 (u64)le32_to_cpu(stat_info->rmac_vld_frms_oflow) << 32 |
6335 le32_to_cpu(stat_info->rmac_vld_frms);
6337 (u64)le32_to_cpu(stat_info->rmac_data_octets_oflow) << 32 |
6338 le32_to_cpu(stat_info->rmac_data_octets);
6339 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_fcs_err_frms);
6340 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_drop_frms);
6342 (u64)le32_to_cpu(stat_info->rmac_vld_mcst_frms_oflow) << 32 |
6343 le32_to_cpu(stat_info->rmac_vld_mcst_frms);
6345 (u64)le32_to_cpu(stat_info->rmac_vld_bcst_frms_oflow) << 32 |
6346 le32_to_cpu(stat_info->rmac_vld_bcst_frms);
6347 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_in_rng_len_err_frms);
6348 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_out_rng_len_err_frms);
6349 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_long_frms);
6350 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_pause_ctrl_frms);
6351 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_unsup_ctrl_frms);
6353 (u64)le32_to_cpu(stat_info->rmac_ttl_octets_oflow) << 32 |
6354 le32_to_cpu(stat_info->rmac_ttl_octets);
6356 (u64)le32_to_cpu(stat_info->rmac_accepted_ucst_frms_oflow)
6357 << 32 | le32_to_cpu(stat_info->rmac_accepted_ucst_frms);
6359 (u64)le32_to_cpu(stat_info->rmac_accepted_nucst_frms_oflow)
6360 << 32 | le32_to_cpu(stat_info->rmac_accepted_nucst_frms);
6362 (u64)le32_to_cpu(stat_info->rmac_discarded_frms_oflow) << 32 |
6363 le32_to_cpu(stat_info->rmac_discarded_frms);
6365 (u64)le32_to_cpu(stat_info->rmac_drop_events_oflow)
6366 << 32 | le32_to_cpu(stat_info->rmac_drop_events);
6367 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_less_fb_octets);
6368 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_frms);
6370 (u64)le32_to_cpu(stat_info->rmac_usized_frms_oflow) << 32 |
6371 le32_to_cpu(stat_info->rmac_usized_frms);
6373 (u64)le32_to_cpu(stat_info->rmac_osized_frms_oflow) << 32 |
6374 le32_to_cpu(stat_info->rmac_osized_frms);
6376 (u64)le32_to_cpu(stat_info->rmac_frag_frms_oflow) << 32 |
6377 le32_to_cpu(stat_info->rmac_frag_frms);
6379 (u64)le32_to_cpu(stat_info->rmac_jabber_frms_oflow) << 32 |
6380 le32_to_cpu(stat_info->rmac_jabber_frms);
6381 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_64_frms);
6382 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_65_127_frms);
6383 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_128_255_frms);
6384 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_256_511_frms);
6385 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_512_1023_frms);
6386 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_1024_1518_frms);
6388 (u64)le32_to_cpu(stat_info->rmac_ip_oflow) << 32 |
6389 le32_to_cpu(stat_info->rmac_ip);
6390 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ip_octets);
6391 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_hdr_err_ip);
6393 (u64)le32_to_cpu(stat_info->rmac_drop_ip_oflow) << 32 |
6394 le32_to_cpu(stat_info->rmac_drop_ip);
6396 (u64)le32_to_cpu(stat_info->rmac_icmp_oflow) << 32 |
6397 le32_to_cpu(stat_info->rmac_icmp);
6398 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_tcp);
6400 (u64)le32_to_cpu(stat_info->rmac_udp_oflow) << 32 |
6401 le32_to_cpu(stat_info->rmac_udp);
6403 (u64)le32_to_cpu(stat_info->rmac_err_drp_udp_oflow) << 32 |
6404 le32_to_cpu(stat_info->rmac_err_drp_udp);
6405 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_err_sym);
/* Per-receive-queue frame and queue-full counters (queues 0-7). */
6406 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q0);
6407 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q1);
6408 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q2);
6409 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q3);
6410 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q4);
6411 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q5);
6412 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q6);
6413 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q7);
6414 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q0);
6415 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q1);
6416 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q2);
6417 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q3);
6418 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q4);
6419 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q5);
6420 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q6);
6421 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q7);
6423 (u64)le32_to_cpu(stat_info->rmac_pause_cnt_oflow) << 32 |
6424 le32_to_cpu(stat_info->rmac_pause_cnt);
6425 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_data_err_cnt);
6426 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_ctrl_err_cnt);
6428 (u64)le32_to_cpu(stat_info->rmac_accepted_ip_oflow) << 32 |
6429 le32_to_cpu(stat_info->rmac_accepted_ip);
6430 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_err_tcp);
/* PCI-X bus transaction counters. */
6431 tmp_stats[i++] = le32_to_cpu(stat_info->rd_req_cnt);
6432 tmp_stats[i++] = le32_to_cpu(stat_info->new_rd_req_cnt);
6433 tmp_stats[i++] = le32_to_cpu(stat_info->new_rd_req_rtry_cnt);
6434 tmp_stats[i++] = le32_to_cpu(stat_info->rd_rtry_cnt);
6435 tmp_stats[i++] = le32_to_cpu(stat_info->wr_rtry_rd_ack_cnt);
6436 tmp_stats[i++] = le32_to_cpu(stat_info->wr_req_cnt);
6437 tmp_stats[i++] = le32_to_cpu(stat_info->new_wr_req_cnt);
6438 tmp_stats[i++] = le32_to_cpu(stat_info->new_wr_req_rtry_cnt);
6439 tmp_stats[i++] = le32_to_cpu(stat_info->wr_rtry_cnt);
6440 tmp_stats[i++] = le32_to_cpu(stat_info->wr_disc_cnt);
6441 tmp_stats[i++] = le32_to_cpu(stat_info->rd_rtry_wr_ack_cnt);
6442 tmp_stats[i++] = le32_to_cpu(stat_info->txp_wr_cnt);
6443 tmp_stats[i++] = le32_to_cpu(stat_info->txd_rd_cnt);
6444 tmp_stats[i++] = le32_to_cpu(stat_info->txd_wr_cnt);
6445 tmp_stats[i++] = le32_to_cpu(stat_info->rxd_rd_cnt);
6446 tmp_stats[i++] = le32_to_cpu(stat_info->rxd_wr_cnt);
6447 tmp_stats[i++] = le32_to_cpu(stat_info->txf_rd_cnt);
6448 tmp_stats[i++] = le32_to_cpu(stat_info->rxf_wr_cnt);
6450 /* Enhanced statistics exist only for Hercules */
6451 if(sp->device_type == XFRAME_II_DEVICE) {
6453 le64_to_cpu(stat_info->rmac_ttl_1519_4095_frms);
6455 le64_to_cpu(stat_info->rmac_ttl_4096_8191_frms);
6457 le64_to_cpu(stat_info->rmac_ttl_8192_max_frms);
6458 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_gt_max_frms);
6459 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_osized_alt_frms);
6460 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_jabber_alt_frms);
6461 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_gt_max_alt_frms);
6462 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_vlan_frms);
6463 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_len_discard);
6464 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_fcs_discard);
6465 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_pf_discard);
6466 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_da_discard);
6467 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_red_discard);
6468 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_rts_discard);
6469 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_ingm_full_discard);
6470 tmp_stats[i++] = le32_to_cpu(stat_info->link_fault_cnt);
/* Driver-maintained software statistics (host byte order, no
 * le*_to_cpu conversion needed). */
6474 tmp_stats[i++] = stat_info->sw_stat.single_ecc_errs;
6475 tmp_stats[i++] = stat_info->sw_stat.double_ecc_errs;
6476 tmp_stats[i++] = stat_info->sw_stat.parity_err_cnt;
6477 tmp_stats[i++] = stat_info->sw_stat.serious_err_cnt;
6478 tmp_stats[i++] = stat_info->sw_stat.soft_reset_cnt;
6479 tmp_stats[i++] = stat_info->sw_stat.fifo_full_cnt;
6480 for (k = 0; k < MAX_RX_RINGS; k++)
6481 tmp_stats[i++] = stat_info->sw_stat.ring_full_cnt[k];
/* XPAK transceiver alarm and warning counters. */
6482 tmp_stats[i++] = stat_info->xpak_stat.alarm_transceiver_temp_high;
6483 tmp_stats[i++] = stat_info->xpak_stat.alarm_transceiver_temp_low;
6484 tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_bias_current_high;
6485 tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_bias_current_low;
6486 tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_output_power_high;
6487 tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_output_power_low;
6488 tmp_stats[i++] = stat_info->xpak_stat.warn_transceiver_temp_high;
6489 tmp_stats[i++] = stat_info->xpak_stat.warn_transceiver_temp_low;
6490 tmp_stats[i++] = stat_info->xpak_stat.warn_laser_bias_current_high;
6491 tmp_stats[i++] = stat_info->xpak_stat.warn_laser_bias_current_low;
6492 tmp_stats[i++] = stat_info->xpak_stat.warn_laser_output_power_high;
6493 tmp_stats[i++] = stat_info->xpak_stat.warn_laser_output_power_low;
6494 tmp_stats[i++] = stat_info->sw_stat.clubbed_frms_cnt;
6495 tmp_stats[i++] = stat_info->sw_stat.sending_both;
6496 tmp_stats[i++] = stat_info->sw_stat.outof_sequence_pkts;
6497 tmp_stats[i++] = stat_info->sw_stat.flush_max_pkts;
6498 if (stat_info->sw_stat.num_aggregations) {
6499 u64 tmp = stat_info->sw_stat.sum_avg_pkts_aggregated;
6502 * Since 64-bit divide does not work on all platforms,
6503 * do repeated subtraction.
6505 while (tmp >= stat_info->sw_stat.num_aggregations) {
6506 tmp -= stat_info->sw_stat.num_aggregations;
6509 tmp_stats[i++] = count;
6513 tmp_stats[i++] = stat_info->sw_stat.mem_alloc_fail_cnt;
6514 tmp_stats[i++] = stat_info->sw_stat.pci_map_fail_cnt;
6515 tmp_stats[i++] = stat_info->sw_stat.watchdog_timer_cnt;
6516 tmp_stats[i++] = stat_info->sw_stat.mem_allocated;
6517 tmp_stats[i++] = stat_info->sw_stat.mem_freed;
6518 tmp_stats[i++] = stat_info->sw_stat.link_up_cnt;
6519 tmp_stats[i++] = stat_info->sw_stat.link_down_cnt;
6520 tmp_stats[i++] = stat_info->sw_stat.link_up_time;
6521 tmp_stats[i++] = stat_info->sw_stat.link_down_time;
6523 tmp_stats[i++] = stat_info->sw_stat.tx_buf_abort_cnt;
6524 tmp_stats[i++] = stat_info->sw_stat.tx_desc_abort_cnt;
6525 tmp_stats[i++] = stat_info->sw_stat.tx_parity_err_cnt;
6526 tmp_stats[i++] = stat_info->sw_stat.tx_link_loss_cnt;
6527 tmp_stats[i++] = stat_info->sw_stat.tx_list_proc_err_cnt;
6529 tmp_stats[i++] = stat_info->sw_stat.rx_parity_err_cnt;
6530 tmp_stats[i++] = stat_info->sw_stat.rx_abort_cnt;
6531 tmp_stats[i++] = stat_info->sw_stat.rx_parity_abort_cnt;
6532 tmp_stats[i++] = stat_info->sw_stat.rx_rda_fail_cnt;
6533 tmp_stats[i++] = stat_info->sw_stat.rx_unkn_prot_cnt;
6534 tmp_stats[i++] = stat_info->sw_stat.rx_fcs_err_cnt;
6535 tmp_stats[i++] = stat_info->sw_stat.rx_buf_size_err_cnt;
6536 tmp_stats[i++] = stat_info->sw_stat.rx_rxd_corrupt_cnt;
6537 tmp_stats[i++] = stat_info->sw_stat.rx_unkn_err_cnt;
6538 tmp_stats[i++] = stat_info->sw_stat.tda_err_cnt;
6539 tmp_stats[i++] = stat_info->sw_stat.pfc_err_cnt;
6540 tmp_stats[i++] = stat_info->sw_stat.pcc_err_cnt;
6541 tmp_stats[i++] = stat_info->sw_stat.tti_err_cnt;
6542 tmp_stats[i++] = stat_info->sw_stat.tpa_err_cnt;
6543 tmp_stats[i++] = stat_info->sw_stat.sm_err_cnt;
6544 tmp_stats[i++] = stat_info->sw_stat.lso_err_cnt;
6545 tmp_stats[i++] = stat_info->sw_stat.mac_tmac_err_cnt;
6546 tmp_stats[i++] = stat_info->sw_stat.mac_rmac_err_cnt;
6547 tmp_stats[i++] = stat_info->sw_stat.xgxs_txgxs_err_cnt;
6548 tmp_stats[i++] = stat_info->sw_stat.xgxs_rxgxs_err_cnt;
6549 tmp_stats[i++] = stat_info->sw_stat.rc_err_cnt;
6550 tmp_stats[i++] = stat_info->sw_stat.prc_pcix_err_cnt;
6551 tmp_stats[i++] = stat_info->sw_stat.rpa_err_cnt;
6552 tmp_stats[i++] = stat_info->sw_stat.rda_err_cnt;
6553 tmp_stats[i++] = stat_info->sw_stat.rti_err_cnt;
6554 tmp_stats[i++] = stat_info->sw_stat.mc_err_cnt;
6557 static int s2io_ethtool_get_regs_len(struct net_device *dev)
/* Size of the register dump produced by s2io_ethtool_gregs(). */
6559 return (XENA_REG_SPACE);
6563 static u32 s2io_ethtool_get_rx_csum(struct net_device * dev)
6565 struct s2io_nic *sp = dev->priv;
/* Report the driver's cached Rx-checksum-offload state. */
6567 return (sp->rx_csum);
6570 static int s2io_ethtool_set_rx_csum(struct net_device *dev, u32 data)
/* ethtool hook: enable/disable Rx checksum offload for this NIC. */
6572 struct s2io_nic *sp = dev->priv;
6582 static int s2io_get_eeprom_len(struct net_device *dev)
/* Size in bytes of the EEPROM exposed through ethtool -e/-E. */
6584 return (XENA_EEPROM_SPACE);
6587 static int s2io_get_sset_count(struct net_device *dev, int sset)
6589 struct s2io_nic *sp = dev->priv;
6593 return S2IO_TEST_LEN;
/* The statistics count depends on the adapter flavour: Xframe II
 * carries extra "enhanced" counters on top of the Xframe I set. */
6595 switch(sp->device_type) {
6596 case XFRAME_I_DEVICE:
6597 return XFRAME_I_STAT_LEN;
6598 case XFRAME_II_DEVICE:
6599 return XFRAME_II_STAT_LEN;
6608 static void s2io_ethtool_get_strings(struct net_device *dev,
6609 u32 stringset, u8 * data)
6612 struct s2io_nic *sp = dev->priv;
6614 switch (stringset) {
6616 memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
/* Stats key layout must mirror s2io_get_ethtool_stats(): Xena keys
 * first, then (Xframe II only) the enhanced keys, then the driver's
 * software-stat keys. */
6619 stat_size = sizeof(ethtool_xena_stats_keys);
6620 memcpy(data, ðtool_xena_stats_keys,stat_size);
6621 if(sp->device_type == XFRAME_II_DEVICE) {
6622 memcpy(data + stat_size,
6623 ðtool_enhanced_stats_keys,
6624 sizeof(ethtool_enhanced_stats_keys));
6625 stat_size += sizeof(ethtool_enhanced_stats_keys);
6628 memcpy(data + stat_size, ðtool_driver_stats_keys,
6629 sizeof(ethtool_driver_stats_keys));
6633 static int s2io_ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
/* Toggle IPv4 Tx checksum offload according to the requested state. */
6636 dev->features |= NETIF_F_IP_CSUM;
6638 dev->features &= ~NETIF_F_IP_CSUM;
6643 static u32 s2io_ethtool_op_get_tso(struct net_device *dev)
/* Report whether TSO is currently enabled (1) or not (0). */
6645 return (dev->features & NETIF_F_TSO) != 0;
6647 static int s2io_ethtool_op_set_tso(struct net_device *dev, u32 data)
/* Enable or disable IPv4 and IPv6 TSO together. */
6650 dev->features |= (NETIF_F_TSO | NETIF_F_TSO6);
6652 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
/* ethtool operations table wired up for every s2io net_device. */
6657 static const struct ethtool_ops netdev_ethtool_ops = {
6658 .get_settings = s2io_ethtool_gset,
6659 .set_settings = s2io_ethtool_sset,
6660 .get_drvinfo = s2io_ethtool_gdrvinfo,
6661 .get_regs_len = s2io_ethtool_get_regs_len,
6662 .get_regs = s2io_ethtool_gregs,
6663 .get_link = ethtool_op_get_link,
6664 .get_eeprom_len = s2io_get_eeprom_len,
6665 .get_eeprom = s2io_ethtool_geeprom,
6666 .set_eeprom = s2io_ethtool_seeprom,
6667 .get_ringparam = s2io_ethtool_gringparam,
6668 .get_pauseparam = s2io_ethtool_getpause_data,
6669 .set_pauseparam = s2io_ethtool_setpause_data,
6670 .get_rx_csum = s2io_ethtool_get_rx_csum,
6671 .set_rx_csum = s2io_ethtool_set_rx_csum,
6672 .set_tx_csum = s2io_ethtool_op_set_tx_csum,
6673 .set_sg = ethtool_op_set_sg,
6674 .get_tso = s2io_ethtool_op_get_tso,
6675 .set_tso = s2io_ethtool_op_set_tso,
6676 .set_ufo = ethtool_op_set_ufo,
6677 .self_test = s2io_ethtool_test,
6678 .get_strings = s2io_ethtool_get_strings,
6679 .phys_id = s2io_ethtool_idnic,
6680 .get_ethtool_stats = s2io_get_ethtool_stats,
6681 .get_sset_count = s2io_get_sset_count,
6685 * s2io_ioctl - Entry point for the Ioctl
6686 * @dev : Device pointer.
6687 * @ifr : An IOCTL specific structure, that can contain a pointer to
6688 * a proprietary structure used to pass information to the driver.
6689 * @cmd : This is used to distinguish between the different commands that
6690 * can be passed to the IOCTL functions.
6692 * Currently there is no special functionality supported in IOCTL, hence
6693 * the function always returns -EOPNOTSUPP
/* No private ioctls are supported by this driver. */
6696 static int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
6702 * s2io_change_mtu - entry point to change MTU size for the device.
6703 * @dev : device pointer.
6704 * @new_mtu : the new MTU size for the device.
6705 * Description: A driver entry point to change MTU size for the device.
6706 * Before changing the MTU the device must be stopped.
6708 * 0 on success and an appropriate (-)ve integer as defined in errno.h
6712 static int s2io_change_mtu(struct net_device *dev, int new_mtu)
6714 struct s2io_nic *sp = dev->priv;
/* Reject MTUs outside the supported [MIN_MTU, S2IO_JUMBO_SIZE] range. */
6717 if ((new_mtu < MIN_MTU) || (new_mtu > S2IO_JUMBO_SIZE)) {
6718 DBG_PRINT(ERR_DBG, "%s: MTU size is invalid.\n",
/* If the interface is up, restart the card so the new MTU takes
 * effect; if it is down, just program the Rx max payload register. */
6724 if (netif_running(dev)) {
6725 s2io_stop_all_tx_queue(sp);
6727 ret = s2io_card_up(sp);
6729 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
6733 s2io_wake_all_tx_queue(sp);
6734 } else { /* Device is down */
6735 struct XENA_dev_config __iomem *bar0 = sp->bar0;
6736 u64 val64 = new_mtu;
6738 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
6745 * s2io_set_link - Set the Link status
6746 * @work: kernel work queue entry, embedded in the s2io_nic structure
6747 * Description: Sets the link status for the adapter
6750 static void s2io_set_link(struct work_struct *work)
6752 struct s2io_nic *nic = container_of(work, struct s2io_nic, set_link_task);
6753 struct net_device *dev = nic->dev;
6754 struct XENA_dev_config __iomem *bar0 = nic->bar0;
6760 if (!netif_running(dev))
/* Serialize with the reset path: if the LINK_TASK bit is already
 * set the card is being reset, so bail out. */
6763 if (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(nic->state))) {
6764 /* The card is being reset, no point doing anything */
6768 subid = nic->pdev->subsystem_device;
6769 if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
6771 * Allow a small delay for the NICs self initiated
6772 * cleanup to complete.
/* Link up: enable the adapter (if quiescent) and turn the LED on;
 * boards with faulty link indicators also need GPIO_0 driven. */
6777 val64 = readq(&bar0->adapter_status);
6778 if (LINK_IS_UP(val64)) {
6779 if (!(readq(&bar0->adapter_control) & ADAPTER_CNTL_EN)) {
6780 if (verify_xena_quiescence(nic)) {
6781 val64 = readq(&bar0->adapter_control);
6782 val64 |= ADAPTER_CNTL_EN;
6783 writeq(val64, &bar0->adapter_control);
6784 if (CARDS_WITH_FAULTY_LINK_INDICATORS(
6785 nic->device_type, subid)) {
6786 val64 = readq(&bar0->gpio_control);
6787 val64 |= GPIO_CTRL_GPIO_0;
6788 writeq(val64, &bar0->gpio_control);
6789 val64 = readq(&bar0->gpio_control);
6791 val64 |= ADAPTER_LED_ON;
6792 writeq(val64, &bar0->adapter_control);
6794 nic->device_enabled_once = TRUE;
6796 DBG_PRINT(ERR_DBG, "%s: Error: ", dev->name);
6797 DBG_PRINT(ERR_DBG, "device is not Quiescent\n");
6798 s2io_stop_all_tx_queue(nic);
6801 val64 = readq(&bar0->adapter_control);
6802 val64 |= ADAPTER_LED_ON;
6803 writeq(val64, &bar0->adapter_control);
6804 s2io_link(nic, LINK_UP);
/* Link down: release GPIO_0 on faulty-indicator boards, switch the
 * LED off and report LINK_DOWN. */
6806 if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
6808 val64 = readq(&bar0->gpio_control);
6809 val64 &= ~GPIO_CTRL_GPIO_0;
6810 writeq(val64, &bar0->gpio_control);
6811 val64 = readq(&bar0->gpio_control);
6814 val64 = readq(&bar0->adapter_control);
6815 val64 = val64 &(~ADAPTER_LED_ON);
6816 writeq(val64, &bar0->adapter_control);
6817 s2io_link(nic, LINK_DOWN);
6819 clear_bit(__S2IO_STATE_LINK_TASK, &(nic->state));
/* Re-attach (or freshly allocate and DMA-map) the receive buffers of a
 * single RxD while the card is being quiesced; Rx frames will not be
 * processed, so previously mapped addresses cached in temp0..temp2 are
 * reused for RxDs whose Host_Control is non-zero. On allocation or
 * mapping failure control jumps to the memalloc_failed path, which
 * counts the error and frees the skb. */
6825 static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
6827 struct sk_buff **skb, u64 *temp0, u64 *temp1,
6828 u64 *temp2, int size)
6830 struct net_device *dev = sp->dev;
6831 struct swStat *stats = &sp->mac_control.stats_info->sw_stat;
/* One-buffer mode: a single data buffer per RxD. */
6833 if ((sp->rxd_mode == RXD_MODE_1) && (rxdp->Host_Control == 0)) {
6834 struct RxD1 *rxdp1 = (struct RxD1 *)rxdp;
6837 DBG_PRINT(INFO_DBG, "SKB is not NULL\n");
6839 * As Rx frame are not going to be processed,
6840 * using same mapped address for the Rxd
6843 rxdp1->Buffer0_ptr = *temp0;
6845 *skb = dev_alloc_skb(size);
6847 DBG_PRINT(INFO_DBG, "%s: Out of ", dev->name);
6848 DBG_PRINT(INFO_DBG, "memory to allocate ");
6849 DBG_PRINT(INFO_DBG, "1 buf mode SKBs\n");
6850 sp->mac_control.stats_info->sw_stat. \
6851 mem_alloc_fail_cnt++;
6854 sp->mac_control.stats_info->sw_stat.mem_allocated
6855 += (*skb)->truesize;
6856 /* storing the mapped addr in a temp variable
6857 * such it will be used for next rxd whose
6858 * Host Control is NULL
6860 rxdp1->Buffer0_ptr = *temp0 =
6861 pci_map_single( sp->pdev, (*skb)->data,
6862 size - NET_IP_ALIGN,
6863 PCI_DMA_FROMDEVICE);
6864 if (pci_dma_mapping_error(rxdp1->Buffer0_ptr))
6865 goto memalloc_failed;
6866 rxdp->Host_Control = (unsigned long) (*skb);
/* Two-buffer mode: header buffer (ba_0), dummy buffer (ba_1) and the
 * payload buffer inside the skb. */
6868 } else if ((sp->rxd_mode == RXD_MODE_3B) && (rxdp->Host_Control == 0)) {
6869 struct RxD3 *rxdp3 = (struct RxD3 *)rxdp;
6870 /* Two buffer Mode */
6872 rxdp3->Buffer2_ptr = *temp2;
6873 rxdp3->Buffer0_ptr = *temp0;
6874 rxdp3->Buffer1_ptr = *temp1;
6876 *skb = dev_alloc_skb(size);
6878 DBG_PRINT(INFO_DBG, "%s: Out of ", dev->name);
6879 DBG_PRINT(INFO_DBG, "memory to allocate ");
6880 DBG_PRINT(INFO_DBG, "2 buf mode SKBs\n");
6881 sp->mac_control.stats_info->sw_stat. \
6882 mem_alloc_fail_cnt++;
6885 sp->mac_control.stats_info->sw_stat.mem_allocated
6886 += (*skb)->truesize;
6887 rxdp3->Buffer2_ptr = *temp2 =
6888 pci_map_single(sp->pdev, (*skb)->data,
6890 PCI_DMA_FROMDEVICE);
6891 if (pci_dma_mapping_error(rxdp3->Buffer2_ptr))
6892 goto memalloc_failed;
6893 rxdp3->Buffer0_ptr = *temp0 =
6894 pci_map_single( sp->pdev, ba->ba_0, BUF0_LEN,
6895 PCI_DMA_FROMDEVICE);
/* Unwind earlier mappings on partial failure before bailing out. */
6896 if (pci_dma_mapping_error(rxdp3->Buffer0_ptr)) {
6897 pci_unmap_single (sp->pdev,
6898 (dma_addr_t)rxdp3->Buffer2_ptr,
6899 dev->mtu + 4, PCI_DMA_FROMDEVICE);
6900 goto memalloc_failed;
6902 rxdp->Host_Control = (unsigned long) (*skb);
6904 /* Buffer-1 will be dummy buffer not used */
6905 rxdp3->Buffer1_ptr = *temp1 =
6906 pci_map_single(sp->pdev, ba->ba_1, BUF1_LEN,
6907 PCI_DMA_FROMDEVICE);
6908 if (pci_dma_mapping_error(rxdp3->Buffer1_ptr)) {
6909 pci_unmap_single (sp->pdev,
6910 (dma_addr_t)rxdp3->Buffer0_ptr,
6911 BUF0_LEN, PCI_DMA_FROMDEVICE);
6912 pci_unmap_single (sp->pdev,
6913 (dma_addr_t)rxdp3->Buffer2_ptr,
6914 dev->mtu + 4, PCI_DMA_FROMDEVICE);
6915 goto memalloc_failed;
/* Common failure path: account the dropped skb and report failure. */
6921 stats->pci_map_fail_cnt++;
6922 stats->mem_freed += (*skb)->truesize;
6923 dev_kfree_skb(*skb);
/* Program the per-buffer sizes in an RxD's Control_2 word according to
 * the ring mode (1-buffer vs 2-buffer). Buffer-1 in 3B mode is a
 * 1-byte dummy. */
6927 static void set_rxd_buffer_size(struct s2io_nic *sp, struct RxD_t *rxdp,
6930 struct net_device *dev = sp->dev;
6931 if (sp->rxd_mode == RXD_MODE_1) {
6932 rxdp->Control_2 = SET_BUFFER0_SIZE_1( size - NET_IP_ALIGN);
6933 } else if (sp->rxd_mode == RXD_MODE_3B) {
6934 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
6935 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
6936 rxdp->Control_2 |= SET_BUFFER2_SIZE_3( dev->mtu + 4);
/* Walk every RxD of every Rx ring, re-attach its buffers via
 * set_rxd_buffer_pointer()/set_rxd_buffer_size() and hand ownership
 * back to the hardware (RXD_OWN_XENA). Used while bringing the card
 * down to satisfy the hardware's replenishment requirement. */
6940 static int rxd_owner_bit_reset(struct s2io_nic *sp)
6942 int i, j, k, blk_cnt = 0, size;
6943 struct mac_info * mac_control = &sp->mac_control;
6944 struct config_param *config = &sp->config;
6945 struct net_device *dev = sp->dev;
6946 struct RxD_t *rxdp = NULL;
6947 struct sk_buff *skb = NULL;
6948 struct buffAdd *ba = NULL;
6949 u64 temp0_64 = 0, temp1_64 = 0, temp2_64 = 0;
6951 /* Calculate the size based on ring mode */
6952 size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
6953 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
6954 if (sp->rxd_mode == RXD_MODE_1)
6955 size += NET_IP_ALIGN;
6956 else if (sp->rxd_mode == RXD_MODE_3B)
6957 size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
/* Iterate rings -> blocks -> RxDs within each block. */
6959 for (i = 0; i < config->rx_ring_num; i++) {
6960 blk_cnt = config->rx_cfg[i].num_rxd /
6961 (rxd_count[sp->rxd_mode] +1);
6963 for (j = 0; j < blk_cnt; j++) {
6964 for (k = 0; k < rxd_count[sp->rxd_mode]; k++) {
6965 rxdp = mac_control->rings[i].
6966 rx_blocks[j].rxds[k].virt_addr;
6967 if(sp->rxd_mode == RXD_MODE_3B)
6968 ba = &mac_control->rings[i].ba[j][k];
6969 if (set_rxd_buffer_pointer(sp, rxdp, ba,
6970 &skb,(u64 *)&temp0_64,
6977 set_rxd_buffer_size(sp, rxdp, size);
6979 /* flip the Ownership bit to Hardware */
6980 rxdp->Control_1 |= RXD_OWN_XENA;
/* Register the driver's interrupt handler(s): tries MSI-X first when
 * configured (one vector per ring/FIFO entry), falling back to legacy
 * INTA if MSI-X enabling or any vector registration fails. */
6988 static int s2io_add_isr(struct s2io_nic * sp)
6991 struct net_device *dev = sp->dev;
6994 if (sp->config.intr_type == MSI_X)
6995 ret = s2io_enable_msi_x(sp);
6997 DBG_PRINT(ERR_DBG, "%s: Defaulting to INTA\n", dev->name);
6998 sp->config.intr_type = INTA;
7001 /* Store the values of the MSIX table in the struct s2io_nic structure */
7002 store_xmsi_data(sp);
7004 /* After proper initialization of H/W, register ISR */
7005 if (sp->config.intr_type == MSI_X) {
7006 int i, msix_rx_cnt = 0;
/* Request one IRQ per in-use MSI-X entry, Rx ring or Tx FIFO. */
7008 for (i = 0; i < sp->num_entries; i++) {
7009 if (sp->s2io_entries[i].in_use == MSIX_FLG) {
7010 if (sp->s2io_entries[i].type ==
7012 sprintf(sp->desc[i], "%s:MSI-X-%d-RX",
7014 err = request_irq(sp->entries[i].vector,
7015 s2io_msix_ring_handle, 0,
7017 sp->s2io_entries[i].arg);
7018 } else if (sp->s2io_entries[i].type ==
7020 sprintf(sp->desc[i], "%s:MSI-X-%d-TX",
7022 err = request_irq(sp->entries[i].vector,
7023 s2io_msix_fifo_handle, 0,
7025 sp->s2io_entries[i].arg);
7028 /* if either data or addr is zero print it. */
7029 if (!(sp->msix_info[i].addr &&
7030 sp->msix_info[i].data)) {
7032 "%s @Addr:0x%llx Data:0x%llx\n",
7034 (unsigned long long)
7035 sp->msix_info[i].addr,
7036 (unsigned long long)
7037 ntohl(sp->msix_info[i].data));
/* On registration failure, tear down any vectors already
 * registered and fall back to INTA. */
7041 remove_msix_isr(sp);
7044 "%s:MSI-X-%d registration "
7045 "failed\n", dev->name, i);
7048 "%s: Defaulting to INTA\n",
7050 sp->config.intr_type = INTA;
7053 sp->s2io_entries[i].in_use =
7054 MSIX_REGISTERED_SUCCESS;
7058 printk(KERN_INFO "MSI-X-RX %d entries enabled\n",
7060 DBG_PRINT(INFO_DBG, "MSI-X-TX entries enabled"
7061 " through alarm vector\n");
/* Legacy path: one shared INTA interrupt line. */
7064 if (sp->config.intr_type == INTA) {
7065 err = request_irq((int) sp->pdev->irq, s2io_isr, IRQF_SHARED,
7068 DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
/*
 * s2io_rem_isr - unregister the interrupt handler(s).
 * @sp: device private structure.
 *
 * Dispatches to the MSI-X or INTA teardown helper depending on the
 * configured interrupt type.
 */
7075 static void s2io_rem_isr(struct s2io_nic * sp)
7077 if (sp->config.intr_type == MSI_X)
7078 remove_msix_isr(sp);
7080 remove_inta_isr(sp);
/*
 * do_s2io_card_down - bring the adapter down.
 * @sp: device private structure.
 * @do_io: when set, register I/O (quiescence check/reset) is performed;
 *         presumably cleared when called from error paths where the
 *         device is already inaccessible - TODO confirm from callers.
 *
 * Stops the alarm timer, waits out any in-flight link task, disables
 * NAPI, resets RxD ownership bits and frees all Tx/Rx buffers.
 */
7083 static void do_s2io_card_down(struct s2io_nic * sp, int do_io)
7086 struct XENA_dev_config __iomem *bar0 = sp->bar0;
7087 register u64 val64 = 0;
7088 struct config_param *config;
7089 config = &sp->config;
/* nothing to do if the card was never brought up */
7091 if (!is_s2io_card_up(sp))
7094 del_timer_sync(&sp->alarm_timer);
7095 /* If s2io_set_link task is executing, wait till it completes. */
7096 while (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(sp->state))) {
7099 clear_bit(__S2IO_STATE_CARD_UP, &sp->state);
/* stop NAPI: per-ring instances under MSI-X, one global instance otherwise */
7102 if (sp->config.napi) {
7104 if (config->intr_type == MSI_X) {
7105 for (; off < sp->config.rx_ring_num; off++)
7106 napi_disable(&sp->mac_control.rings[off].napi);
7109 napi_disable(&sp->napi);
7112 /* disable Tx and Rx traffic on the NIC */
7118 /* Check if the device is Quiescent and then Reset the NIC */
7120 /* As per the HW requirement we need to replenish the
7121 * receive buffer to avoid the ring bump. Since there is
7122 * no intention of processing the Rx frame at this pointwe are
7123 * just settting the ownership bit of rxd in Each Rx
7124 * ring to HW and set the appropriate buffer size
7125 * based on the ring mode
7127 rxd_owner_bit_reset(sp);
7129 val64 = readq(&bar0->adapter_status);
7130 if (verify_xena_quiescence(sp)) {
7131 if(verify_pcc_quiescent(sp, sp->device_enabled_once))
7139 "s2io_close:Device not Quiescent ");
7140 DBG_PRINT(ERR_DBG, "adaper status reads 0x%llx\n",
7141 (unsigned long long) val64);
7148 /* Free all Tx buffers */
7149 free_tx_buffers(sp);
7151 /* Free all Rx buffers */
7152 free_rx_buffers(sp);
/* release the link-task lock taken above */
7154 clear_bit(__S2IO_STATE_LINK_TASK, &(sp->state));
/*
 * s2io_card_down - public wrapper that brings the card down
 * with register I/O enabled (do_io = 1).
 */
7157 static void s2io_card_down(struct s2io_nic * sp)
7159 do_s2io_card_down(sp, 1);
/*
 * s2io_card_up - bring the adapter up.
 * @sp: device private structure.
 *
 * Initializes the hardware, fills the Rx rings, enables NAPI, restores
 * receive mode, configures LRO limits, starts the NIC, registers the
 * ISR, arms the alarm timer and enables interrupts.
 * Returns 0 on success; on failure the already-allocated Rx buffers
 * are freed before returning.
 */
7162 static int s2io_card_up(struct s2io_nic * sp)
7165 struct mac_info *mac_control;
7166 struct config_param *config;
7167 struct net_device *dev = (struct net_device *) sp->dev;
7170 /* Initialize the H/W I/O registers */
7173 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
7181 * Initializing the Rx buffers. For now we are considering only 1
7182 * Rx ring and initializing buffers into 30 Rx blocks
7184 mac_control = &sp->mac_control;
7185 config = &sp->config;
7187 for (i = 0; i < config->rx_ring_num; i++) {
7188 mac_control->rings[i].mtu = dev->mtu;
7189 ret = fill_rx_buffers(&mac_control->rings[i], 1);
7191 DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
7194 free_rx_buffers(sp);
7197 DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
7198 mac_control->rings[i].rx_bufs_left);
7201 /* Initialise napi */
7204 if (config->intr_type == MSI_X) {
7205 for (i = 0; i < sp->config.rx_ring_num; i++)
7206 napi_enable(&sp->mac_control.rings[i].napi);
7208 napi_enable(&sp->napi);
7212 /* Maintain the state prior to the open */
7213 if (sp->promisc_flg)
7214 sp->promisc_flg = 0;
7215 if (sp->m_cast_flg) {
7217 sp->all_multi_pos= 0;
7220 /* Setting its receive mode */
7221 s2io_set_multicast(dev);
7224 /* Initialize max aggregatable pkts per session based on MTU */
7225 sp->lro_max_aggr_per_sess = ((1<<16) - 1) / dev->mtu;
7226 /* Check if we can use(if specified) user provided value */
7227 if (lro_max_pkts < sp->lro_max_aggr_per_sess)
7228 sp->lro_max_aggr_per_sess = lro_max_pkts;
7231 /* Enable Rx Traffic and interrupts on the NIC */
7232 if (start_nic(sp)) {
7233 DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
7235 free_rx_buffers(sp);
7239 /* Add interrupt service routine */
7240 if (s2io_add_isr(sp) != 0) {
7241 if (sp->config.intr_type == MSI_X)
7244 free_rx_buffers(sp);
/* arm the periodic alarm handler (fires every HZ/2) */
7248 S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ/2));
7250 /* Enable select interrupts */
7251 en_dis_err_alarms(sp, ENA_ALL_INTRS, ENABLE_INTRS);
7252 if (sp->config.intr_type != INTA)
7253 en_dis_able_nic_intrs(sp, TX_TRAFFIC_INTR, ENABLE_INTRS);
7255 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
7256 interruptible |= TX_PIC_INTR;
7257 en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
7260 set_bit(__S2IO_STATE_CARD_UP, &sp->state);
7265 * s2io_restart_nic - Resets the NIC.
7266 * @data : long pointer to the device private structure
7268 * This function is scheduled to be run by the s2io_tx_watchdog
7269 * function after 0.5 secs to reset the NIC. The idea is to reduce
7270 * the run time of the watch dog routine which is run holding a
7274 static void s2io_restart_nic(struct work_struct *work)
7276 struct s2io_nic *sp = container_of(work, struct s2io_nic, rst_timer_task);
7277 struct net_device *dev = sp->dev;
/* device was closed before the work item ran - nothing to do */
7281 if (!netif_running(dev))
7285 if (s2io_card_up(sp)) {
7286 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
/* card is back up: resume all stopped Tx queues */
7289 s2io_wake_all_tx_queue(sp);
7290 DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n",
7297 * s2io_tx_watchdog - Watchdog for transmit side.
7298 * @dev : Pointer to net device structure
7300 * This function is triggered if the Tx Queue is stopped
7301 * for a pre-defined amount of time when the Interface is still up.
7302 * If the Interface is jammed in such a situation, the hardware is
7303 * reset (by s2io_close) and restarted again (by s2io_open) to
7304 * overcome any problem that might have been caused in the hardware.
7309 static void s2io_tx_watchdog(struct net_device *dev)
7311 struct s2io_nic *sp = dev->priv;
/* only reset when the link is up; the heavy lifting is deferred to
 * the rst_timer_task workqueue item (s2io_restart_nic) */
7313 if (netif_carrier_ok(dev)) {
7314 sp->mac_control.stats_info->sw_stat.watchdog_timer_cnt++;
7315 schedule_work(&sp->rst_timer_task);
7316 sp->mac_control.stats_info->sw_stat.soft_reset_cnt++;
7321 * rx_osm_handler - To perform some OS related operations on SKB.
7322 * @sp: private member of the device structure,pointer to s2io_nic structure.
7323 * @skb : the socket buffer pointer.
7324 * @len : length of the packet
7325 * @cksum : FCS checksum of the frame.
7326 * @ring_no : the ring from which this RxD was extracted.
7328 * This function is called by the Rx interrupt serivce routine to perform
7329 * some OS related operations on the SKB before passing it to the upper
7330 * layers. It mainly checks if the checksum is OK, if so adds it to the
7331 * SKBs cksum variable, increments the Rx packet count and passes the SKB
7332 * to the upper layer. If the checksum is wrong, it increments the Rx
7333 * packet error count, frees the SKB and returns error.
7335 * SUCCESS on success and -1 on failure.
7337 static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
7339 struct s2io_nic *sp = ring_data->nic;
7340 struct net_device *dev = (struct net_device *) ring_data->dev;
7341 struct sk_buff *skb = (struct sk_buff *)
7342 ((unsigned long) rxdp->Host_Control);
7343 int ring_no = ring_data->ring_no;
7344 u16 l3_csum, l4_csum;
7345 unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
7352 /* Check for parity error */
7354 sp->mac_control.stats_info->sw_stat.parity_err_cnt++;
/* transfer code lives in bits 48+ of Control_1; classify it into
 * the matching per-error software statistic counter */
7356 err_mask = err >> 48;
7359 sp->mac_control.stats_info->sw_stat.
7360 rx_parity_err_cnt++;
7364 sp->mac_control.stats_info->sw_stat.
7369 sp->mac_control.stats_info->sw_stat.
7370 rx_parity_abort_cnt++;
7374 sp->mac_control.stats_info->sw_stat.
7379 sp->mac_control.stats_info->sw_stat.
7384 sp->mac_control.stats_info->sw_stat.
7389 sp->mac_control.stats_info->sw_stat.
7390 rx_buf_size_err_cnt++;
7394 sp->mac_control.stats_info->sw_stat.
7395 rx_rxd_corrupt_cnt++;
7399 sp->mac_control.stats_info->sw_stat.
7404 * Drop the packet if bad transfer code. Exception being
7405 * 0x5, which could be due to unsupported IPv6 extension header.
7406 * In this case, we let stack handle the packet.
7407 * Note that in this case, since checksum will be incorrect,
7408 * stack will validate the same.
7410 if (err_mask != 0x5) {
7411 DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%x\n",
7412 dev->name, err_mask);
7413 sp->stats.rx_crc_errors++;
7414 sp->mac_control.stats_info->sw_stat.mem_freed
7417 ring_data->rx_bufs_left -= 1;
7418 rxdp->Host_Control = 0;
7423 /* Updating statistics */
7424 ring_data->rx_packets++;
7425 rxdp->Host_Control = 0;
7426 if (sp->rxd_mode == RXD_MODE_1) {
/* 1-buffer mode: whole frame is in buffer 0 */
7427 int len = RXD_GET_BUFFER0_SIZE_1(rxdp->Control_2);
7429 ring_data->rx_bytes += len;
7432 } else if (sp->rxd_mode == RXD_MODE_3B) {
/* 2-buffer mode: copy the small buffer-0 header in front of the
 * buffer-2 payload already in the skb */
7433 int get_block = ring_data->rx_curr_get_info.block_index;
7434 int get_off = ring_data->rx_curr_get_info.offset;
7435 int buf0_len = RXD_GET_BUFFER0_SIZE_3(rxdp->Control_2);
7436 int buf2_len = RXD_GET_BUFFER2_SIZE_3(rxdp->Control_2);
7437 unsigned char *buff = skb_push(skb, buf0_len);
7439 struct buffAdd *ba = &ring_data->ba[get_block][get_off];
7440 ring_data->rx_bytes += buf0_len + buf2_len;
7441 memcpy(buff, ba->ba_0, buf0_len);
7442 skb_put(skb, buf2_len);
/* hardware checksum offload: only trusted for non-fragmented TCP/UDP */
7445 if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) && ((!ring_data->lro) ||
7446 (ring_data->lro && (!(rxdp->Control_1 & RXD_FRAME_IP_FRAG)))) &&
7448 l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
7449 l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
7450 if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
7452 * NIC verifies if the Checksum of the received
7453 * frame is Ok or not and accordingly returns
7454 * a flag in the RxD.
7456 skb->ip_summed = CHECKSUM_UNNECESSARY;
7457 if (ring_data->lro) {
/* Large Receive Offload: try to merge this frame into an
 * existing TCP session; the return code selects the action */
7462 ret = s2io_club_tcp_session(ring_data,
7463 skb->data, &tcp, &tcp_len, &lro,
7466 case 3: /* Begin anew */
7469 case 1: /* Aggregate */
7471 lro_append_pkt(sp, lro,
7475 case 4: /* Flush session */
7477 lro_append_pkt(sp, lro,
7479 queue_rx_frame(lro->parent,
7481 clear_lro_session(lro);
7482 sp->mac_control.stats_info->
7483 sw_stat.flush_max_pkts++;
7486 case 2: /* Flush both */
7487 lro->parent->data_len =
7489 sp->mac_control.stats_info->
7490 sw_stat.sending_both++;
7491 queue_rx_frame(lro->parent,
7493 clear_lro_session(lro);
7495 case 0: /* sessions exceeded */
7496 case -1: /* non-TCP or not
7500 * First pkt in session not
7501 * L3/L4 aggregatable
7506 "%s: Samadhana!!\n",
7513 * Packet with erroneous checksum, let the
7514 * upper layers deal with it.
7516 skb->ip_summed = CHECKSUM_NONE;
7519 skb->ip_summed = CHECKSUM_NONE;
7521 sp->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
/* hand the frame (with any VLAN tag) up the stack */
7523 queue_rx_frame(skb, RXD_GET_VLAN_TAG(rxdp->Control_2));
7524 dev->last_rx = jiffies;
7526 sp->mac_control.rings[ring_no].rx_bufs_left -= 1;
7531 * s2io_link - stops/starts the Tx queue.
7532 * @sp : private member of the device structure, which is a pointer to the
7533 * s2io_nic structure.
7534 * @link : inidicates whether link is UP/DOWN.
7536 * This function stops/starts the Tx queue depending on whether the link
7537 * status of the NIC is is down or up. This is called by the Alarm
7538 * interrupt handler whenever a link change interrupt comes up.
7543 static void s2io_link(struct s2io_nic * sp, int link)
7545 struct net_device *dev = (struct net_device *) sp->dev;
/* act only on a state transition; repeated notifications are ignored */
7547 if (link != sp->last_link_state) {
7549 if (link == LINK_DOWN) {
7550 DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
7551 s2io_stop_all_tx_queue(sp);
7552 netif_carrier_off(dev);
/* record how long the link had been up before this drop */
7553 if(sp->mac_control.stats_info->sw_stat.link_up_cnt)
7554 sp->mac_control.stats_info->sw_stat.link_up_time =
7555 jiffies - sp->start_time;
7556 sp->mac_control.stats_info->sw_stat.link_down_cnt++;
7558 DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name);
/* record how long the link had been down before coming up */
7559 if (sp->mac_control.stats_info->sw_stat.link_down_cnt)
7560 sp->mac_control.stats_info->sw_stat.link_down_time =
7561 jiffies - sp->start_time;
7562 sp->mac_control.stats_info->sw_stat.link_up_cnt++;
7563 netif_carrier_on(dev);
7564 s2io_wake_all_tx_queue(sp);
7567 sp->last_link_state = link;
7568 sp->start_time = jiffies;
7572 * s2io_init_pci -Initialization of PCI and PCI-X configuration registers .
7573 * @sp : private member of the device structure, which is a pointer to the
7574 * s2io_nic structure.
7576 * This function initializes a few of the PCI and PCI-X configuration registers
7577 * with recommended values.
7582 static void s2io_init_pci(struct s2io_nic * sp)
7584 u16 pci_cmd = 0, pcix_cmd = 0;
7586 /* Enable Data Parity Error Recovery in PCI-X command register. */
7587 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7589 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
/* read back - presumably a posted-write flush; TODO confirm intent */
7591 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7594 /* Set the PErr Response bit in PCI command register. */
7595 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
7596 pci_write_config_word(sp->pdev, PCI_COMMAND,
7597 (pci_cmd | PCI_COMMAND_PARITY));
7598 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
/*
 * s2io_verify_parm - sanity-check the module-load parameters.
 * @pdev: PCI device (used to reject MSI-X on non-Herc chips).
 * @dev_intr_type: in/out interrupt type; forced to INTA when invalid
 *                 or unsupported by the hardware.
 * @dev_multiq: out flag, presumably set from the 'multiq' module
 *              parameter - TODO confirm (assignment line not visible).
 *
 * Clamps tx_fifo_num, rx_ring_num, tx_steering_type, rx_ring_mode and
 * intr_type to supported values, printing a diagnostic for each fixup.
 */
7601 static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type,
7604 if ((tx_fifo_num > MAX_TX_FIFOS) ||
7605 (tx_fifo_num < 1)) {
7606 DBG_PRINT(ERR_DBG, "s2io: Requested number of tx fifos "
7607 "(%d) not supported\n", tx_fifo_num);
7609 if (tx_fifo_num < 1)
7612 tx_fifo_num = MAX_TX_FIFOS;
7614 DBG_PRINT(ERR_DBG, "s2io: Default to %d ", tx_fifo_num);
7615 DBG_PRINT(ERR_DBG, "tx fifos\n");
7619 *dev_multiq = multiq;
/* Tx steering needs more than one fifo to be meaningful */
7621 if (tx_steering_type && (1 == tx_fifo_num)) {
7622 if (tx_steering_type != TX_DEFAULT_STEERING)
7624 "s2io: Tx steering is not supported with "
7625 "one fifo. Disabling Tx steering.\n");
7626 tx_steering_type = NO_STEERING;
7629 if ((tx_steering_type < NO_STEERING) ||
7630 (tx_steering_type > TX_DEFAULT_STEERING)) {
7631 DBG_PRINT(ERR_DBG, "s2io: Requested transmit steering not "
7633 DBG_PRINT(ERR_DBG, "s2io: Disabling transmit steering\n");
7634 tx_steering_type = NO_STEERING;
7637 if (rx_ring_num > MAX_RX_RINGS) {
7638 DBG_PRINT(ERR_DBG, "s2io: Requested number of rx rings not "
7640 DBG_PRINT(ERR_DBG, "s2io: Default to %d rx rings\n",
7642 rx_ring_num = MAX_RX_RINGS;
7645 if ((*dev_intr_type != INTA) && (*dev_intr_type != MSI_X)) {
7646 DBG_PRINT(ERR_DBG, "s2io: Wrong intr_type requested. "
7647 "Defaulting to INTA\n");
7648 *dev_intr_type = INTA;
/* only Xframe II (Herc) devices support MSI-X */
7651 if ((*dev_intr_type == MSI_X) &&
7652 ((pdev->device != PCI_DEVICE_ID_HERC_WIN) &&
7653 (pdev->device != PCI_DEVICE_ID_HERC_UNI))) {
7654 DBG_PRINT(ERR_DBG, "s2io: Xframe I does not support MSI_X. "
7655 "Defaulting to INTA\n");
7656 *dev_intr_type = INTA;
7659 if ((rx_ring_mode != 1) && (rx_ring_mode != 2)) {
7660 DBG_PRINT(ERR_DBG, "s2io: Requested ring mode not supported\n");
7661 DBG_PRINT(ERR_DBG, "s2io: Defaulting to 1-buffer mode\n");
7668 * rts_ds_steer - Receive traffic steering based on IPv4 or IPv6 TOS
7669 * or Traffic class respectively.
7670 * @nic: device private variable
7671 * Description: The function configures the receive steering to
7672 * desired receive ring.
7673 * Return Value: SUCCESS on success and
7674 * '-1' on failure (endian settings incorrect).
7676 static int rts_ds_steer(struct s2io_nic *nic, u8 ds_codepoint, u8 ring)
7678 struct XENA_dev_config __iomem *bar0 = nic->bar0;
7679 register u64 val64 = 0;
/* DS codepoint is a 6-bit field; reject out-of-range values */
7681 if (ds_codepoint > 63)
7684 val64 = RTS_DS_MEM_DATA(ring);
7685 writeq(val64, &bar0->rts_ds_mem_data);
/* issue the write-entry command for this codepoint's table slot */
7687 val64 = RTS_DS_MEM_CTRL_WE |
7688 RTS_DS_MEM_CTRL_STROBE_NEW_CMD |
7689 RTS_DS_MEM_CTRL_OFFSET(ds_codepoint);
7691 writeq(val64, &bar0->rts_ds_mem_ctrl);
7693 return wait_for_cmd_complete(&bar0->rts_ds_mem_ctrl,
7694 RTS_DS_MEM_CTRL_STROBE_CMD_BEING_EXECUTED,
7699 * s2io_init_nic - Initialization of the adapter .
7700 * @pdev : structure containing the PCI related information of the device.
7701 * @pre: List of PCI devices supported by the driver listed in s2io_tbl.
7703 * The function initializes an adapter identified by the pci_dec structure.
7704 * All OS related initialization including memory and device structure and
7705 * initlaization of the device private variable is done. Also the swapper
7706 * control register is initialized to enable read and write into the I/O
7707 * registers of the device.
7709 * returns 0 on success and negative on failure.
7712 static int __devinit
7713 s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
7715 struct s2io_nic *sp;
7716 struct net_device *dev;
7718 int dma_flag = FALSE;
7719 u32 mac_up, mac_down;
7720 u64 val64 = 0, tmp64 = 0;
7721 struct XENA_dev_config __iomem *bar0 = NULL;
7723 struct mac_info *mac_control;
7724 struct config_param *config;
7726 u8 dev_intr_type = intr_type;
7728 DECLARE_MAC_BUF(mac);
/* validate module parameters before touching the hardware */
7730 ret = s2io_verify_parm(pdev, &dev_intr_type, &dev_multiq);
7734 if ((ret = pci_enable_device(pdev))) {
7736 "s2io_init_nic: pci_enable_device failed\n");
/* prefer 64-bit DMA, fall back to 32-bit; bail out if neither works */
7740 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
7741 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 64bit DMA\n");
7743 if (pci_set_consistent_dma_mask
7744 (pdev, DMA_64BIT_MASK)) {
7746 "Unable to obtain 64bit DMA for \
7747 consistent allocations\n");
7748 pci_disable_device(pdev);
7751 } else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
7752 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 32bit DMA\n");
7754 pci_disable_device(pdev);
7757 if ((ret = pci_request_regions(pdev, s2io_driver_name))) {
7758 DBG_PRINT(ERR_DBG, "%s: Request Regions failed - %x \n", __FUNCTION__, ret);
7759 pci_disable_device(pdev);
/* multiqueue netdev when multiq was requested, plain otherwise */
7763 dev = alloc_etherdev_mq(sizeof(struct s2io_nic), tx_fifo_num);
7765 dev = alloc_etherdev(sizeof(struct s2io_nic));
7767 DBG_PRINT(ERR_DBG, "Device allocation failed\n");
7768 pci_disable_device(pdev);
7769 pci_release_regions(pdev);
7773 pci_set_master(pdev);
7774 pci_set_drvdata(pdev, dev);
7775 SET_NETDEV_DEV(dev, &pdev->dev);
7777 /* Private member variable initialized to s2io NIC structure */
7779 memset(sp, 0, sizeof(struct s2io_nic));
7782 sp->high_dma_flag = dma_flag;
7783 sp->device_enabled_once = FALSE;
7784 if (rx_ring_mode == 1)
7785 sp->rxd_mode = RXD_MODE_1;
7786 if (rx_ring_mode == 2)
7787 sp->rxd_mode = RXD_MODE_3B;
7789 sp->config.intr_type = dev_intr_type;
/* Herc (Xframe II) vs. Xena (Xframe I) detection by PCI device id */
7791 if ((pdev->device == PCI_DEVICE_ID_HERC_WIN) ||
7792 (pdev->device == PCI_DEVICE_ID_HERC_UNI))
7793 sp->device_type = XFRAME_II_DEVICE;
7795 sp->device_type = XFRAME_I_DEVICE;
7797 sp->lro = lro_enable;
7799 /* Initialize some PCI/PCI-X fields of the NIC. */
7803 * Setting the device configuration parameters.
7804 * Most of these parameters can be specified by the user during
7805 * module insertion as they are module loadable parameters. If
7806 * these parameters are not not specified during load time, they
7807 * are initialized with default values.
7809 mac_control = &sp->mac_control;
7810 config = &sp->config;
7812 config->napi = napi;
7813 config->tx_steering_type = tx_steering_type;
7815 /* Tx side parameters. */
7816 if (config->tx_steering_type == TX_PRIORITY_STEERING)
7817 config->tx_fifo_num = MAX_TX_FIFOS;
7819 config->tx_fifo_num = tx_fifo_num;
7821 /* Initialize the fifos used for tx steering */
7822 if (config->tx_fifo_num < 5) {
7823 if (config->tx_fifo_num == 1)
7824 sp->total_tcp_fifos = 1;
7826 sp->total_tcp_fifos = config->tx_fifo_num - 1;
7827 sp->udp_fifo_idx = config->tx_fifo_num - 1;
7828 sp->total_udp_fifos = 1;
7829 sp->other_fifo_idx = sp->total_tcp_fifos - 1;
/* >= 5 fifos: reserve fixed counts for UDP and "other" traffic */
7831 sp->total_tcp_fifos = (tx_fifo_num - FIFO_UDP_MAX_NUM -
7832 FIFO_OTHER_MAX_NUM);
7833 sp->udp_fifo_idx = sp->total_tcp_fifos;
7834 sp->total_udp_fifos = FIFO_UDP_MAX_NUM;
7835 sp->other_fifo_idx = sp->udp_fifo_idx + FIFO_UDP_MAX_NUM;
7838 config->multiq = dev_multiq;
7839 for (i = 0; i < config->tx_fifo_num; i++) {
7840 config->tx_cfg[i].fifo_len = tx_fifo_len[i];
7841 config->tx_cfg[i].fifo_priority = i;
7844 /* mapping the QoS priority to the configured fifos */
7845 for (i = 0; i < MAX_TX_FIFOS; i++)
7846 config->fifo_mapping[i] = fifo_map[config->tx_fifo_num - 1][i];
7848 /* map the hashing selector table to the configured fifos */
7849 for (i = 0; i < config->tx_fifo_num; i++)
7850 sp->fifo_selector[i] = fifo_selector[i];
7853 config->tx_intr_type = TXD_INT_TYPE_UTILZ;
7854 for (i = 0; i < config->tx_fifo_num; i++) {
7855 config->tx_cfg[i].f_no_snoop =
7856 (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
/* small fifos interrupt per-list rather than on utilization */
7857 if (config->tx_cfg[i].fifo_len < 65) {
7858 config->tx_intr_type = TXD_INT_TYPE_PER_LIST;
7862 /* + 2 because one Txd for skb->data and one Txd for UFO */
7863 config->max_txds = MAX_SKB_FRAGS + 2;
7865 /* Rx side parameters. */
7866 config->rx_ring_num = rx_ring_num;
7867 for (i = 0; i < config->rx_ring_num; i++) {
7868 config->rx_cfg[i].num_rxd = rx_ring_sz[i] *
7869 (rxd_count[sp->rxd_mode] + 1);
7870 config->rx_cfg[i].ring_priority = i;
7871 mac_control->rings[i].rx_bufs_left = 0;
7872 mac_control->rings[i].rxd_mode = sp->rxd_mode;
7873 mac_control->rings[i].rxd_count = rxd_count[sp->rxd_mode];
7874 mac_control->rings[i].pdev = sp->pdev;
7875 mac_control->rings[i].dev = sp->dev;
7878 for (i = 0; i < rx_ring_num; i++) {
7879 config->rx_cfg[i].ring_org = RING_ORG_BUFF1;
7880 config->rx_cfg[i].f_no_snoop =
7881 (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
7884 /* Setting Mac Control parameters */
7885 mac_control->rmac_pause_time = rmac_pause_time;
7886 mac_control->mc_pause_threshold_q0q3 = mc_pause_threshold_q0q3;
7887 mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7;
7890 /* initialize the shared memory used by the NIC and the host */
7891 if (init_shared_mem(sp)) {
7892 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n",
7895 goto mem_alloc_failed;
/* map BAR0 (control registers) and BAR2 (Tx FIFO doorbells) */
7898 sp->bar0 = ioremap(pci_resource_start(pdev, 0),
7899 pci_resource_len(pdev, 0));
7901 DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem1\n",
7904 goto bar0_remap_failed;
7907 sp->bar1 = ioremap(pci_resource_start(pdev, 2),
7908 pci_resource_len(pdev, 2));
7910 DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem2\n",
7913 goto bar1_remap_failed;
7916 dev->irq = pdev->irq;
7917 dev->base_addr = (unsigned long) sp->bar0;
7919 /* Initializing the BAR1 address as the start of the FIFO pointer. */
7920 for (j = 0; j < MAX_TX_FIFOS; j++) {
7921 mac_control->tx_FIFO_start[j] = (struct TxFIFO_element __iomem *)
7922 (sp->bar1 + (j * 0x00020000));
7925 /* Driver entry points */
7926 dev->open = &s2io_open;
7927 dev->stop = &s2io_close;
7928 dev->hard_start_xmit = &s2io_xmit;
7929 dev->get_stats = &s2io_get_stats;
7930 dev->set_multicast_list = &s2io_set_multicast;
7931 dev->do_ioctl = &s2io_ioctl;
7932 dev->set_mac_address = &s2io_set_mac_addr;
7933 dev->change_mtu = &s2io_change_mtu;
7934 SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
7935 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
7936 dev->vlan_rx_register = s2io_vlan_rx_register;
7937 dev->vlan_rx_kill_vid = (void *)s2io_vlan_rx_kill_vid;
7940 * will use eth_mac_addr() for dev->set_mac_address
7941 * mac address will be set every time dev->open() is called
7943 #ifdef CONFIG_NET_POLL_CONTROLLER
7944 dev->poll_controller = s2io_netpoll;
7947 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
7948 if (sp->high_dma_flag == TRUE)
7949 dev->features |= NETIF_F_HIGHDMA;
7950 dev->features |= NETIF_F_TSO;
7951 dev->features |= NETIF_F_TSO6;
/* UFO is a Xframe II capability, gated on the 'ufo' module param */
7952 if ((sp->device_type & XFRAME_II_DEVICE) && (ufo)) {
7953 dev->features |= NETIF_F_UFO;
7954 dev->features |= NETIF_F_HW_CSUM;
7957 dev->features |= NETIF_F_MULTI_QUEUE;
7958 dev->tx_timeout = &s2io_tx_watchdog;
7959 dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
7960 INIT_WORK(&sp->rst_timer_task, s2io_restart_nic);
7961 INIT_WORK(&sp->set_link_task, s2io_set_link);
7963 pci_save_state(sp->pdev);
7965 /* Setting swapper control on the NIC, for proper reset operation */
7966 if (s2io_set_swapper(sp)) {
7967 DBG_PRINT(ERR_DBG, "%s:swapper settings are wrong\n",
7970 goto set_swap_failed;
7973 /* Verify if the Herc works on the slot its placed into */
7974 if (sp->device_type & XFRAME_II_DEVICE) {
7975 mode = s2io_verify_pci_mode(sp);
7977 DBG_PRINT(ERR_DBG, "%s: ", __FUNCTION__);
7978 DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
7980 goto set_swap_failed;
/* probe-time MSI-X self test; on failure revert to INTA */
7984 if (sp->config.intr_type == MSI_X) {
7985 sp->num_entries = config->rx_ring_num + 1;
7986 ret = s2io_enable_msi_x(sp);
7989 ret = s2io_test_msi(sp);
7990 /* rollback MSI-X, will re-enable during add_isr() */
7991 remove_msix_isr(sp);
7996 "%s: MSI-X requested but failed to enable\n",
7998 sp->config.intr_type = INTA;
/* NAPI contexts: one per ring for MSI-X, one global for INTA */
8002 if (config->intr_type == MSI_X) {
8003 for (i = 0; i < config->rx_ring_num ; i++)
8004 netif_napi_add(dev, &mac_control->rings[i].napi,
8005 s2io_poll_msix, 64);
8007 netif_napi_add(dev, &sp->napi, s2io_poll_inta, 64);
8010 /* Not needed for Herc */
8011 if (sp->device_type & XFRAME_I_DEVICE) {
8013 * Fix for all "FFs" MAC address problems observed on
8016 fix_mac_address(sp);
8021 * MAC address initialization.
8022 * For now only one mac address will be read and used.
8025 val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
8026 RMAC_ADDR_CMD_MEM_OFFSET(0 + S2IO_MAC_ADDR_START_OFFSET);
8027 writeq(val64, &bar0->rmac_addr_cmd_mem);
8028 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
8029 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING, S2IO_BIT_RESET);
8030 tmp64 = readq(&bar0->rmac_addr_data0_mem);
8031 mac_down = (u32) tmp64;
8032 mac_up = (u32) (tmp64 >> 32);
/* unpack the 48-bit MAC from the two 32-bit register halves */
8034 sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up);
8035 sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8);
8036 sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16);
8037 sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_up >> 24);
8038 sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
8039 sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);
8041 /* Set the factory defined MAC address initially */
8042 dev->addr_len = ETH_ALEN;
8043 memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);
8044 memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
8046 /* initialize number of multicast & unicast MAC entries variables */
8047 if (sp->device_type == XFRAME_I_DEVICE) {
8048 config->max_mc_addr = S2IO_XENA_MAX_MC_ADDRESSES;
8049 config->max_mac_addr = S2IO_XENA_MAX_MAC_ADDRESSES;
8050 config->mc_start_offset = S2IO_XENA_MC_ADDR_START_OFFSET;
8051 } else if (sp->device_type == XFRAME_II_DEVICE) {
8052 config->max_mc_addr = S2IO_HERC_MAX_MC_ADDRESSES;
8053 config->max_mac_addr = S2IO_HERC_MAX_MAC_ADDRESSES;
8054 config->mc_start_offset = S2IO_HERC_MC_ADDR_START_OFFSET;
8057 /* store mac addresses from CAM to s2io_nic structure */
8058 do_s2io_store_unicast_mc(sp);
8060 /* Configure MSIX vector for number of rings configured plus one */
8061 if ((sp->device_type == XFRAME_II_DEVICE) &&
8062 (config->intr_type == MSI_X))
8063 sp->num_entries = config->rx_ring_num + 1;
8065 /* Store the values of the MSIX table in the s2io_nic structure */
8066 store_xmsi_data(sp);
8067 /* reset Nic and bring it to known state */
8071 * Initialize link state flags
8072 * and the card state parameter
8076 /* Initialize spinlocks */
8077 for (i = 0; i < sp->config.tx_fifo_num; i++)
8078 spin_lock_init(&mac_control->fifos[i].tx_lock);
8081 * SXE-002: Configure link and activity LED to init state
8084 subid = sp->pdev->subsystem_device;
8085 if ((subid & 0xFF) >= 0x07) {
8086 val64 = readq(&bar0->gpio_control);
8087 val64 |= 0x0000800000000000ULL;
8088 writeq(val64, &bar0->gpio_control);
8089 val64 = 0x0411040400000000ULL;
8090 writeq(val64, (void __iomem *) bar0 + 0x2700);
8091 val64 = readq(&bar0->gpio_control);
8094 sp->rx_csum = 1; /* Rx chksum verify enabled by default */
8096 if (register_netdev(dev)) {
8097 DBG_PRINT(ERR_DBG, "Device registration failed\n");
8099 goto register_failed;
/* banner: adapter identification and configuration summary */
8102 DBG_PRINT(ERR_DBG, "Copyright(c) 2002-2007 Neterion Inc.\n");
8103 DBG_PRINT(ERR_DBG, "%s: Neterion %s (rev %d)\n",dev->name,
8104 sp->product_name, pdev->revision);
8105 DBG_PRINT(ERR_DBG, "%s: Driver version %s\n", dev->name,
8106 s2io_driver_version);
8107 DBG_PRINT(ERR_DBG, "%s: MAC ADDR: %s\n",
8108 dev->name, print_mac(mac, dev->dev_addr));
8109 DBG_PRINT(ERR_DBG, "SERIAL NUMBER: %s\n", sp->serial_num);
8110 if (sp->device_type & XFRAME_II_DEVICE) {
8111 mode = s2io_print_pci_mode(sp);
8113 DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
8115 unregister_netdev(dev);
8116 goto set_swap_failed;
8119 switch(sp->rxd_mode) {
8121 DBG_PRINT(ERR_DBG, "%s: 1-Buffer receive mode enabled\n",
8125 DBG_PRINT(ERR_DBG, "%s: 2-Buffer receive mode enabled\n",
8130 switch (sp->config.napi) {
8132 DBG_PRINT(ERR_DBG, "%s: NAPI disabled\n", dev->name);
8135 DBG_PRINT(ERR_DBG, "%s: NAPI enabled\n", dev->name);
8139 DBG_PRINT(ERR_DBG, "%s: Using %d Tx fifo(s)\n", dev->name,
8140 sp->config.tx_fifo_num);
8142 DBG_PRINT(ERR_DBG, "%s: Using %d Rx ring(s)\n", dev->name,
8143 sp->config.rx_ring_num);
8145 switch(sp->config.intr_type) {
8147 DBG_PRINT(ERR_DBG, "%s: Interrupt type INTA\n", dev->name);
8150 DBG_PRINT(ERR_DBG, "%s: Interrupt type MSI-X\n", dev->name);
8153 if (sp->config.multiq) {
8154 for (i = 0; i < sp->config.tx_fifo_num; i++)
8155 mac_control->fifos[i].multiq = config->multiq;
8156 DBG_PRINT(ERR_DBG, "%s: Multiqueue support enabled\n",
8159 DBG_PRINT(ERR_DBG, "%s: Multiqueue support disabled\n",
8162 switch (sp->config.tx_steering_type) {
8164 DBG_PRINT(ERR_DBG, "%s: No steering enabled for"
8165 " transmit\n", dev->name);
8167 case TX_PRIORITY_STEERING:
8168 DBG_PRINT(ERR_DBG, "%s: Priority steering enabled for"
8169 " transmit\n", dev->name);
8171 case TX_DEFAULT_STEERING:
8172 DBG_PRINT(ERR_DBG, "%s: Default steering enabled for"
8173 " transmit\n", dev->name);
8177 DBG_PRINT(ERR_DBG, "%s: Large receive offload enabled\n",
8180 DBG_PRINT(ERR_DBG, "%s: UDP Fragmentation Offload(UFO)"
8181 " enabled\n", dev->name);
8182 /* Initialize device name */
8183 sprintf(sp->name, "%s Neterion %s", dev->name, sp->product_name);
8186 * Make Link state as off at this point, when the Link change
8187 * interrupt comes the state will be automatically changed to
8190 netif_carrier_off(dev);
/* error unwind labels: release resources in reverse acquisition order */
8201 free_shared_mem(sp);
8202 pci_disable_device(pdev);
8203 pci_release_regions(pdev);
8204 pci_set_drvdata(pdev, NULL);
8211 * s2io_rem_nic - Free the PCI device
8212 * @pdev: structure containing the PCI related information of the device.
8213 * Description: This function is called by the Pci subsystem to release a
8214 * PCI device and free up all resource held up by the device. This could
8215 * be in response to a Hot plug event or when the driver is to be removed
8219 static void __devexit s2io_rem_nic(struct pci_dev *pdev)
8221 struct net_device *dev =
8222 (struct net_device *) pci_get_drvdata(pdev);
8223 struct s2io_nic *sp;
8226 DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n");
/* wait for pending work items (e.g. rst_timer_task) before teardown */
8230 flush_scheduled_work();
8233 unregister_netdev(dev);
8235 free_shared_mem(sp);
8238 pci_release_regions(pdev);
8239 pci_set_drvdata(pdev, NULL);
8241 pci_disable_device(pdev);
8245 * s2io_starter - Entry point for the driver
8246 * Description: This function is the entry point for the driver. It verifies
8247 * the module loadable parameters and initializes PCI configuration space.
8250 static int __init s2io_starter(void)
8252 return pci_register_driver(&s2io_driver);
8256 * s2io_closer - Cleanup routine for the driver
8257 * Description: This function is the cleanup routine for the driver. It unregisters the driver.
8260 static __exit void s2io_closer(void)
8262 pci_unregister_driver(&s2io_driver);
8263 DBG_PRINT(INIT_DBG, "cleanup done\n");
/* module load/unload hooks */
8266 module_init(s2io_starter);
8267 module_exit(s2io_closer);
/*
 * check_L2_lro_capable - decide whether a frame is eligible for LRO
 * at the L2 level and locate its IP/TCP headers.
 * @buffer: start of the received frame.
 * @ip/@tcp: out pointers set to the parsed IP and TCP headers.
 * @rxdp: receive descriptor (provides frame-protocol and VLAN flags).
 * @sp: device private structure.
 *
 * Only TCP frames with a DIX (Ethernet II) header - optionally VLAN
 * tagged - are mergeable; LLC/SNAP encapsulations are rejected.
 */
8269 static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip,
8270 struct tcphdr **tcp, struct RxD_t *rxdp,
8271 struct s2io_nic *sp)
/* l2_type comes from bits 37-39 of Control_1 */
8274 u8 l2_type = (u8)((rxdp->Control_1 >> 37) & 0x7), ip_len;
8276 if (!(rxdp->Control_1 & RXD_FRAME_PROTO_TCP)) {
8277 DBG_PRINT(INIT_DBG,"%s: Non-TCP frames not supported for LRO\n",
8282 /* Checking for DIX type or DIX type with VLAN */
8284 || (l2_type == 4)) {
8285 ip_off = HEADER_ETHERNET_II_802_3_SIZE;
8287 * If vlan stripping is disabled and the frame is VLAN tagged,
8288 * shift the offset by the VLAN header size bytes.
8290 if ((!vlan_strip_flag) &&
8291 (rxdp->Control_1 & RXD_FRAME_VLAN_TAG))
8292 ip_off += HEADER_VLAN_SIZE;
8294 /* LLC, SNAP etc are considered non-mergeable */
8298 *ip = (struct iphdr *)((u8 *)buffer + ip_off);
/* NOTE(review): ip_len here holds ihl (32-bit words); presumably
 * scaled to bytes on a line not visible in this extract - confirm */
8299 ip_len = (u8)((*ip)->ihl);
8301 *tcp = (struct tcphdr *)((unsigned long)*ip + ip_len);
8306 static int check_for_socket_match(struct lro *lro, struct iphdr *ip,
8309 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
8310 if ((lro->iph->saddr != ip->saddr) || (lro->iph->daddr != ip->daddr) ||
8311 (lro->tcph->source != tcp->source) || (lro->tcph->dest != tcp->dest))
8316 static inline int get_l4_pyld_length(struct iphdr *ip, struct tcphdr *tcp)
8318 return(ntohs(ip->tot_len) - (ip->ihl << 2) - (tcp->doff << 2));
/*
 * initiate_new_session - seed a free LRO object from the first TCP
 * segment of a new aggregatable flow.
 * @lro:          session object to initialise.
 * @l2h:          pointer to the frame's L2 header.
 * @ip:           IP header of the first segment.
 * @tcp:          TCP header of the first segment.
 * @tcp_pyld_len: TCP payload length of this segment.
 * @vlan_tag:     VLAN tag to reapply when the session is flushed.
 * NOTE(review): this extract is missing the lines that record
 * l2h/iph/tcph and mark the session in use - check the full source.
 */
8321 static void initiate_new_session(struct lro *lro, u8 *l2h,
8322 struct iphdr *ip, struct tcphdr *tcp, u32 tcp_pyld_len, u16 vlan_tag)
8324 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
/* Next expected sequence number = this segment's seq + its payload. */
8328 lro->tcp_next_seq = tcp_pyld_len + ntohl(tcp->seq);
8329 lro->tcp_ack = tcp->ack_seq;
8331 lro->total_len = ntohs(ip->tot_len);
8333 lro->vlan_tag = vlan_tag;
8335 * check if we saw TCP timestamp. Other consistency checks have
8336 * already been done.
/* doff == 8 words: 20-byte header + 12 bytes holding the timestamp option. */
8338 if (tcp->doff == 8) {
8340 ptr = (__be32 *)(tcp+1);
/* tsval is kept in host order for comparisons; tsecr stays raw (__be32). */
8342 lro->cur_tsval = ntohl(*(ptr+1));
8343 lro->cur_tsecr = *(ptr+2);
/*
 * update_L3L4_header - rewrite the aggregated super-packet's IP and TCP
 * headers before it is handed to the network stack.
 * @sp:  device private structure (for aggregation statistics).
 * @lro: session being flushed.
 *
 * Refreshes the IP total length and checksum, the TCP ack/window, and
 * (for sessions with timestamps) the tsecr field, then updates the
 * counters used to compute the average packets-per-aggregation.
 * NOTE(review): the declaration of nchk and the write of the recomputed
 * checksum back into ip->check are missing from this extract.
 */
8350 struct iphdr *ip = lro->iph;
8351 struct tcphdr *tcp = lro->tcph;
8353 struct stat_block *statinfo = sp->mac_control.stats_info;
8354 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
8356 /* Update L3 header */
/* Total length now covers all merged segments. */
8357 ip->tot_len = htons(lro->total_len);
/* Recompute the IP header checksum over ihl 32-bit words. */
8359 nchk = ip_fast_csum((u8 *)lro->iph, ip->ihl);
8362 /* Update L4 header */
/* Use the most recent ack and window advertisement seen on the flow. */
8363 tcp->ack_seq = lro->tcp_ack;
8364 tcp->window = lro->window;
8366 /* Update tsecr field if this session has timestamps enabled */
/* Timestamp option sits immediately after the 20-byte TCP header. */
8368 __be32 *ptr = (__be32 *)(tcp + 1);
8369 *(ptr+2) = lro->cur_tsecr;
8372 /* Update counters required for calculation of
8373 * average no. of packets aggregated.
8375 statinfo->sw_stat.sum_avg_pkts_aggregated += lro->sg_num;
8376 statinfo->sw_stat.num_aggregations++;
/*
 * aggregate_new_rx - fold one more in-order TCP segment into an
 * existing LRO session.
 * @lro:     session the segment belongs to.
 * @ip:      IP header of the new segment.
 * @tcp:     TCP header of the new segment.
 * @l4_pyld: TCP payload length of the new segment.
 * NOTE(review): the sg_num increment and the saw_ts guard around the
 * timestamp update are missing from this extract.
 */
8379 static void aggregate_new_rx(struct lro *lro, struct iphdr *ip,
8380 struct tcphdr *tcp, u32 l4_pyld)
8382 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
/* Grow the aggregate by this segment's payload. */
8383 lro->total_len += l4_pyld;
8384 lro->frags_len += l4_pyld;
8385 lro->tcp_next_seq += l4_pyld;
8388 /* Update ack seq no. and window advertisement (from this pkt) in LRO object */
8389 lro->tcp_ack = tcp->ack_seq;
8390 lro->window = tcp->window;
8394 /* Update tsecr and tsval from this packet */
8395 ptr = (__be32 *)(tcp+1);
8396 lro->cur_tsval = ntohl(*(ptr+1));
8397 lro->cur_tsecr = *(ptr + 2);
/*
 * verify_l3_l4_lro_capable - decide whether a TCP segment may be merged.
 * @l_lro:        existing session (NULL when probing for a new session).
 * @ip:           IP header of the segment.
 * @tcp:          TCP header of the segment.
 * @tcp_pyld_len: TCP payload length of the segment.
 *
 * Rejects pure acks/runts, IP options, ECN-CE marked packets, any TCP
 * control flag other than ACK, and any TCP option layout other than a
 * single leading timestamp option.
 * NOTE(review): the early "return -1" statements after each rejection
 * and the final success return are missing from this extract.
 */
8401 static int verify_l3_l4_lro_capable(struct lro *l_lro, struct iphdr *ip,
8402 struct tcphdr *tcp, u32 tcp_pyld_len)
8406 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
8408 if (!tcp_pyld_len) {
8409 /* Runt frame or a pure ack */
8413 if (ip->ihl != 5) /* IP has options */
8416 /* If we see CE codepoint in IP header, packet is not mergeable */
8417 if (INET_ECN_is_ce(ipv4_get_dsfield(ip)))
8420 /* If any TCP control flag other than ACK is set, packet is not mergeable */
8421 if (tcp->urg || tcp->psh || tcp->rst || tcp->syn || tcp->fin ||
8422 tcp->ece || tcp->cwr || !tcp->ack) {
8424 * Currently recognize only the ack control word and
8425 * any other control field being set would result in
8426 * flushing the LRO session
8432 * Allow only one TCP timestamp option. Don't aggregate if
8433 * any other options are detected.
8435 if (tcp->doff != 5 && tcp->doff != 8)
8438 if (tcp->doff == 8) {
/* Skip NOP padding, then require exactly a timestamp option. */
8439 ptr = (u8 *)(tcp + 1);
8440 while (*ptr == TCPOPT_NOP)
8442 if (*ptr != TCPOPT_TIMESTAMP || *(ptr+1) != TCPOLEN_TIMESTAMP)
8445 /* Ensure timestamp value increases monotonically */
8447 if (l_lro->cur_tsval > ntohl(*((__be32 *)(ptr+2))))
8450 /* timestamp echo reply should be non-zero */
8451 if (*((__be32 *)(ptr+6)) == 0)
/*
 * s2io_club_tcp_session - LRO dispatch for a received TCP segment.
 * @ring_data: Rx ring owning the per-ring LRO session array (lro0_n).
 * @buffer:    start of the received frame.
 * @tcp:       out - points at the TCP header inside @buffer.
 * @tcp_len:   out - TCP payload length of this segment.
 * @lro:       out - session the caller should aggregate into or flush.
 * @rxdp:      Rx descriptor for the frame.
 * @sp:        device private structure.
 *
 * Classifies the segment against the existing LRO sessions and returns
 * an action code the caller acts on (per the inline comments below:
 * 1 = aggregate into *lro, 2 = flush both, 3 = begin a new session,
 * 4 = flush *lro because the per-session aggregation limit was reached;
 * presumably 0/other codes mean "send up normally" - confirm against
 * the full source, as several returns and braces are missing here).
 */
8459 s2io_club_tcp_session(struct ring_info *ring_data, u8 *buffer, u8 **tcp,
8460 u32 *tcp_len, struct lro **lro, struct RxD_t *rxdp,
8461 struct s2io_nic *sp)
8464 struct tcphdr *tcph;
/* L2-level screening also locates the IP/TCP headers for us. */
8468 if (!(ret = check_L2_lro_capable(buffer, &ip, (struct tcphdr **)tcp,
8470 DBG_PRINT(INFO_DBG,"IP Saddr: %x Daddr: %x\n",
8471 ip->saddr, ip->daddr);
8475 vlan_tag = RXD_GET_VLAN_TAG(rxdp->Control_2);
8476 tcph = (struct tcphdr *)*tcp;
8477 *tcp_len = get_l4_pyld_length(ip, tcph);
/* First pass: look for an in-use session matching this 4-tuple. */
8478 for (i=0; i<MAX_LRO_SESSIONS; i++) {
8479 struct lro *l_lro = &ring_data->lro0_n[i];
8480 if (l_lro->in_use) {
8481 if (check_for_socket_match(l_lro, ip, tcph))
8483 /* Sock pair matched */
/* Out-of-order segment: cannot extend the aggregate. */
8486 if ((*lro)->tcp_next_seq != ntohl(tcph->seq)) {
8487 DBG_PRINT(INFO_DBG, "%s:Out of order. expected "
8488 "0x%x, actual 0x%x\n", __FUNCTION__,
8489 (*lro)->tcp_next_seq,
8492 sp->mac_control.stats_info->
8493 sw_stat.outof_sequence_pkts++;
8498 if (!verify_l3_l4_lro_capable(l_lro, ip, tcph,*tcp_len))
8499 ret = 1; /* Aggregate */
8501 ret = 2; /* Flush both */
8507 /* Before searching for available LRO objects,
8508 * check if the pkt is L3/L4 aggregatable. If not
8509 * don't create new LRO session. Just send this
8512 if (verify_l3_l4_lro_capable(NULL, ip, tcph, *tcp_len)) {
/* Second pass: claim a free session slot for this new flow. */
8516 for (i=0; i<MAX_LRO_SESSIONS; i++) {
8517 struct lro *l_lro = &ring_data->lro0_n[i];
8518 if (!(l_lro->in_use)) {
8520 ret = 3; /* Begin anew */
8526 if (ret == 0) { /* sessions exceeded */
8527 DBG_PRINT(INFO_DBG,"%s:All LRO sessions already in use\n",
/* Act on the classification decided above. */
8535 initiate_new_session(*lro, buffer, ip, tcph, *tcp_len,
8539 update_L3L4_header(sp, *lro);
8542 aggregate_new_rx(*lro, ip, tcph, *tcp_len);
/* Flush once the session reaches the configured aggregation limit. */
8543 if ((*lro)->sg_num == sp->lro_max_aggr_per_sess) {
8544 update_L3L4_header(sp, *lro);
8545 ret = 4; /* Flush the LRO */
8549 DBG_PRINT(ERR_DBG,"%s:Dont know, can't say!!\n",
8557 static void clear_lro_session(struct lro *lro)
8559 static u16 lro_struct_size = sizeof(struct lro);
8561 memset(lro, 0, lro_struct_size);
/*
 * queue_rx_frame - hand a completed receive skb to the network stack.
 * @skb:      fully assembled receive buffer (skb->dev already set).
 * @vlan_tag: VLAN tag extracted from the Rx descriptor, 0 if none.
 *
 * Chooses between the VLAN-accelerated and plain receive paths, and
 * between the NAPI and interrupt-context entry points.
 * NOTE(review): the else branches (non-NAPI vlan_hwaccel_rx path and the
 * plain netif_rx path) and closing braces are missing from this extract.
 */
8564 static void queue_rx_frame(struct sk_buff *skb, u16 vlan_tag)
8566 struct net_device *dev = skb->dev;
8567 struct s2io_nic *sp = dev->priv;
/* Determine skb->protocol and strip the Ethernet header. */
8569 skb->protocol = eth_type_trans(skb, dev);
/* VLAN path only when a group is registered, a tag exists and HW stripping is on. */
8570 if (sp->vlgrp && vlan_tag
8571 && (vlan_strip_flag)) {
8572 /* Queueing the vlan frame to the upper layer */
8573 if (sp->config.napi)
8574 vlan_hwaccel_receive_skb(skb, sp->vlgrp, vlan_tag);
8576 vlan_hwaccel_rx(skb, sp->vlgrp, vlan_tag);
8578 if (sp->config.napi)
8579 netif_receive_skb(skb);
8585 static void lro_append_pkt(struct s2io_nic *sp, struct lro *lro,
8586 struct sk_buff *skb,
8589 struct sk_buff *first = lro->parent;
8591 first->len += tcp_len;
8592 first->data_len = lro->frags_len;
8593 skb_pull(skb, (skb->len - tcp_len));
8594 if (skb_shinfo(first)->frag_list)
8595 lro->last_frag->next = skb;
8597 skb_shinfo(first)->frag_list = skb;
8598 first->truesize += skb->truesize;
8599 lro->last_frag = skb;
8600 sp->mac_control.stats_info->sw_stat.clubbed_frms_cnt++;
8605 * s2io_io_error_detected - called when PCI error is detected
8606 * @pdev: Pointer to PCI device
8607 * @state: The current pci connection state
8609 * This function is called after a PCI bus error affecting
8610 * this device has been detected.
8612 static pci_ers_result_t s2io_io_error_detected(struct pci_dev *pdev,
8613 pci_channel_state_t state)
8615 struct net_device *netdev = pci_get_drvdata(pdev);
8616 struct s2io_nic *sp = netdev->priv;
8618 netif_device_detach(netdev);
8620 if (netif_running(netdev)) {
8621 /* Bring down the card, while avoiding PCI I/O */
8622 do_s2io_card_down(sp, 0);
8624 pci_disable_device(pdev);
8626 return PCI_ERS_RESULT_NEED_RESET;
8630 * s2io_io_slot_reset - called after the pci bus has been reset.
8631 * @pdev: Pointer to PCI device
8633 * Restart the card from scratch, as if from a cold-boot.
8634 * At this point, the card has experienced a hard reset,
8635 * followed by fixups by BIOS, and has its config space
8636 * set up identically to what it was at cold boot.
8638 static pci_ers_result_t s2io_io_slot_reset(struct pci_dev *pdev)
8640 struct net_device *netdev = pci_get_drvdata(pdev);
8641 struct s2io_nic *sp = netdev->priv;
8643 if (pci_enable_device(pdev)) {
8644 printk(KERN_ERR "s2io: "
8645 "Cannot re-enable PCI device after reset.\n");
8646 return PCI_ERS_RESULT_DISCONNECT;
8649 pci_set_master(pdev);
8652 return PCI_ERS_RESULT_RECOVERED;
8656 * s2io_io_resume - called when traffic can start flowing again.
8657 * @pdev: Pointer to PCI device
8659 * This callback is called when the error recovery driver tells
8660 * us that it's OK to resume normal operation.
8662 static void s2io_io_resume(struct pci_dev *pdev)
8664 struct net_device *netdev = pci_get_drvdata(pdev);
8665 struct s2io_nic *sp = netdev->priv;
8667 if (netif_running(netdev)) {
8668 if (s2io_card_up(sp)) {
8669 printk(KERN_ERR "s2io: "
8670 "Can't bring device back up after reset.\n");
8674 if (s2io_set_mac_addr(netdev, netdev->dev_addr) == FAILURE) {
8676 printk(KERN_ERR "s2io: "
8677 "Can't resetore mac addr after reset.\n");
8682 netif_device_attach(netdev);
8683 netif_wake_queue(netdev);