1 /************************************************************************
2 * s2io.c: A Linux PCI-X Ethernet driver for S2IO 10GbE Server NIC
3 * Copyright(c) 2002-2005 Neterion Inc.
5 * This software may be used and distributed according to the terms of
6 * the GNU General Public License (GPL), incorporated herein by reference.
7 * Drivers based on or derived from this code fall under the GPL and must
8 * retain the authorship, copyright and license notice. This file is not
9 * a complete program and may only be used when the entire operating
10 * system is licensed under the GPL.
11 * See the file COPYING in this distribution for more information.
14 * Jeff Garzik : For pointing out the improper error condition
15 * check in the s2io_xmit routine and also some
16 * issues in the Tx watch dog function. Also for
17 * patiently answering all those innumerable
 * questions regarding the 2.6 porting issues.
19 * Stephen Hemminger : Providing proper 2.6 porting mechanism for some
20 * macros available only in 2.6 Kernel.
21 * Francois Romieu : For pointing out all code part that were
22 * deprecated and also styling related comments.
23 * Grant Grundler : For helping me get rid of some Architecture
25 * Christopher Hellwig : Some more 2.6 specific issues in the driver.
27 * The module loadable parameters that are supported by the driver and a brief
 * explanation of all the variables.
29 * rx_ring_num : This can be used to program the number of receive rings used
31 * rx_ring_len: This defines the number of descriptors each ring can have. This
32 * is also an array of size 8.
 * tx_fifo_num: This defines the number of Tx FIFOs that are used in the driver.
34 * tx_fifo_len: This too is an array of 8. Each element defines the number of
35 * Tx descriptors that can be associated with each corresponding FIFO.
36 ************************************************************************/
38 #include <linux/config.h>
39 #include <linux/module.h>
40 #include <linux/types.h>
41 #include <linux/errno.h>
42 #include <linux/ioport.h>
43 #include <linux/pci.h>
44 #include <linux/dma-mapping.h>
45 #include <linux/kernel.h>
46 #include <linux/netdevice.h>
47 #include <linux/etherdevice.h>
48 #include <linux/skbuff.h>
49 #include <linux/init.h>
50 #include <linux/delay.h>
51 #include <linux/stddef.h>
52 #include <linux/ioctl.h>
53 #include <linux/timex.h>
54 #include <linux/sched.h>
55 #include <linux/ethtool.h>
56 #include <linux/version.h>
57 #include <linux/workqueue.h>
58 #include <linux/if_vlan.h>
60 #include <asm/system.h>
61 #include <asm/uaccess.h>
66 #include "s2io-regs.h"
/* S2io driver name & version strings. */
static char s2io_driver_name[] = "Neterion";
static char s2io_driver_version[] = "Version 2.0.2.0";
/*
 * RXD_IS_UP2DT - true when a Rx descriptor has been completed by the
 * hardware: the NIC has released ownership (RXD_OWN_XENA clear) and the
 * software marker is no longer present in Control_2.
 * NOTE(review): fragmentary in this chunk - the declaration of 'ret',
 * the return statement and the closing brace are not visible here.
 */
static inline int RXD_IS_UP2DT(RxD_t *rxdp)
	ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
	       (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));
/*
 * Cards with following subsystem_id have a link state indication
 * problem, 600B, 600C, 600D, 640B, 640C and 640D.
 * macro below identifies these cards given the subsystem_id.
 * Fully parenthesized so the ternary expansion is safe inside any
 * surrounding expression and so expression arguments expand correctly.
 */
#define CARDS_WITH_FAULTY_LINK_INDICATORS(dev_type, subid)	\
	(((dev_type) == XFRAME_I_DEVICE) ?			\
	 (((((subid) >= 0x600B) && ((subid) <= 0x600D)) ||	\
	   (((subid) >= 0x640B) && ((subid) <= 0x640D))) ? 1 : 0) : 0)
/* True when neither the remote nor the local RMAC fault bit is set in
 * the adapter-status value.  The argument is parenthesized so that
 * expression arguments (e.g. "a | b") expand correctly. */
#define LINK_IS_UP(val64) (!((val64) & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
					ADAPTER_STATUS_RMAC_LOCAL_FAULT)))

/* Atomically claim the tasklet; evaluates non-zero if it was already in
 * use.  Relies on a local 'sp' (nic_t *) being in scope at the use site. */
#define TASKLET_IN_USE test_and_set_bit(0, (&sp->tasklet_status))
/*
 * rx_buffer_level - classify how full Rx ring 'ring' is, given
 * 'rxb_size' descriptors still available.
 * NOTE(review): fragmentary in this chunk - the return statements and
 * the closing braces of both branches are not visible here.
 */
static inline int rx_buffer_level(nic_t * sp, int rxb_size, int ring)
	mac_info_t *mac_control;

	mac_control = &sp->mac_control;
	if ((mac_control->rings[ring].pkt_cnt - rxb_size) > 16) {
		if (rxb_size <= MAX_RXDS_PER_BLOCK) {
/* Ethtool related variables and Macros. */

/* Names of the ethtool self-tests, in the order they are run.
 * NOTE(review): the terminating "};" is elided in this chunk. */
static char s2io_gstrings[][ETH_GSTRING_LEN] = {
	"Register test\t(offline)",
	"Eeprom test\t(offline)",
	"Link test\t(online)",
	"RLDRAM test\t(offline)",
	"BIST Test\t(offline)"
/* Names of the statistics exported through ethtool; hardware MAC
 * counters first, then driver-maintained counters after the
 * "DRIVER STATISTICS" separator.
 * NOTE(review): several entries and the terminating "};" are elided in
 * this chunk. */
static char ethtool_stats_keys[][ETH_GSTRING_LEN] = {
	{"tmac_data_octets"},
	{"tmac_pause_ctrl_frms"},
	{"tmac_any_err_frms"},
	{"tmac_vld_ip_octets"},
	{"rmac_data_octets"},
	{"rmac_fcs_err_frms"},
	{"rmac_vld_mcst_frms"},
	{"rmac_vld_bcst_frms"},
	{"rmac_in_rng_len_err_frms"},
	{"rmac_pause_ctrl_frms"},
	{"rmac_discarded_frms"},
	{"rmac_usized_frms"},
	{"rmac_osized_frms"},
	{"rmac_jabber_frms"},
	{"rmac_err_drp_udp"},
	{"rmac_accepted_ip"},
	{"\n DRIVER STATISTICS"},
	{"single_bit_ecc_errs"},
	{"double_bit_ecc_errs"},
/* Entry counts and total byte lengths of the ethtool string tables
 * above.  Each definition is wrapped in parentheses so the macros
 * expand safely inside larger arithmetic expressions (the original
 * unparenthesized forms misparse under division or multiplication). */
#define S2IO_STAT_LEN		(sizeof(ethtool_stats_keys) / ETH_GSTRING_LEN)
#define S2IO_STAT_STRINGS_LEN	(S2IO_STAT_LEN * ETH_GSTRING_LEN)

#define S2IO_TEST_LEN		(sizeof(s2io_gstrings) / ETH_GSTRING_LEN)
#define S2IO_STRINGS_LEN	(S2IO_TEST_LEN * ETH_GSTRING_LEN)
/* Initialize 'timer' to call 'handle(arg)' and arm it 'exp' jiffies
 * from now.  Wrapped in do { } while (0) so the macro expands as a
 * single statement and is safe inside unbraced if/else bodies; the
 * original four bare statements were not. */
#define S2IO_TIMER_CONF(timer, handle, arg, exp)		\
	do {							\
		init_timer(&(timer));				\
		(timer).function = (handle);			\
		(timer).data = (unsigned long)(arg);		\
		mod_timer(&(timer), (jiffies + (exp)));		\
	} while (0)
/*
 * s2io_vlan_rx_register - attach a VLAN group to the device, under the
 * Tx lock so the transmit path never observes a half-updated state.
 * NOTE(review): fragmentary in this chunk - the 'flags' declaration and
 * the statement storing 'grp' into the nic are not visible here.
 */
static void s2io_vlan_rx_register(struct net_device *dev,
				  struct vlan_group *grp)
	nic_t *nic = dev->priv;

	spin_lock_irqsave(&nic->tx_lock, flags);
	spin_unlock_irqrestore(&nic->tx_lock, flags);
/*
 * s2io_vlan_rx_kill_vid - unregister VLAN id 'vid': clear its slot in
 * the device's VLAN group, serialized against the transmit path by the
 * Tx lock.
 * NOTE(review): the 'flags' declaration is not visible in this chunk.
 */
static void s2io_vlan_rx_kill_vid(struct net_device *dev, unsigned long vid)
	nic_t *nic = dev->priv;

	spin_lock_irqsave(&nic->tx_lock, flags);
	nic->vlgrp->vlan_devices[vid] = NULL;
	spin_unlock_irqrestore(&nic->tx_lock, flags);
/*
 * Constants to be programmed into the Xena's registers, to configure
 * the XAUI.  SWITCH_SIGN marks the point in a configuration array
 * where writes switch from one register to the other (see the XAUI
 * configuration comment in init_nic()).
 */
#define SWITCH_SIGN 0xA5A5A5A5A5A5A5A5ULL
/* DTX configuration write sequence for Herc (Xframe II) XAUI bring-up;
 * written to bar0->dtx_control in init_nic().
 * NOTE(review): interleaved comments, further entries and the END_SIGN
 * terminator are elided in this chunk. */
static u64 herc_act_dtx_cfg[] = {
	0x80000515BA750000ULL, 0x80000515BA7500E0ULL,
	0x80000515BA750004ULL, 0x80000515BA7500E4ULL,
	0x80010515003F0000ULL, 0x80010515003F00E0ULL,
	0x80010515003F0004ULL, 0x80010515003F00E4ULL,
	0x80020515F2100000ULL, 0x80020515F21000E0ULL,
	0x80020515F2100004ULL, 0x80020515F21000E4ULL,
/* MDIO configuration write sequence for Xena (Xframe I); written to
 * bar0->mdio_control in init_nic().
 * NOTE(review): the END_SIGN terminator and "};" are elided here. */
static u64 xena_mdio_cfg[] = {
	/* Presumably asserts PMA PLL reset (counterpart of the "Remove
	 * Reset" writes below) - TODO confirm */
	0xC001010000000000ULL, 0xC0010100000000E0ULL,
	0xC0010100008000E4ULL,
	/* Remove Reset from PMA PLL */
	0xC001010000000000ULL, 0xC0010100000000E0ULL,
	0xC0010100000000E4ULL,
/* DTX configuration write sequence for Xena (Xframe I); written to
 * bar0->dtx_control in init_nic().
 * NOTE(review): SWITCH_SIGN/END_SIGN entries and the closing "};" are
 * elided in this chunk. */
static u64 xena_dtx_cfg[] = {
	0x8000051500000000ULL, 0x80000515000000E0ULL,
	0x80000515D93500E4ULL, 0x8001051500000000ULL,
	0x80010515000000E0ULL, 0x80010515001E00E4ULL,
	0x8002051500000000ULL, 0x80020515000000E0ULL,
	0x80020515F21000E4ULL,
	/* Set PADLOOPBACKN */
	0x8002051500000000ULL, 0x80020515000000E0ULL,
	0x80020515B20000E4ULL, 0x8003051500000000ULL,
	0x80030515000000E0ULL, 0x80030515B20000E4ULL,
	0x8004051500000000ULL, 0x80040515000000E0ULL,
	0x80040515B20000E4ULL, 0x8005051500000000ULL,
	0x80050515000000E0ULL, 0x80050515B20000E4ULL,
	/* Remove PADLOOPBACKN */
	0x8002051500000000ULL, 0x80020515000000E0ULL,
	0x80020515F20000E4ULL, 0x8003051500000000ULL,
	0x80030515000000E0ULL, 0x80030515F20000E4ULL,
	0x8004051500000000ULL, 0x80040515000000E0ULL,
	0x80040515F20000E4ULL, 0x8005051500000000ULL,
	0x80050515000000E0ULL, 0x80050515F20000E4ULL,
/*
 * Constants for Fixing the MacAddress problem seen mostly on
 * (platform note truncated in this chunk).
 * NOTE(review): the final entries and the terminating "};" are elided
 * here.
 */
static u64 fix_mac[] = {
	0x0060000000000000ULL, 0x0060600000000000ULL,
	0x0040600000000000ULL, 0x0000600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0000600000000000ULL,
	0x0040600000000000ULL, 0x0060600000000000ULL,
/* Module Loadable parameters. */

/* Number of Tx FIFOs used by the driver (array sizes below allow up to
 * MAX_TX_FIFOS). */
static unsigned int tx_fifo_num = 1;
/* Per-FIFO Tx descriptor counts; 0 presumably selects a driver default
 * - TODO confirm against the parameter-handling code. */
static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
    {[0 ...(MAX_TX_FIFOS - 1)] = 0 };
/* Number of Rx rings used by the driver. */
static unsigned int rx_ring_num = 1;
/* Per-ring Rx descriptor counts; 0 presumably selects a driver default
 * - TODO confirm. */
static unsigned int rx_ring_sz[MAX_RX_RINGS] =
    {[0 ...(MAX_RX_RINGS - 1)] = 0 };
/* Per-ring frame-length steering; 0 means the user did not ask for
 * frame-length steering and the MTU-based default is kept (see the
 * rts_frm_len programming in init_nic()). */
static unsigned int rts_frm_len[MAX_RX_RINGS] =
    {[0 ...(MAX_RX_RINGS - 1)] = 0 };
static unsigned int use_continuous_tx_intrs = 1;
/* Pause-frame time value programmed for the RMAC. */
static unsigned int rmac_pause_time = 65535;
static unsigned int mc_pause_threshold_q0q3 = 187;
static unsigned int mc_pause_threshold_q4q7 = 187;
static unsigned int shared_splits;
/* Sampling periods used for the Tx/Rx link-utilization calculation
 * (programmed into bar0->mac_link_util in init_nic()). */
static unsigned int tmac_util_period = 5;
static unsigned int rmac_util_period = 5;
static unsigned int bimodal = 0;
#ifndef CONFIG_S2IO_NAPI
/* Only meaningful when NAPI is disabled; limit on packets indicated per
 * pass - presumably, TODO confirm at the use site. */
static unsigned int indicate_max_pkts;
/*
 * This table lists all the devices that this driver supports.
 * NOTE(review): the terminating all-zero entry and closing "};" are
 * elided in this chunk.
 */
static struct pci_device_id s2io_tbl[] __devinitdata = {
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
	 PCI_ANY_ID, PCI_ANY_ID},
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
	 PCI_ANY_ID, PCI_ANY_ID},
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
	 PCI_ANY_ID, PCI_ANY_ID},
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
	 PCI_ANY_ID, PCI_ANY_ID},

MODULE_DEVICE_TABLE(pci, s2io_tbl);
/* PCI driver glue binding the device table to the probe/remove entry
 * points.
 * NOTE(review): the ".name" member and the closing "};" are not visible
 * in this chunk. */
static struct pci_driver s2io_driver = {
      .id_table = s2io_tbl,
      .probe = s2io_init_nic,
      .remove = __devexit_p(s2io_rem_nic),
/*
 * Number of pages needed to hold 'len' items at 'per_each' items per
 * page, i.e. len / per_each rounded up.  A simplifier macro used both
 * by the init and free shared_mem functions.  Arguments are fully
 * parenthesized so that expression arguments (e.g. "a + b") expand
 * correctly; the original "(len+per_each - 1) / per_each" misparsed
 * for a compound 'per_each'.
 */
#define TXD_MEM_PAGE_CNT(len, per_each) \
	(((len) + (per_each) - 1) / (per_each))
/**
 * init_shared_mem - Allocation and Initialization of Memory
 * @nic: Device private variable.
 * Description: The function allocates all the memory areas shared
 * between the NIC and the driver. This includes Tx descriptors,
 * Rx descriptors and the statistics block.
 * NOTE(review): this chunk of the file is fragmentary - many
 * statements, closing braces and error-path returns of the original
 * function are elided; the surviving lines are annotated as-is.
 */
static int init_shared_mem(struct s2io_nic *nic)
	void *tmp_v_addr, *tmp_v_addr_next;
	dma_addr_t tmp_p_addr, tmp_p_addr_next;
	RxD_block_t *pre_rxd_blk = NULL;
	int i, j, blk_cnt, rx_sz, tx_sz;
	int lst_size, lst_per_page;
	struct net_device *dev = nic->dev;
#ifdef CONFIG_2BUFF_MODE
	mac_info_t *mac_control;
	struct config_param *config;

	mac_control = &nic->mac_control;
	config = &nic->config;

	/* Allocation and initialization of TXDLs in FIFOs */
	for (i = 0; i < config->tx_fifo_num; i++) {
		size += config->tx_cfg[i].fifo_len;
	/* Reject configurations asking for more TxDs than supported. */
	if (size > MAX_AVAILABLE_TXDS) {
		DBG_PRINT(ERR_DBG, "%s: Total number of Tx FIFOs ",
		DBG_PRINT(ERR_DBG, "exceeds the maximum value ");
		DBG_PRINT(ERR_DBG, "that can be used\n");

	lst_size = (sizeof(TxD_t) * config->max_txds);
	tx_sz = lst_size * size;
	lst_per_page = PAGE_SIZE / lst_size;

	/* Per-FIFO bookkeeping: one list_info_hold_t per TxDL. */
	for (i = 0; i < config->tx_fifo_num; i++) {
		int fifo_len = config->tx_cfg[i].fifo_len;
		int list_holder_size = fifo_len * sizeof(list_info_hold_t);
		mac_control->fifos[i].list_info = kmalloc(list_holder_size,
		if (!mac_control->fifos[i].list_info) {
			  "Malloc failed for list_info\n");
		memset(mac_control->fifos[i].list_info, 0, list_holder_size);
	for (i = 0; i < config->tx_fifo_num; i++) {
		int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
		/* Reset the put/get cursors for this FIFO. */
		mac_control->fifos[i].tx_curr_put_info.offset = 0;
		mac_control->fifos[i].tx_curr_put_info.fifo_len =
		    config->tx_cfg[i].fifo_len - 1;
		mac_control->fifos[i].tx_curr_get_info.offset = 0;
		mac_control->fifos[i].tx_curr_get_info.fifo_len =
		    config->tx_cfg[i].fifo_len - 1;
		mac_control->fifos[i].fifo_no = i;
		mac_control->fifos[i].nic = nic;
		mac_control->fifos[i].max_txds = MAX_SKB_FRAGS;

		/* Allocate DMA-coherent pages holding the TxDLs and wire
		 * each list_info entry to its virtual/physical address. */
		for (j = 0; j < page_num; j++) {
			tmp_v = pci_alloc_consistent(nic->pdev,
				  "pci_alloc_consistent ");
				DBG_PRINT(ERR_DBG, "failed for TxDL\n");
			while (k < lst_per_page) {
				int l = (j * lst_per_page) + k;
				if (l == config->tx_cfg[i].fifo_len)
				mac_control->fifos[i].list_info[l].list_virt_addr =
				    tmp_v + (k * lst_size);
				mac_control->fifos[i].list_info[l].list_phy_addr =
				    tmp_p + (k * lst_size);

	/* Allocation and initialization of RXDs in Rings */
	for (i = 0; i < config->rx_ring_num; i++) {
		/* The ring size must be a whole number of Rx blocks of
		 * (MAX_RXDS_PER_BLOCK + 1) descriptors each. */
		if (config->rx_cfg[i].num_rxd % (MAX_RXDS_PER_BLOCK + 1)) {
			DBG_PRINT(ERR_DBG, "%s: RxD count of ", dev->name);
			DBG_PRINT(ERR_DBG, "Ring%d is not a multiple of ",
			DBG_PRINT(ERR_DBG, "RxDs per Block");
		size += config->rx_cfg[i].num_rxd;
		mac_control->rings[i].block_count =
		    config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
		/* pkt_cnt excludes one RxD per block, which is consumed
		 * by the block-linking descriptor (see END_OF_BLOCK use
		 * below). */
		mac_control->rings[i].pkt_cnt =
		    config->rx_cfg[i].num_rxd - mac_control->rings[i].block_count;
	size = (size * (sizeof(RxD_t)));

	for (i = 0; i < config->rx_ring_num; i++) {
		/* Reset the put/get cursors for this ring. */
		mac_control->rings[i].rx_curr_get_info.block_index = 0;
		mac_control->rings[i].rx_curr_get_info.offset = 0;
		mac_control->rings[i].rx_curr_get_info.ring_len =
		    config->rx_cfg[i].num_rxd - 1;
		mac_control->rings[i].rx_curr_put_info.block_index = 0;
		mac_control->rings[i].rx_curr_put_info.offset = 0;
		mac_control->rings[i].rx_curr_put_info.ring_len =
		    config->rx_cfg[i].num_rxd - 1;
		mac_control->rings[i].nic = nic;
		mac_control->rings[i].ring_no = i;

		    config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
		/* Allocating all the Rx blocks */
		for (j = 0; j < blk_cnt; j++) {
#ifndef CONFIG_2BUFF_MODE
			size = (MAX_RXDS_PER_BLOCK + 1) * (sizeof(RxD_t));
			size = SIZE_OF_BLOCK;
			tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
			if (tmp_v_addr == NULL) {
				 * In case of failure, free_shared_mem()
				 * is called, which should free any
				 * memory that was alloced till the
				mac_control->rings[i].rx_blocks[j].block_virt_addr =
			memset(tmp_v_addr, 0, size);
			mac_control->rings[i].rx_blocks[j].block_virt_addr =
			mac_control->rings[i].rx_blocks[j].block_dma_addr =
		/* Interlinking all Rx Blocks (circularly, via the modulo
		 * on the block index). */
		for (j = 0; j < blk_cnt; j++) {
			    mac_control->rings[i].rx_blocks[j].block_virt_addr;
			    mac_control->rings[i].rx_blocks[(j + 1) %
					   blk_cnt].block_virt_addr;
			    mac_control->rings[i].rx_blocks[j].block_dma_addr;
			    mac_control->rings[i].rx_blocks[(j + 1) %
					   blk_cnt].block_dma_addr;

			pre_rxd_blk = (RxD_block_t *) tmp_v_addr;
			pre_rxd_blk->reserved_1 = END_OF_BLOCK;	/* last RxD
#ifndef CONFIG_2BUFF_MODE
			pre_rxd_blk->reserved_2_pNext_RxD_block =
			    (unsigned long) tmp_v_addr_next;
			pre_rxd_blk->pNext_RxD_Blk_physical =
			    (u64) tmp_p_addr_next;

#ifdef CONFIG_2BUFF_MODE
	 * Allocation of Storages for buffer addresses in 2BUFF mode
	 * and the buffers as well.
	for (i = 0; i < config->rx_ring_num; i++) {
		    config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
		mac_control->rings[i].ba = kmalloc((sizeof(buffAdd_t *) * blk_cnt),
		if (!mac_control->rings[i].ba)
		for (j = 0; j < blk_cnt; j++) {
			mac_control->rings[i].ba[j] = kmalloc((sizeof(buffAdd_t) *
						(MAX_RXDS_PER_BLOCK + 1)),
			if (!mac_control->rings[i].ba[j])
			while (k != MAX_RXDS_PER_BLOCK) {
				ba = &mac_control->rings[i].ba[j][k];
				/* Over-allocate by ALIGN_SIZE, then mask
				 * the pointer so ba_0/ba_1 are aligned. */
				ba->ba_0_org = (void *) kmalloc
				    (BUF0_LEN + ALIGN_SIZE, GFP_KERNEL);
				tmp = (u64) ba->ba_0_org;
				tmp &= ~((u64) ALIGN_SIZE);
				ba->ba_0 = (void *) tmp;

				ba->ba_1_org = (void *) kmalloc
				    (BUF1_LEN + ALIGN_SIZE, GFP_KERNEL);
				tmp = (u64) ba->ba_1_org;
				tmp &= ~((u64) ALIGN_SIZE);
				ba->ba_1 = (void *) tmp;

	/* Allocation and initialization of Statistics block */
	size = sizeof(StatInfo_t);
	mac_control->stats_mem = pci_alloc_consistent
	    (nic->pdev, size, &mac_control->stats_mem_phy);

	if (!mac_control->stats_mem) {
		 * In case of failure, free_shared_mem() is called, which
		 * should free any memory that was alloced till the
	mac_control->stats_mem_sz = size;

	tmp_v_addr = mac_control->stats_mem;
	mac_control->stats_info = (StatInfo_t *) tmp_v_addr;
	memset(tmp_v_addr, 0, size);
	DBG_PRINT(INIT_DBG, "%s:Ring Mem PHY: 0x%llx\n", dev->name,
		  (unsigned long long) tmp_p_addr);
/**
 * free_shared_mem - Free the allocated Memory
 * @nic: Device private variable.
 * Description: This function is to free all memory locations allocated by
 * the init_shared_mem() function and return it to the kernel.
 * NOTE(review): fragmentary in this chunk - several statements, member
 * accesses and closing braces are elided; surviving lines annotated
 * as-is.
 */
static void free_shared_mem(struct s2io_nic *nic)
	int i, j, blk_cnt, size;
	dma_addr_t tmp_p_addr;
	mac_info_t *mac_control;
	struct config_param *config;
	int lst_size, lst_per_page;

	mac_control = &nic->mac_control;
	config = &nic->config;

	lst_size = (sizeof(TxD_t) * config->max_txds);
	lst_per_page = PAGE_SIZE / lst_size;

	/* Free the DMA-coherent TxDL pages, then the per-FIFO list_info
	 * bookkeeping array. */
	for (i = 0; i < config->tx_fifo_num; i++) {
		int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
		for (j = 0; j < page_num; j++) {
			int mem_blks = (j * lst_per_page);
			if (!mac_control->fifos[i].list_info[mem_blks].
			pci_free_consistent(nic->pdev, PAGE_SIZE,
					    mac_control->fifos[i].
					    mac_control->fifos[i].
		kfree(mac_control->fifos[i].list_info);

#ifndef CONFIG_2BUFF_MODE
	size = (MAX_RXDS_PER_BLOCK + 1) * (sizeof(RxD_t));
	size = SIZE_OF_BLOCK;
	/* Free every Rx block of every configured ring; blocks that were
	 * never allocated (NULL virtual address) are skipped. */
	for (i = 0; i < config->rx_ring_num; i++) {
		blk_cnt = mac_control->rings[i].block_count;
		for (j = 0; j < blk_cnt; j++) {
			tmp_v_addr = mac_control->rings[i].rx_blocks[j].
			tmp_p_addr = mac_control->rings[i].rx_blocks[j].
			if (tmp_v_addr == NULL)
			pci_free_consistent(nic->pdev, size,
					    tmp_v_addr, tmp_p_addr);

#ifdef CONFIG_2BUFF_MODE
	/* Freeing buffer storage addresses in 2BUFF mode. */
	for (i = 0; i < config->rx_ring_num; i++) {
		    config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
		for (j = 0; j < blk_cnt; j++) {
			if (!mac_control->rings[i].ba[j])
			while (k != MAX_RXDS_PER_BLOCK) {
				buffAdd_t *ba = &mac_control->rings[i].ba[j][k];
			kfree(mac_control->rings[i].ba[j]);
		/* NOTE(review): the NULL guard is redundant - kfree(NULL)
		 * is a no-op. */
		if (mac_control->rings[i].ba)
			kfree(mac_control->rings[i].ba);

	if (mac_control->stats_mem) {
		pci_free_consistent(nic->pdev,
				    mac_control->stats_mem_sz,
				    mac_control->stats_mem,
				    mac_control->stats_mem_phy);
/**
 * s2io_verify_pci_mode - read the adapter's PCI mode register and
 * reject devices reporting an unknown mode.
 * Return: -1 when PCI_MODE_UNKNOWN_MODE is set.
 * NOTE(review): fragmentary in this chunk - the declaration of 'mode',
 * the success-path return and the closing brace are not visible here.
 */
static int s2io_verify_pci_mode(nic_t *nic)
	XENA_dev_config_t *bar0 = (XENA_dev_config_t *) nic->bar0;
	register u64 val64 = 0;

	val64 = readq(&bar0->pci_mode);
	mode = (u8)GET_PCI_MODE(val64);

	if ( val64 & PCI_MODE_UNKNOWN_MODE)
		return -1;	/* Unknown PCI mode */
/**
 * s2io_print_pci_mode - decode the PCI/PCI-X mode register, log the bus
 * width and speed, and record the decoded speed in nic->config.bus_speed.
 * Return: -1 for an unknown PCI mode or an unsupported bus speed.
 * NOTE(review): fragmentary in this chunk - the 'mode' declaration, the
 * "switch (mode)" opener, the per-case "break" statements and the
 * success-path return are not visible here.
 */
static int s2io_print_pci_mode(nic_t *nic)
	XENA_dev_config_t *bar0 = (XENA_dev_config_t *) nic->bar0;
	register u64 val64 = 0;
	struct config_param *config = &nic->config;

	val64 = readq(&bar0->pci_mode);
	mode = (u8)GET_PCI_MODE(val64);

	if ( val64 & PCI_MODE_UNKNOWN_MODE)
		return -1;	/* Unknown PCI mode */

	if (val64 & PCI_MODE_32_BITS) {
		DBG_PRINT(ERR_DBG, "%s: Device is on 32 bit ", nic->dev->name);
		DBG_PRINT(ERR_DBG, "%s: Device is on 64 bit ", nic->dev->name);

	case PCI_MODE_PCI_33:
		DBG_PRINT(ERR_DBG, "33MHz PCI bus\n");
		config->bus_speed = 33;
	case PCI_MODE_PCI_66:
		DBG_PRINT(ERR_DBG, "66MHz PCI bus\n");
		/* NOTE(review): 133 for a 66MHz PCI bus looks inconsistent
		 * with the message above - confirm against hardware docs. */
		config->bus_speed = 133;
	case PCI_MODE_PCIX_M1_66:
		DBG_PRINT(ERR_DBG, "66MHz PCIX(M1) bus\n");
		config->bus_speed = 133; /* Herc doubles the clock rate */
	case PCI_MODE_PCIX_M1_100:
		DBG_PRINT(ERR_DBG, "100MHz PCIX(M1) bus\n");
		config->bus_speed = 200;
	case PCI_MODE_PCIX_M1_133:
		DBG_PRINT(ERR_DBG, "133MHz PCIX(M1) bus\n");
		config->bus_speed = 266;
	case PCI_MODE_PCIX_M2_66:
		DBG_PRINT(ERR_DBG, "133MHz PCIX(M2) bus\n");
		config->bus_speed = 133;
	case PCI_MODE_PCIX_M2_100:
		DBG_PRINT(ERR_DBG, "200MHz PCIX(M2) bus\n");
		config->bus_speed = 200;
	case PCI_MODE_PCIX_M2_133:
		DBG_PRINT(ERR_DBG, "266MHz PCIX(M2) bus\n");
		config->bus_speed = 266;
		return -1;	/* Unsupported bus speed */
759 * init_nic - Initialization of hardware
 * @nic: device private variable
761 * Description: The function sequentially configures every block
762 * of the H/W from their reset values.
763 * Return Value: SUCCESS on success and
764 * '-1' on failure (endian settings incorrect).
767 static int init_nic(struct s2io_nic *nic)
769 XENA_dev_config_t __iomem *bar0 = nic->bar0;
770 struct net_device *dev = nic->dev;
771 register u64 val64 = 0;
775 mac_info_t *mac_control;
776 struct config_param *config;
777 int mdio_cnt = 0, dtx_cnt = 0;
778 unsigned long long mem_share;
781 mac_control = &nic->mac_control;
782 config = &nic->config;
784 /* to set the swapper controle on the card */
785 if(s2io_set_swapper(nic)) {
786 DBG_PRINT(ERR_DBG,"ERROR: Setting Swapper failed\n");
791 * Herc requires EOI to be removed from reset before XGXS, so..
793 if (nic->device_type & XFRAME_II_DEVICE) {
794 val64 = 0xA500000000ULL;
795 writeq(val64, &bar0->sw_reset);
797 val64 = readq(&bar0->sw_reset);
800 /* Remove XGXS from reset state */
802 writeq(val64, &bar0->sw_reset);
804 val64 = readq(&bar0->sw_reset);
806 /* Enable Receiving broadcasts */
807 add = &bar0->mac_cfg;
808 val64 = readq(&bar0->mac_cfg);
809 val64 |= MAC_RMAC_BCAST_ENABLE;
810 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
811 writel((u32) val64, add);
812 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
813 writel((u32) (val64 >> 32), (add + 4));
815 /* Read registers in all blocks */
816 val64 = readq(&bar0->mac_int_mask);
817 val64 = readq(&bar0->mc_int_mask);
818 val64 = readq(&bar0->xgxs_int_mask);
822 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
825 * Configuring the XAUI Interface of Xena.
826 * ***************************************
827 * To Configure the Xena's XAUI, one has to write a series
828 * of 64 bit values into two registers in a particular
829 * sequence. Hence a macro 'SWITCH_SIGN' has been defined
830 * which will be defined in the array of configuration values
831 * (xena_dtx_cfg & xena_mdio_cfg) at appropriate places
832 * to switch writing from one regsiter to another. We continue
833 * writing these values until we encounter the 'END_SIGN' macro.
834 * For example, After making a series of 21 writes into
835 * dtx_control register the 'SWITCH_SIGN' appears and hence we
836 * start writing into mdio_control until we encounter END_SIGN.
838 if (nic->device_type & XFRAME_II_DEVICE) {
839 while (herc_act_dtx_cfg[dtx_cnt] != END_SIGN) {
840 SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
841 &bar0->dtx_control, UF);
843 msleep(1); /* Necessary!! */
849 while (xena_dtx_cfg[dtx_cnt] != END_SIGN) {
850 if (xena_dtx_cfg[dtx_cnt] == SWITCH_SIGN) {
854 SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
855 &bar0->dtx_control, UF);
856 val64 = readq(&bar0->dtx_control);
860 while (xena_mdio_cfg[mdio_cnt] != END_SIGN) {
861 if (xena_mdio_cfg[mdio_cnt] == SWITCH_SIGN) {
865 SPECIAL_REG_WRITE(xena_mdio_cfg[mdio_cnt],
866 &bar0->mdio_control, UF);
867 val64 = readq(&bar0->mdio_control);
870 if ((xena_dtx_cfg[dtx_cnt] == END_SIGN) &&
871 (xena_mdio_cfg[mdio_cnt] == END_SIGN)) {
879 /* Tx DMA Initialization */
881 writeq(val64, &bar0->tx_fifo_partition_0);
882 writeq(val64, &bar0->tx_fifo_partition_1);
883 writeq(val64, &bar0->tx_fifo_partition_2);
884 writeq(val64, &bar0->tx_fifo_partition_3);
887 for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
889 vBIT(config->tx_cfg[i].fifo_len - 1, ((i * 32) + 19),
890 13) | vBIT(config->tx_cfg[i].fifo_priority,
893 if (i == (config->tx_fifo_num - 1)) {
900 writeq(val64, &bar0->tx_fifo_partition_0);
904 writeq(val64, &bar0->tx_fifo_partition_1);
908 writeq(val64, &bar0->tx_fifo_partition_2);
912 writeq(val64, &bar0->tx_fifo_partition_3);
917 /* Enable Tx FIFO partition 0. */
918 val64 = readq(&bar0->tx_fifo_partition_0);
919 val64 |= BIT(0); /* To enable the FIFO partition. */
920 writeq(val64, &bar0->tx_fifo_partition_0);
923 * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
924 * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
926 if ((nic->device_type == XFRAME_I_DEVICE) &&
927 (get_xena_rev_id(nic->pdev) < 4))
928 writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);
930 val64 = readq(&bar0->tx_fifo_partition_0);
931 DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
932 &bar0->tx_fifo_partition_0, (unsigned long long) val64);
935 * Initialization of Tx_PA_CONFIG register to ignore packet
936 * integrity checking.
938 val64 = readq(&bar0->tx_pa_cfg);
939 val64 |= TX_PA_CFG_IGNORE_FRM_ERR | TX_PA_CFG_IGNORE_SNAP_OUI |
940 TX_PA_CFG_IGNORE_LLC_CTRL | TX_PA_CFG_IGNORE_L2_ERR;
941 writeq(val64, &bar0->tx_pa_cfg);
943 /* Rx DMA intialization. */
945 for (i = 0; i < config->rx_ring_num; i++) {
947 vBIT(config->rx_cfg[i].ring_priority, (5 + (i * 8)),
950 writeq(val64, &bar0->rx_queue_priority);
953 * Allocating equal share of memory to all the
957 if (nic->device_type & XFRAME_II_DEVICE)
962 for (i = 0; i < config->rx_ring_num; i++) {
965 mem_share = (mem_size / config->rx_ring_num +
966 mem_size % config->rx_ring_num);
967 val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
970 mem_share = (mem_size / config->rx_ring_num);
971 val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
974 mem_share = (mem_size / config->rx_ring_num);
975 val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
978 mem_share = (mem_size / config->rx_ring_num);
979 val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
982 mem_share = (mem_size / config->rx_ring_num);
983 val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
986 mem_share = (mem_size / config->rx_ring_num);
987 val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
990 mem_share = (mem_size / config->rx_ring_num);
991 val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
994 mem_share = (mem_size / config->rx_ring_num);
995 val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
999 writeq(val64, &bar0->rx_queue_cfg);
1002 * Filling Tx round robin registers
1003 * as per the number of FIFOs
1005 switch (config->tx_fifo_num) {
1007 val64 = 0x0000000000000000ULL;
1008 writeq(val64, &bar0->tx_w_round_robin_0);
1009 writeq(val64, &bar0->tx_w_round_robin_1);
1010 writeq(val64, &bar0->tx_w_round_robin_2);
1011 writeq(val64, &bar0->tx_w_round_robin_3);
1012 writeq(val64, &bar0->tx_w_round_robin_4);
1015 val64 = 0x0000010000010000ULL;
1016 writeq(val64, &bar0->tx_w_round_robin_0);
1017 val64 = 0x0100000100000100ULL;
1018 writeq(val64, &bar0->tx_w_round_robin_1);
1019 val64 = 0x0001000001000001ULL;
1020 writeq(val64, &bar0->tx_w_round_robin_2);
1021 val64 = 0x0000010000010000ULL;
1022 writeq(val64, &bar0->tx_w_round_robin_3);
1023 val64 = 0x0100000000000000ULL;
1024 writeq(val64, &bar0->tx_w_round_robin_4);
1027 val64 = 0x0001000102000001ULL;
1028 writeq(val64, &bar0->tx_w_round_robin_0);
1029 val64 = 0x0001020000010001ULL;
1030 writeq(val64, &bar0->tx_w_round_robin_1);
1031 val64 = 0x0200000100010200ULL;
1032 writeq(val64, &bar0->tx_w_round_robin_2);
1033 val64 = 0x0001000102000001ULL;
1034 writeq(val64, &bar0->tx_w_round_robin_3);
1035 val64 = 0x0001020000000000ULL;
1036 writeq(val64, &bar0->tx_w_round_robin_4);
1039 val64 = 0x0001020300010200ULL;
1040 writeq(val64, &bar0->tx_w_round_robin_0);
1041 val64 = 0x0100000102030001ULL;
1042 writeq(val64, &bar0->tx_w_round_robin_1);
1043 val64 = 0x0200010000010203ULL;
1044 writeq(val64, &bar0->tx_w_round_robin_2);
1045 val64 = 0x0001020001000001ULL;
1046 writeq(val64, &bar0->tx_w_round_robin_3);
1047 val64 = 0x0203000100000000ULL;
1048 writeq(val64, &bar0->tx_w_round_robin_4);
1051 val64 = 0x0001000203000102ULL;
1052 writeq(val64, &bar0->tx_w_round_robin_0);
1053 val64 = 0x0001020001030004ULL;
1054 writeq(val64, &bar0->tx_w_round_robin_1);
1055 val64 = 0x0001000203000102ULL;
1056 writeq(val64, &bar0->tx_w_round_robin_2);
1057 val64 = 0x0001020001030004ULL;
1058 writeq(val64, &bar0->tx_w_round_robin_3);
1059 val64 = 0x0001000000000000ULL;
1060 writeq(val64, &bar0->tx_w_round_robin_4);
1063 val64 = 0x0001020304000102ULL;
1064 writeq(val64, &bar0->tx_w_round_robin_0);
1065 val64 = 0x0304050001020001ULL;
1066 writeq(val64, &bar0->tx_w_round_robin_1);
1067 val64 = 0x0203000100000102ULL;
1068 writeq(val64, &bar0->tx_w_round_robin_2);
1069 val64 = 0x0304000102030405ULL;
1070 writeq(val64, &bar0->tx_w_round_robin_3);
1071 val64 = 0x0001000200000000ULL;
1072 writeq(val64, &bar0->tx_w_round_robin_4);
1075 val64 = 0x0001020001020300ULL;
1076 writeq(val64, &bar0->tx_w_round_robin_0);
1077 val64 = 0x0102030400010203ULL;
1078 writeq(val64, &bar0->tx_w_round_robin_1);
1079 val64 = 0x0405060001020001ULL;
1080 writeq(val64, &bar0->tx_w_round_robin_2);
1081 val64 = 0x0304050000010200ULL;
1082 writeq(val64, &bar0->tx_w_round_robin_3);
1083 val64 = 0x0102030000000000ULL;
1084 writeq(val64, &bar0->tx_w_round_robin_4);
1087 val64 = 0x0001020300040105ULL;
1088 writeq(val64, &bar0->tx_w_round_robin_0);
1089 val64 = 0x0200030106000204ULL;
1090 writeq(val64, &bar0->tx_w_round_robin_1);
1091 val64 = 0x0103000502010007ULL;
1092 writeq(val64, &bar0->tx_w_round_robin_2);
1093 val64 = 0x0304010002060500ULL;
1094 writeq(val64, &bar0->tx_w_round_robin_3);
1095 val64 = 0x0103020400000000ULL;
1096 writeq(val64, &bar0->tx_w_round_robin_4);
1100 /* Filling the Rx round robin registers as per the
1101 * number of Rings and steering based on QoS.
1103 switch (config->rx_ring_num) {
1105 val64 = 0x8080808080808080ULL;
1106 writeq(val64, &bar0->rts_qos_steering);
1109 val64 = 0x0000010000010000ULL;
1110 writeq(val64, &bar0->rx_w_round_robin_0);
1111 val64 = 0x0100000100000100ULL;
1112 writeq(val64, &bar0->rx_w_round_robin_1);
1113 val64 = 0x0001000001000001ULL;
1114 writeq(val64, &bar0->rx_w_round_robin_2);
1115 val64 = 0x0000010000010000ULL;
1116 writeq(val64, &bar0->rx_w_round_robin_3);
1117 val64 = 0x0100000000000000ULL;
1118 writeq(val64, &bar0->rx_w_round_robin_4);
1120 val64 = 0x8080808040404040ULL;
1121 writeq(val64, &bar0->rts_qos_steering);
1124 val64 = 0x0001000102000001ULL;
1125 writeq(val64, &bar0->rx_w_round_robin_0);
1126 val64 = 0x0001020000010001ULL;
1127 writeq(val64, &bar0->rx_w_round_robin_1);
1128 val64 = 0x0200000100010200ULL;
1129 writeq(val64, &bar0->rx_w_round_robin_2);
1130 val64 = 0x0001000102000001ULL;
1131 writeq(val64, &bar0->rx_w_round_robin_3);
1132 val64 = 0x0001020000000000ULL;
1133 writeq(val64, &bar0->rx_w_round_robin_4);
1135 val64 = 0x8080804040402020ULL;
1136 writeq(val64, &bar0->rts_qos_steering);
1139 val64 = 0x0001020300010200ULL;
1140 writeq(val64, &bar0->rx_w_round_robin_0);
1141 val64 = 0x0100000102030001ULL;
1142 writeq(val64, &bar0->rx_w_round_robin_1);
1143 val64 = 0x0200010000010203ULL;
1144 writeq(val64, &bar0->rx_w_round_robin_2);
1145 val64 = 0x0001020001000001ULL;
1146 writeq(val64, &bar0->rx_w_round_robin_3);
1147 val64 = 0x0203000100000000ULL;
1148 writeq(val64, &bar0->rx_w_round_robin_4);
1150 val64 = 0x8080404020201010ULL;
1151 writeq(val64, &bar0->rts_qos_steering);
1154 val64 = 0x0001000203000102ULL;
1155 writeq(val64, &bar0->rx_w_round_robin_0);
1156 val64 = 0x0001020001030004ULL;
1157 writeq(val64, &bar0->rx_w_round_robin_1);
1158 val64 = 0x0001000203000102ULL;
1159 writeq(val64, &bar0->rx_w_round_robin_2);
1160 val64 = 0x0001020001030004ULL;
1161 writeq(val64, &bar0->rx_w_round_robin_3);
1162 val64 = 0x0001000000000000ULL;
1163 writeq(val64, &bar0->rx_w_round_robin_4);
1165 val64 = 0x8080404020201008ULL;
1166 writeq(val64, &bar0->rts_qos_steering);
1169 val64 = 0x0001020304000102ULL;
1170 writeq(val64, &bar0->rx_w_round_robin_0);
1171 val64 = 0x0304050001020001ULL;
1172 writeq(val64, &bar0->rx_w_round_robin_1);
1173 val64 = 0x0203000100000102ULL;
1174 writeq(val64, &bar0->rx_w_round_robin_2);
1175 val64 = 0x0304000102030405ULL;
1176 writeq(val64, &bar0->rx_w_round_robin_3);
1177 val64 = 0x0001000200000000ULL;
1178 writeq(val64, &bar0->rx_w_round_robin_4);
1180 val64 = 0x8080404020100804ULL;
1181 writeq(val64, &bar0->rts_qos_steering);
1184 val64 = 0x0001020001020300ULL;
1185 writeq(val64, &bar0->rx_w_round_robin_0);
1186 val64 = 0x0102030400010203ULL;
1187 writeq(val64, &bar0->rx_w_round_robin_1);
1188 val64 = 0x0405060001020001ULL;
1189 writeq(val64, &bar0->rx_w_round_robin_2);
1190 val64 = 0x0304050000010200ULL;
1191 writeq(val64, &bar0->rx_w_round_robin_3);
1192 val64 = 0x0102030000000000ULL;
1193 writeq(val64, &bar0->rx_w_round_robin_4);
1195 val64 = 0x8080402010080402ULL;
1196 writeq(val64, &bar0->rts_qos_steering);
1199 val64 = 0x0001020300040105ULL;
1200 writeq(val64, &bar0->rx_w_round_robin_0);
1201 val64 = 0x0200030106000204ULL;
1202 writeq(val64, &bar0->rx_w_round_robin_1);
1203 val64 = 0x0103000502010007ULL;
1204 writeq(val64, &bar0->rx_w_round_robin_2);
1205 val64 = 0x0304010002060500ULL;
1206 writeq(val64, &bar0->rx_w_round_robin_3);
1207 val64 = 0x0103020400000000ULL;
1208 writeq(val64, &bar0->rx_w_round_robin_4);
1210 val64 = 0x8040201008040201ULL;
1211 writeq(val64, &bar0->rts_qos_steering);
1217 for (i = 0; i < 8; i++)
1218 writeq(val64, &bar0->rts_frm_len_n[i]);
1220 /* Set the default rts frame length for the rings configured */
1221 val64 = MAC_RTS_FRM_LEN_SET(dev->mtu+22);
1222 for (i = 0 ; i < config->rx_ring_num ; i++)
1223 writeq(val64, &bar0->rts_frm_len_n[i]);
1225 /* Set the frame length for the configured rings
1226 * desired by the user
1228 for (i = 0; i < config->rx_ring_num; i++) {
1229 /* If rts_frm_len[i] == 0 then it is assumed that user not
1230 * specified frame length steering.
1231 * If the user provides the frame length then program
1232 * the rts_frm_len register for those values or else
1233 * leave it as it is.
1235 if (rts_frm_len[i] != 0) {
1236 writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
1237 &bar0->rts_frm_len_n[i]);
1241 /* Program statistics memory */
1242 writeq(mac_control->stats_mem_phy, &bar0->stat_addr);
1244 if (nic->device_type == XFRAME_II_DEVICE) {
1245 val64 = STAT_BC(0x320);
1246 writeq(val64, &bar0->stat_byte_cnt);
1250 * Initializing the sampling rate for the device to calculate the
1251 * bandwidth utilization.
1253 val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
1254 MAC_RX_LINK_UTIL_VAL(rmac_util_period);
1255 writeq(val64, &bar0->mac_link_util);
1259 * Initializing the Transmit and Receive Traffic Interrupt
1263 * TTI Initialization. Default Tx timer gets us about
1264 * 250 interrupts per sec. Continuous interrupts are enabled
1267 if (nic->device_type == XFRAME_II_DEVICE) {
1268 int count = (nic->config.bus_speed * 125)/2;
1269 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(count);
1272 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078);
1274 val64 |= TTI_DATA1_MEM_TX_URNG_A(0xA) |
1275 TTI_DATA1_MEM_TX_URNG_B(0x10) |
1276 TTI_DATA1_MEM_TX_URNG_C(0x30) | TTI_DATA1_MEM_TX_TIMER_AC_EN;
1277 if (use_continuous_tx_intrs)
1278 val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
1279 writeq(val64, &bar0->tti_data1_mem);
1281 val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
1282 TTI_DATA2_MEM_TX_UFC_B(0x20) |
1283 TTI_DATA2_MEM_TX_UFC_C(0x70) | TTI_DATA2_MEM_TX_UFC_D(0x80);
1284 writeq(val64, &bar0->tti_data2_mem);
1286 val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
1287 writeq(val64, &bar0->tti_command_mem);
1290 * Once the operation completes, the Strobe bit of the command
1291 * register will be reset. We poll for this particular condition
1292 * We wait for a maximum of 500ms for the operation to complete,
1293 * if it's not complete by then we return error.
1297 val64 = readq(&bar0->tti_command_mem);
1298 if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
1302 DBG_PRINT(ERR_DBG, "%s: TTI init Failed\n",
1310 if (nic->config.bimodal) {
1312 for (k = 0; k < config->rx_ring_num; k++) {
1313 val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
1314 val64 |= TTI_CMD_MEM_OFFSET(0x38+k);
1315 writeq(val64, &bar0->tti_command_mem);
1318 * Once the operation completes, the Strobe bit of the command
1319 * register will be reset. We poll for this particular condition
1320 * We wait for a maximum of 500ms for the operation to complete,
1321 * if it's not complete by then we return error.
1325 val64 = readq(&bar0->tti_command_mem);
1326 if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
1331 "%s: TTI init Failed\n",
1341 /* RTI Initialization */
1342 if (nic->device_type == XFRAME_II_DEVICE) {
1344 * Programmed to generate Apprx 500 Intrs per
1347 int count = (nic->config.bus_speed * 125)/4;
1348 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(count);
1350 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF);
1352 val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) |
1353 RTI_DATA1_MEM_RX_URNG_B(0x10) |
1354 RTI_DATA1_MEM_RX_URNG_C(0x30) | RTI_DATA1_MEM_RX_TIMER_AC_EN;
1356 writeq(val64, &bar0->rti_data1_mem);
1358 val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
1359 RTI_DATA2_MEM_RX_UFC_B(0x2) |
1360 RTI_DATA2_MEM_RX_UFC_C(0x40) | RTI_DATA2_MEM_RX_UFC_D(0x80);
1361 writeq(val64, &bar0->rti_data2_mem);
1363 for (i = 0; i < config->rx_ring_num; i++) {
1364 val64 = RTI_CMD_MEM_WE | RTI_CMD_MEM_STROBE_NEW_CMD
1365 | RTI_CMD_MEM_OFFSET(i);
1366 writeq(val64, &bar0->rti_command_mem);
1369 * Once the operation completes, the Strobe bit of the
1370 * command register will be reset. We poll for this
1371 * particular condition. We wait for a maximum of 500ms
1372 * for the operation to complete, if it's not complete
1373 * by then we return error.
1377 val64 = readq(&bar0->rti_command_mem);
1378 if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD)) {
1382 DBG_PRINT(ERR_DBG, "%s: RTI init Failed\n",
1393 * Initializing proper values as Pause threshold into all
1394 * the 8 Queues on Rx side.
1396 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
1397 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);
1399 /* Disable RMAC PAD STRIPPING */
1400 add = (void *) &bar0->mac_cfg;
1401 val64 = readq(&bar0->mac_cfg);
1402 val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
1403 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1404 writel((u32) (val64), add);
1405 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1406 writel((u32) (val64 >> 32), (add + 4));
1407 val64 = readq(&bar0->mac_cfg);
1410 * Set the time value to be inserted in the pause frame
1411 * generated by xena.
1413 val64 = readq(&bar0->rmac_pause_cfg);
1414 val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
1415 val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
1416 writeq(val64, &bar0->rmac_pause_cfg);
1419 * Set the Threshold Limit for Generating the pause frame
1420 * If the amount of data in any Queue exceeds ratio of
1421 * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256
1422 * pause frame is generated
1425 for (i = 0; i < 4; i++) {
1427 (((u64) 0xFF00 | nic->mac_control.
1428 mc_pause_threshold_q0q3)
1431 writeq(val64, &bar0->mc_pause_thresh_q0q3);
1434 for (i = 0; i < 4; i++) {
1436 (((u64) 0xFF00 | nic->mac_control.
1437 mc_pause_threshold_q4q7)
1440 writeq(val64, &bar0->mc_pause_thresh_q4q7);
1443 * TxDMA will stop Read request if the number of read split has
1444 * exceeded the limit pointed by shared_splits
1446 val64 = readq(&bar0->pic_control);
1447 val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
1448 writeq(val64, &bar0->pic_control);
1451 * Programming the Herc to split every write transaction
1452 * that does not start on an ADB to reduce disconnects.
1454 if (nic->device_type == XFRAME_II_DEVICE) {
1455 val64 = WREQ_SPLIT_MASK_SET_MASK(255);
1456 writeq(val64, &bar0->wreq_split_mask);
1459 /* Setting Link stability period to 64 ms */
1460 if (nic->device_type == XFRAME_II_DEVICE) {
1461 val64 = MISC_LINK_STABILITY_PRD(3);
1462 writeq(val64, &bar0->misc_control);
/* Link-fault detection strategies: either a GPIO link up/down interrupt
 * or a periodic RMAC-error timer poll. */
1467 #define LINK_UP_DOWN_INTERRUPT 1
1468 #define MAC_RMAC_ERR_TIMER 2
/* With MSI/MSI-X the GPIO link interrupt is not used, so force the
 * timer-based method via a macro; otherwise select per device type below. */
1470 #if defined(CONFIG_MSI_MODE) || defined(CONFIG_MSIX_MODE)
1471 #define s2io_link_fault_indication(x) MAC_RMAC_ERR_TIMER
/* Returns the link-fault indication method for this adapter:
 * Xframe II supports the link up/down interrupt; Xframe I falls back
 * to the RMAC error timer. */
1473 int s2io_link_fault_indication(nic_t *nic)
1475 if (nic->device_type == XFRAME_II_DEVICE)
1476 return LINK_UP_DOWN_INTERRUPT;
1478 return MAC_RMAC_ERR_TIMER;
1483 * en_dis_able_nic_intrs - Enable or Disable the interrupts
1484 * @nic: device private variable,
1485 * @mask: A mask indicating which Intr block must be modified and,
1486 * @flag: A flag indicating whether to enable or disable the Intrs.
1487 * Description: This function will either disable or enable the interrupts
1488 * depending on the flag argument. The mask argument can be used to
1489 * enable/disable any Intr block.
1490 * Return Value: NONE.
/* Enable or disable selected interrupt blocks (PIC, Tx/Rx DMA, MAC, XGXS,
 * MC, Tx/Rx traffic) per the @mask bits and @flag (ENABLE_INTRS or
 * DISABLE_INTRS).  Enabling clears the block's bit(s) in general_int_mask
 * and then programs the block-level mask registers; disabling masks the
 * block both at block level and in general_int_mask.
 *
 * NOTE(review): in several disable paths below, temp64 is read from
 * general_int_mask but appears unused before writeq(val64, ...) — the
 * upstream driver has a "val64 |= temp64;" between them; those lines look
 * lost in this extraction.  Verify against the original file. */
1493 static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
1495 XENA_dev_config_t __iomem *bar0 = nic->bar0;
1496 register u64 val64 = 0, temp64 = 0;
1498 /* Top level interrupt classification */
1499 /* PIC Interrupts */
1500 if ((mask & (TX_PIC_INTR | RX_PIC_INTR))) {
1501 /* Enable PIC Intrs in the general intr mask register */
1502 val64 = TXPIC_INT_M | PIC_RX_INT_M;
1503 if (flag == ENABLE_INTRS) {
1504 temp64 = readq(&bar0->general_int_mask);
1505 temp64 &= ~((u64) val64);
1506 writeq(temp64, &bar0->general_int_mask);
1508 * If Hercules adapter enable GPIO otherwise
1509 * disabled all PCIX, Flash, MDIO, IIC and GPIO
1510 * interrupts for now.
/* Only unmask the GPIO link up/down interrupt when this adapter
 * reports link faults via GPIO (Xframe II, non-MSI). */
1513 if (s2io_link_fault_indication(nic) ==
1514 LINK_UP_DOWN_INTERRUPT ) {
1515 temp64 = readq(&bar0->pic_int_mask);
1516 temp64 &= ~((u64) PIC_INT_GPIO);
1517 writeq(temp64, &bar0->pic_int_mask);
1518 temp64 = readq(&bar0->gpio_int_mask);
1519 temp64 &= ~((u64) GPIO_INT_MASK_LINK_UP);
1520 writeq(temp64, &bar0->gpio_int_mask);
1522 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
1525 * No MSI Support is available presently, so TTI and
1526 * RTI interrupts are also disabled.
1528 } else if (flag == DISABLE_INTRS) {
1530 * Disable PIC Intrs in the general
1531 * intr mask register
1533 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
1534 temp64 = readq(&bar0->general_int_mask);
1536 writeq(val64, &bar0->general_int_mask);
1540 /* DMA Interrupts */
1541 /* Enabling/Disabling Tx DMA interrupts */
1542 if (mask & TX_DMA_INTR) {
1543 /* Enable TxDMA Intrs in the general intr mask register */
1544 val64 = TXDMA_INT_M;
1545 if (flag == ENABLE_INTRS) {
1546 temp64 = readq(&bar0->general_int_mask);
1547 temp64 &= ~((u64) val64);
1548 writeq(temp64, &bar0->general_int_mask);
1550 * Keep all interrupts other than PFC interrupt
1551 * and PCC interrupt disabled in DMA level.
1553 val64 = DISABLE_ALL_INTRS & ~(TXDMA_PFC_INT_M |
1555 writeq(val64, &bar0->txdma_int_mask);
1557 * Enable only the MISC error 1 interrupt in PFC block
1559 val64 = DISABLE_ALL_INTRS & (~PFC_MISC_ERR_1);
1560 writeq(val64, &bar0->pfc_err_mask);
1562 * Enable only the FB_ECC error interrupt in PCC block
1564 val64 = DISABLE_ALL_INTRS & (~PCC_FB_ECC_ERR);
1565 writeq(val64, &bar0->pcc_err_mask);
1566 } else if (flag == DISABLE_INTRS) {
1568 * Disable TxDMA Intrs in the general intr mask
1571 writeq(DISABLE_ALL_INTRS, &bar0->txdma_int_mask);
1572 writeq(DISABLE_ALL_INTRS, &bar0->pfc_err_mask);
1573 temp64 = readq(&bar0->general_int_mask);
1575 writeq(val64, &bar0->general_int_mask);
1579 /* Enabling/Disabling Rx DMA interrupts */
1580 if (mask & RX_DMA_INTR) {
1581 /* Enable RxDMA Intrs in the general intr mask register */
1582 val64 = RXDMA_INT_M;
1583 if (flag == ENABLE_INTRS) {
1584 temp64 = readq(&bar0->general_int_mask);
1585 temp64 &= ~((u64) val64);
1586 writeq(temp64, &bar0->general_int_mask);
1588 * All RxDMA block interrupts are disabled for now
1591 writeq(DISABLE_ALL_INTRS, &bar0->rxdma_int_mask);
1592 } else if (flag == DISABLE_INTRS) {
1594 * Disable RxDMA Intrs in the general intr mask
1597 writeq(DISABLE_ALL_INTRS, &bar0->rxdma_int_mask);
1598 temp64 = readq(&bar0->general_int_mask);
1600 writeq(val64, &bar0->general_int_mask);
1604 /* MAC Interrupts */
1605 /* Enabling/Disabling MAC interrupts */
1606 if (mask & (TX_MAC_INTR | RX_MAC_INTR)) {
1607 val64 = TXMAC_INT_M | RXMAC_INT_M;
1608 if (flag == ENABLE_INTRS) {
1609 temp64 = readq(&bar0->general_int_mask);
1610 temp64 &= ~((u64) val64);
1611 writeq(temp64, &bar0->general_int_mask);
1613 * All MAC block error interrupts are disabled for now
1616 } else if (flag == DISABLE_INTRS) {
1618 * Disable MAC Intrs in the general intr mask register
1620 writeq(DISABLE_ALL_INTRS, &bar0->mac_int_mask);
1621 writeq(DISABLE_ALL_INTRS,
1622 &bar0->mac_rmac_err_mask);
1624 temp64 = readq(&bar0->general_int_mask);
1626 writeq(val64, &bar0->general_int_mask);
1630 /* XGXS Interrupts */
1631 if (mask & (TX_XGXS_INTR | RX_XGXS_INTR)) {
1632 val64 = TXXGXS_INT_M | RXXGXS_INT_M;
1633 if (flag == ENABLE_INTRS) {
1634 temp64 = readq(&bar0->general_int_mask);
1635 temp64 &= ~((u64) val64);
1636 writeq(temp64, &bar0->general_int_mask);
1638 * All XGXS block error interrupts are disabled for now
1641 writeq(DISABLE_ALL_INTRS, &bar0->xgxs_int_mask);
1642 } else if (flag == DISABLE_INTRS) {
/* NOTE(review): comment says "MC" but this branch masks XGXS. */
1644 * Disable MC Intrs in the general intr mask register
1646 writeq(DISABLE_ALL_INTRS, &bar0->xgxs_int_mask);
1647 temp64 = readq(&bar0->general_int_mask);
1649 writeq(val64, &bar0->general_int_mask);
1653 /* Memory Controller(MC) interrupts */
1654 if (mask & MC_INTR) {
1656 if (flag == ENABLE_INTRS) {
1657 temp64 = readq(&bar0->general_int_mask);
1658 temp64 &= ~((u64) val64);
1659 writeq(temp64, &bar0->general_int_mask);
1661 * Enable all MC Intrs.
/* Writing 0 to a mask register unmasks every interrupt bit. */
1663 writeq(0x0, &bar0->mc_int_mask);
1664 writeq(0x0, &bar0->mc_err_mask);
1665 } else if (flag == DISABLE_INTRS) {
1667 * Disable MC Intrs in the general intr mask register
1669 writeq(DISABLE_ALL_INTRS, &bar0->mc_int_mask);
1670 temp64 = readq(&bar0->general_int_mask);
1672 writeq(val64, &bar0->general_int_mask);
1677 /* Tx traffic interrupts */
1678 if (mask & TX_TRAFFIC_INTR) {
1679 val64 = TXTRAFFIC_INT_M;
1680 if (flag == ENABLE_INTRS) {
1681 temp64 = readq(&bar0->general_int_mask);
1682 temp64 &= ~((u64) val64);
1683 writeq(temp64, &bar0->general_int_mask);
1685 * Enable all the Tx side interrupts
1686 * writing 0 Enables all 64 TX interrupt levels
1688 writeq(0x0, &bar0->tx_traffic_mask);
1689 } else if (flag == DISABLE_INTRS) {
1691 * Disable Tx Traffic Intrs in the general intr mask
1694 writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
1695 temp64 = readq(&bar0->general_int_mask);
1697 writeq(val64, &bar0->general_int_mask);
1701 /* Rx traffic interrupts */
1702 if (mask & RX_TRAFFIC_INTR) {
1703 val64 = RXTRAFFIC_INT_M;
1704 if (flag == ENABLE_INTRS) {
1705 temp64 = readq(&bar0->general_int_mask);
1706 temp64 &= ~((u64) val64);
1707 writeq(temp64, &bar0->general_int_mask);
1708 /* writing 0 Enables all 8 RX interrupt levels */
1709 writeq(0x0, &bar0->rx_traffic_mask);
1710 } else if (flag == DISABLE_INTRS) {
1712 * Disable Rx Traffic Intrs in the general intr mask
1715 writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
1716 temp64 = readq(&bar0->general_int_mask);
1718 writeq(val64, &bar0->general_int_mask);
/* Check the PCC (per-channel controller) and PRC quiescence bits of the
 * adapter-status value @val64.
 * @flag:   FALSE before the adapter-enable bit was ever written (expect
 *          the blocks NOT idle/quiescent), TRUE afterwards (expect idle).
 * @rev_id: Xframe I silicon revision — revisions < 4 only expose the
 *          "four PCCs idle" bit instead of the full PCC_IDLE field.
 * @herc:   non-zero for Xframe II (Hercules), which always uses the full
 *          PCC_IDLE field.
 * Return value lines (and closing braces) are elided in this extraction;
 * callers treat the result as a boolean pass/fail. */
1723 static int check_prc_pcc_state(u64 val64, int flag, int rev_id, int herc)
1727 if (flag == FALSE) {
1728 if ((!herc && (rev_id >= 4)) || herc) {
1729 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) &&
1730 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1731 ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
1735 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) &&
1736 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1737 ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
/* Adapter was enabled at least once: PCCs must now report idle. */
1742 if ((!herc && (rev_id >= 4)) || herc) {
1743 if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
1744 ADAPTER_STATUS_RMAC_PCC_IDLE) &&
1745 (!(val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ||
1746 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1747 ADAPTER_STATUS_RC_PRC_QUIESCENT))) {
1751 if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
1752 ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) &&
1753 (!(val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ||
1754 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1755 ADAPTER_STATUS_RC_PRC_QUIESCENT))) {
1764 * verify_xena_quiescence - Checks whether the H/W is ready
1765 * @val64 : Value read from adapter status register.
1766 * @flag : indicates if the adapter enable bit was ever written once
1768 * Description: Returns whether the H/W is ready to go or not. Depending
1769 * on whether adapter enable bit was written or not the comparison
1770 * differs and the calling function passes the input argument flag to
1772 * Return: 1 If xena is quiescence
1773 * 0 If Xena is not quiescence
/* Verify the adapter is quiescent and ready: all READY/QUIESCENT/PLL-lock
 * bits in @val64 must be set (tested via the complement tmp64), after which
 * the PRC/PCC state is checked separately by check_prc_pcc_state().
 * @flag indicates whether the adapter-enable bit was ever written.
 * Returns the combined readiness result (1 quiescent / 0 not); the
 * declaration of tmp64's use in the big condition spans elided lines. */
1776 static int verify_xena_quiescence(nic_t *sp, u64 val64, int flag)
1779 u64 tmp64 = ~((u64) val64);
1780 int rev_id = get_xena_rev_id(sp->pdev);
1782 herc = (sp->device_type == XFRAME_II_DEVICE);
/* If none of the required READY bits is missing (their complements are
 * all clear in tmp64), go on to the PRC/PCC quiescence check. */
1785 (ADAPTER_STATUS_TDMA_READY | ADAPTER_STATUS_RDMA_READY |
1786 ADAPTER_STATUS_PFC_READY | ADAPTER_STATUS_TMAC_BUF_EMPTY |
1787 ADAPTER_STATUS_PIC_QUIESCENT | ADAPTER_STATUS_MC_DRAM_READY |
1788 ADAPTER_STATUS_MC_QUEUES_READY | ADAPTER_STATUS_M_PLL_LOCK |
1789 ADAPTER_STATUS_P_PLL_LOCK))) {
1790 ret = check_prc_pcc_state(val64, flag, rev_id, herc);
1797 * fix_mac_address - Fix for Mac addr problem on Alpha platforms
1798 * @sp: Pointer to device specifc structure
1800 * New procedure to clear mac address reading problems on Alpha platforms
/* Work around the MAC-address read problem seen on Alpha platforms by
 * replaying the fix_mac[] value sequence (terminated by END_SIGN) into
 * the gpio_control register, reading it back after each write to force
 * the posted write to complete. */
1804 void fix_mac_address(nic_t * sp)
1806 XENA_dev_config_t __iomem *bar0 = sp->bar0;
1810 while (fix_mac[i] != END_SIGN) {
1811 writeq(fix_mac[i++], &bar0->gpio_control);
/* Read back to flush the preceding write. */
1813 val64 = readq(&bar0->gpio_control);
1818 * start_nic - Turns the device on
1819 * @nic : device private variable.
1821 * This function actually turns the device on. Before this function is
1822 * called,all Registers are configured from their reset states
1823 * and shared memory is allocated but the NIC is still quiescent. On
1824 * calling this function, the device interrupts are cleared and the NIC is
1825 * literally switched on by writing into the adapter control register.
1827 * SUCCESS on success and -1 on failure.
/* Turn the device on: program the PRC ring base addresses, enable
 * MC-RLDRAM, clear stale link-change interrupts, verify quiescence,
 * enable the selected interrupt blocks, switch the laser on, and kick the
 * link-state work item.  Registers must already be configured and shared
 * memory allocated.  Returns SUCCESS or -1 (error-return lines elided). */
1830 static int start_nic(struct s2io_nic *nic)
1832 XENA_dev_config_t __iomem *bar0 = nic->bar0;
1833 struct net_device *dev = nic->dev;
1834 register u64 val64 = 0;
1837 mac_info_t *mac_control;
1838 struct config_param *config;
1840 mac_control = &nic->mac_control;
1841 config = &nic->config;
1843 /* PRC Initialization and configuration */
1844 for (i = 0; i < config->rx_ring_num; i++) {
/* Each ring's first Rx block DMA address seeds its PRC. */
1845 writeq((u64) mac_control->rings[i].rx_blocks[0].block_dma_addr,
1846 &bar0->prc_rxd0_n[i]);
1848 val64 = readq(&bar0->prc_ctrl_n[i]);
1849 if (nic->config.bimodal)
1850 val64 |= PRC_CTRL_BIMODAL_INTERRUPT;
1851 #ifndef CONFIG_2BUFF_MODE
1852 val64 |= PRC_CTRL_RC_ENABLED;
1854 val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
1856 writeq(val64, &bar0->prc_ctrl_n[i]);
1859 #ifdef CONFIG_2BUFF_MODE
1860 /* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
1861 val64 = readq(&bar0->rx_pa_cfg);
1862 val64 |= RX_PA_CFG_IGNORE_L2_ERR;
1863 writeq(val64, &bar0->rx_pa_cfg);
1867 * Enabling MC-RLDRAM. After enabling the device, we timeout
1868 * for around 100ms, which is approximately the time required
1869 * for the device to be ready for operation.
1871 val64 = readq(&bar0->mc_rldram_mrs);
1872 val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
1873 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
1874 val64 = readq(&bar0->mc_rldram_mrs);
1876 msleep(100); /* Delay by around 100 ms. */
/* NOTE(review): comment says "Enabling" but the code CLEARS
 * ADAPTER_ECC_EN — confirm the bit's polarity against the
 * Xframe register spec before touching this. */
1878 /* Enabling ECC Protection. */
1879 val64 = readq(&bar0->adapter_control);
1880 val64 &= ~ADAPTER_ECC_EN;
1881 writeq(val64, &bar0->adapter_control);
1884 * Clearing any possible Link state change interrupts that
1885 * could have popped up just before Enabling the card.
/* Write-1-to-clear: write the value just read back to the register. */
1887 val64 = readq(&bar0->mac_rmac_err_reg);
1889 writeq(val64, &bar0->mac_rmac_err_reg);
1892 * Verify if the device is ready to be enabled, if so enable
1895 val64 = readq(&bar0->adapter_status);
1896 if (!verify_xena_quiescence(nic, val64, nic->device_enabled_once)) {
1897 DBG_PRINT(ERR_DBG, "%s: device is not ready, ", dev->name);
1898 DBG_PRINT(ERR_DBG, "Adapter status reads: 0x%llx\n",
1899 (unsigned long long) val64);
1903 /* Enable select interrupts */
1904 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR | MC_INTR;
1905 interruptible |= TX_PIC_INTR | RX_PIC_INTR;
1906 interruptible |= TX_MAC_INTR | RX_MAC_INTR;
1908 en_dis_able_nic_intrs(nic, interruptible, ENABLE_INTRS);
1911 * With some switches, link might be already up at this point.
1912 * Because of this weird behavior, when we enable laser,
1913 * we may not get link. We need to handle this. We cannot
1914 * figure out which switch is misbehaving. So we are forced to
1915 * make a global change.
1918 /* Enabling Laser. */
1919 val64 = readq(&bar0->adapter_control);
1920 val64 |= ADAPTER_EOI_TX_ON;
1921 writeq(val64, &bar0->adapter_control);
1923 /* SXE-002: Initialize link and activity LED */
1924 subid = nic->pdev->subsystem_device;
1925 if (((subid & 0xFF) >= 0x07) &&
1926 (nic->device_type == XFRAME_I_DEVICE)) {
1927 val64 = readq(&bar0->gpio_control);
1928 val64 |= 0x0000800000000000ULL;
1929 writeq(val64, &bar0->gpio_control);
/* Magic LED-programming value written at BAR0 offset 0x2700
 * (register not named in the header) — SXE-002 errata. */
1930 val64 = 0x0411040400000000ULL;
1931 writeq(val64, (void __iomem *) ((u8 *) bar0 + 0x2700));
1935 * Don't see link state interrupts on certain switches, so
1936 * directly scheduling a link state task from here.
1938 schedule_work(&nic->set_link_task);
1944 * free_tx_buffers - Free all queued Tx buffers
1945 * @nic : device private variable.
1947 * Free all queued Tx buffers.
1948 * Return Value: void
/* Forcibly free every queued Tx skb on every FIFO: for each descriptor
 * that still holds an skb, unmap its DMA buffers (linear part and page
 * fragments), zero the TxD(s), and reset the FIFO's get/put offsets.
 * Called on teardown/reset, when the hardware is no longer transmitting. */
1951 static void free_tx_buffers(struct s2io_nic *nic)
1953 struct net_device *dev = nic->dev;
1954 struct sk_buff *skb;
1957 mac_info_t *mac_control;
1958 struct config_param *config;
1959 int cnt = 0, frg_cnt;
1961 mac_control = &nic->mac_control;
1962 config = &nic->config;
1964 for (i = 0; i < config->tx_fifo_num; i++) {
1965 for (j = 0; j < config->tx_cfg[i].fifo_len - 1; j++) {
1966 txdp = (TxD_t *) mac_control->fifos[i].list_info[j].
/* Recover the skb pointer stashed in the descriptor's
 * Host_Control field (lines partially elided here). */
1969 (struct sk_buff *) ((unsigned long) txdp->
1972 memset(txdp, 0, sizeof(TxD_t) *
1976 frg_cnt = skb_shinfo(skb)->nr_frags;
/* Unmap the linear (non-paged) part of the skb. */
1977 pci_unmap_single(nic->pdev, (dma_addr_t)
1978 txdp->Buffer_Pointer,
1979 skb->len - skb->data_len,
/* Then unmap each page fragment's descriptor. */
1985 for (j = 0; j < frg_cnt; j++, txdp++) {
1987 &skb_shinfo(skb)->frags[j];
1988 pci_unmap_page(nic->pdev,
1998 memset(txdp, 0, sizeof(TxD_t) * config->max_txds);
2002 "%s:forcibly freeing %d skbs on FIFO%d\n",
2004 mac_control->fifos[i].tx_curr_get_info.offset = 0;
2005 mac_control->fifos[i].tx_curr_put_info.offset = 0;
2010 * stop_nic - To stop the nic
2011 * @nic ; device private variable.
2013 * This function does exactly the opposite of what the start_nic()
2014 * function does. This function is called to stop the device.
/* Stop the NIC: mask the same interrupt set that start_nic() enabled,
 * then clear the receive-controller enable bit on every configured ring.
 * The inverse of start_nic(). */
2019 static void stop_nic(struct s2io_nic *nic)
2021 XENA_dev_config_t __iomem *bar0 = nic->bar0;
2022 register u64 val64 = 0;
2023 u16 interruptible, i;
2024 mac_info_t *mac_control;
2025 struct config_param *config;
2027 mac_control = &nic->mac_control;
2028 config = &nic->config;
2030 /* Disable all interrupts */
2031 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR | MC_INTR;
2032 interruptible |= TX_PIC_INTR | RX_PIC_INTR;
2033 interruptible |= TX_MAC_INTR | RX_MAC_INTR;
2034 en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
/* Stop each ring's receive controller (PRC). */
2037 for (i = 0; i < config->rx_ring_num; i++) {
2038 val64 = readq(&bar0->prc_ctrl_n[i]);
2039 val64 &= ~((u64) PRC_CTRL_RC_ENABLED);
2040 writeq(val64, &bar0->prc_ctrl_n[i]);
2045 * fill_rx_buffers - Allocates the Rx side skbs
2046 * @nic: device private variable
2047 * @ring_no: ring number
2049 * The function allocates Rx side skbs and puts the physical
2050 * address of these buffers into the RxD buffer pointers, so that the NIC
2051 * can DMA the received frame into these locations.
2052 * The NIC supports 3 receive modes, viz
2054 * 2. three buffer and
2055 * 3. Five buffer modes.
2056 * Each mode defines how many fragments the received frame will be split
2057 * up into by the NIC. The frame is split into L3 header, L4 Header,
2058 * L4 payload in three buffer mode and in 5 buffer mode, L4 payload itself
2059 * is split into 3 fragments. As of now only single buffer mode is
2062 * SUCCESS on success or an appropriate -ve value on failure.
/* Replenish ring @ring_no with Rx skbs: allocate skbs, DMA-map them, and
 * hand their addresses to the NIC via the RxD buffer pointers, advancing
 * the ring's put pointer and wrapping across descriptor blocks.  Compiled
 * for either 1-buffer or 2-buffer mode via CONFIG_2BUFF_MODE.
 * Returns SUCCESS or a -ve errno (-ENOMEM on skb allocation failure; the
 * return statements are on elided lines). */
2065 int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
2067 struct net_device *dev = nic->dev;
2068 struct sk_buff *skb;
2070 int off, off1, size, block_no, block_no1;
2071 int offset, offset1;
2074 mac_info_t *mac_control;
2075 struct config_param *config;
2076 #ifdef CONFIG_2BUFF_MODE
2081 dma_addr_t rxdpphys;
2083 #ifndef CONFIG_S2IO_NAPI
2084 unsigned long flags;
2087 mac_control = &nic->mac_control;
2088 config = &nic->config;
/* Number of buffers to allocate = ring capacity minus what is
 * currently outstanding in the ring. */
2089 alloc_cnt = mac_control->rings[ring_no].pkt_cnt -
2090 atomic_read(&nic->rx_bufs_left[ring_no]);
2091 size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
2092 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
2094 while (alloc_tab < alloc_cnt) {
2095 block_no = mac_control->rings[ring_no].rx_curr_put_info.
2097 block_no1 = mac_control->rings[ring_no].rx_curr_get_info.
2099 off = mac_control->rings[ring_no].rx_curr_put_info.offset;
2100 off1 = mac_control->rings[ring_no].rx_curr_get_info.offset;
/* 1-buffer mode blocks carry one extra link descriptor per block. */
2101 #ifndef CONFIG_2BUFF_MODE
2102 offset = block_no * (MAX_RXDS_PER_BLOCK + 1) + off;
2103 offset1 = block_no1 * (MAX_RXDS_PER_BLOCK + 1) + off1;
2105 offset = block_no * (MAX_RXDS_PER_BLOCK) + off;
2106 offset1 = block_no1 * (MAX_RXDS_PER_BLOCK) + off1;
2109 rxdp = mac_control->rings[ring_no].rx_blocks[block_no].
2110 block_virt_addr + off;
/* Put caught up with get while the descriptor is still in use:
 * the ring is full, stop replenishing. */
2111 if ((offset == offset1) && (rxdp->Host_Control)) {
2112 DBG_PRINT(INTR_DBG, "%s: Get and Put", dev->name);
2113 DBG_PRINT(INTR_DBG, " info equated\n");
2116 #ifndef CONFIG_2BUFF_MODE
/* End-of-block link descriptor: wrap to the next block. */
2117 if (rxdp->Control_1 == END_OF_BLOCK) {
2118 mac_control->rings[ring_no].rx_curr_put_info.
2120 mac_control->rings[ring_no].rx_curr_put_info.
2121 block_index %= mac_control->rings[ring_no].block_count;
2122 block_no = mac_control->rings[ring_no].rx_curr_put_info.
2125 off %= (MAX_RXDS_PER_BLOCK + 1);
2126 mac_control->rings[ring_no].rx_curr_put_info.offset =
2128 rxdp = (RxD_t *) ((unsigned long) rxdp->Control_2);
2129 DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
2132 #ifndef CONFIG_S2IO_NAPI
/* Non-NAPI path tracks the absolute put position under a lock so
 * the Rx interrupt handler can read it consistently. */
2133 spin_lock_irqsave(&nic->put_lock, flags);
2134 mac_control->rings[ring_no].put_pos =
2135 (block_no * (MAX_RXDS_PER_BLOCK + 1)) + off;
2136 spin_unlock_irqrestore(&nic->put_lock, flags);
2139 if (rxdp->Host_Control == END_OF_BLOCK) {
2140 mac_control->rings[ring_no].rx_curr_put_info.
2142 mac_control->rings[ring_no].rx_curr_put_info.block_index
2143 %= mac_control->rings[ring_no].block_count;
2144 block_no = mac_control->rings[ring_no].rx_curr_put_info
2147 DBG_PRINT(INTR_DBG, "%s: block%d at: 0x%llx\n",
2148 dev->name, block_no,
2149 (unsigned long long) rxdp->Control_1);
2150 mac_control->rings[ring_no].rx_curr_put_info.offset =
2152 rxdp = mac_control->rings[ring_no].rx_blocks[block_no].
2155 #ifndef CONFIG_S2IO_NAPI
2156 spin_lock_irqsave(&nic->put_lock, flags);
2157 mac_control->rings[ring_no].put_pos = (block_no *
2158 (MAX_RXDS_PER_BLOCK + 1)) + off;
2159 spin_unlock_irqrestore(&nic->put_lock, flags);
2163 #ifndef CONFIG_2BUFF_MODE
/* Descriptor still owned by the NIC or marked empty: stop here. */
2164 if (rxdp->Control_1 & RXD_OWN_XENA)
2166 if (rxdp->Control_2 & BIT(0))
2169 mac_control->rings[ring_no].rx_curr_put_info.
2173 #ifdef CONFIG_2BUFF_MODE
2175 * RxDs Spanning cache lines will be replenished only
2176 * if the succeeding RxD is also owned by Host. It
2177 * will always be the ((8*i)+3) and ((8*i)+6)
2178 * descriptors for the 48 byte descriptor. The offending
2179 * decsriptor is of-course the 3rd descriptor.
2181 rxdpphys = mac_control->rings[ring_no].rx_blocks[block_no].
2182 block_dma_addr + (off * sizeof(RxD_t));
2183 if (((u64) (rxdpphys)) % 128 > 80) {
2184 rxdpnext = mac_control->rings[ring_no].rx_blocks[block_no].
2185 block_virt_addr + (off + 1);
2186 if (rxdpnext->Host_Control == END_OF_BLOCK) {
2187 nextblk = (block_no + 1) %
2188 (mac_control->rings[ring_no].block_count);
2189 rxdpnext = mac_control->rings[ring_no].rx_blocks
2190 [nextblk].block_virt_addr;
2192 if (rxdpnext->Control_2 & BIT(0))
2197 #ifndef CONFIG_2BUFF_MODE
2198 skb = dev_alloc_skb(size + NET_IP_ALIGN);
2200 skb = dev_alloc_skb(dev->mtu + ALIGN_SIZE + BUF0_LEN + 4);
2203 DBG_PRINT(ERR_DBG, "%s: Out of ", dev->name);
2204 DBG_PRINT(ERR_DBG, "memory to allocate SKBs\n");
2207 #ifndef CONFIG_2BUFF_MODE
/* 1-buffer mode: whole frame lands in Buffer0. */
2208 skb_reserve(skb, NET_IP_ALIGN);
2209 memset(rxdp, 0, sizeof(RxD_t));
2210 rxdp->Buffer0_ptr = pci_map_single
2211 (nic->pdev, skb->data, size, PCI_DMA_FROMDEVICE);
2212 rxdp->Control_2 &= (~MASK_BUFFER0_SIZE);
2213 rxdp->Control_2 |= SET_BUFFER0_SIZE(size);
2214 rxdp->Host_Control = (unsigned long) (skb);
/* Ownership bit is set LAST so the NIC sees a complete RxD. */
2215 rxdp->Control_1 |= RXD_OWN_XENA;
2217 off %= (MAX_RXDS_PER_BLOCK + 1);
2218 mac_control->rings[ring_no].rx_curr_put_info.offset = off;
/* 2-buffer mode: header buffers come from the per-descriptor
 * ba (buffer-address) bookkeeping; payload goes in Buffer2. */
2220 ba = &mac_control->rings[ring_no].ba[block_no][off];
2221 skb_reserve(skb, BUF0_LEN);
2222 tmp = ((unsigned long) skb->data & ALIGN_SIZE);
2224 skb_reserve(skb, (ALIGN_SIZE + 1) - tmp);
2226 memset(rxdp, 0, sizeof(RxD_t));
2227 rxdp->Buffer2_ptr = pci_map_single
2228 (nic->pdev, skb->data, dev->mtu + BUF0_LEN + 4,
2229 PCI_DMA_FROMDEVICE);
2231 pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN,
2232 PCI_DMA_FROMDEVICE);
2234 pci_map_single(nic->pdev, ba->ba_1, BUF1_LEN,
2235 PCI_DMA_FROMDEVICE);
2237 rxdp->Control_2 = SET_BUFFER2_SIZE(dev->mtu + 4);
2238 rxdp->Control_2 |= SET_BUFFER0_SIZE(BUF0_LEN);
2239 rxdp->Control_2 |= SET_BUFFER1_SIZE(1); /* dummy. */
2240 rxdp->Control_2 |= BIT(0); /* Set Buffer_Empty bit. */
2241 rxdp->Host_Control = (u64) ((unsigned long) (skb));
2242 rxdp->Control_1 |= RXD_OWN_XENA;
2244 mac_control->rings[ring_no].rx_curr_put_info.offset = off;
2246 rxdp->Control_2 |= SET_RXD_MARKER;
2248 atomic_inc(&nic->rx_bufs_left[ring_no]);
2257 * free_rx_buffers - Frees all Rx buffers
2258 * @sp: device private variable.
2260 * This function will free all Rx buffers allocated by host.
/* Free every host-allocated Rx buffer on every ring: walk each ring's
 * descriptor blocks, unmap the DMA buffer(s) behind each populated RxD,
 * free the skb, zero the descriptor, and reset the ring's get/put state.
 * (The dev_kfree_skb / blk-advance statements sit on elided lines.) */
2265 static void free_rx_buffers(struct s2io_nic *sp)
2267 struct net_device *dev = sp->dev;
2268 int i, j, blk = 0, off, buf_cnt = 0;
2270 struct sk_buff *skb;
2271 mac_info_t *mac_control;
2272 struct config_param *config;
2273 #ifdef CONFIG_2BUFF_MODE
2277 mac_control = &sp->mac_control;
2278 config = &sp->config;
2280 for (i = 0; i < config->rx_ring_num; i++) {
2281 for (j = 0, blk = 0; j < config->rx_cfg[i].num_rxd; j++) {
2282 off = j % (MAX_RXDS_PER_BLOCK + 1);
2283 rxdp = mac_control->rings[i].rx_blocks[blk].
2284 block_virt_addr + off;
2286 #ifndef CONFIG_2BUFF_MODE
/* End-of-block link descriptor: follow it to the next block. */
2287 if (rxdp->Control_1 == END_OF_BLOCK) {
2289 (RxD_t *) ((unsigned long) rxdp->
2295 if (rxdp->Host_Control == END_OF_BLOCK) {
/* Descriptor owned by the host (no pending DMA): just clear it. */
2301 if (!(rxdp->Control_1 & RXD_OWN_XENA)) {
2302 memset(rxdp, 0, sizeof(RxD_t));
2307 (struct sk_buff *) ((unsigned long) rxdp->
2310 #ifndef CONFIG_2BUFF_MODE
2311 pci_unmap_single(sp->pdev, (dma_addr_t)
2314 HEADER_ETHERNET_II_802_3_SIZE
2315 + HEADER_802_2_SIZE +
2317 PCI_DMA_FROMDEVICE);
/* 2-buffer mode: unmap header buffers 0/1 and the payload buffer. */
2319 ba = &mac_control->rings[i].ba[blk][off];
2320 pci_unmap_single(sp->pdev, (dma_addr_t)
2323 PCI_DMA_FROMDEVICE);
2324 pci_unmap_single(sp->pdev, (dma_addr_t)
2327 PCI_DMA_FROMDEVICE);
2328 pci_unmap_single(sp->pdev, (dma_addr_t)
2330 dev->mtu + BUF0_LEN + 4,
2331 PCI_DMA_FROMDEVICE);
2334 atomic_dec(&sp->rx_bufs_left[i]);
2337 memset(rxdp, 0, sizeof(RxD_t));
2339 mac_control->rings[i].rx_curr_put_info.block_index = 0;
2340 mac_control->rings[i].rx_curr_get_info.block_index = 0;
2341 mac_control->rings[i].rx_curr_put_info.offset = 0;
2342 mac_control->rings[i].rx_curr_get_info.offset = 0;
2343 atomic_set(&sp->rx_bufs_left[i], 0);
2344 DBG_PRINT(INIT_DBG, "%s:Freed 0x%x Rx Buffers on ring%d\n",
2345 dev->name, buf_cnt, i);
2350 * s2io_poll - Rx interrupt handler for NAPI support
2351 * @dev : pointer to the device structure.
2352 * @budget : The number of packets that were budgeted to be processed
2353 * during one pass through the 'Poll" function.
2355 * Comes into picture only if NAPI support has been incorporated. It does
2356 * the same thing that rx_intr_handler does, but not in a interrupt context
2357 * also It will process only a given number of packets.
2359 * 0 on success and 1 if there are No Rx packets to be processed.
/* NAPI poll callback (old dev->poll API): acknowledge pending Rx traffic
 * interrupts, run rx_intr_handler() on each ring until the budget/quota is
 * consumed, refill Rx buffers, and — when all work is done — complete the
 * poll and re-enable Rx interrupts.  Returns 0 when done, 1 if more
 * packets remain (return statements are on elided lines). */
2362 #if defined(CONFIG_S2IO_NAPI)
2363 static int s2io_poll(struct net_device *dev, int *budget)
2365 nic_t *nic = dev->priv;
2366 int pkt_cnt = 0, org_pkts_to_process;
2367 mac_info_t *mac_control;
2368 struct config_param *config;
/* NOTE(review): this cast drops the __iomem qualifier that bar0
 * carries elsewhere in the file — sparse would flag it; verify. */
2369 XENA_dev_config_t *bar0 = (XENA_dev_config_t *) nic->bar0;
2373 atomic_inc(&nic->isr_cnt);
2374 mac_control = &nic->mac_control;
2375 config = &nic->config;
/* Per-pass packet budget: the smaller of *budget and dev->quota. */
2377 nic->pkts_to_process = *budget;
2378 if (nic->pkts_to_process > dev->quota)
2379 nic->pkts_to_process = dev->quota;
2380 org_pkts_to_process = nic->pkts_to_process;
/* Acknowledge the pending Rx traffic interrupt (write-back clears). */
2382 val64 = readq(&bar0->rx_traffic_int);
2383 writeq(val64, &bar0->rx_traffic_int);
2385 for (i = 0; i < config->rx_ring_num; i++) {
2386 rx_intr_handler(&mac_control->rings[i]);
2387 pkt_cnt = org_pkts_to_process - nic->pkts_to_process;
2388 if (!nic->pkts_to_process) {
2389 /* Quota for the current iteration has been met */
/* Budget not exhausted: finish this poll cycle. */
2396 dev->quota -= pkt_cnt;
2398 netif_rx_complete(dev);
2400 for (i = 0; i < config->rx_ring_num; i++) {
2401 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2402 DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
2403 DBG_PRINT(ERR_DBG, " in Rx Poll!!\n");
2407 /* Re enable the Rx interrupts. */
2408 en_dis_able_nic_intrs(nic, RX_TRAFFIC_INTR, ENABLE_INTRS);
2409 atomic_dec(&nic->isr_cnt);
/* Budget exhausted: refill and stay on the poll list. */
2413 dev->quota -= pkt_cnt;
2416 for (i = 0; i < config->rx_ring_num; i++) {
2417 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2418 DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
2419 DBG_PRINT(ERR_DBG, " in Rx Poll!!\n");
2423 atomic_dec(&nic->isr_cnt);
2429 * rx_intr_handler - Rx interrupt handler
2430 * @nic: device private variable.
2432 * If the interrupt is because of a received frame or if the
2433 * receive ring contains fresh as yet un-processed frames,this function is
2434 * called. It picks out the RxD at which place the last Rx processing had
2435 * stopped and sends the skb to the OSM's Rx handler and then increments
2440 static void rx_intr_handler(ring_info_t *ring_data)
2442 nic_t *nic = ring_data->nic;
2443 struct net_device *dev = (struct net_device *) nic->dev;
2444 int get_block, get_offset, put_block, put_offset, ring_bufs;
2445 rx_curr_get_info_t get_info, put_info;
2447 struct sk_buff *skb;
2448 #ifndef CONFIG_S2IO_NAPI
/* Bail out early if the card is being reset; rx_lock serializes this
 * handler against the reset path. */
2451 spin_lock(&nic->rx_lock);
2452 if (atomic_read(&nic->card_state) == CARD_DOWN) {
2453 DBG_PRINT(ERR_DBG, "%s: %s going down for reset\n",
2454 __FUNCTION__, dev->name);
2455 spin_unlock(&nic->rx_lock);
/* Snapshot the get/put cursors for this ring. */
2458 get_info = ring_data->rx_curr_get_info;
2459 get_block = get_info.block_index;
2460 put_info = ring_data->rx_curr_put_info;
2461 put_block = put_info.block_index;
2462 ring_bufs = get_info.ring_len+1;
2463 rxdp = ring_data->rx_blocks[get_block].block_virt_addr +
2465 get_offset = (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
2467 #ifndef CONFIG_S2IO_NAPI
/* In the non-NAPI case the producer position is shared with the refill
 * path, so read it under put_lock. */
2468 spin_lock(&nic->put_lock);
2469 put_offset = ring_data->put_pos;
2470 spin_unlock(&nic->put_lock);
2472 put_offset = (put_block * (MAX_RXDS_PER_BLOCK + 1)) +
/* Walk descriptors owned by the host until we catch up with the
 * producer ("put") position. */
2475 while (RXD_IS_UP2DT(rxdp) &&
2476 (((get_offset + 1) % ring_bufs) != put_offset)) {
2477 skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control);
2479 DBG_PRINT(ERR_DBG, "%s: The skb is ",
2481 DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
2482 spin_unlock(&nic->rx_lock);
/* Unmap the DMA buffer(s): one mapping in single-buffer mode, three
 * (BUF0 header / BUF1 / payload) in 2-buffer mode. */
2485 #ifndef CONFIG_2BUFF_MODE
2486 pci_unmap_single(nic->pdev, (dma_addr_t)
2489 HEADER_ETHERNET_II_802_3_SIZE +
2492 PCI_DMA_FROMDEVICE);
2494 pci_unmap_single(nic->pdev, (dma_addr_t)
2496 BUF0_LEN, PCI_DMA_FROMDEVICE);
2497 pci_unmap_single(nic->pdev, (dma_addr_t)
2499 BUF1_LEN, PCI_DMA_FROMDEVICE);
2500 pci_unmap_single(nic->pdev, (dma_addr_t)
2502 dev->mtu + BUF0_LEN + 4,
2503 PCI_DMA_FROMDEVICE);
/* Hand the frame up to the OS-specific Rx handler. */
2505 rx_osm_handler(ring_data, rxdp);
2507 ring_data->rx_curr_get_info.offset =
2509 rxdp = ring_data->rx_blocks[get_block].block_virt_addr +
/* End of an Rx block reached: wrap to the next block in the ring. */
2511 if (get_info.offset &&
2512 (!(get_info.offset % MAX_RXDS_PER_BLOCK))) {
2513 get_info.offset = 0;
2514 ring_data->rx_curr_get_info.offset
2517 get_block %= ring_data->block_count;
2518 ring_data->rx_curr_get_info.block_index
2520 rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
2523 get_offset = (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
2525 #ifdef CONFIG_S2IO_NAPI
/* NAPI: stop once the budget given to s2io_poll() is consumed. */
2526 nic->pkts_to_process -= 1;
2527 if (!nic->pkts_to_process)
2531 if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
2535 spin_unlock(&nic->rx_lock);
2539 * tx_intr_handler - Transmit interrupt handler
2540 * @nic : device private variable
2542 * If an interrupt was raised to indicate DMA complete of the
2543 * Tx packet, this function is called. It identifies the last TxD
2544 * whose buffer was freed and frees all skbs whose data have already
2545 * DMA'ed into the NICs internal memory.
2550 static void tx_intr_handler(fifo_info_t *fifo_data)
2552 nic_t *nic = fifo_data->nic;
2553 struct net_device *dev = (struct net_device *) nic->dev;
2554 tx_curr_get_info_t get_info, put_info;
2555 struct sk_buff *skb;
2559 get_info = fifo_data->tx_curr_get_info;
2560 put_info = fifo_data->tx_curr_put_info;
2561 txdlp = (TxD_t *) fifo_data->list_info[get_info.offset].
/* Reap completed TxD lists: stop when we hit a descriptor still owned
 * by the NIC, catch up with the put pointer, or find no attached skb. */
2563 while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
2564 (get_info.offset != put_info.offset) &&
2565 (txdlp->Host_Control)) {
2566 /* Check for TxD errors */
2567 if (txdlp->Control_1 & TXD_T_CODE) {
2568 unsigned long long err;
2569 err = txdlp->Control_1 & TXD_T_CODE;
2570 DBG_PRINT(ERR_DBG, "***TxD error %llx\n",
/* Recover the skb pointer stashed in Host_Control by s2io_xmit. */
2574 skb = (struct sk_buff *) ((unsigned long)
2575 txdlp->Host_Control);
2577 DBG_PRINT(ERR_DBG, "%s: Null skb ",
2579 DBG_PRINT(ERR_DBG, "in Tx Free Intr\n");
2583 frg_cnt = skb_shinfo(skb)->nr_frags;
2584 nic->tx_pkt_count++;
/* Unmap the linear part, then each page fragment. */
2586 pci_unmap_single(nic->pdev, (dma_addr_t)
2587 txdlp->Buffer_Pointer,
2588 skb->len - skb->data_len,
2594 for (j = 0; j < frg_cnt; j++, txdlp++) {
2596 &skb_shinfo(skb)->frags[j];
2597 pci_unmap_page(nic->pdev,
/* Scrub the whole TxD list so it can be reused. */
2607 (sizeof(TxD_t) * fifo_data->max_txds));
2609 /* Updating the statistics block */
2610 nic->stats.tx_bytes += skb->len;
2611 dev_kfree_skb_irq(skb);
/* Advance the get pointer, wrapping at the FIFO length. */
2614 get_info.offset %= get_info.fifo_len + 1;
2615 txdlp = (TxD_t *) fifo_data->list_info
2616 [get_info.offset].list_virt_addr;
2617 fifo_data->tx_curr_get_info.offset =
/* TxDs were freed above, so the queue can be restarted if stopped. */
2621 spin_lock(&nic->tx_lock);
2622 if (netif_queue_stopped(dev))
2623 netif_wake_queue(dev);
2624 spin_unlock(&nic->tx_lock);
2628 * alarm_intr_handler - Alarm Interrupt handler
2629 * @nic: device private variable
2630 * Description: If the interrupt was neither because of Rx packet or Tx
2631 * complete, this function is called. If the interrupt was to indicate
2632 * a loss of link, the OSM link status handler is invoked for any other
2633 * alarm interrupt the block that raised the interrupt is displayed
2634 * and a H/W reset is issued.
2639 static void alarm_intr_handler(struct s2io_nic *nic)
2641 struct net_device *dev = (struct net_device *) nic->dev;
2642 XENA_dev_config_t __iomem *bar0 = nic->bar0;
2643 register u64 val64 = 0, err_reg = 0;
2645 /* Handling link status change error Intr */
2646 if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
/* Read-then-write-back acks the RMAC error interrupt bits. */
2647 err_reg = readq(&bar0->mac_rmac_err_reg);
2648 writeq(err_reg, &bar0->mac_rmac_err_reg);
2649 if (err_reg & RMAC_LINK_STATE_CHANGE_INT) {
2650 schedule_work(&nic->set_link_task);
2654 /* Handling Ecc errors */
2655 val64 = readq(&bar0->mc_err_reg);
2656 writeq(val64, &bar0->mc_err_reg);
2657 if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) {
/* Double-bit ECC is unrecoverable: stop the queue and schedule a
 * device reset; single-bit errors are only counted. */
2658 if (val64 & MC_ERR_REG_ECC_ALL_DBL) {
2659 nic->mac_control.stats_info->sw_stat.
2661 DBG_PRINT(ERR_DBG, "%s: Device indicates ",
2663 DBG_PRINT(ERR_DBG, "double ECC error!!\n");
2664 netif_stop_queue(dev);
2665 schedule_work(&nic->rst_timer_task);
2667 nic->mac_control.stats_info->sw_stat.
2672 /* In case of a serious error, the device will be Reset. */
2673 val64 = readq(&bar0->serr_source);
2674 if (val64 & SERR_SOURCE_ANY) {
2675 DBG_PRINT(ERR_DBG, "%s: Device indicates ", dev->name);
2676 DBG_PRINT(ERR_DBG, "serious error!!\n");
2677 netif_stop_queue(dev);
2678 schedule_work(&nic->rst_timer_task);
2682 * Also as mentioned in the latest Errata sheets if the PCC_FB_ECC
2683 * Error occurs, the adapter will be recycled by disabling the
2684 * adapter enable bit and enabling it again after the device
2685 * becomes Quiescent.
2687 val64 = readq(&bar0->pcc_err_reg);
2688 writeq(val64, &bar0->pcc_err_reg);
2689 if (val64 & PCC_FB_ECC_DB_ERR) {
2690 u64 ac = readq(&bar0->adapter_control);
2691 ac &= ~(ADAPTER_CNTL_EN);
2692 writeq(ac, &bar0->adapter_control);
/* Read back to flush the write before queuing the link task. */
2693 ac = readq(&bar0->adapter_control);
2694 schedule_work(&nic->set_link_task);
2697 /* Other type of interrupts are not being handled now, TODO */
2701 * wait_for_cmd_complete - waits for a command to complete.
2702 * @sp : private member of the device structure, which is a pointer to the
2703 * s2io_nic structure.
2704 * Description: Function that waits for a command to Write into RMAC
2705 * ADDR DATA registers to be completed and returns either success or
2706 * error depending on whether the command was complete or not.
2708 * SUCCESS on success and FAILURE on failure.
2711 int wait_for_cmd_complete(nic_t * sp)
2713 XENA_dev_config_t __iomem *bar0 = sp->bar0;
/* Defaults to FAILURE; flipped to success only when the STROBE
 * "executing" bit clears within the polling window. */
2714 int ret = FAILURE, cnt = 0;
2718 val64 = readq(&bar0->rmac_addr_cmd_mem);
2719 if (!(val64 & RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING)) {
2732 * s2io_reset - Resets the card.
2733 * @sp : private member of the device structure.
2734 * Description: Function to Reset the card. This function then also
2735 * restores the previously saved PCI configuration space registers as
2736 * the card reset also resets the configuration space.
2741 void s2io_reset(nic_t * sp)
2743 XENA_dev_config_t __iomem *bar0 = sp->bar0;
/* Issue the global software reset. */
2747 val64 = SW_RESET_ALL;
2748 writeq(val64, &bar0->sw_reset);
2751 * At this stage, if the PCI write is indeed completed, the
2752 * card is reset and so is the PCI Config space of the device.
2753 * So a read cannot be issued at this stage on any of the
2754 * registers to ensure the write into "sw_reset" register
2756 * Question: Is there any system call that will explicitly force
2757 * all the write commands still pending on the bus to be pushed
2759 * As of now I'am just giving a 250ms delay and hoping that the
2760 * PCI write to sw_reset register is done by this time.
2764 if (!(sp->device_type & XFRAME_II_DEVICE)) {
2765 /* Restore the PCI state saved during initialization. */
2766 pci_restore_state(sp->pdev);
2768 pci_set_master(sp->pdev);
2774 /* Set swapper to enable I/O register access */
2775 s2io_set_swapper(sp);
2777 /* Clear certain PCI/PCI-X fields after reset */
2778 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
2779 pci_cmd &= 0x7FFF; /* Clear parity err detect bit */
2780 pci_write_config_word(sp->pdev, PCI_COMMAND, pci_cmd);
2782 val64 = readq(&bar0->txpic_int_reg);
2783 val64 &= ~BIT(62); /* Clearing PCI_STATUS error reflected here */
2784 writeq(val64, &bar0->txpic_int_reg);
2786 /* Clearing PCIX Ecc status register */
2787 pci_write_config_dword(sp->pdev, 0x68, 0);
2789 /* Reset device statistics maintained by OS */
2790 memset(&sp->stats, 0, sizeof (struct net_device_stats));
2792 /* SXE-002: Configure link and activity LED to turn it off */
2793 subid = sp->pdev->subsystem_device;
2794 if (((subid & 0xFF) >= 0x07) &&
2795 (sp->device_type == XFRAME_I_DEVICE)) {
2796 val64 = readq(&bar0->gpio_control);
2797 val64 |= 0x0000800000000000ULL;
2798 writeq(val64, &bar0->gpio_control);
/* Magic LED-control value written at BAR0 offset 0x2700 (workaround
 * register, not in the named register map). */
2799 val64 = 0x0411040400000000ULL;
2800 writeq(val64, (void __iomem *) ((u8 *) bar0 + 0x2700));
2804 * Clear spurious ECC interrupts that would have occured on
2805 * XFRAME II cards after reset.
2807 if (sp->device_type == XFRAME_II_DEVICE) {
2808 val64 = readq(&bar0->pcc_err_reg);
2809 writeq(val64, &bar0->pcc_err_reg);
2812 sp->device_enabled_once = FALSE;
2816 * s2io_set_swapper - to set the swapper control on the card
2817 * @sp : private member of the device structure,
2818 * pointer to the s2io_nic structure.
2819 * Description: Function to set the swapper control on the card
2820 * correctly depending on the 'endianness' of the system.
2822 * SUCCESS on success and FAILURE on failure.
2825 int s2io_set_swapper(nic_t * sp)
2827 struct net_device *dev = sp->dev;
2828 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2829 u64 val64, valt, valr;
2832 * Set proper endian settings and verify the same by reading
2833 * the PIF Feed-back register.
2836 val64 = readq(&bar0->pif_rd_swapper_fb);
/* 0x0123456789ABCDEF is the expected feedback pattern when the
 * swapper is configured correctly for this host's endianness. */
2837 if (val64 != 0x0123456789ABCDEFULL) {
2839 u64 value[] = { 0xC30000C3C30000C3ULL, /* FE=1, SE=1 */
2840 0x8100008181000081ULL, /* FE=1, SE=0 */
2841 0x4200004242000042ULL, /* FE=0, SE=1 */
2842 0}; /* FE=0, SE=0 */
/* Try each FE/SE combination until the feedback pattern matches. */
2845 writeq(value[i], &bar0->swapper_ctrl);
2846 val64 = readq(&bar0->pif_rd_swapper_fb);
2847 if (val64 == 0x0123456789ABCDEFULL)
2852 DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
2854 DBG_PRINT(ERR_DBG, "feedback read %llx\n",
2855 (unsigned long long) val64);
2860 valr = readq(&bar0->swapper_ctrl);
/* Second probe: verify a write/read round-trip through xmsi_address. */
2863 valt = 0x0123456789ABCDEFULL;
2864 writeq(valt, &bar0->xmsi_address);
2865 val64 = readq(&bar0->xmsi_address);
2869 u64 value[] = { 0x00C3C30000C3C300ULL, /* FE=1, SE=1 */
2870 0x0081810000818100ULL, /* FE=1, SE=0 */
2871 0x0042420000424200ULL, /* FE=0, SE=1 */
2872 0}; /* FE=0, SE=0 */
2875 writeq((value[i] | valr), &bar0->swapper_ctrl);
2876 writeq(valt, &bar0->xmsi_address);
2877 val64 = readq(&bar0->xmsi_address);
2883 unsigned long long x = val64;
2884 DBG_PRINT(ERR_DBG, "Write failed, Xmsi_addr ");
2885 DBG_PRINT(ERR_DBG, "reads:0x%llx\n", x);
/* Preserve the top 16 bits of swapper_ctrl, rebuild the rest below. */
2889 val64 = readq(&bar0->swapper_ctrl);
2890 val64 &= 0xFFFF000000000000ULL;
2894 * The device by default set to a big endian format, so a
2895 * big endian driver need not set anything.
2897 val64 |= (SWAPPER_CTRL_TXP_FE |
2898 SWAPPER_CTRL_TXP_SE |
2899 SWAPPER_CTRL_TXD_R_FE |
2900 SWAPPER_CTRL_TXD_W_FE |
2901 SWAPPER_CTRL_TXF_R_FE |
2902 SWAPPER_CTRL_RXD_R_FE |
2903 SWAPPER_CTRL_RXD_W_FE |
2904 SWAPPER_CTRL_RXF_W_FE |
2905 SWAPPER_CTRL_XMSI_FE |
2906 SWAPPER_CTRL_XMSI_SE |
2907 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
2908 writeq(val64, &bar0->swapper_ctrl);
2911 * Initially we enable all bits to make it accessible by the
2912 * driver, then we selectively enable only those bits that
2915 val64 |= (SWAPPER_CTRL_TXP_FE |
2916 SWAPPER_CTRL_TXP_SE |
2917 SWAPPER_CTRL_TXD_R_FE |
2918 SWAPPER_CTRL_TXD_R_SE |
2919 SWAPPER_CTRL_TXD_W_FE |
2920 SWAPPER_CTRL_TXD_W_SE |
2921 SWAPPER_CTRL_TXF_R_FE |
2922 SWAPPER_CTRL_RXD_R_FE |
2923 SWAPPER_CTRL_RXD_R_SE |
2924 SWAPPER_CTRL_RXD_W_FE |
2925 SWAPPER_CTRL_RXD_W_SE |
2926 SWAPPER_CTRL_RXF_W_FE |
2927 SWAPPER_CTRL_XMSI_FE |
2928 SWAPPER_CTRL_XMSI_SE |
2929 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
2930 writeq(val64, &bar0->swapper_ctrl);
2932 val64 = readq(&bar0->swapper_ctrl);
2935 * Verifying if endian settings are accurate by reading a
2936 * feedback register.
2938 val64 = readq(&bar0->pif_rd_swapper_fb);
2939 if (val64 != 0x0123456789ABCDEFULL) {
2940 /* Endian settings are incorrect, calls for another dekko. */
2941 DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
2943 DBG_PRINT(ERR_DBG, "feedback read %llx\n",
2944 (unsigned long long) val64);
2951 /* ********************************************************* *
2952 * Functions defined below concern the OS part of the driver *
2953 * ********************************************************* */
2956 * s2io_open - open entry point of the driver
2957 * @dev : pointer to the device structure.
2959 * This function is the open entry point of the driver. It mainly calls a
2960 * function to allocate Rx buffers and inserts them into the buffer
2961 * descriptors and then enables the Rx part of the NIC.
2963 * 0 on success and an appropriate (-)ve integer as defined in errno.h
2967 int s2io_open(struct net_device *dev)
2969 nic_t *sp = dev->priv;
2973 * Make sure you have link off by default every time
2974 * Nic is initialized
2976 netif_carrier_off(dev);
2977 sp->last_link_state = LINK_DOWN;
2979 /* Initialize H/W and enable interrupts */
2980 if (s2io_card_up(sp)) {
2981 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
2984 goto hw_init_failed;
2987 /* After proper initialization of H/W, register ISR */
2988 err = request_irq((int) sp->pdev->irq, s2io_isr, SA_SHIRQ,
2991 DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
2993 goto isr_registration_failed;
2996 if (s2io_set_mac_addr(dev, dev->dev_addr) == FAILURE) {
2997 DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
2999 goto setting_mac_address_failed;
3002 netif_start_queue(dev);
/* Error unwind: each label releases what the preceding step acquired. */
3005 setting_mac_address_failed:
3006 free_irq(sp->pdev->irq, dev);
3007 isr_registration_failed:
3008 del_timer_sync(&sp->alarm_timer);
3015 * s2io_close -close entry point of the driver
3016 * @dev : device pointer.
3018 * This is the stop entry point of the driver. It needs to undo exactly
3019 * whatever was done by the open entry point,thus it's usually referred to
3020 * as the close function.Among other things this function mainly stops the
3021 * Rx side of the NIC and frees all the Rx buffers in the Rx rings.
3023 * 0 on success and an appropriate (-)ve integer as defined in errno.h
3027 int s2io_close(struct net_device *dev)
3029 nic_t *sp = dev->priv;
/* Finish any queued set_link/reset work before tearing down. */
3030 flush_scheduled_work();
3031 netif_stop_queue(dev);
3032 /* Reset card, kill tasklet and free Tx and Rx buffers. */
3035 free_irq(sp->pdev->irq, dev);
3036 sp->device_close_flag = TRUE; /* Device is shut down. */
3041 * s2io_xmit - Tx entry point of the driver
3042 * @skb : the socket buffer containing the Tx data.
3043 * @dev : device pointer.
3045 * This function is the Tx entry point of the driver. S2IO NIC supports
3046 * certain protocol assist features on Tx side, namely CSO, S/G, LSO.
3047 * NOTE: when device can't queue the pkt,just the trans_start variable will
3050 * 0 on success & 1 on failure.
3053 int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
3055 nic_t *sp = dev->priv;
3056 u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
3059 TxFIFO_element_t __iomem *tx_fifo;
3060 unsigned long flags;
3065 int vlan_priority = 0;
3066 mac_info_t *mac_control;
3067 struct config_param *config;
3069 mac_control = &sp->mac_control;
3070 config = &sp->config;
3072 DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name);
/* tx_lock serializes xmit against the Tx-completion queue wakeup. */
3073 spin_lock_irqsave(&sp->tx_lock, flags);
3074 if (atomic_read(&sp->card_state) == CARD_DOWN) {
3075 DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
3077 spin_unlock_irqrestore(&sp->tx_lock, flags);
3084 /* Get Fifo number to Transmit based on vlan priority */
3085 if (sp->vlgrp && vlan_tx_tag_present(skb)) {
3086 vlan_tag = vlan_tx_tag_get(skb);
/* 802.1p priority is the top 3 bits of the VLAN tag. */
3087 vlan_priority = vlan_tag >> 13;
3088 queue = config->fifo_mapping[vlan_priority];
3091 put_off = (u16) mac_control->fifos[queue].tx_curr_put_info.offset;
3092 get_off = (u16) mac_control->fifos[queue].tx_curr_get_info.offset;
3093 txdp = (TxD_t *) mac_control->fifos[queue].list_info[put_off].
3096 queue_len = mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1;
3097 /* Avoid "put" pointer going beyond "get" pointer */
3098 if (txdp->Host_Control || (((put_off + 1) % queue_len) == get_off)) {
3099 DBG_PRINT(ERR_DBG, "Error in xmit, No free TXDs.\n");
3100 netif_stop_queue(dev);
3102 spin_unlock_irqrestore(&sp->tx_lock, flags);
/* Program LSO (TSO) if the stack requested segmentation offload. */
3106 mss = skb_shinfo(skb)->tso_size;
3108 txdp->Control_1 |= TXD_TCP_LSO_EN;
3109 txdp->Control_1 |= TXD_TCP_LSO_MSS(mss);
3113 frg_cnt = skb_shinfo(skb)->nr_frags;
3114 frg_len = skb->len - skb->data_len;
/* Map the linear part of the skb as buffer 0 of the TxD list. */
3116 txdp->Buffer_Pointer = pci_map_single
3117 (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE);
3118 txdp->Host_Control = (unsigned long) skb;
3119 if (skb->ip_summed == CHECKSUM_HW) {
3121 (TXD_TX_CKO_IPV4_EN | TXD_TX_CKO_TCP_EN |
3125 txdp->Control_2 |= config->tx_intr_type;
3127 if (sp->vlgrp && vlan_tx_tag_present(skb)) {
3128 txdp->Control_2 |= TXD_VLAN_ENABLE;
3129 txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag);
3132 txdp->Control_1 |= (TXD_BUFFER0_SIZE(frg_len) |
3133 TXD_GATHER_CODE_FIRST);
3134 txdp->Control_1 |= TXD_LIST_OWN_XENA;
3136 /* For fragmented SKB. */
3137 for (i = 0; i < frg_cnt; i++) {
3138 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3140 txdp->Buffer_Pointer = (u64) pci_map_page
3141 (sp->pdev, frag->page, frag->page_offset,
3142 frag->size, PCI_DMA_TODEVICE);
3143 txdp->Control_1 |= TXD_BUFFER0_SIZE(frag->size);
3145 txdp->Control_1 |= TXD_GATHER_CODE_LAST;
/* Kick the hardware: write the TxD list address and control word
 * into the FIFO doorbell registers. */
3147 tx_fifo = mac_control->tx_FIFO_start[queue];
3148 val64 = mac_control->fifos[queue].list_info[put_off].list_phy_addr;
3149 writeq(val64, &tx_fifo->TxDL_Pointer);
3153 val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
3158 val64 |= TX_FIFO_SPECIAL_FUNC;
3160 writeq(val64, &tx_fifo->List_Control);
3163 put_off %= mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1;
3164 mac_control->fifos[queue].tx_curr_put_info.offset = put_off;
3166 /* Avoid "put" pointer going beyond "get" pointer */
3167 if (((put_off + 1) % queue_len) == get_off) {
3169 "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
3171 netif_stop_queue(dev);
3174 dev->trans_start = jiffies;
3175 spin_unlock_irqrestore(&sp->tx_lock, flags);
/* Timer callback: runs the alarm handler and re-arms itself every
 * half second (HZ / 2 jiffies). */
3181 s2io_alarm_handle(unsigned long data)
3183 nic_t *sp = (nic_t *)data;
3185 alarm_intr_handler(sp);
3186 mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
/* Handle TxPIC (GPIO) interrupts: detect link up/down transitions and
 * adjust the GPIO interrupt mask so only the opposite transition is
 * unmasked afterwards. */
3189 static void s2io_txpic_intr_handle(nic_t *sp)
3191 XENA_dev_config_t *bar0 = (XENA_dev_config_t *) sp->bar0;
3194 val64 = readq(&bar0->pic_int_status);
3195 if (val64 & PIC_INT_GPIO) {
3196 val64 = readq(&bar0->gpio_int_reg);
/* Both up and down seen at once: the link bounced; just ack both. */
3197 if ((val64 & GPIO_INT_REG_LINK_DOWN) &&
3198 (val64 & GPIO_INT_REG_LINK_UP)) {
3199 val64 |= GPIO_INT_REG_LINK_DOWN;
3200 val64 |= GPIO_INT_REG_LINK_UP;
3201 writeq(val64, &bar0->gpio_int_reg);
/* Genuine state change relative to last_link_state: mask both link
 * interrupts and run the link-state machine. */
3205 if (((sp->last_link_state == LINK_UP) &&
3206 (val64 & GPIO_INT_REG_LINK_DOWN)) ||
3207 ((sp->last_link_state == LINK_DOWN) &&
3208 (val64 & GPIO_INT_REG_LINK_UP))) {
3209 val64 = readq(&bar0->gpio_int_mask);
3210 val64 |= GPIO_INT_MASK_LINK_DOWN;
3211 val64 |= GPIO_INT_MASK_LINK_UP;
3212 writeq(val64, &bar0->gpio_int_mask);
3213 s2io_set_link((unsigned long)sp);
3216 if (sp->last_link_state == LINK_UP) {
3217 /*enable down interrupt */
3218 val64 = readq(&bar0->gpio_int_mask);
3219 /* unmasks link down intr */
3220 val64 &= ~GPIO_INT_MASK_LINK_DOWN;
3221 /* masks link up intr */
3222 val64 |= GPIO_INT_MASK_LINK_UP;
3223 writeq(val64, &bar0->gpio_int_mask);
3225 /*enable UP Interrupt */
3226 val64 = readq(&bar0->gpio_int_mask);
3227 /* unmasks link up interrupt */
3228 val64 &= ~GPIO_INT_MASK_LINK_UP;
3229 /* masks link down interrupt */
3230 val64 |= GPIO_INT_MASK_LINK_DOWN;
3231 writeq(val64, &bar0->gpio_int_mask);
3237 * s2io_isr - ISR handler of the device .
3238 * @irq: the irq of the device.
3239 * @dev_id: a void pointer to the dev structure of the NIC.
3240 * @pt_regs: pointer to the registers pushed on the stack.
3241 * Description: This function is the ISR handler of the device. It
3242 * identifies the reason for the interrupt and calls the relevant
3243 * service routines. As a contingency measure, this ISR allocates the
3244 * recv buffers, if their numbers are below the panic value which is
3245 * presently set to 25% of the original number of rcv buffers allocated.
3247 * IRQ_HANDLED: will be returned if IRQ was handled by this routine
3248 * IRQ_NONE: will be returned if interrupt is not from our device
3250 static irqreturn_t s2io_isr(int irq, void *dev_id, struct pt_regs *regs)
3252 struct net_device *dev = (struct net_device *) dev_id;
3253 nic_t *sp = dev->priv;
3254 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3256 u64 reason = 0, val64;
3257 mac_info_t *mac_control;
3258 struct config_param *config;
3260 atomic_inc(&sp->isr_cnt);
3261 mac_control = &sp->mac_control;
3262 config = &sp->config;
3265 * Identify the cause for interrupt and call the appropriate
3266 * interrupt handler. Causes for the interrupt could be;
3270 * 4. Error in any functional blocks of the NIC.
3272 reason = readq(&bar0->general_int_status);
3275 /* The interrupt was not raised by Xena. */
3276 atomic_dec(&sp->isr_cnt);
3280 #ifdef CONFIG_S2IO_NAPI
/* NAPI: mask Rx interrupts and hand processing to s2io_poll(). */
3281 if (reason & GEN_INTR_RXTRAFFIC) {
3282 if (netif_rx_schedule_prep(dev)) {
3283 en_dis_able_nic_intrs(sp, RX_TRAFFIC_INTR,
3285 __netif_rx_schedule(dev);
3289 /* If Intr is because of Rx Traffic */
3290 if (reason & GEN_INTR_RXTRAFFIC) {
3292 * rx_traffic_int reg is an R1 register, writing all 1's
3293 * will ensure that the actual interrupt causing bit get's
3294 * cleared and hence a read can be avoided.
3296 val64 = 0xFFFFFFFFFFFFFFFFULL;
3297 writeq(val64, &bar0->rx_traffic_int);
3298 for (i = 0; i < config->rx_ring_num; i++) {
3299 rx_intr_handler(&mac_control->rings[i]);
3304 /* If Intr is because of Tx Traffic */
3305 if (reason & GEN_INTR_TXTRAFFIC) {
3307 * tx_traffic_int reg is an R1 register, writing all 1's
3308 * will ensure that the actual interrupt causing bit get's
3309 * cleared and hence a read can be avoided.
3311 val64 = 0xFFFFFFFFFFFFFFFFULL;
3312 writeq(val64, &bar0->tx_traffic_int);
3314 for (i = 0; i < config->tx_fifo_num; i++)
3315 tx_intr_handler(&mac_control->fifos[i]);
3318 if (reason & GEN_INTR_TXPIC)
3319 s2io_txpic_intr_handle(sp);
3321 * If the Rx buffer count is below the panic threshold then
3322 * reallocate the buffers from the interrupt handler itself,
3323 * else schedule a tasklet to reallocate the buffers.
3325 #ifndef CONFIG_S2IO_NAPI
3326 for (i = 0; i < config->rx_ring_num; i++) {
3328 int rxb_size = atomic_read(&sp->rx_bufs_left[i]);
3329 int level = rx_buffer_level(sp, rxb_size, i);
/* PANIC: refill inline (only if the tasklet isn't already at it);
 * LOW: defer the refill to the tasklet. */
3331 if ((level == PANIC) && (!TASKLET_IN_USE)) {
3332 DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", dev->name);
3333 DBG_PRINT(INTR_DBG, "PANIC levels\n");
3334 if ((ret = fill_rx_buffers(sp, i)) == -ENOMEM) {
3335 DBG_PRINT(ERR_DBG, "%s:Out of memory",
3337 DBG_PRINT(ERR_DBG, " in ISR!!\n");
3338 clear_bit(0, (&sp->tasklet_status));
3339 atomic_dec(&sp->isr_cnt);
3342 clear_bit(0, (&sp->tasklet_status));
3343 } else if (level == LOW) {
3344 tasklet_schedule(&sp->task);
3349 atomic_dec(&sp->isr_cnt);
/* Trigger a one-shot hardware statistics DMA into the stats block and
 * poll stat_cfg until the strobe bit clears (or the retry budget is
 * exhausted).  Only acts while the card is up. */
3356 static void s2io_updt_stats(nic_t *sp)
3358 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3362 if (atomic_read(&sp->card_state) == CARD_UP) {
3363 /* Apprx 30us on a 133 MHz bus */
3364 val64 = SET_UPDT_CLICKS(10) |
3365 STAT_CFG_ONE_SHOT_EN | STAT_CFG_STAT_EN;
3366 writeq(val64, &bar0->stat_cfg);
3369 val64 = readq(&bar0->stat_cfg);
3370 if (!(val64 & BIT(0)))
3374 break; /* Updt failed */
3380 * s2io_get_stats - Updates the device statistics structure.
3381 * @dev : pointer to the device structure.
3383 * This function updates the device statistics structure in the s2io_nic
3384 * structure and returns a pointer to the same.
3386 * pointer to the updated net_device_stats structure.
3389 struct net_device_stats *s2io_get_stats(struct net_device *dev)
3391 nic_t *sp = dev->priv;
3392 mac_info_t *mac_control;
3393 struct config_param *config;
3396 mac_control = &sp->mac_control;
3397 config = &sp->config;
3399 /* Configure Stats for immediate updt */
3400 s2io_updt_stats(sp);
/* Copy the little-endian hardware counters into the OS stats struct. */
3402 sp->stats.tx_packets =
3403 le32_to_cpu(mac_control->stats_info->tmac_frms);
3404 sp->stats.tx_errors =
3405 le32_to_cpu(mac_control->stats_info->tmac_any_err_frms);
3406 sp->stats.rx_errors =
3407 le32_to_cpu(mac_control->stats_info->rmac_drop_frms);
3408 sp->stats.multicast =
3409 le32_to_cpu(mac_control->stats_info->rmac_vld_mcst_frms);
3410 sp->stats.rx_length_errors =
3411 le32_to_cpu(mac_control->stats_info->rmac_long_frms);
3413 return (&sp->stats);
3417 * s2io_set_multicast - entry point for multicast address enable/disable.
3418 * @dev : pointer to the device structure
3420 * This function is a driver entry point which gets called by the kernel
3421 * whenever multicast addresses must be enabled/disabled. This also gets
3422 * called to set/reset promiscuous mode. Depending on the device flag, we
3423 * determine, if multicast address must be enabled or if promiscuous mode
3424 * is to be disabled etc.
3429 static void s2io_set_multicast(struct net_device *dev)
3432 struct dev_mc_list *mclist;
3433 nic_t *sp = dev->priv;
3434 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3435 u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
3437 u64 dis_addr = 0xffffffffffffULL, mac_addr = 0;
3440 if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) {
3441 /* Enable all Multicast addresses */
3442 writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac),
3443 &bar0->rmac_addr_data0_mem);
3444 writeq(RMAC_ADDR_DATA1_MEM_MASK(mask),
3445 &bar0->rmac_addr_data1_mem);
3446 val64 = RMAC_ADDR_CMD_MEM_WE |
3447 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3448 RMAC_ADDR_CMD_MEM_OFFSET(MAC_MC_ALL_MC_ADDR_OFFSET);
3449 writeq(val64, &bar0->rmac_addr_cmd_mem);
3450 /* Wait till command completes */
3451 wait_for_cmd_complete(sp);
3454 sp->all_multi_pos = MAC_MC_ALL_MC_ADDR_OFFSET;
3455 } else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) {
3456 /* Disable all Multicast addresses */
3457 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
3458 &bar0->rmac_addr_data0_mem);
3459 writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0),
3460 &bar0->rmac_addr_data1_mem);
3461 val64 = RMAC_ADDR_CMD_MEM_WE |
3462 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3463 RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
3464 writeq(val64, &bar0->rmac_addr_cmd_mem);
3465 /* Wait till command completes */
3466 wait_for_cmd_complete(sp);
3469 sp->all_multi_pos = 0;
3472 if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) {
3473 /* Put the NIC into promiscuous mode */
3474 add = &bar0->mac_cfg;
3475 val64 = readq(&bar0->mac_cfg);
3476 val64 |= MAC_CFG_RMAC_PROM_ENABLE;
/* mac_cfg is write-protected: each 32-bit half must be unlocked by
 * writing the 0x4C0D key to rmac_cfg_key first. */
3478 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
3479 writel((u32) val64, add);
3480 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
3481 writel((u32) (val64 >> 32), (add + 4));
3483 val64 = readq(&bar0->mac_cfg);
3484 sp->promisc_flg = 1;
3485 DBG_PRINT(ERR_DBG, "%s: entered promiscuous mode\n",
3487 } else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
3488 /* Remove the NIC from promiscuous mode */
3489 add = &bar0->mac_cfg;
3490 val64 = readq(&bar0->mac_cfg);
3491 val64 &= ~MAC_CFG_RMAC_PROM_ENABLE;
3493 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
3494 writel((u32) val64, add);
3495 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
3496 writel((u32) (val64 >> 32), (add + 4));
3498 val64 = readq(&bar0->mac_cfg);
3499 sp->promisc_flg = 0;
3500 DBG_PRINT(ERR_DBG, "%s: left promiscuous mode\n",
3504 /* Update individual M_CAST address list */
3505 if ((!sp->m_cast_flg) && dev->mc_count) {
3507 (MAX_ADDRS_SUPPORTED - MAC_MC_ADDR_START_OFFSET - 1)) {
3508 DBG_PRINT(ERR_DBG, "%s: No more Rx filters ",
3510 DBG_PRINT(ERR_DBG, "can be added, please enable ");
3511 DBG_PRINT(ERR_DBG, "ALL_MULTI instead\n");
3515 prev_cnt = sp->mc_addr_count;
3516 sp->mc_addr_count = dev->mc_count;
3518 /* Clear out the previous list of Mc in the H/W. */
3519 for (i = 0; i < prev_cnt; i++) {
3520 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
3521 &bar0->rmac_addr_data0_mem);
3522 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
3523 &bar0->rmac_addr_data1_mem);
3524 val64 = RMAC_ADDR_CMD_MEM_WE |
3525 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3526 RMAC_ADDR_CMD_MEM_OFFSET
3527 (MAC_MC_ADDR_START_OFFSET + i);
3528 writeq(val64, &bar0->rmac_addr_cmd_mem);
3530 /* Wait for command completes */
3531 if (wait_for_cmd_complete(sp)) {
3532 DBG_PRINT(ERR_DBG, "%s: Adding ",
3534 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
3539 /* Create the new Rx filter list and update the same in H/W. */
3540 for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
3541 i++, mclist = mclist->next) {
3542 memcpy(sp->usr_addrs[i].addr, mclist->dmi_addr,
/* Pack the 6 MAC bytes into the u64 expected by the address CAM. */
3544 for (j = 0; j < ETH_ALEN; j++) {
3545 mac_addr |= mclist->dmi_addr[j];
3549 writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
3550 &bar0->rmac_addr_data0_mem);
3551 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
3552 &bar0->rmac_addr_data1_mem);
3553 val64 = RMAC_ADDR_CMD_MEM_WE |
3554 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3555 RMAC_ADDR_CMD_MEM_OFFSET
3556 (i + MAC_MC_ADDR_START_OFFSET);
3557 writeq(val64, &bar0->rmac_addr_cmd_mem);
3559 /* Wait for command completes */
3560 if (wait_for_cmd_complete(sp)) {
3561 DBG_PRINT(ERR_DBG, "%s: Adding ",
3563 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
3571 * s2io_set_mac_addr - Programs the Xframe mac address
3572 * @dev : pointer to the device structure.
3573 * @addr: a uchar pointer to the new mac address which is to be set.
3574 * Description : This procedure will program the Xframe to receive
3575 * frames with new Mac Address
3576 * Return value: SUCCESS on success and an appropriate (-)ve integer
3577 * as defined in errno.h file on failure.
3580 int s2io_set_mac_addr(struct net_device *dev, u8 * addr)
3582 nic_t *sp = dev->priv;
3583 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3584 register u64 val64, mac_addr = 0;
3588 * Set the new MAC address as the new unicast filter and reflect this
3589 * change on the device address registered with the OS. It will be
/* Pack the 6 address bytes into a u64 for the unicast filter entry. */
3592 for (i = 0; i < ETH_ALEN; i++) {
3594 mac_addr |= addr[i];
3597 writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
3598 &bar0->rmac_addr_data0_mem);
/* Write the address into CAM offset 0 (the unicast slot). */
3601 RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3602 RMAC_ADDR_CMD_MEM_OFFSET(0);
3603 writeq(val64, &bar0->rmac_addr_cmd_mem);
3604 /* Wait till command completes */
3605 if (wait_for_cmd_complete(sp)) {
3606 DBG_PRINT(ERR_DBG, "%s: set_mac_addr failed\n", dev->name);
3614 * s2io_ethtool_sset - Sets different link parameters.
3615 * @sp : private member of the device structure, which is a pointer to the s2io_nic structure.
3616 * @info: pointer to the structure with parameters given by ethtool to set
3619 * The function sets different link parameters provided by the user onto
3625 static int s2io_ethtool_sset(struct net_device *dev,
3626 struct ethtool_cmd *info)
3628 nic_t *sp = dev->priv;
/* The Xframe link is fixed 10Gbps full-duplex with no autonegotiation;
 * anything else requested is rejected (the error-return line is not
 * visible in this decimated extract). */
3629 if ((info->autoneg == AUTONEG_ENABLE) ||
3630 (info->speed != SPEED_10000) || (info->duplex != DUPLEX_FULL))
/* "Applying" the fixed settings is done by bouncing the interface. */
3633 s2io_close(sp->dev);
3641 * s2io_ethtool_gset - Return link specific information.
3642 * @sp : private member of the device structure, pointer to the
3643 * s2io_nic structure.
3644 * @info : pointer to the structure with parameters given by ethtool
3645 * to return link information.
3647 * Returns link specific information like speed, duplex etc.. to ethtool.
3649 * return 0 on success.
3652 static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
3654 nic_t *sp = dev->priv;
3655 info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
/* NOTE(review): advertising is filled with SUPPORTED_* flags; the
 * ADVERTISED_* equivalents happen to share the same bit values, but the
 * proper macros should be used -- confirm against ethtool.h. */
3656 info->advertising = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
3657 info->port = PORT_FIBRE;
3658 /* info->transceiver?? TODO */
/* Link up: report the only rate the hardware supports.  The link-down
 * else-branch is not visible in this decimated extract. */
3660 if (netif_carrier_ok(sp->dev)) {
3661 info->speed = 10000;
3662 info->duplex = DUPLEX_FULL;
3668 info->autoneg = AUTONEG_DISABLE;
3673 * s2io_ethtool_gdrvinfo - Returns driver specific information.
3674 * @sp : private member of the device structure, which is a pointer to the
3675 * s2io_nic structure.
3676 * @info : pointer to the structure with parameters given by ethtool to
3677 * return driver information.
3679 * Returns driver specific information like name, version etc.. to ethtool.
3684 static void s2io_ethtool_gdrvinfo(struct net_device *dev,
3685 struct ethtool_drvinfo *info)
3687 nic_t *sp = dev->priv;
/* Bound every copy by the DESTINATION field size: struct
 * ethtool_drvinfo fields are fixed 32-byte arrays.  The old code
 * bounded the first two copies by the SOURCE size
 * (sizeof(s2io_driver_name/version)), which would overflow the 32-byte
 * fields if those strings ever grew. */
3689 strncpy(info->driver, s2io_driver_name, sizeof(info->driver));
3690 strncpy(info->version, s2io_driver_version,
3691 sizeof(info->version));
3692 strncpy(info->fw_version, "", sizeof(info->fw_version));
3693 strncpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info));
3694 info->regdump_len = XENA_REG_SPACE;	/* size returned by get_regs_len */
3695 info->eedump_len = XENA_EEPROM_SPACE;	/* size returned by get_eeprom_len */
3696 info->testinfo_len = S2IO_TEST_LEN;
3697 info->n_stats = S2IO_STAT_LEN;
3701 * s2io_ethtool_gregs - dumps the entire register space of Xframe into the buffer.
3702 * @sp: private member of the device structure, which is a pointer to the
3703 * s2io_nic structure.
3704 * @regs : pointer to the structure with parameters given by ethtool for
3705 * dumping the registers.
3706 * @reg_space: The input argument into which all the registers are dumped.
3708 * Dumps the entire register space of xFrame NIC into the user given
3714 static void s2io_ethtool_gregs(struct net_device *dev,
3715 struct ethtool_regs *regs, void *space)
3719 u8 *reg_space = (u8 *) space;
3720 nic_t *sp = dev->priv;
3722 regs->len = XENA_REG_SPACE;
3723 regs->version = sp->pdev->subsystem_device;
/* Copy the whole BAR0 register window out 64 bits at a time.  The
 * "&reg" argument below had been mangled into the mojibake form of the
 * HTML entity &reg; -- restored to a real address-of expression. */
3725 for (i = 0; i < regs->len; i += 8) {
3726 reg = readq(sp->bar0 + i);
3727 memcpy((reg_space + i), &reg, 8);
3732 * s2io_phy_id - timer function that alternates adapter LED.
3733 * @data : address of the private member of the device structure, which
3734 * is a pointer to the s2io_nic structure, provided as an u32.
3735 * Description: This is actually the timer function that alternates the
3736 * adapter LED bit of the adapter control bit to set/reset every time on
3737 * invocation. The timer is set for 1/2 a second, hence the NIC blinks
3738 * once every second.
3740 static void s2io_phy_id(unsigned long data)
3742 nic_t *sp = (nic_t *) data;
3743 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3747 subid = sp->pdev->subsystem_device;
/* Xframe-II and late-revision Xframe-I boards drive the LED via GPIO 0;
 * older boards toggle ADAPTER_LED_ON in adapter_control instead (the
 * else line itself is not visible in this decimated extract). */
3748 if ((sp->device_type == XFRAME_II_DEVICE) ||
3749 ((subid & 0xFF) >= 0x07)) {
3750 val64 = readq(&bar0->gpio_control);
3751 val64 ^= GPIO_CTRL_GPIO_0;	/* XOR toggles the LED each tick */
3752 writeq(val64, &bar0->gpio_control);
3754 val64 = readq(&bar0->adapter_control);
3755 val64 ^= ADAPTER_LED_ON;
3756 writeq(val64, &bar0->adapter_control);
3759 mod_timer(&sp->id_timer, jiffies + HZ / 2);	/* re-arm: toggles twice a second */
3763 * s2io_ethtool_idnic - To physically identify the nic on the system.
3764 * @sp : private member of the device structure, which is a pointer to the
3765 * s2io_nic structure.
3766 * @id : pointer to the structure with identification parameters given by
3768 * Description: Used to physically identify the NIC on the system.
3769 * The Link LED will blink for a time specified by the user for
3771 * NOTE: The Link has to be Up to be able to blink the LED. Hence
3772 * identification is possible only if it's link is up.
3774 * int , returns 0 on success
3777 static int s2io_ethtool_idnic(struct net_device *dev, u32 data)
3779 u64 val64 = 0, last_gpio_ctrl_val;
3780 nic_t *sp = dev->priv;
3781 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3784 subid = sp->pdev->subsystem_device;
3785 last_gpio_ctrl_val = readq(&bar0->gpio_control);	/* saved; restored below */
/* Early Xframe-I boards cannot blink the LED unless the adapter is
 * enabled, so refuse in that case. */
3786 if ((sp->device_type == XFRAME_I_DEVICE) &&
3787 ((subid & 0xFF) < 0x07)) {
3788 val64 = readq(&bar0->adapter_control);
3789 if (!(val64 & ADAPTER_CNTL_EN)) {
3791 "Adapter Link down, cannot blink LED\n");
/* Lazily initialise the blink timer on first use. */
3795 if (sp->id_timer.function == NULL) {
3796 init_timer(&sp->id_timer);
3797 sp->id_timer.function = s2io_phy_id;
3798 sp->id_timer.data = (unsigned long) sp;
3800 mod_timer(&sp->id_timer, jiffies);	/* start blinking immediately */
/* Sleep for the user-requested duration; the if/else choosing between
 * these two sleeps (data vs. default) is not visible in this decimated
 * extract. */
3802 msleep_interruptible(data * HZ);
3804 msleep_interruptible(MAX_FLICKER_TIME);
3805 del_timer_sync(&sp->id_timer);	/* stop blinking; waits for a running tick */
/* Restore the GPIO/LED state captured on entry. */
3807 if (CARDS_WITH_FAULTY_LINK_INDICATORS(sp->device_type, subid)) {
3808 writeq(last_gpio_ctrl_val, &bar0->gpio_control);
3809 last_gpio_ctrl_val = readq(&bar0->gpio_control);
3816 * s2io_ethtool_getpause_data - Pause frame generation and reception.
3817 * @sp : private member of the device structure, which is a pointer to the
3818 * s2io_nic structure.
3819 * @ep : pointer to the structure with pause parameters given by ethtool.
3821 * Returns the Pause frame generation and reception capability of the NIC.
3825 static void s2io_ethtool_getpause_data(struct net_device *dev,
3826 struct ethtool_pauseparam *ep)
3829 nic_t *sp = dev->priv;
3830 XENA_dev_config_t __iomem *bar0 = sp->bar0;
/* Read the live pause configuration straight from the MAC register. */
3832 val64 = readq(&bar0->rmac_pause_cfg);
3833 if (val64 & RMAC_PAUSE_GEN_ENABLE)
3834 ep->tx_pause = TRUE;	/* NIC generates pause frames */
3835 if (val64 & RMAC_PAUSE_RX_ENABLE)
3836 ep->rx_pause = TRUE;	/* NIC honours received pause frames */
3837 ep->autoneg = FALSE;	/* pause autonegotiation not supported */
3841 * s2io_ethtool_setpause_data - set/reset pause frame generation.
3842 * @sp : private member of the device structure, which is a pointer to the
3843 * s2io_nic structure.
3844 * @ep : pointer to the structure with pause parameters given by ethtool.
3846 * It can be used to set or reset Pause frame generation or reception
3847 * support of the NIC.
3849 * int, returns 0 on Success
3852 static int s2io_ethtool_setpause_data(struct net_device *dev,
3853 struct ethtool_pauseparam *ep)
3856 nic_t *sp = dev->priv;
3857 XENA_dev_config_t __iomem *bar0 = sp->bar0;
/* Read-modify-write of the MAC pause config.  The if/else lines that
 * select between each pair of branches (based on ep->tx_pause and
 * ep->rx_pause) are not visible in this decimated extract. */
3859 val64 = readq(&bar0->rmac_pause_cfg);
3861 val64 |= RMAC_PAUSE_GEN_ENABLE;
3863 val64 &= ~RMAC_PAUSE_GEN_ENABLE;
3865 val64 |= RMAC_PAUSE_RX_ENABLE;
3867 val64 &= ~RMAC_PAUSE_RX_ENABLE;
3868 writeq(val64, &bar0->rmac_pause_cfg);	/* commit the new configuration */
3873 * read_eeprom - reads 4 bytes of data from user given offset.
3874 * @sp : private member of the device structure, which is a pointer to the
3875 * s2io_nic structure.
3876 * @off : offset at which the data must be written
3877 * @data : Its an output parameter where the data read at the given
3880 * Will read 4 bytes of data from the user given offset and return the
3882 * NOTE: Will allow to read only part of the EEPROM visible through the
3885 * -1 on failure and 0 on success.
3888 #define S2IO_DEV_ID 5
3889 static int read_eeprom(nic_t * sp, int off, u32 * data)
3894 XENA_dev_config_t __iomem *bar0 = sp->bar0;
/* Issue a 4-byte I2C read of the serial EEPROM (device id 5) via the
 * NIC's i2c_control register. */
3896 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
3897 I2C_CONTROL_BYTE_CNT(0x3) | I2C_CONTROL_READ |
3898 I2C_CONTROL_CNTL_START;
3899 SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
/* Bounded completion poll; the delay and counter-increment lines (and
 * the success/failure returns) are not visible in this decimated
 * extract. */
3901 while (exit_cnt < 5) {
3902 val64 = readq(&bar0->i2c_control);
3903 if (I2C_CONTROL_CNTL_END(val64)) {
3904 *data = I2C_CONTROL_GET_DATA(val64);	/* extract the 32-bit payload */
3916 * write_eeprom - actually writes the relevant part of the data value.
3917 * @sp : private member of the device structure, which is a pointer to the
3918 * s2io_nic structure.
3919 * @off : offset at which the data must be written
3920 * @data : The data that is to be written
3921 * @cnt : Number of bytes of the data that are actually to be written into
3922 * the Eeprom. (max of 3)
3924 * Actually writes the relevant part of the data value into the Eeprom
3925 * through the I2C bus.
3927 * 0 on success, -1 on failure.
3930 static int write_eeprom(nic_t * sp, int off, u32 data, int cnt)
3932 int exit_cnt = 0, ret = -1;
3934 XENA_dev_config_t __iomem *bar0 = sp->bar0;
/* Issue the I2C write with the payload embedded in the control word. */
3936 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
3937 I2C_CONTROL_BYTE_CNT(cnt) | I2C_CONTROL_SET_DATA(data) |
3938 I2C_CONTROL_CNTL_START;
3939 SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
/* Bounded completion poll; success additionally requires that the
 * device did not NACK the transfer.  The delay/increment lines and the
 * ret assignment are not visible in this decimated extract. */
3941 while (exit_cnt < 5) {
3942 val64 = readq(&bar0->i2c_control);
3943 if (I2C_CONTROL_CNTL_END(val64)) {
3944 if (!(val64 & I2C_CONTROL_NACK))
3956 * s2io_ethtool_geeprom - reads the value stored in the Eeprom.
3957 * @sp : private member of the device structure, which is a pointer to the s2io_nic structure.
3958 * @eeprom : pointer to the user level structure provided by ethtool,
3959 * containing all relevant information.
3960 * @data_buf : user defined value to be written into Eeprom.
3961 * Description: Reads the values stored in the Eeprom at given offset
3962 * for a given length. Stores these values into the input argument data
3963 * buffer 'data_buf' and returns these to the caller (ethtool.)
3968 static int s2io_ethtool_geeprom(struct net_device *dev,
3969 struct ethtool_eeprom *eeprom, u8 * data_buf)
3972 nic_t *sp = dev->priv;
3974 eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);
/* Clamp the request so it never reads past the EEPROM window. */
3976 if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE))
3977 eeprom->len = XENA_EEPROM_SPACE - eeprom->offset;
3979 for (i = 0; i < eeprom->len; i += 4) {
3980 if (read_eeprom(sp, (eeprom->offset + i), &data)) {
3981 DBG_PRINT(ERR_DBG, "Read of EEPROM failed\n");
/* NOTE(review): 'valid' is what gets copied out, but the line deriving
 * it from 'data' is not visible in this decimated extract -- confirm
 * against the full source. */
3985 memcpy((data_buf + i), &valid, 4);
3991 * s2io_ethtool_seeprom - tries to write the user provided value in Eeprom
3992 * @sp : private member of the device structure, which is a pointer to the
3993 * s2io_nic structure.
3994 * @eeprom : pointer to the user level structure provided by ethtool,
3995 * containing all relevant information.
3996 * @data_buf : user defined value to be written into Eeprom.
3998 * Tries to write the user provided value in the Eeprom, at the offset
3999 * given by the user.
4001 * 0 on success, -EFAULT on failure.
4004 static int s2io_ethtool_seeprom(struct net_device *dev,
4005 struct ethtool_eeprom *eeprom,
4008 int len = eeprom->len, cnt = 0;
4009 u32 valid = 0, data;
4010 nic_t *sp = dev->priv;
/* Refuse writes unless the caller echoes back the magic geeprom
 * reports (vendor | device << 16) -- guards against accidental writes. */
4012 if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
4014 "ETHTOOL_WRITE_EEPROM Err: Magic value ");
4015 DBG_PRINT(ERR_DBG, "is wrong, Its not 0x%x\n",
/* Write loop: one byte at a time, the byte positioned in the top of
 * the 32-bit word.  The surrounding loop and counter lines are not
 * visible in this decimated extract. */
4021 data = (u32) data_buf[cnt] & 0x000000FF;
4023 valid = (u32) (data << 24);
4027 if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) {
4029 "ETHTOOL_WRITE_EEPROM Err: Cannot ");
4031 "write into the specified offset\n");
4042 * s2io_register_test - reads and writes into all clock domains.
4043 * @sp : private member of the device structure, which is a pointer to the
4044 * s2io_nic structure.
4045 * @data : variable that returns the result of each of the test conducted b
4048 * Read and write into all clock domains. The NIC has 3 clock domains,
4049 * see that registers in all the three regions are accessible.
4054 static int s2io_register_test(nic_t * sp, uint64_t * data)
4056 XENA_dev_config_t __iomem *bar0 = sp->bar0;
/* Read registers with documented constant reset values from each clock
 * domain and compare; the lines that record a failure into *data are
 * not visible in this decimated extract. */
4060 val64 = readq(&bar0->pif_rd_swapper_fb);
4061 if (val64 != 0x123456789abcdefULL) {
4063 DBG_PRINT(INFO_DBG, "Read Test level 1 fails\n");
4066 val64 = readq(&bar0->rmac_pause_cfg);
4067 if (val64 != 0xc000ffff00000000ULL) {
4069 DBG_PRINT(INFO_DBG, "Read Test level 2 fails\n");
4072 val64 = readq(&bar0->rx_queue_cfg);
4073 if (val64 != 0x0808080808080808ULL) {
4075 DBG_PRINT(INFO_DBG, "Read Test level 3 fails\n");
4078 val64 = readq(&bar0->xgxs_efifo_cfg);
4079 if (val64 != 0x000000001923141EULL) {
4081 DBG_PRINT(INFO_DBG, "Read Test level 4 fails\n");
/* Write tests: scribble alternating bit patterns into xmsi_data and
 * read them back. */
4084 val64 = 0x5A5A5A5A5A5A5A5AULL;
4085 writeq(val64, &bar0->xmsi_data);
4086 val64 = readq(&bar0->xmsi_data);
4087 if (val64 != 0x5A5A5A5A5A5A5A5AULL) {
4089 DBG_PRINT(ERR_DBG, "Write Test level 1 fails\n");
4092 val64 = 0xA5A5A5A5A5A5A5A5ULL;
4093 writeq(val64, &bar0->xmsi_data);
4094 val64 = readq(&bar0->xmsi_data);
4095 if (val64 != 0xA5A5A5A5A5A5A5A5ULL) {
4097 DBG_PRINT(ERR_DBG, "Write Test level 2 fails\n");
4105 * s2io_eeprom_test - to verify that EEprom in the xena can be programmed.
4106 * @sp : private member of the device structure, which is a pointer to the
4107 * s2io_nic structure.
4108 * @data:variable that returns the result of each of the test conducted by
4111 * Verify that EEPROM in the xena can be programmed using I2C_CONTROL
4117 static int s2io_eeprom_test(nic_t * sp, uint64_t * data)
/* Alternates between offsets that must REJECT writes (the '!' checks:
 * write succeeding there is the failure) and offsets that must ACCEPT
 * a write and read it back.  The fail-count bookkeeping and return
 * lines are not visible in this decimated extract. */
4122 /* Test Write Error at offset 0 */
4123 if (!write_eeprom(sp, 0, 0, 3))
4126 /* Test Write at offset 4f0 */
4127 if (write_eeprom(sp, 0x4F0, 0x01234567, 3))
4129 if (read_eeprom(sp, 0x4F0, &ret_data))
4132 if (ret_data != 0x01234567)
4135 /* Reset the EEPROM data to FFFF */
4136 write_eeprom(sp, 0x4F0, 0xFFFFFFFF, 3);
4138 /* Test Write Request Error at offset 0x7c */
4139 if (!write_eeprom(sp, 0x07C, 0, 3))
4142 /* Test Write Request at offset 0x7fc */
4143 if (write_eeprom(sp, 0x7FC, 0x01234567, 3))
4145 if (read_eeprom(sp, 0x7FC, &ret_data))
4148 if (ret_data != 0x01234567)
4151 /* Reset the EEPROM data to FFFF */
4152 write_eeprom(sp, 0x7FC, 0xFFFFFFFF, 3);
4154 /* Test Write Error at offset 0x80 */
4155 if (!write_eeprom(sp, 0x080, 0, 3))
4158 /* Test Write Error at offset 0xfc */
4159 if (!write_eeprom(sp, 0x0FC, 0, 3))
4162 /* Test Write Error at offset 0x100 */
4163 if (!write_eeprom(sp, 0x100, 0, 3))
4166 /* Test Write Error at offset 4ec */
4167 if (!write_eeprom(sp, 0x4EC, 0, 3))
4175 * s2io_bist_test - invokes the MemBist test of the card .
4176 * @sp : private member of the device structure, which is a pointer to the
4177 * s2io_nic structure.
4178 * @data:variable that returns the result of each of the test conducted by
4181 * This invokes the MemBist test of the card. We give around
4182 * 2 secs time for the Test to complete. If it's still not complete
4183 * within this period, we consider that the test failed.
4185 * 0 on success and -1 on failure.
4188 static int s2io_bist_test(nic_t * sp, uint64_t * data)
4191 int cnt = 0, ret = -1;
4193 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
4194 bist |= PCI_BIST_START;	/* request the self-test */
/* PCI_BIST is an 8-bit config register: write it back as a byte.  The
 * old code used pci_write_config_word() here, which also clobbered the
 * 8-bit register following PCI_BIST in config space. */
4195 pci_write_config_byte(sp->pdev, PCI_BIST, bist);
4198 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
4199 if (!(bist & PCI_BIST_START)) {	/* hardware clears START when done */
4200 *data = (bist & PCI_BIST_CODE_MASK);	/* 0 == all internal tests passed */
4212 * s2io_link_test - verifies the link state of the nic
4213 * @sp : private member of the device structure, which is a pointer to the
4214 * s2io_nic structure.
4215 * @data: variable that returns the result of each of the test conducted by
4218 * The function verifies the link state of the NIC and updates the input
4219 * argument 'data' appropriately.
4224 static int s2io_link_test(nic_t * sp, uint64_t * data)
4226 XENA_dev_config_t __iomem *bar0 = sp->bar0;
/* An RMAC local-fault indication means the link is down; the lines that
 * set *data and return are not visible in this decimated extract. */
4229 val64 = readq(&bar0->adapter_status);
4230 if (val64 & ADAPTER_STATUS_RMAC_LOCAL_FAULT)
4237 * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
4238 * @sp - private member of the device structure, which is a pointer to the
4239 * s2io_nic structure.
4240 * @data - variable that returns the result of each of the test
4241 * conducted by the driver.
4243 * This is one of the offline test that tests the read and write
4244 * access to the RldRam chip on the NIC.
4249 static int s2io_rldram_test(nic_t * sp, uint64_t * data)
4251 XENA_dev_config_t __iomem *bar0 = sp->bar0;
4253 int cnt, iteration = 0, test_pass = 0;
/* ECC must be off while the hardware pattern tester owns the RLDRAM. */
4255 val64 = readq(&bar0->adapter_control);
4256 val64 &= ~ADAPTER_ECC_EN;
4257 writeq(val64, &bar0->adapter_control);
4259 val64 = readq(&bar0->mc_rldram_test_ctrl);
4260 val64 |= MC_RLDRAM_TEST_MODE;
4261 writeq(val64, &bar0->mc_rldram_test_ctrl);
4263 val64 = readq(&bar0->mc_rldram_mrs);
4264 val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE;
4265 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
4267 val64 |= MC_RLDRAM_MRS_ENABLE;
4268 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
/* Two passes: the base patterns, then the same patterns with the upper
 * 48 bits inverted. */
4270 while (iteration < 2) {
4271 val64 = 0x55555555aaaa0000ULL;
4272 if (iteration == 1) {
4273 val64 ^= 0xFFFFFFFFFFFF0000ULL;
4275 writeq(val64, &bar0->mc_rldram_test_d0);
4277 val64 = 0xaaaa5a5555550000ULL;
4278 if (iteration == 1) {
4279 val64 ^= 0xFFFFFFFFFFFF0000ULL;
4281 writeq(val64, &bar0->mc_rldram_test_d1);
4283 val64 = 0x55aaaaaaaa5a0000ULL;
4284 if (iteration == 1) {
4285 val64 ^= 0xFFFFFFFFFFFF0000ULL;
4287 writeq(val64, &bar0->mc_rldram_test_d2);
4289 val64 = (u64) (0x0000003fffff0000ULL);
4290 writeq(val64, &bar0->mc_rldram_test_add);
/* Kick a WRITE+GO cycle and poll (bounded) for TEST_DONE; the delay
 * and failure-exit lines are not visible in this decimated extract. */
4293 val64 = MC_RLDRAM_TEST_MODE;
4294 writeq(val64, &bar0->mc_rldram_test_ctrl);
4297 MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_WRITE |
4299 writeq(val64, &bar0->mc_rldram_test_ctrl);
4301 for (cnt = 0; cnt < 5; cnt++) {
4302 val64 = readq(&bar0->mc_rldram_test_ctrl);
4303 if (val64 & MC_RLDRAM_TEST_DONE)
/* Read-back pass: GO without WRITE, then poll again. */
4311 val64 = MC_RLDRAM_TEST_MODE;
4312 writeq(val64, &bar0->mc_rldram_test_ctrl);
4314 val64 |= MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
4315 writeq(val64, &bar0->mc_rldram_test_ctrl);
4317 for (cnt = 0; cnt < 5; cnt++) {
4318 val64 = readq(&bar0->mc_rldram_test_ctrl);
4319 if (val64 & MC_RLDRAM_TEST_DONE)
4327 val64 = readq(&bar0->mc_rldram_test_ctrl);
4328 if (val64 & MC_RLDRAM_TEST_PASS)	/* hardware comparator verdict */
4343 * s2io_ethtool_test - conducts 6 tests to determine the health of card.
4344 * @sp : private member of the device structure, which is a pointer to the
4345 * s2io_nic structure.
4346 * @ethtest : pointer to a ethtool command specific structure that will be
4347 * returned to the user.
4348 * @data : variable that returns the result of each of the test
4349 * conducted by the driver.
4351 * This function conducts 6 tests ( 4 offline and 2 online) to determine
4352 * the health of the card.
4357 static void s2io_ethtool_test(struct net_device *dev,
4358 struct ethtool_test *ethtest,
4361 nic_t *sp = dev->priv;
4362 int orig_state = netif_running(sp->dev);	/* to restore interface state after the tests */
/* Offline tests need the NIC quiesced, so close it first.  The re-open
 * and some test invocations are not visible in this decimated extract. */
4364 if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
4365 /* Offline Tests. */
4367 s2io_close(sp->dev);
4369 if (s2io_register_test(sp, &data[0]))
4370 ethtest->flags |= ETH_TEST_FL_FAILED;
4374 if (s2io_rldram_test(sp, &data[3]))
4375 ethtest->flags |= ETH_TEST_FL_FAILED;
4379 if (s2io_eeprom_test(sp, &data[1]))
4380 ethtest->flags |= ETH_TEST_FL_FAILED;
4382 if (s2io_bist_test(sp, &data[4]))
4383 ethtest->flags |= ETH_TEST_FL_FAILED;
4393 "%s: is not up, cannot run test\n",
/* Online test: the link check can run while the interface is up. */
4402 if (s2io_link_test(sp, &data[2]))
4403 ethtest->flags |= ETH_TEST_FL_FAILED;
/* s2io_get_ethtool_stats - fill tmp_stats[] in the exact order of the
 * ethtool stats key table.  Hardware counters live little-endian in the
 * stats block; 32-bit counters with separate *_oflow halves are
 * stitched into one u64.  NOTE(review): many "tmp_stats[i++] =" prefix
 * lines are not visible in this decimated extract -- the orphaned
 * expression lines below are their continuations. */
4412 static void s2io_get_ethtool_stats(struct net_device *dev,
4413 struct ethtool_stats *estats,
4417 nic_t *sp = dev->priv;
4418 StatInfo_t *stat_info = sp->mac_control.stats_info;
4420 s2io_updt_stats(sp);	/* snapshot fresh hardware counters first */
4422 (u64)le32_to_cpu(stat_info->tmac_frms_oflow) << 32 |
4423 le32_to_cpu(stat_info->tmac_frms);
4425 (u64)le32_to_cpu(stat_info->tmac_data_octets_oflow) << 32 |
4426 le32_to_cpu(stat_info->tmac_data_octets);
4427 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_drop_frms);
4429 (u64)le32_to_cpu(stat_info->tmac_mcst_frms_oflow) << 32 |
4430 le32_to_cpu(stat_info->tmac_mcst_frms);
4432 (u64)le32_to_cpu(stat_info->tmac_bcst_frms_oflow) << 32 |
4433 le32_to_cpu(stat_info->tmac_bcst_frms);
4434 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_pause_ctrl_frms);
4436 (u64)le32_to_cpu(stat_info->tmac_any_err_frms_oflow) << 32 |
4437 le32_to_cpu(stat_info->tmac_any_err_frms);
4438 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_vld_ip_octets);
4440 (u64)le32_to_cpu(stat_info->tmac_vld_ip_oflow) << 32 |
4441 le32_to_cpu(stat_info->tmac_vld_ip);
4443 (u64)le32_to_cpu(stat_info->tmac_drop_ip_oflow) << 32 |
4444 le32_to_cpu(stat_info->tmac_drop_ip);
4446 (u64)le32_to_cpu(stat_info->tmac_icmp_oflow) << 32 |
4447 le32_to_cpu(stat_info->tmac_icmp);
4449 (u64)le32_to_cpu(stat_info->tmac_rst_tcp_oflow) << 32 |
4450 le32_to_cpu(stat_info->tmac_rst_tcp);
4451 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_tcp);
4452 tmp_stats[i++] = (u64)le32_to_cpu(stat_info->tmac_udp_oflow) << 32 |
4453 le32_to_cpu(stat_info->tmac_udp);
4455 (u64)le32_to_cpu(stat_info->rmac_vld_frms_oflow) << 32 |
4456 le32_to_cpu(stat_info->rmac_vld_frms);
4458 (u64)le32_to_cpu(stat_info->rmac_data_octets_oflow) << 32 |
4459 le32_to_cpu(stat_info->rmac_data_octets);
4460 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_fcs_err_frms);
4461 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_drop_frms);
4463 (u64)le32_to_cpu(stat_info->rmac_vld_mcst_frms_oflow) << 32 |
4464 le32_to_cpu(stat_info->rmac_vld_mcst_frms);
4466 (u64)le32_to_cpu(stat_info->rmac_vld_bcst_frms_oflow) << 32 |
4467 le32_to_cpu(stat_info->rmac_vld_bcst_frms);
4468 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_in_rng_len_err_frms);
4469 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_long_frms);
4470 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_pause_ctrl_frms);
4472 (u64)le32_to_cpu(stat_info->rmac_discarded_frms_oflow) << 32 |
4473 le32_to_cpu(stat_info->rmac_discarded_frms);
4475 (u64)le32_to_cpu(stat_info->rmac_usized_frms_oflow) << 32 |
4476 le32_to_cpu(stat_info->rmac_usized_frms);
4478 (u64)le32_to_cpu(stat_info->rmac_osized_frms_oflow) << 32 |
4479 le32_to_cpu(stat_info->rmac_osized_frms);
4481 (u64)le32_to_cpu(stat_info->rmac_frag_frms_oflow) << 32 |
4482 le32_to_cpu(stat_info->rmac_frag_frms);
4484 (u64)le32_to_cpu(stat_info->rmac_jabber_frms_oflow) << 32 |
4485 le32_to_cpu(stat_info->rmac_jabber_frms);
4486 tmp_stats[i++] = (u64)le32_to_cpu(stat_info->rmac_ip_oflow) << 32 |
4487 le32_to_cpu(stat_info->rmac_ip);
4488 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ip_octets);
4489 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_hdr_err_ip);
4490 tmp_stats[i++] = (u64)le32_to_cpu(stat_info->rmac_drop_ip_oflow) << 32 |
4491 le32_to_cpu(stat_info->rmac_drop_ip);
4492 tmp_stats[i++] = (u64)le32_to_cpu(stat_info->rmac_icmp_oflow) << 32 |
4493 le32_to_cpu(stat_info->rmac_icmp);
4494 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_tcp);
4495 tmp_stats[i++] = (u64)le32_to_cpu(stat_info->rmac_udp_oflow) << 32 |
4496 le32_to_cpu(stat_info->rmac_udp);
4498 (u64)le32_to_cpu(stat_info->rmac_err_drp_udp_oflow) << 32 |
4499 le32_to_cpu(stat_info->rmac_err_drp_udp);
4501 (u64)le32_to_cpu(stat_info->rmac_pause_cnt_oflow) << 32 |
4502 le32_to_cpu(stat_info->rmac_pause_cnt);
4504 (u64)le32_to_cpu(stat_info->rmac_accepted_ip_oflow) << 32 |
4505 le32_to_cpu(stat_info->rmac_accepted_ip);
4506 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_err_tcp);
/* Software-tracked counters (host byte order, no conversion needed). */
4508 tmp_stats[i++] = stat_info->sw_stat.single_ecc_errs;
4509 tmp_stats[i++] = stat_info->sw_stat.double_ecc_errs;
4512 int s2io_ethtool_get_regs_len(struct net_device *dev)
/* Size of the Xframe register window exported via ethtool -d. */
4514 return XENA_REG_SPACE;
4518 u32 s2io_ethtool_get_rx_csum(struct net_device * dev)
/* Report whether RX checksum offload is currently enabled. */
4520 nic_t *nic = dev->priv;
4522 return nic->rx_csum;
4524 int s2io_ethtool_set_rx_csum(struct net_device *dev, u32 data)
4526 nic_t *sp = dev->priv;
/* NOTE(review): the assignment of 'data' into sp->rx_csum and the
 * return are not visible in this decimated extract. */
4535 int s2io_get_eeprom_len(struct net_device *dev)
/* Size of the EEPROM window visible through the I2C bridge. */
4537 return XENA_EEPROM_SPACE;
4540 int s2io_ethtool_self_test_count(struct net_device *dev)
/* Number of self-test result slots reported to ethtool. */
4542 return S2IO_TEST_LEN;
/* s2io_ethtool_get_strings - export the self-test / statistics name
 * tables for the requested string set.  Case labels (ETH_SS_TEST /
 * ETH_SS_STATS) are not visible in this decimated extract. */
4544 void s2io_ethtool_get_strings(struct net_device *dev,
4545 u32 stringset, u8 * data)
4547 switch (stringset) {
4549 memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
/* The "&ethtool_stats_keys" below had been mangled into the mojibake
 * form of the HTML entity &eth; -- restored to a real address-of
 * expression on the stats key table. */
4552 memcpy(data, &ethtool_stats_keys,
4553 sizeof(ethtool_stats_keys));
4556 static int s2io_ethtool_get_stats_count(struct net_device *dev)
/* Number of u64 statistics filled by s2io_get_ethtool_stats(). */
4558 return S2IO_STAT_LEN;
4561 int s2io_ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
/* Toggle IP checksum offload in the net_device feature flags; the
 * if/else selecting a branch and the return are not visible in this
 * decimated extract. */
4564 dev->features |= NETIF_F_IP_CSUM;
4566 dev->features &= ~NETIF_F_IP_CSUM;
/* ethtool entry points for the s2io driver; hooked up to the
 * net_device via SET_ETHTOOL_OPS at probe time. */
4572 static struct ethtool_ops netdev_ethtool_ops = {
4573 .get_settings = s2io_ethtool_gset,
4574 .set_settings = s2io_ethtool_sset,
4575 .get_drvinfo = s2io_ethtool_gdrvinfo,
4576 .get_regs_len = s2io_ethtool_get_regs_len,
4577 .get_regs = s2io_ethtool_gregs,
4578 .get_link = ethtool_op_get_link,
4579 .get_eeprom_len = s2io_get_eeprom_len,
4580 .get_eeprom = s2io_ethtool_geeprom,
4581 .set_eeprom = s2io_ethtool_seeprom,
4582 .get_pauseparam = s2io_ethtool_getpause_data,
4583 .set_pauseparam = s2io_ethtool_setpause_data,
4584 .get_rx_csum = s2io_ethtool_get_rx_csum,
4585 .set_rx_csum = s2io_ethtool_set_rx_csum,
4586 .get_tx_csum = ethtool_op_get_tx_csum,
4587 .set_tx_csum = s2io_ethtool_op_set_tx_csum,
4588 .get_sg = ethtool_op_get_sg,
4589 .set_sg = ethtool_op_set_sg,
4591 .get_tso = ethtool_op_get_tso,
4592 .set_tso = ethtool_op_set_tso,
4594 .self_test_count = s2io_ethtool_self_test_count,
4595 .self_test = s2io_ethtool_test,
4596 .get_strings = s2io_ethtool_get_strings,
4597 .phys_id = s2io_ethtool_idnic,
4598 .get_stats_count = s2io_ethtool_get_stats_count,
4599 .get_ethtool_stats = s2io_get_ethtool_stats
4603 * s2io_ioctl - Entry point for the Ioctl
4604 * @dev : Device pointer.
4605 * @ifr : An IOCTL specific structure, that can contain a pointer to
4606 * a proprietary structure used to pass information to the driver.
4607 * @cmd : This is used to distinguish between the different commands that
4608 * can be passed to the IOCTL functions.
4610 * Currently there is no special functionality supported in IOCTL, hence
4611 * this function always returns -EOPNOTSUPP.
4614 int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
/* NOTE(review): the return statement itself is not visible in this
 * decimated extract. */
4620 * s2io_change_mtu - entry point to change MTU size for the device.
4621 * @dev : device pointer.
4622 * @new_mtu : the new MTU size for the device.
4623 * Description: A driver entry point to change MTU size for the device.
4624 * Before changing the MTU the device must be stopped.
4626 * 0 on success and an appropriate (-)ve integer as defined in errno.h
4630 int s2io_change_mtu(struct net_device *dev, int new_mtu)
4632 nic_t *sp = dev->priv;
/* Validate the requested size against the hardware limits. */
4634 if ((new_mtu < MIN_MTU) || (new_mtu > S2IO_JUMBO_SIZE)) {
4635 DBG_PRINT(ERR_DBG, "%s: MTU size is invalid.\n",
/* Running interface: bounce the card so the new MTU is programmed
 * during card-up (the card-down call and dev->mtu assignment are not
 * visible in this decimated extract). */
4641 if (netif_running(dev)) {
4643 netif_stop_queue(dev);
4644 if (s2io_card_up(sp)) {
4645 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
4648 if (netif_queue_stopped(dev))
4649 netif_wake_queue(dev);
4650 } else { /* Device is down */
4651 XENA_dev_config_t __iomem *bar0 = sp->bar0;
4652 u64 val64 = new_mtu;
/* While down, program the MAC max payload length register directly. */
4654 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
4661 * s2io_tasklet - Bottom half of the ISR.
4662 * @dev_adr : address of the device structure in dma_addr_t format.
4664 * This is the tasklet or the bottom half of the ISR. This is
4665 * an extension of the ISR which is scheduled by the scheduler to be run
4666 * when the load on the CPU is low. All low priority tasks of the ISR can
4667 * be pushed into the tasklet. For now the tasklet is used only to
4668 * replenish the Rx buffers in the Rx buffer descriptors.
4673 static void s2io_tasklet(unsigned long dev_addr)
4675 struct net_device *dev = (struct net_device *) dev_addr;
4676 nic_t *sp = dev->priv;
4678 mac_info_t *mac_control;
4679 struct config_param *config;
4681 mac_control = &sp->mac_control;
4682 config = &sp->config;
/* TASKLET_IN_USE test-and-sets bit 0 of sp->tasklet_status, so only
 * one instance refills the rings at a time. */
4684 if (!TASKLET_IN_USE) {
4685 for (i = 0; i < config->rx_ring_num; i++) {
4686 ret = fill_rx_buffers(sp, i);
4687 if (ret == -ENOMEM) {
4688 DBG_PRINT(ERR_DBG, "%s: Out of ",
4690 DBG_PRINT(ERR_DBG, "memory in tasklet\n");
4692 } else if (ret == -EFILL) {
4694 "%s: Rx Ring %d is full\n",
4699 clear_bit(0, (&sp->tasklet_status));	/* release TASKLET_IN_USE */
4704 * s2io_set_link - Set the Link status
4705 * @data: long pointer to device private structure
4706 * Description: Sets the link status for the adapter
4709 static void s2io_set_link(unsigned long data)
4711 nic_t *nic = (nic_t *) data;
4712 struct net_device *dev = nic->dev;
4713 XENA_dev_config_t __iomem *bar0 = nic->bar0;
/* link_state bit 0 doubles as a "link work in progress" mutex shared
 * with s2io_card_down(); bail out if it is already held. */
4717 if (test_and_set_bit(0, &(nic->link_state))) {
4718 /* The card is being reset, no point doing anything */
4722 subid = nic->pdev->subsystem_device;
4723 if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
4725 * Allow a small delay for the NICs self initiated
4726 * cleanup to complete.
4731 val64 = readq(&bar0->adapter_status);
/* Only touch the adapter while it is quiescent. */
4732 if (verify_xena_quiescence(nic, val64, nic->device_enabled_once)) {
4733 if (LINK_IS_UP(val64)) {
4734 val64 = readq(&bar0->adapter_control);
4735 val64 |= ADAPTER_CNTL_EN;	/* enable the adapter */
4736 writeq(val64, &bar0->adapter_control);
4737 if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
4739 val64 = readq(&bar0->gpio_control);
4740 val64 |= GPIO_CTRL_GPIO_0;	/* drive link LED via GPIO */
4741 writeq(val64, &bar0->gpio_control);
4742 val64 = readq(&bar0->gpio_control);
4744 val64 |= ADAPTER_LED_ON;
4745 writeq(val64, &bar0->adapter_control);
/* Re-check the link after enabling; it may have dropped meanwhile. */
4747 if (s2io_link_fault_indication(nic) ==
4748 MAC_RMAC_ERR_TIMER) {
4749 val64 = readq(&bar0->adapter_status);
4750 if (!LINK_IS_UP(val64)) {
4751 DBG_PRINT(ERR_DBG, "%s:", dev->name);
4752 DBG_PRINT(ERR_DBG, " Link down");
4753 DBG_PRINT(ERR_DBG, "after ");
4754 DBG_PRINT(ERR_DBG, "enabling ");
4755 DBG_PRINT(ERR_DBG, "device \n");
4758 if (nic->device_enabled_once == FALSE) {
4759 nic->device_enabled_once = TRUE;
4761 s2io_link(nic, LINK_UP);	/* report carrier-on to the stack */
/* Link down branch: turn the LED off and report carrier-off. */
4763 if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
4765 val64 = readq(&bar0->gpio_control);
4766 val64 &= ~GPIO_CTRL_GPIO_0;
4767 writeq(val64, &bar0->gpio_control);
4768 val64 = readq(&bar0->gpio_control);
4770 s2io_link(nic, LINK_DOWN);
4772 } else { /* NIC is not Quiescent. */
4773 DBG_PRINT(ERR_DBG, "%s: Error: ", dev->name);
4774 DBG_PRINT(ERR_DBG, "device is not Quiescent\n");
4775 netif_stop_queue(dev);
4777 clear_bit(0, &(nic->link_state));	/* release the link-work mutex */
/* s2io_card_down - quiesce and tear down the NIC: stop timers and the
 * tasklet, verify quiescence, wait out in-flight interrupt handlers,
 * then free all Tx/Rx buffers under their locks. */
4780 static void s2io_card_down(nic_t * sp)
4783 XENA_dev_config_t __iomem *bar0 = sp->bar0;
4784 unsigned long flags;
4785 register u64 val64 = 0;
4787 del_timer_sync(&sp->alarm_timer);
4788 /* If s2io_set_link task is executing, wait till it completes. */
4789 while (test_and_set_bit(0, &(sp->link_state))) {
4792 atomic_set(&sp->card_state, CARD_DOWN);
4794 /* disable Tx and Rx traffic on the NIC */
4798 tasklet_kill(&sp->task);	/* no more Rx-refill bottom halves */
4800 /* Check if the device is Quiescent and then Reset the NIC */
4802 val64 = readq(&bar0->adapter_status);
4803 if (verify_xena_quiescence(sp, val64, sp->device_enabled_once)) {
4811 "s2io_close:Device not Quiescent ");
4812 DBG_PRINT(ERR_DBG, "adaper status reads 0x%llx\n",
4813 (unsigned long long) val64);
4819 /* Waiting till all Interrupt handlers are complete */
4823 if (!atomic_read(&sp->isr_cnt))
/* Buffer teardown is done under the respective ring locks. */
4828 spin_lock_irqsave(&sp->tx_lock, flags);
4829 /* Free all Tx buffers */
4830 free_tx_buffers(sp);
4831 spin_unlock_irqrestore(&sp->tx_lock, flags);
4833 /* Free all Rx buffers */
4834 spin_lock_irqsave(&sp->rx_lock, flags);
4835 free_rx_buffers(sp);
4836 spin_unlock_irqrestore(&sp->rx_lock, flags);
4838 clear_bit(0, &(sp->link_state));	/* release the link-work mutex */
/* s2io_card_up - bring the NIC to operational state: program the H/W
 * registers, pre-fill all Rx rings, set the receive mode, start the
 * refill tasklet, enable traffic and arm the alarm timer. */
4841 static int s2io_card_up(nic_t * sp)
4844 mac_info_t *mac_control;
4845 struct config_param *config;
4846 struct net_device *dev = (struct net_device *) sp->dev;
4848 /* Initialize the H/W I/O registers */
4849 if (init_nic(sp) != 0) {
4850 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
4856 * Initializing the Rx buffers. For now we are considering only 1
4857 * Rx ring and initializing buffers into 30 Rx blocks
4859 mac_control = &sp->mac_control;
4860 config = &sp->config;
4862 for (i = 0; i < config->rx_ring_num; i++) {
4863 if ((ret = fill_rx_buffers(sp, i))) {
4864 DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
4867 free_rx_buffers(sp);	/* undo partial fills before bailing */
4870 DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
4871 atomic_read(&sp->rx_bufs_left[i]));
4874 /* Setting its receive mode */
4875 s2io_set_multicast(dev);
4877 /* Enable tasklet for the device */
4878 tasklet_init(&sp->task, s2io_tasklet, (unsigned long) dev);
4880 /* Enable Rx Traffic and interrupts on the NIC */
4881 if (start_nic(sp)) {
4882 DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
4883 tasklet_kill(&sp->task);	/* unwind everything set up above */
4885 free_irq(dev->irq, dev);
4886 free_rx_buffers(sp);
4890 S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ/2));	/* periodic alarm poll */
4892 atomic_set(&sp->card_state, CARD_UP);
4897 * s2io_restart_nic - Resets the NIC.
4898 * @data : long pointer to the device private structure
4900 * This function is scheduled to be run by the s2io_tx_watchdog
4901 * function after 0.5 secs to reset the NIC. The idea is to reduce
4902 * the run time of the watch dog routine which is run holding a
4906 static void s2io_restart_nic(unsigned long data)
4908 struct net_device *dev = (struct net_device *) data;
4909 nic_t *sp = dev->priv;
/* Re-initialize the card; on failure only a diagnostic is printed.
 * NOTE(review): the s2io_card_down() call preceding this appears to be
 * elided in this excerpt. */
4912 if (s2io_card_up(sp)) {
4913 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
/* Resume the Tx queue that the watchdog had stopped. */
4916 netif_wake_queue(dev);
4917 DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n",
4923 * s2io_tx_watchdog - Watchdog for transmit side.
4924 * @dev : Pointer to net device structure
4926 * This function is triggered if the Tx Queue is stopped
4927 * for a pre-defined amount of time when the Interface is still up.
4928 * If the Interface is jammed in such a situation, the hardware is
4929 * reset (by s2io_close) and restarted again (by s2io_open) to
4930 * overcome any problem that might have been caused in the hardware.
4935 static void s2io_tx_watchdog(struct net_device *dev)
4937 nic_t *sp = dev->priv;
/* Only schedule the reset task when the link carrier is still up;
 * the heavy reset work runs later in rst_timer_task context. */
4939 if (netif_carrier_ok(dev)) {
4940 schedule_work(&sp->rst_timer_task);
4945 * rx_osm_handler - To perform some OS related operations on SKB.
4946 * @sp: private member of the device structure,pointer to s2io_nic structure.
4947 * @skb : the socket buffer pointer.
4948 * @len : length of the packet
4949 * @cksum : FCS checksum of the frame.
4950 * @ring_no : the ring from which this RxD was extracted.
4952 * This function is called by the Rx interrupt service routine to perform
4953 * some OS related operations on the SKB before passing it to the upper
4954 * layers. It mainly checks if the checksum is OK, if so adds it to the
4955 * SKBs cksum variable, increments the Rx packet count and passes the SKB
4956 * to the upper layer. If the checksum is wrong, it increments the Rx
4957 * packet error count, frees the SKB and returns error.
4959 * SUCCESS on success and -1 on failure.
/*
 * NOTE(review): error-path returns, #else/#endif pairs and closing
 * braces are elided in this excerpt; comments describe only the
 * visible code.
 */
4961 static int rx_osm_handler(ring_info_t *ring_data, RxD_t * rxdp)
4963 nic_t *sp = ring_data->nic;
4964 struct net_device *dev = (struct net_device *) sp->dev;
/* The skb pointer was stashed in the RxD's Host_Control field when
 * the buffer was posted. */
4965 struct sk_buff *skb = (struct sk_buff *)
4966 ((unsigned long) rxdp->Host_Control);
4967 int ring_no = ring_data->ring_no;
4968 u16 l3_csum, l4_csum;
4969 #ifdef CONFIG_2BUFF_MODE
4970 int buf0_len = RXD_GET_BUFFER0_SIZE(rxdp->Control_2);
4971 int buf2_len = RXD_GET_BUFFER2_SIZE(rxdp->Control_2);
4972 int get_block = ring_data->rx_curr_get_info.block_index;
4973 int get_off = ring_data->rx_curr_get_info.offset;
4974 buffAdd_t *ba = &ring_data->ba[get_block][get_off];
4975 unsigned char *buff;
/* NOTE(review): stray double semicolon at the end of the next line;
 * harmless, but should be a single ';'. */
4977 u16 len = (u16) ((RXD_GET_BUFFER0_SIZE(rxdp->Control_2)) >> 48);;
/* A non-zero transfer code in Control_1 marks a receive error:
 * count it, return the descriptor, and drop the frame. */
4980 if (rxdp->Control_1 & RXD_T_CODE) {
4981 unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
4982 DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%llx\n",
4985 sp->stats.rx_crc_errors++;
4986 atomic_dec(&sp->rx_bufs_left[ring_no]);
4987 rxdp->Host_Control = 0;
4991 /* Updating statistics */
4992 rxdp->Host_Control = 0;
4994 sp->stats.rx_packets++;
4995 #ifndef CONFIG_2BUFF_MODE
4996 sp->stats.rx_bytes += len;
4998 sp->stats.rx_bytes += buf0_len + buf2_len;
5001 #ifndef CONFIG_2BUFF_MODE
/* 2-buffer mode: copy the header (buffer 0) in front of the payload
 * (buffer 2) to present one linear skb. */
5004 buff = skb_push(skb, buf0_len);
5005 memcpy(buff, ba->ba_0, buf0_len);
5006 skb_put(skb, buf2_len);
/* Honour the hardware L3/L4 checksum result for TCP/UDP frames. */
5009 if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) &&
5011 l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
5012 l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
5013 if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
5015 * NIC verifies if the Checksum of the received
5016 * frame is Ok or not and accordingly returns
5017 * a flag in the RxD.
5019 skb->ip_summed = CHECKSUM_UNNECESSARY;
5022 * Packet with erroneous checksum, let the
5023 * upper layers deal with it.
5025 skb->ip_summed = CHECKSUM_NONE;
5028 skb->ip_summed = CHECKSUM_NONE;
5031 skb->protocol = eth_type_trans(skb, dev);
5032 #ifdef CONFIG_S2IO_NAPI
/* NAPI path: hand VLAN-tagged frames to the accel helper, others to
 * netif_receive_skb(). */
5033 if (sp->vlgrp && RXD_GET_VLAN_TAG(rxdp->Control_2)) {
5034 /* Queueing the vlan frame to the upper layer */
5035 vlan_hwaccel_receive_skb(skb, sp->vlgrp,
5036 RXD_GET_VLAN_TAG(rxdp->Control_2));
5038 netif_receive_skb(skb);
/* Non-NAPI path below. */
5041 if (sp->vlgrp && RXD_GET_VLAN_TAG(rxdp->Control_2)) {
5042 /* Queueing the vlan frame to the upper layer */
5043 vlan_hwaccel_rx(skb, sp->vlgrp,
5044 RXD_GET_VLAN_TAG(rxdp->Control_2));
5049 dev->last_rx = jiffies;
5050 atomic_dec(&sp->rx_bufs_left[ring_no]);
5055 * s2io_link - stops/starts the Tx queue.
5056 * @sp : private member of the device structure, which is a pointer to the
5057 * s2io_nic structure.
5058 * @link : indicates whether link is UP/DOWN.
5060 * This function stops/starts the Tx queue depending on whether the link
5061 * status of the NIC is down or up. This is called by the Alarm
5062 * interrupt handler whenever a link change interrupt comes up.
5067 void s2io_link(nic_t * sp, int link)
5069 struct net_device *dev = (struct net_device *) sp->dev;
/* Only act on a genuine state transition; repeated notifications of
 * the same state are ignored. */
5071 if (link != sp->last_link_state) {
5072 if (link == LINK_DOWN) {
5073 DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
5074 netif_carrier_off(dev);
5076 DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name);
5077 netif_carrier_on(dev);
5080 sp->last_link_state = link; /* remember for the next transition check */
5084 * get_xena_rev_id - to identify revision ID of xena.
5085 * @pdev : PCI Dev structure
5087 * Function to identify the Revision ID of xena.
5089 * returns the revision ID of the device.
5092 int get_xena_rev_id(struct pci_dev *pdev)
/* Read the PCI revision byte into 'id'.
 * NOTE(review): the declarations of 'ret'/'id' and the return
 * statement are elided in this excerpt. */
5096 ret = pci_read_config_byte(pdev, PCI_REVISION_ID, (u8 *) & id);
5101 * s2io_init_pci -Initialization of PCI and PCI-X configuration registers .
5102 * @sp : private member of the device structure, which is a pointer to the
5103 * s2io_nic structure.
5105 * This function initializes a few of the PCI and PCI-X configuration registers
5106 * with recommended values.
5111 static void s2io_init_pci(nic_t * sp)
5113 u16 pci_cmd = 0, pcix_cmd = 0;
5115 /* Enable Data Parity Error Recovery in PCI-X command register. */
5116 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
5118 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
/* Read back to post/verify the write. */
5120 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
5123 /* Set the PErr Response bit in PCI command register. */
5124 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
5125 pci_write_config_word(sp->pdev, PCI_COMMAND,
5126 (pci_cmd | PCI_COMMAND_PARITY))
5127 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
5129 /* Forcibly disabling relaxed ordering capability of the card. */
5131 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
5133 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
/* Module metadata and loadable parameters (see header comment for the
 * meaning of each parameter). Permission argument 0 hides the
 * parameter from sysfs. */
5137 MODULE_AUTHOR("Raghavendra Koushik <raghavendra.koushik@neterion.com>");
5138 MODULE_LICENSE("GPL");
5139 module_param(tx_fifo_num, int, 0);
5140 module_param(rx_ring_num, int, 0);
5141 module_param_array(tx_fifo_len, uint, NULL, 0);
5142 module_param_array(rx_ring_sz, uint, NULL, 0);
5143 module_param_array(rts_frm_len, uint, NULL, 0);
/* NOTE(review): the third argument below is a sysfs permission mask;
 * '1' is not a valid mode bit combination — verify it should be 0. */
5144 module_param(use_continuous_tx_intrs, int, 1);
5145 module_param(rmac_pause_time, int, 0);
5146 module_param(mc_pause_threshold_q0q3, int, 0);
5147 module_param(mc_pause_threshold_q4q7, int, 0);
5148 module_param(shared_splits, int, 0);
5149 module_param(tmac_util_period, int, 0);
5150 module_param(rmac_util_period, int, 0);
5151 module_param(bimodal, bool, 0);
5152 #ifndef CONFIG_S2IO_NAPI
/* Max packets per ISR pass is only meaningful without NAPI polling. */
5153 module_param(indicate_max_pkts, int, 0);
5157 * s2io_init_nic - Initialization of the adapter .
5158 * @pdev : structure containing the PCI related information of the device.
5159 * @pre: List of PCI devices supported by the driver listed in s2io_tbl.
5161 * The function initializes an adapter identified by the pci_dev structure.
5162 * All OS related initialization including memory and device structure and
5163 * initialization of the device private variable is done. Also the swapper
5164 * control register is initialized to enable read and write into the I/O
5165 * registers of the device.
5167 * returns 0 on success and negative on failure.
/*
 * NOTE(review): many original lines (error labels, returns, closing
 * braces, #else/#endif) are elided in this excerpt; comments below
 * describe only the visible code.
 */
5170 static int __devinit
5171 s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
5174 struct net_device *dev;
5176 int dma_flag = FALSE;
5177 u32 mac_up, mac_down;
5178 u64 val64 = 0, tmp64 = 0;
5179 XENA_dev_config_t __iomem *bar0 = NULL;
5181 mac_info_t *mac_control;
5182 struct config_param *config;
5185 #ifdef CONFIG_S2IO_NAPI
5186 DBG_PRINT(ERR_DBG, "NAPI support has been enabled\n");
/* Enable the PCI device before touching any of its resources. */
5189 if ((ret = pci_enable_device(pdev))) {
5191 "s2io_init_nic: pci_enable_device failed\n");
/* Prefer 64-bit DMA; fall back to 32-bit; otherwise bail out,
 * disabling the device again on each failure path. */
5195 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
5196 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 64bit DMA\n");
5198 if (pci_set_consistent_dma_mask
5199 (pdev, DMA_64BIT_MASK)) {
5201 "Unable to obtain 64bit DMA for \
5202 consistent allocations\n");
5203 pci_disable_device(pdev);
5206 } else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
5207 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 32bit DMA\n");
5209 pci_disable_device(pdev);
/* NOTE(review): the comma operator after DBG_PRINT below is legal C
 * but easy to misread as a terminated statement. */
5213 if (pci_request_regions(pdev, s2io_driver_name)) {
5214 DBG_PRINT(ERR_DBG, "Request Regions failed\n"),
5215 pci_disable_device(pdev);
5219 dev = alloc_etherdev(sizeof(nic_t));
5221 DBG_PRINT(ERR_DBG, "Device allocation failed\n");
5222 pci_disable_device(pdev);
5223 pci_release_regions(pdev);
5227 pci_set_master(pdev);
5228 pci_set_drvdata(pdev, dev);
5229 SET_MODULE_OWNER(dev);
5230 SET_NETDEV_DEV(dev, &pdev->dev);
5232 /* Private member variable initialized to s2io NIC structure */
5234 memset(sp, 0, sizeof(nic_t));
5237 sp->high_dma_flag = dma_flag;
5238 sp->device_enabled_once = FALSE;
/* Distinguish Xframe II (Herc) from Xframe I by PCI device ID. */
5240 if ((pdev->device == PCI_DEVICE_ID_HERC_WIN) ||
5241 (pdev->device == PCI_DEVICE_ID_HERC_UNI))
5242 sp->device_type = XFRAME_II_DEVICE;
5244 sp->device_type = XFRAME_I_DEVICE;
5246 /* Initialize some PCI/PCI-X fields of the NIC. */
5250 * Setting the device configuration parameters.
5251 * Most of these parameters can be specified by the user during
5252 * module insertion as they are module loadable parameters. If
5253 * these parameters are not not specified during load time, they
5254 * are initialized with default values.
5256 mac_control = &sp->mac_control;
5257 config = &sp->config;
5259 /* Tx side parameters. */
5260 tx_fifo_len[0] = DEFAULT_FIFO_LEN; /* Default value. */
5261 config->tx_fifo_num = tx_fifo_num;
5262 for (i = 0; i < MAX_TX_FIFOS; i++) {
5263 config->tx_cfg[i].fifo_len = tx_fifo_len[i];
5264 config->tx_cfg[i].fifo_priority = i;
5267 /* mapping the QoS priority to the configured fifos */
5268 for (i = 0; i < MAX_TX_FIFOS; i++)
5269 config->fifo_mapping[i] = fifo_map[config->tx_fifo_num][i];
5271 config->tx_intr_type = TXD_INT_TYPE_UTILZ;
5272 for (i = 0; i < config->tx_fifo_num; i++) {
5273 config->tx_cfg[i].f_no_snoop =
5274 (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
/* Any FIFO shorter than 65 TxDs forces per-list Tx interrupts. */
5275 if (config->tx_cfg[i].fifo_len < 65) {
5276 config->tx_intr_type = TXD_INT_TYPE_PER_LIST;
5280 config->max_txds = MAX_SKB_FRAGS;
5282 /* Rx side parameters. */
5283 rx_ring_sz[0] = SMALL_BLK_CNT; /* Default value. */
5284 config->rx_ring_num = rx_ring_num;
5285 for (i = 0; i < MAX_RX_RINGS; i++) {
5286 config->rx_cfg[i].num_rxd = rx_ring_sz[i] *
5287 (MAX_RXDS_PER_BLOCK + 1);
5288 config->rx_cfg[i].ring_priority = i;
5291 for (i = 0; i < rx_ring_num; i++) {
5292 config->rx_cfg[i].ring_org = RING_ORG_BUFF1;
5293 config->rx_cfg[i].f_no_snoop =
5294 (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
5297 /* Setting Mac Control parameters */
5298 mac_control->rmac_pause_time = rmac_pause_time;
5299 mac_control->mc_pause_threshold_q0q3 = mc_pause_threshold_q0q3;
5300 mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7;
5303 /* Initialize Ring buffer parameters. */
5304 for (i = 0; i < config->rx_ring_num; i++)
5305 atomic_set(&sp->rx_bufs_left[i], 0);
5307 /* Initialize the number of ISRs currently running */
5308 atomic_set(&sp->isr_cnt, 0);
5310 /* initialize the shared memory used by the NIC and the host */
5311 if (init_shared_mem(sp)) {
5312 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n",
5315 goto mem_alloc_failed;
/* Map BAR0 (device registers) and BAR1 (Tx FIFO doorbells). */
5318 sp->bar0 = ioremap(pci_resource_start(pdev, 0),
5319 pci_resource_len(pdev, 0));
5321 DBG_PRINT(ERR_DBG, "%s: S2IO: cannot remap io mem1\n",
5324 goto bar0_remap_failed;
5327 sp->bar1 = ioremap(pci_resource_start(pdev, 2),
5328 pci_resource_len(pdev, 2));
5330 DBG_PRINT(ERR_DBG, "%s: S2IO: cannot remap io mem2\n",
5333 goto bar1_remap_failed;
5336 dev->irq = pdev->irq;
5337 dev->base_addr = (unsigned long) sp->bar0;
5339 /* Initializing the BAR1 address as the start of the FIFO pointer. */
5340 for (j = 0; j < MAX_TX_FIFOS; j++) {
5341 mac_control->tx_FIFO_start[j] = (TxFIFO_element_t __iomem *)
5342 (sp->bar1 + (j * 0x00020000));
5345 /* Driver entry points */
5346 dev->open = &s2io_open;
5347 dev->stop = &s2io_close;
5348 dev->hard_start_xmit = &s2io_xmit;
5349 dev->get_stats = &s2io_get_stats;
5350 dev->set_multicast_list = &s2io_set_multicast;
5351 dev->do_ioctl = &s2io_ioctl;
5352 dev->change_mtu = &s2io_change_mtu;
5353 SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
5354 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
5355 dev->vlan_rx_register = s2io_vlan_rx_register;
5356 dev->vlan_rx_kill_vid = (void *)s2io_vlan_rx_kill_vid;
5359 * will use eth_mac_addr() for dev->set_mac_address
5360 * mac address will be set every time dev->open() is called
5362 #if defined(CONFIG_S2IO_NAPI)
5363 dev->poll = s2io_poll;
5367 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
5368 if (sp->high_dma_flag == TRUE)
5369 dev->features |= NETIF_F_HIGHDMA;
5371 dev->features |= NETIF_F_TSO;
5374 dev->tx_timeout = &s2io_tx_watchdog;
5375 dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
/* Deferred work: watchdog-triggered reset and link-state updates. */
5376 INIT_WORK(&sp->rst_timer_task,
5377 (void (*)(void *)) s2io_restart_nic, dev);
5378 INIT_WORK(&sp->set_link_task,
5379 (void (*)(void *)) s2io_set_link, sp);
5381 if (!(sp->device_type & XFRAME_II_DEVICE)) {
5382 pci_save_state(sp->pdev);
5385 /* Setting swapper control on the NIC, for proper reset operation */
5386 if (s2io_set_swapper(sp)) {
5387 DBG_PRINT(ERR_DBG, "%s:swapper settings are wrong\n",
5390 goto set_swap_failed;
5393 /* Verify if the Herc works on the slot its placed into */
5394 if (sp->device_type & XFRAME_II_DEVICE) {
5395 mode = s2io_verify_pci_mode(sp);
5397 DBG_PRINT(ERR_DBG, "%s: ", __FUNCTION__);
5398 DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
5400 goto set_swap_failed;
5404 /* Not needed for Herc */
5405 if (sp->device_type & XFRAME_I_DEVICE) {
5407 * Fix for all "FFs" MAC address problems observed on
5410 fix_mac_address(sp);
5415 * MAC address initialization.
5416 * For now only one mac address will be read and used.
5419 val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5420 RMAC_ADDR_CMD_MEM_OFFSET(0 + MAC_MAC_ADDR_START_OFFSET);
5421 writeq(val64, &bar0->rmac_addr_cmd_mem);
5422 wait_for_cmd_complete(sp);
5424 tmp64 = readq(&bar0->rmac_addr_data0_mem);
5425 mac_down = (u32) tmp64;
5426 mac_up = (u32) (tmp64 >> 32);
/* NOTE(review): sizeof(ETH_ALEN) is sizeof(int) (typically 4), so
 * only four of the six MAC bytes are cleared here — this likely
 * should be plain ETH_ALEN. */
5428 memset(sp->def_mac_addr[0].mac_addr, 0, sizeof(ETH_ALEN));
5430 sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up);
5431 sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8);
5432 sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16);
5433 sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_up >> 24);
5434 sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
5435 sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);
5437 /* Set the factory defined MAC address initially */
5438 dev->addr_len = ETH_ALEN;
5439 memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);
5442 * Initialize the tasklet status and link state flags
5443 * and the card state parameter
5445 atomic_set(&(sp->card_state), 0);
5446 sp->tasklet_status = 0;
5449 /* Initialize spinlocks */
5450 spin_lock_init(&sp->tx_lock);
5451 #ifndef CONFIG_S2IO_NAPI
5452 spin_lock_init(&sp->put_lock);
5454 spin_lock_init(&sp->rx_lock);
5457 * SXE-002: Configure link and activity LED to init state
5460 subid = sp->pdev->subsystem_device;
5461 if ((subid & 0xFF) >= 0x07) {
5462 val64 = readq(&bar0->gpio_control);
5463 val64 |= 0x0000800000000000ULL;
5464 writeq(val64, &bar0->gpio_control);
5465 val64 = 0x0411040400000000ULL;
5466 writeq(val64, (void __iomem *) bar0 + 0x2700);
5467 val64 = readq(&bar0->gpio_control);
5470 sp->rx_csum = 1; /* Rx chksum verify enabled by default */
5472 if (register_netdev(dev)) {
5473 DBG_PRINT(ERR_DBG, "Device registration failed\n");
5475 goto register_failed;
5478 if (sp->device_type & XFRAME_II_DEVICE) {
5479 DBG_PRINT(ERR_DBG, "%s: Neterion Xframe II 10GbE adapter ",
5481 DBG_PRINT(ERR_DBG, "(rev %d), Driver %s\n",
5482 get_xena_rev_id(sp->pdev),
5483 s2io_driver_version);
5484 DBG_PRINT(ERR_DBG, "MAC ADDR: %02x:%02x:%02x:%02x:%02x:%02x\n",
5485 sp->def_mac_addr[0].mac_addr[0],
5486 sp->def_mac_addr[0].mac_addr[1],
5487 sp->def_mac_addr[0].mac_addr[2],
5488 sp->def_mac_addr[0].mac_addr[3],
5489 sp->def_mac_addr[0].mac_addr[4],
5490 sp->def_mac_addr[0].mac_addr[5]);
/* NOTE(review): declaration after statements (a C89 constraint
 * violation in kernel code of this era), and the goto below jumps to
 * set_swap_failed after register_netdev succeeded — verify the
 * unwind path unregisters the netdev. */
5491 int mode = s2io_print_pci_mode(sp);
5493 DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode ");
5495 goto set_swap_failed;
5498 DBG_PRINT(ERR_DBG, "%s: Neterion Xframe I 10GbE adapter ",
5500 DBG_PRINT(ERR_DBG, "(rev %d), Driver %s\n",
5501 get_xena_rev_id(sp->pdev),
5502 s2io_driver_version);
5503 DBG_PRINT(ERR_DBG, "MAC ADDR: %02x:%02x:%02x:%02x:%02x:%02x\n",
5504 sp->def_mac_addr[0].mac_addr[0],
5505 sp->def_mac_addr[0].mac_addr[1],
5506 sp->def_mac_addr[0].mac_addr[2],
5507 sp->def_mac_addr[0].mac_addr[3],
5508 sp->def_mac_addr[0].mac_addr[4],
5509 sp->def_mac_addr[0].mac_addr[5]);
5512 /* Initialize device name */
5513 strcpy(sp->name, dev->name);
5514 if (sp->device_type & XFRAME_II_DEVICE)
5515 strcat(sp->name, ": Neterion Xframe II 10GbE adapter");
5517 strcat(sp->name, ": Neterion Xframe I 10GbE adapter");
5519 /* Initialize bimodal Interrupts */
5520 sp->config.bimodal = bimodal;
5521 if (!(sp->device_type & XFRAME_II_DEVICE) && bimodal) {
5522 sp->config.bimodal = 0;
5523 DBG_PRINT(ERR_DBG,"%s:Bimodal intr not supported by Xframe I\n",
5528 * Make Link state as off at this point, when the Link change
5529 * interrupt comes the state will be automatically changed to
5532 netif_carrier_off(dev);
/* Common unwind path for failures above (labels elided in this
 * excerpt). */
5543 free_shared_mem(sp);
5544 pci_disable_device(pdev);
5545 pci_release_regions(pdev);
5546 pci_set_drvdata(pdev, NULL);
5553 * s2io_rem_nic - Free the PCI device
5554 * @pdev: structure containing the PCI related information of the device.
5555 * Description: This function is called by the Pci subsystem to release a
5556 * PCI device and free up all resource held up by the device. This could
5557 * be in response to a Hot plug event or when the driver is to be removed
5561 static void __devexit s2io_rem_nic(struct pci_dev *pdev)
5563 struct net_device *dev =
5564 (struct net_device *) pci_get_drvdata(pdev);
/* Guard against a missing drvdata pointer (hot-unplug race / bad
 * state); the early-return line is elided in this excerpt. */
5568 DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n");
/* Tear down in reverse order of s2io_init_nic. */
5573 unregister_netdev(dev);
5575 free_shared_mem(sp);
5578 pci_disable_device(pdev);
5579 pci_release_regions(pdev);
5580 pci_set_drvdata(pdev, NULL);
5585 * s2io_starter - Entry point for the driver
5586 * Description: This function is the entry point for the driver. It verifies
5587 * the module loadable parameters and initializes PCI configuration space.
5590 int __init s2io_starter(void)
/* Register the PCI driver; probing happens via s2io_init_nic. */
5592 return pci_module_init(&s2io_driver);
5596 * s2io_closer - Cleanup routine for the driver
5597 * Description: This function is the cleanup routine for the driver. It unregisters the driver.
5600 void s2io_closer(void)
/* Unregister the PCI driver; each bound device is removed via
 * s2io_rem_nic. */
5602 pci_unregister_driver(&s2io_driver);
5603 DBG_PRINT(INIT_DBG, "cleanup done\n");
/* Module load/unload hooks. */
5606 module_init(s2io_starter);
5607 module_exit(s2io_closer);