1 /************************************************************************
2 * s2io.c: A Linux PCI-X Ethernet driver for S2IO 10GbE Server NIC
3 * Copyright(c) 2002-2005 Neterion Inc.
5 * This software may be used and distributed according to the terms of
6 * the GNU General Public License (GPL), incorporated herein by reference.
7 * Drivers based on or derived from this code fall under the GPL and must
8 * retain the authorship, copyright and license notice. This file is not
9 * a complete program and may only be used when the entire operating
10 * system is licensed under the GPL.
11 * See the file COPYING in this distribution for more information.
14 * Jeff Garzik : For pointing out the improper error condition
15 * check in the s2io_xmit routine and also some
16 * issues in the Tx watch dog function. Also for
17 * patiently answering all those innumerable
18 * questions regarding the 2.6 porting issues.
19 * Stephen Hemminger : Providing proper 2.6 porting mechanism for some
20 * macros available only in 2.6 Kernel.
21 * Francois Romieu : For pointing out all code part that were
22 * deprecated and also styling related comments.
23 * Grant Grundler : For helping me get rid of some Architecture
25 * Christopher Hellwig : Some more 2.6 specific issues in the driver.
27 * The module loadable parameters that are supported by the driver and a brief
28 * explanation of all the variables.
29 * rx_ring_num : This can be used to program the number of receive rings used
31 * rx_ring_len: This defines the number of descriptors each ring can have. This
32 * is also an array of size 8.
33 * tx_fifo_num: This defines the number of Tx FIFOs that are used in the driver.
34 * tx_fifo_len: This too is an array of 8. Each element defines the number of
35 * Tx descriptors that can be associated with each corresponding FIFO.
36 ************************************************************************/
38 #include <linux/config.h>
39 #include <linux/module.h>
40 #include <linux/types.h>
41 #include <linux/errno.h>
42 #include <linux/ioport.h>
43 #include <linux/pci.h>
44 #include <linux/dma-mapping.h>
45 #include <linux/kernel.h>
46 #include <linux/netdevice.h>
47 #include <linux/etherdevice.h>
48 #include <linux/skbuff.h>
49 #include <linux/init.h>
50 #include <linux/delay.h>
51 #include <linux/stddef.h>
52 #include <linux/ioctl.h>
53 #include <linux/timex.h>
54 #include <linux/sched.h>
55 #include <linux/ethtool.h>
56 #include <linux/version.h>
57 #include <linux/workqueue.h>
59 #include <asm/system.h>
60 #include <asm/uaccess.h>
65 #include "s2io-regs.h"
/* S2io Driver name & version. */
/* NOTE(review): reported to userspace (presumably via ethtool) - confirm. */
static char s2io_driver_name[] = "Neterion";
static char s2io_driver_version[] = "Version 1.7.7";
/*
 * RXD_IS_UP2DT - test whether an Rx descriptor is ready for host processing.
 * Non-zero when the descriptor is no longer owned by the Xena adapter
 * (RXD_OWN_XENA clear in Control_1) and the marker in Control_2 differs
 * from THE_RXD_MARK.
 */
static inline int RXD_IS_UP2DT(RxD_t *rxdp)
	ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
	       (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));
/*
 * Cards with following subsystem_id have a link state indication
 * problem, 600B, 600C, 600D, 640B, 640C and 640D.
 * macro below identifies these cards given the subsystem_id.
 * Every use of the argument and the whole expansion is parenthesized so
 * the macro composes safely inside larger expressions.
 */
#define CARDS_WITH_FAULTY_LINK_INDICATORS(subid) \
	(((((subid) >= 0x600B) && ((subid) <= 0x600D)) || \
	  (((subid) >= 0x640B) && ((subid) <= 0x640D))) ? 1 : 0)
/* Link is up when neither remote nor local RMAC fault is asserted in the
 * adapter status value. The argument is parenthesized so compound
 * expressions (e.g. a | b) bind correctly against the fault mask. */
#define LINK_IS_UP(val64) (!((val64) & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
					ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
/* Atomically claim the tasklet; non-zero means it was already in use.
 * Relies on a local 'sp' (nic_t *) being in scope at the expansion site. */
#define TASKLET_IN_USE test_and_set_bit(0, (&sp->tasklet_status))
/*
 * rx_buffer_level - classify how depleted the Rx buffer pool of @ring is.
 * @sp: device private structure
 * @rxb_size: number of Rx buffers currently posted
 * @ring: index of the ring to inspect
 * Compares the shortfall (pkt_cnt - rxb_size) against fixed watermarks.
 */
static inline int rx_buffer_level(nic_t * sp, int rxb_size, int ring)
	mac_info_t *mac_control;

	mac_control = &sp->mac_control;
	/* More than 16 buffers short of ring capacity: pool is running low. */
	if ((mac_control->rings[ring].pkt_cnt - rxb_size) > 16) {
	/* Shortfall smaller than one Rx block: NOTE(review) presumably the
	 * critical level - return values are defined below this excerpt. */
	if ((mac_control->rings[ring].pkt_cnt - rxb_size) <
	    MAX_RXDS_PER_BLOCK) {
/* Ethtool related variables and Macros. */
/* Self-test names reported via ethtool; the (offline)/(online) tag says
 * whether the test requires the interface to be brought down. */
static char s2io_gstrings[][ETH_GSTRING_LEN] = {
	"Register test\t(offline)",
	"Eeprom test\t(offline)",
	"Link test\t(online)",
	"RLDRAM test\t(offline)",
	"BIST Test\t(offline)"
/* Names of the statistics counters exported through ethtool -S, in the
 * same order as the values are filled in by the stats callback. */
static char ethtool_stats_keys[][ETH_GSTRING_LEN] = {
	{"tmac_data_octets"},
	{"tmac_pause_ctrl_frms"},
	{"tmac_any_err_frms"},
	{"tmac_vld_ip_octets"},
	{"rmac_data_octets"},
	{"rmac_fcs_err_frms"},
	{"rmac_vld_mcst_frms"},
	{"rmac_vld_bcst_frms"},
	{"rmac_in_rng_len_err_frms"},
	{"rmac_pause_ctrl_frms"},
	{"rmac_discarded_frms"},
	{"rmac_usized_frms"},
	{"rmac_osized_frms"},
	{"rmac_jabber_frms"},
	{"rmac_err_drp_udp"},
	{"rmac_accepted_ip"},
	{"\n DRIVER STATISTICS"},
	{"single_bit_ecc_errs"},
	{"double_bit_ecc_errs"},
/* Counts and byte lengths of the ethtool string tables above. Each
 * expansion is fully parenthesized so the macros are safe inside larger
 * arithmetic expressions. */
#define S2IO_STAT_LEN (sizeof(ethtool_stats_keys) / ETH_GSTRING_LEN)
#define S2IO_STAT_STRINGS_LEN (S2IO_STAT_LEN * ETH_GSTRING_LEN)

#define S2IO_TEST_LEN (sizeof(s2io_gstrings) / ETH_GSTRING_LEN)
#define S2IO_STRINGS_LEN (S2IO_TEST_LEN * ETH_GSTRING_LEN)
/*
 * Constants to be programmed into the Xena's registers, to configure
 * the XAUI interface.
 */
/* Writing 'SWITCH_SIGN' in a config array below switches the write target
 * from one register to the other; END_SIGN terminates the sequence. */
#define	SWITCH_SIGN	0xA5A5A5A5A5A5A5A5ULL

/* Values written sequentially to mdio_control to configure the PMA PLL. */
static u64 default_mdio_cfg[] = {
	0xC001010000000000ULL, 0xC0010100000000E0ULL,
	0xC0010100008000E4ULL,
	/* Remove Reset from PMA PLL */
	0xC001010000000000ULL, 0xC0010100000000E0ULL,
	0xC0010100000000E4ULL,
/* Values written sequentially to dtx_control to bring up the XAUI lanes
 * (one triplet per lane register: select, arm, data). */
static u64 default_dtx_cfg[] = {
	0x8000051500000000ULL, 0x80000515000000E0ULL,
	0x80000515D93500E4ULL, 0x8001051500000000ULL,
	0x80010515000000E0ULL, 0x80010515001E00E4ULL,
	0x8002051500000000ULL, 0x80020515000000E0ULL,
	0x80020515F21000E4ULL,
	/* Set PADLOOPBACKN */
	0x8002051500000000ULL, 0x80020515000000E0ULL,
	0x80020515B20000E4ULL, 0x8003051500000000ULL,
	0x80030515000000E0ULL, 0x80030515B20000E4ULL,
	0x8004051500000000ULL, 0x80040515000000E0ULL,
	0x80040515B20000E4ULL, 0x8005051500000000ULL,
	0x80050515000000E0ULL, 0x80050515B20000E4ULL,
	/* Remove PADLOOPBACKN */
	0x8002051500000000ULL, 0x80020515000000E0ULL,
	0x80020515F20000E4ULL, 0x8003051500000000ULL,
	0x80030515000000E0ULL, 0x80030515F20000E4ULL,
	0x8004051500000000ULL, 0x80040515000000E0ULL,
	0x80040515F20000E4ULL, 0x8005051500000000ULL,
	0x80050515000000E0ULL, 0x80050515F20000E4ULL,
/*
 * Constants for Fixing the MacAddress problem seen mostly on
 * Alpha machines.
 * NOTE(review): affected platform inferred from driver history - confirm.
 */
static u64 fix_mac[] = {
	0x0060000000000000ULL, 0x0060600000000000ULL,
	0x0040600000000000ULL, 0x0000600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0000600000000000ULL,
	0x0040600000000000ULL, 0x0060600000000000ULL,
/* Module Loadable parameters. */
static unsigned int tx_fifo_num = 1;	/* number of Tx FIFOs in use */
static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
    {[0 ...(MAX_TX_FIFOS - 1)] = 0 };	/* Tx descriptors per FIFO */
static unsigned int rx_ring_num = 1;	/* number of Rx rings in use */
static unsigned int rx_ring_sz[MAX_RX_RINGS] =
    {[0 ...(MAX_RX_RINGS - 1)] = 0 };	/* Rx descriptors per ring */
static unsigned int rts_frm_len[MAX_RX_RINGS] =
    {[0 ...(MAX_RX_RINGS - 1)] = 0 };	/* per-ring frame-length steering */
static unsigned int use_continuous_tx_intrs = 1;
static unsigned int rmac_pause_time = 65535;	/* time value in pause frames */
static unsigned int mc_pause_threshold_q0q3 = 187;
static unsigned int mc_pause_threshold_q4q7 = 187;
static unsigned int shared_splits;	/* TxDMA read-split limit (0 = default) */
static unsigned int tmac_util_period = 5;	/* Tx link-utilization sample period */
static unsigned int rmac_util_period = 5;	/* Rx link-utilization sample period */
#ifndef CONFIG_S2IO_NAPI
static unsigned int indicate_max_pkts;	/* only meaningful without NAPI */
/*
 * This table lists all the devices that this driver supports.
 * Both Xena (WIN/UNI) and Herc (WIN/UNI) device IDs are claimed.
 */
static struct pci_device_id s2io_tbl[] __devinitdata = {
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
	 PCI_ANY_ID, PCI_ANY_ID},
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
	 PCI_ANY_ID, PCI_ANY_ID},
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
	 PCI_ANY_ID, PCI_ANY_ID},
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
	 PCI_ANY_ID, PCI_ANY_ID},

MODULE_DEVICE_TABLE(pci, s2io_tbl);
/* PCI driver glue binding the device table to probe/remove entry points. */
static struct pci_driver s2io_driver = {
	.id_table = s2io_tbl,
	.probe = s2io_init_nic,
	.remove = __devexit_p(s2io_rem_nic),
/* A simplifier macro used both by init and free shared_mem Fns().
 * Computes ceil(len / per_each); both arguments are parenthesized so
 * compound expressions (e.g. a + b) evaluate correctly. */
#define TXD_MEM_PAGE_CNT(len, per_each) (((len) + (per_each) - 1) / (per_each))
/**
 * init_shared_mem - Allocation and Initialization of Memory
 * @nic: Device private variable.
 * Description: The function allocates all the memory areas shared
 * between the NIC and the driver. This includes Tx descriptors,
 * Rx descriptors and the statistics block.
 */
static int init_shared_mem(struct s2io_nic *nic)
	void *tmp_v_addr, *tmp_v_addr_next;
	dma_addr_t tmp_p_addr, tmp_p_addr_next;
	RxD_block_t *pre_rxd_blk = NULL;
	int i, j, blk_cnt, rx_sz, tx_sz;
	int lst_size, lst_per_page;
	struct net_device *dev = nic->dev;
#ifdef CONFIG_2BUFF_MODE
	mac_info_t *mac_control;
	struct config_param *config;

	mac_control = &nic->mac_control;
	config = &nic->config;

	/* Allocation and initialization of TXDLs in FIFOs */
	/* Reject configurations whose total descriptor count exceeds the
	 * hardware limit. */
	for (i = 0; i < config->tx_fifo_num; i++) {
		size += config->tx_cfg[i].fifo_len;
	if (size > MAX_AVAILABLE_TXDS) {
		DBG_PRINT(ERR_DBG, "%s: Total number of Tx FIFOs ",
		DBG_PRINT(ERR_DBG, "exceeds the maximum value ");
		DBG_PRINT(ERR_DBG, "that can be used\n");

	lst_size = (sizeof(TxD_t) * config->max_txds);
	tx_sz = lst_size * size;
	lst_per_page = PAGE_SIZE / lst_size;

	/* Per-FIFO bookkeeping array, one list_info_hold_t per descriptor
	 * list. */
	for (i = 0; i < config->tx_fifo_num; i++) {
		int fifo_len = config->tx_cfg[i].fifo_len;
		int list_holder_size = fifo_len * sizeof(list_info_hold_t);
		mac_control->fifos[i].list_info = kmalloc(list_holder_size,
		if (!mac_control->fifos[i].list_info) {
			  "Malloc failed for list_info\n");
		memset(mac_control->fifos[i].list_info, 0, list_holder_size);
	/* Carve the DMA-coherent pages into TxDLs and record each list's
	 * virtual and physical address. */
	for (i = 0; i < config->tx_fifo_num; i++) {
		int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
		mac_control->fifos[i].tx_curr_put_info.offset = 0;
		mac_control->fifos[i].tx_curr_put_info.fifo_len =
		    config->tx_cfg[i].fifo_len - 1;
		mac_control->fifos[i].tx_curr_get_info.offset = 0;
		mac_control->fifos[i].tx_curr_get_info.fifo_len =
		    config->tx_cfg[i].fifo_len - 1;
		mac_control->fifos[i].fifo_no = i;
		mac_control->fifos[i].nic = nic;
		mac_control->fifos[i].max_txds = MAX_SKB_FRAGS;

		for (j = 0; j < page_num; j++) {
			tmp_v = pci_alloc_consistent(nic->pdev,
				  "pci_alloc_consistent ");
				DBG_PRINT(ERR_DBG, "failed for TxDL\n");
			while (k < lst_per_page) {
				int l = (j * lst_per_page) + k;
				if (l == config->tx_cfg[i].fifo_len)
				mac_control->fifos[i].list_info[l].list_virt_addr =
				    tmp_v + (k * lst_size);
				mac_control->fifos[i].list_info[l].list_phy_addr =
				    tmp_p + (k * lst_size);

	/* Allocation and initialization of RXDs in Rings */
	/* Each ring must hold an exact number of Rx blocks; one RxD per
	 * block is consumed as the link to the next block. */
	for (i = 0; i < config->rx_ring_num; i++) {
		if (config->rx_cfg[i].num_rxd % (MAX_RXDS_PER_BLOCK + 1)) {
			DBG_PRINT(ERR_DBG, "%s: RxD count of ", dev->name);
			DBG_PRINT(ERR_DBG, "Ring%d is not a multiple of ",
			DBG_PRINT(ERR_DBG, "RxDs per Block");
		size += config->rx_cfg[i].num_rxd;
		mac_control->rings[i].block_count =
		    config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
		mac_control->rings[i].pkt_cnt =
		    config->rx_cfg[i].num_rxd - mac_control->rings[i].block_count;
	size = (size * (sizeof(RxD_t)));

	for (i = 0; i < config->rx_ring_num; i++) {
		mac_control->rings[i].rx_curr_get_info.block_index = 0;
		mac_control->rings[i].rx_curr_get_info.offset = 0;
		mac_control->rings[i].rx_curr_get_info.ring_len =
		    config->rx_cfg[i].num_rxd - 1;
		mac_control->rings[i].rx_curr_put_info.block_index = 0;
		mac_control->rings[i].rx_curr_put_info.offset = 0;
		mac_control->rings[i].rx_curr_put_info.ring_len =
		    config->rx_cfg[i].num_rxd - 1;
		mac_control->rings[i].nic = nic;
		mac_control->rings[i].ring_no = i;

		    config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
		/* Allocating all the Rx blocks */
		for (j = 0; j < blk_cnt; j++) {
#ifndef CONFIG_2BUFF_MODE
			size = (MAX_RXDS_PER_BLOCK + 1) * (sizeof(RxD_t));
			size = SIZE_OF_BLOCK;
			tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
			if (tmp_v_addr == NULL) {
				 * In case of failure, free_shared_mem()
				 * is called, which should free any
				 * memory that was alloced till the
				mac_control->rings[i].rx_blocks[j].block_virt_addr =
			memset(tmp_v_addr, 0, size);
			mac_control->rings[i].rx_blocks[j].block_virt_addr =
			mac_control->rings[i].rx_blocks[j].block_dma_addr =
		/* Interlinking all Rx Blocks */
		/* Each block's trailer points (virtually and physically) at
		 * the next block, wrapping at the end of the ring. */
		for (j = 0; j < blk_cnt; j++) {
			    mac_control->rings[i].rx_blocks[j].block_virt_addr;
			    mac_control->rings[i].rx_blocks[(j + 1) %
					   blk_cnt].block_virt_addr;
			    mac_control->rings[i].rx_blocks[j].block_dma_addr;
			    mac_control->rings[i].rx_blocks[(j + 1) %
					   blk_cnt].block_dma_addr;

			pre_rxd_blk = (RxD_block_t *) tmp_v_addr;
			pre_rxd_blk->reserved_1 = END_OF_BLOCK;	/* last RxD
#ifndef CONFIG_2BUFF_MODE
			pre_rxd_blk->reserved_2_pNext_RxD_block =
			    (unsigned long) tmp_v_addr_next;
			pre_rxd_blk->pNext_RxD_Blk_physical =
			    (u64) tmp_p_addr_next;

#ifdef CONFIG_2BUFF_MODE
	/*
	 * Allocation of Storages for buffer addresses in 2BUFF mode
	 * and the buffers as well.
	 */
	for (i = 0; i < config->rx_ring_num; i++) {
		    config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
		mac_control->rings[i].ba = kmalloc((sizeof(buffAdd_t *) * blk_cnt),
		if (!mac_control->rings[i].ba)
		for (j = 0; j < blk_cnt; j++) {
			mac_control->rings[i].ba[j] = kmalloc((sizeof(buffAdd_t) *
						(MAX_RXDS_PER_BLOCK + 1)),
			if (!mac_control->rings[i].ba[j])
			while (k != MAX_RXDS_PER_BLOCK) {
				ba = &mac_control->rings[i].ba[j][k];

				/* Over-allocate by ALIGN_SIZE and round the
				 * usable pointer up to an aligned boundary. */
				ba->ba_0_org = (void *) kmalloc
				    (BUF0_LEN + ALIGN_SIZE, GFP_KERNEL);
				tmp = (u64) ba->ba_0_org;
				tmp &= ~((u64) ALIGN_SIZE);
				ba->ba_0 = (void *) tmp;

				ba->ba_1_org = (void *) kmalloc
				    (BUF1_LEN + ALIGN_SIZE, GFP_KERNEL);
				tmp = (u64) ba->ba_1_org;
				tmp &= ~((u64) ALIGN_SIZE);
				ba->ba_1 = (void *) tmp;

	/* Allocation and initialization of Statistics block */
	size = sizeof(StatInfo_t);
	mac_control->stats_mem = pci_alloc_consistent
	    (nic->pdev, size, &mac_control->stats_mem_phy);

	if (!mac_control->stats_mem) {
		 * In case of failure, free_shared_mem() is called, which
		 * should free any memory that was alloced till the
	mac_control->stats_mem_sz = size;

	tmp_v_addr = mac_control->stats_mem;
	mac_control->stats_info = (StatInfo_t *) tmp_v_addr;
	memset(tmp_v_addr, 0, size);
	DBG_PRINT(INIT_DBG, "%s:Ring Mem PHY: 0x%llx\n", dev->name,
		  (unsigned long long) tmp_p_addr);
/**
 * free_shared_mem - Free the allocated Memory
 * @nic: Device private variable.
 * Description: This function is to free all memory locations allocated by
 * the init_shared_mem() function and return it to the kernel.
 */
static void free_shared_mem(struct s2io_nic *nic)
	int i, j, blk_cnt, size;
	dma_addr_t tmp_p_addr;
	mac_info_t *mac_control;
	struct config_param *config;
	int lst_size, lst_per_page;

	mac_control = &nic->mac_control;
	config = &nic->config;

	lst_size = (sizeof(TxD_t) * config->max_txds);
	lst_per_page = PAGE_SIZE / lst_size;

	/* Release the DMA-coherent TxDL pages; the page base address was
	 * stored in the first list_info entry of each page. */
	for (i = 0; i < config->tx_fifo_num; i++) {
		int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
		for (j = 0; j < page_num; j++) {
			int mem_blks = (j * lst_per_page);
			if (!mac_control->fifos[i].list_info[mem_blks].
			pci_free_consistent(nic->pdev, PAGE_SIZE,
					    mac_control->fifos[i].
					    mac_control->fifos[i].
		kfree(mac_control->fifos[i].list_info);

#ifndef CONFIG_2BUFF_MODE
	size = (MAX_RXDS_PER_BLOCK + 1) * (sizeof(RxD_t));
	size = SIZE_OF_BLOCK;
	/* Free every Rx block of every ring. */
	for (i = 0; i < config->rx_ring_num; i++) {
		blk_cnt = mac_control->rings[i].block_count;
		for (j = 0; j < blk_cnt; j++) {
			tmp_v_addr = mac_control->rings[i].rx_blocks[j].
			tmp_p_addr = mac_control->rings[i].rx_blocks[j].
			if (tmp_v_addr == NULL)
			pci_free_consistent(nic->pdev, size,
					    tmp_v_addr, tmp_p_addr);

#ifdef CONFIG_2BUFF_MODE
	/* Freeing buffer storage addresses in 2BUFF mode. */
	for (i = 0; i < config->rx_ring_num; i++) {
		    config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
		for (j = 0; j < blk_cnt; j++) {
			if (!mac_control->rings[i].ba[j])
			while (k != MAX_RXDS_PER_BLOCK) {
				buffAdd_t *ba = &mac_control->rings[i].ba[j][k];
			kfree(mac_control->rings[i].ba[j]);
		/* NOTE(review): kfree(NULL) is a no-op; this guard is
		 * redundant and could be dropped. */
		if (mac_control->rings[i].ba)
			kfree(mac_control->rings[i].ba);

	if (mac_control->stats_mem) {
		pci_free_consistent(nic->pdev,
				    mac_control->stats_mem_sz,
				    mac_control->stats_mem,
				    mac_control->stats_mem_phy);
/**
 * init_nic - Initialization of hardware
 * @nic: device private variable
 * Description: The function sequentially configures every block
 * of the H/W from their reset values.
 * Return Value: SUCCESS on success and
 * '-1' on failure (endian settings incorrect).
 */
static int init_nic(struct s2io_nic *nic)
	XENA_dev_config_t __iomem *bar0 = nic->bar0;
	struct net_device *dev = nic->dev;
	register u64 val64 = 0;
	mac_info_t *mac_control;
	struct config_param *config;
	int mdio_cnt = 0, dtx_cnt = 0;
	unsigned long long mem_share;

	mac_control = &nic->mac_control;
	config = &nic->config;

	/* to set the swapper control on the card */
	if(s2io_set_swapper(nic)) {
		DBG_PRINT(ERR_DBG,"ERROR: Setting Swapper failed\n");

	/* Remove XGXS from reset state */
	writeq(val64, &bar0->sw_reset);
	val64 = readq(&bar0->sw_reset);

	/* Enable Receiving broadcasts */
	/* mac_cfg is key-protected: the RMAC key must be written before each
	 * 32-bit half of the 64-bit value. */
	add = &bar0->mac_cfg;
	val64 = readq(&bar0->mac_cfg);
	val64 |= MAC_RMAC_BCAST_ENABLE;
	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
	writel((u32) val64, add);
	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
	writel((u32) (val64 >> 32), (add + 4));

	/* Read registers in all blocks */
	val64 = readq(&bar0->mac_int_mask);
	val64 = readq(&bar0->mc_int_mask);
	val64 = readq(&bar0->xgxs_int_mask);

	writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);

	/*
	 * Configuring the XAUI Interface of Xena.
	 * ***************************************
	 * To Configure the Xena's XAUI, one has to write a series
	 * of 64 bit values into two registers in a particular
	 * sequence. Hence a macro 'SWITCH_SIGN' has been defined
	 * which will be defined in the array of configuration values
	 * (default_dtx_cfg & default_mdio_cfg) at appropriate places
	 * to switch writing from one register to another. We continue
	 * writing these values until we encounter the 'END_SIGN' macro.
	 * For example, After making a series of 21 writes into
	 * dtx_control register the 'SWITCH_SIGN' appears and hence we
	 * start writing into mdio_control until we encounter END_SIGN.
	 */
	while (default_dtx_cfg[dtx_cnt] != END_SIGN) {
		if (default_dtx_cfg[dtx_cnt] == SWITCH_SIGN) {
		SPECIAL_REG_WRITE(default_dtx_cfg[dtx_cnt],
				  &bar0->dtx_control, UF);
		val64 = readq(&bar0->dtx_control);
	while (default_mdio_cfg[mdio_cnt] != END_SIGN) {
		if (default_mdio_cfg[mdio_cnt] == SWITCH_SIGN) {
		SPECIAL_REG_WRITE(default_mdio_cfg[mdio_cnt],
				  &bar0->mdio_control, UF);
		val64 = readq(&bar0->mdio_control);
	if ((default_dtx_cfg[dtx_cnt] == END_SIGN) &&
	    (default_mdio_cfg[mdio_cnt] == END_SIGN)) {

	/* Tx DMA Initialization */
	writeq(val64, &bar0->tx_fifo_partition_0);
	writeq(val64, &bar0->tx_fifo_partition_1);
	writeq(val64, &bar0->tx_fifo_partition_2);
	writeq(val64, &bar0->tx_fifo_partition_3);

	/* Pack each FIFO's length and priority into the four partition
	 * registers (four FIFOs per register). */
	for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
		    vBIT(config->tx_cfg[i].fifo_len - 1, ((i * 32) + 19),
			 13) | vBIT(config->tx_cfg[i].fifo_priority,
		if (i == (config->tx_fifo_num - 1)) {
			writeq(val64, &bar0->tx_fifo_partition_0);
			writeq(val64, &bar0->tx_fifo_partition_1);
			writeq(val64, &bar0->tx_fifo_partition_2);
			writeq(val64, &bar0->tx_fifo_partition_3);

	/* Enable Tx FIFO partition 0. */
	val64 = readq(&bar0->tx_fifo_partition_0);
	val64 |= BIT(0);	/* To enable the FIFO partition. */
	writeq(val64, &bar0->tx_fifo_partition_0);

	/*
	 * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
	 * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
	 */
	if (get_xena_rev_id(nic->pdev) < 4)
		writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);

	val64 = readq(&bar0->tx_fifo_partition_0);
	DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
		  &bar0->tx_fifo_partition_0, (unsigned long long) val64);

	/*
	 * Initialization of Tx_PA_CONFIG register to ignore packet
	 * integrity checking.
	 */
	val64 = readq(&bar0->tx_pa_cfg);
	val64 |= TX_PA_CFG_IGNORE_FRM_ERR | TX_PA_CFG_IGNORE_SNAP_OUI |
	    TX_PA_CFG_IGNORE_LLC_CTRL | TX_PA_CFG_IGNORE_L2_ERR;
	writeq(val64, &bar0->tx_pa_cfg);

	/* Rx DMA initialization. */
	for (i = 0; i < config->rx_ring_num; i++) {
		    vBIT(config->rx_cfg[i].ring_priority, (5 + (i * 8)),
	writeq(val64, &bar0->rx_queue_priority);

	/*
	 * Allocating equal share of memory to all the
	 * configured Rx rings (ring 0 also absorbs the remainder).
	 */
	for (i = 0; i < config->rx_ring_num; i++) {
			mem_share = (mem_size / config->rx_ring_num +
				     mem_size % config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);

	writeq(val64, &bar0->rx_queue_cfg);

	/*
	 * Filling Tx round robin registers
	 * as per the number of FIFOs
	 */
	switch (config->tx_fifo_num) {
		val64 = 0x0000000000000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		writeq(val64, &bar0->tx_w_round_robin_1);
		writeq(val64, &bar0->tx_w_round_robin_2);
		writeq(val64, &bar0->tx_w_round_robin_3);
		writeq(val64, &bar0->tx_w_round_robin_4);
		val64 = 0x0000010000010000ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0100000100000100ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0001000001000001ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0000010000010000ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0100000000000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		val64 = 0x0001000102000001ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0001020000010001ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0200000100010200ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0001000102000001ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0001020000000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		val64 = 0x0001020300010200ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0100000102030001ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0200010000010203ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0001020001000001ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0203000100000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		val64 = 0x0001000203000102ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0001020001030004ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0001000203000102ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0001020001030004ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0001000000000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		val64 = 0x0001020304000102ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0304050001020001ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0203000100000102ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0304000102030405ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0001000200000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		val64 = 0x0001020001020300ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0102030400010203ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0405060001020001ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0304050000010200ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0102030000000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		val64 = 0x0001020300040105ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0200030106000204ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0103000502010007ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0304010002060500ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0103020400000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);

	/* Filling the Rx round robin registers as per the
	 * number of Rings and steering based on QoS.
	 */
	switch (config->rx_ring_num) {
		val64 = 0x8080808080808080ULL;
		writeq(val64, &bar0->rts_qos_steering);
		val64 = 0x0000010000010000ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0100000100000100ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0001000001000001ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0000010000010000ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0100000000000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);
		val64 = 0x8080808040404040ULL;
		writeq(val64, &bar0->rts_qos_steering);
		val64 = 0x0001000102000001ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0001020000010001ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0200000100010200ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0001000102000001ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0001020000000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);
		val64 = 0x8080804040402020ULL;
		writeq(val64, &bar0->rts_qos_steering);
		val64 = 0x0001020300010200ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0100000102030001ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0200010000010203ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0001020001000001ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0203000100000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);
		val64 = 0x8080404020201010ULL;
		writeq(val64, &bar0->rts_qos_steering);
		val64 = 0x0001000203000102ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0001020001030004ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0001000203000102ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0001020001030004ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0001000000000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);
		val64 = 0x8080404020201008ULL;
		writeq(val64, &bar0->rts_qos_steering);
		val64 = 0x0001020304000102ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0304050001020001ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0203000100000102ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0304000102030405ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0001000200000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);
		val64 = 0x8080404020100804ULL;
		writeq(val64, &bar0->rts_qos_steering);
		val64 = 0x0001020001020300ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0102030400010203ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0405060001020001ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0304050000010200ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0102030000000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);
		val64 = 0x8080402010080402ULL;
		writeq(val64, &bar0->rts_qos_steering);
		val64 = 0x0001020300040105ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0200030106000204ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0103000502010007ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0304010002060500ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0103020400000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);
		val64 = 0x8040201008040201ULL;
		writeq(val64, &bar0->rts_qos_steering);

	for (i = 0; i < 8; i++)
		writeq(val64, &bar0->rts_frm_len_n[i]);

	/* Set the default rts frame length for the rings configured */
	/* MTU plus 22 bytes of L2 framing overhead. */
	val64 = MAC_RTS_FRM_LEN_SET(dev->mtu+22);
	for (i = 0 ; i < config->rx_ring_num ; i++)
		writeq(val64, &bar0->rts_frm_len_n[i]);

	/* Set the frame length for the configured rings
	 * desired by the user
	 */
	for (i = 0; i < config->rx_ring_num; i++) {
		/* If rts_frm_len[i] == 0 then it is assumed that user not
		 * specified frame length steering.
		 * If the user provides the frame length then program
		 * the rts_frm_len register for those values or else
		 * leave it as it is.
		 */
		if (rts_frm_len[i] != 0) {
			writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
			       &bar0->rts_frm_len_n[i]);

	/* Program statistics memory */
	writeq(mac_control->stats_mem_phy, &bar0->stat_addr);

	/*
	 * Initializing the sampling rate for the device to calculate the
	 * bandwidth utilization.
	 */
	val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
	    MAC_RX_LINK_UTIL_VAL(rmac_util_period);
	writeq(val64, &bar0->mac_link_util);

	/*
	 * Initializing the Transmit and Receive Traffic Interrupt
	 * schemes (TTI/RTI).
	 */
	/*
	 * TTI Initialization. Default Tx timer gets us about
	 * 250 interrupts per sec. Continuous interrupts are enabled
	 * by default.
	 */
	val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078) |
	    TTI_DATA1_MEM_TX_URNG_A(0xA) |
	    TTI_DATA1_MEM_TX_URNG_B(0x10) |
	    TTI_DATA1_MEM_TX_URNG_C(0x30) | TTI_DATA1_MEM_TX_TIMER_AC_EN;
	if (use_continuous_tx_intrs)
		val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
	writeq(val64, &bar0->tti_data1_mem);

	val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
	    TTI_DATA2_MEM_TX_UFC_B(0x20) |
	    TTI_DATA2_MEM_TX_UFC_C(0x70) | TTI_DATA2_MEM_TX_UFC_D(0x80);
	writeq(val64, &bar0->tti_data2_mem);

	val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
	writeq(val64, &bar0->tti_command_mem);

	/*
	 * Once the operation completes, the Strobe bit of the command
	 * register will be reset. We poll for this particular condition
	 * We wait for a maximum of 500ms for the operation to complete,
	 * if it's not complete by then we return error.
	 */
		val64 = readq(&bar0->tti_command_mem);
		if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
			DBG_PRINT(ERR_DBG, "%s: TTI init Failed\n",

	/* RTI Initialization */
	val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF) |
	    RTI_DATA1_MEM_RX_URNG_A(0xA) |
	    RTI_DATA1_MEM_RX_URNG_B(0x10) |
	    RTI_DATA1_MEM_RX_URNG_C(0x30) | RTI_DATA1_MEM_RX_TIMER_AC_EN;

	writeq(val64, &bar0->rti_data1_mem);

	val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
	    RTI_DATA2_MEM_RX_UFC_B(0x2) |
	    RTI_DATA2_MEM_RX_UFC_C(0x40) | RTI_DATA2_MEM_RX_UFC_D(0x80);
	writeq(val64, &bar0->rti_data2_mem);

	val64 = RTI_CMD_MEM_WE | RTI_CMD_MEM_STROBE_NEW_CMD;
	writeq(val64, &bar0->rti_command_mem);

	/*
	 * Once the operation completes, the Strobe bit of the
	 * command register will be reset. We poll for this
	 * particular condition. We wait for a maximum of 500ms
	 * for the operation to complete, if it's not complete
	 * by then we return error.
	 */
		val64 = readq(&bar0->rti_command_mem);
		if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD)) {
			DBG_PRINT(ERR_DBG, "%s: RTI init Failed\n",

	/*
	 * Initializing proper values as Pause threshold into all
	 * the 8 Queues on Rx side.
	 */
	writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
	writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);

	/* Disable RMAC PAD STRIPPING */
	/* Same key-protected two-half write sequence as mac_cfg above. */
	add = (void *) &bar0->mac_cfg;
	val64 = readq(&bar0->mac_cfg);
	val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
	writel((u32) (val64), add);
	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
	writel((u32) (val64 >> 32), (add + 4));
	val64 = readq(&bar0->mac_cfg);

	/*
	 * Set the time value to be inserted in the pause frame
	 * generated by xena.
	 */
	val64 = readq(&bar0->rmac_pause_cfg);
	val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
	val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
	writeq(val64, &bar0->rmac_pause_cfg);

	/*
	 * Set the Threshold Limit for Generating the pause frame
	 * If the amount of data in any Queue exceeds ratio of
	 * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256
	 * pause frame is generated
	 */
	for (i = 0; i < 4; i++) {
		    (((u64) 0xFF00 | nic->mac_control.
		      mc_pause_threshold_q0q3)
	writeq(val64, &bar0->mc_pause_thresh_q0q3);

	for (i = 0; i < 4; i++) {
		    (((u64) 0xFF00 | nic->mac_control.
		      mc_pause_threshold_q4q7)
	writeq(val64, &bar0->mc_pause_thresh_q4q7);

	/*
	 * TxDMA will stop Read request if the number of read split has
	 * exceeded the limit pointed by shared_splits
	 */
	val64 = readq(&bar0->pic_control);
	val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
	writeq(val64, &bar0->pic_control);
/*
 * NOTE(review): this is a numbered partial listing -- the embedded line
 * numbers jump, so some statements and closing braces are elided.
 * Comments added below describe only what the visible lines show.
 *
 * Purpose (visible): for each interrupt block selected in `mask`
 * (PIC, Tx/Rx DMA, MAC, XGXS, MC, Tx/Rx traffic), either unmask the
 * block's bit(s) in general_int_mask and program the block-level mask
 * register, or mask everything for that block, depending on `flag`
 * (ENABLE_INTRS / DISABLE_INTRS).
 */
1244 * en_dis_able_nic_intrs - Enable or Disable the interrupts
1245 * @nic: device private variable,
1246 * @mask: A mask indicating which Intr block must be modified and,
1247 * @flag: A flag indicating whether to enable or disable the Intrs.
1248 * Description: This function will either disable or enable the interrupts
1249 * depending on the flag argument. The mask argument can be used to
1250 * enable/disable any Intr block.
1251 * Return Value: NONE.
1254 static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
1256 XENA_dev_config_t __iomem *bar0 = nic->bar0;
1257 register u64 val64 = 0, temp64 = 0;
1259 /* Top level interrupt classification */
1260 /* PIC Interrupts */
1261 if ((mask & (TX_PIC_INTR | RX_PIC_INTR))) {
1262 /* Enable PIC Intrs in the general intr mask register */
1263 val64 = TXPIC_INT_M | PIC_RX_INT_M;
1264 if (flag == ENABLE_INTRS) {
/* Enable path pattern (repeated per block): read general_int_mask,
 * clear this block's bit(s), write back. */
1265 temp64 = readq(&bar0->general_int_mask);
1266 temp64 &= ~((u64) val64);
1267 writeq(temp64, &bar0->general_int_mask);
1269 * Disabled all PCIX, Flash, MDIO, IIC and GPIO
1270 * interrupts for now.
1273 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
1275 * No MSI Support is available presently, so TTI and
1276 * RTI interrupts are also disabled.
1278 } else if (flag == DISABLE_INTRS) {
1280 * Disable PIC Intrs in the general
1281 * intr mask register
1283 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
1284 temp64 = readq(&bar0->general_int_mask);
/* NOTE(review): on every DISABLE path below, `val64` is written to
 * general_int_mask right after `temp64` is read; the statement that
 * combines them (presumably val64 |= temp64) appears to be elided
 * from this listing -- TODO confirm against the full source. */
1286 writeq(val64, &bar0->general_int_mask);
1290 /* DMA Interrupts */
1291 /* Enabling/Disabling Tx DMA interrupts */
1292 if (mask & TX_DMA_INTR) {
1293 /* Enable TxDMA Intrs in the general intr mask register */
1294 val64 = TXDMA_INT_M;
1295 if (flag == ENABLE_INTRS) {
1296 temp64 = readq(&bar0->general_int_mask);
1297 temp64 &= ~((u64) val64);
1298 writeq(temp64, &bar0->general_int_mask);
1300 * Keep all interrupts other than PFC interrupt
1301 * and PCC interrupt disabled in DMA level.
1303 val64 = DISABLE_ALL_INTRS & ~(TXDMA_PFC_INT_M |
1305 writeq(val64, &bar0->txdma_int_mask);
1307 * Enable only the MISC error 1 interrupt in PFC block
1309 val64 = DISABLE_ALL_INTRS & (~PFC_MISC_ERR_1);
1310 writeq(val64, &bar0->pfc_err_mask);
1312 * Enable only the FB_ECC error interrupt in PCC block
1314 val64 = DISABLE_ALL_INTRS & (~PCC_FB_ECC_ERR);
1315 writeq(val64, &bar0->pcc_err_mask);
1316 } else if (flag == DISABLE_INTRS) {
1318 * Disable TxDMA Intrs in the general intr mask
1321 writeq(DISABLE_ALL_INTRS, &bar0->txdma_int_mask);
1322 writeq(DISABLE_ALL_INTRS, &bar0->pfc_err_mask);
1323 temp64 = readq(&bar0->general_int_mask);
1325 writeq(val64, &bar0->general_int_mask);
1329 /* Enabling/Disabling Rx DMA interrupts */
1330 if (mask & RX_DMA_INTR) {
1331 /* Enable RxDMA Intrs in the general intr mask register */
1332 val64 = RXDMA_INT_M;
1333 if (flag == ENABLE_INTRS) {
1334 temp64 = readq(&bar0->general_int_mask);
1335 temp64 &= ~((u64) val64);
1336 writeq(temp64, &bar0->general_int_mask);
1338 * All RxDMA block interrupts are disabled for now
1341 writeq(DISABLE_ALL_INTRS, &bar0->rxdma_int_mask);
1342 } else if (flag == DISABLE_INTRS) {
1344 * Disable RxDMA Intrs in the general intr mask
1347 writeq(DISABLE_ALL_INTRS, &bar0->rxdma_int_mask);
1348 temp64 = readq(&bar0->general_int_mask);
1350 writeq(val64, &bar0->general_int_mask);
1354 /* MAC Interrupts */
1355 /* Enabling/Disabling MAC interrupts */
1356 if (mask & (TX_MAC_INTR | RX_MAC_INTR)) {
1357 val64 = TXMAC_INT_M | RXMAC_INT_M;
1358 if (flag == ENABLE_INTRS) {
1359 temp64 = readq(&bar0->general_int_mask);
1360 temp64 &= ~((u64) val64);
1361 writeq(temp64, &bar0->general_int_mask);
1363 * All MAC block error interrupts are disabled for now
1364 * except the link status change interrupt.
1367 val64 = MAC_INT_STATUS_RMAC_INT;
1368 temp64 = readq(&bar0->mac_int_mask);
1369 temp64 &= ~((u64) val64);
1370 writeq(temp64, &bar0->mac_int_mask);
/* Unmask only the RMAC link-state-change error interrupt. */
1372 val64 = readq(&bar0->mac_rmac_err_mask);
1373 val64 &= ~((u64) RMAC_LINK_STATE_CHANGE_INT);
1374 writeq(val64, &bar0->mac_rmac_err_mask);
1375 } else if (flag == DISABLE_INTRS) {
1377 * Disable MAC Intrs in the general intr mask register
1379 writeq(DISABLE_ALL_INTRS, &bar0->mac_int_mask);
1380 writeq(DISABLE_ALL_INTRS,
1381 &bar0->mac_rmac_err_mask);
1383 temp64 = readq(&bar0->general_int_mask);
1385 writeq(val64, &bar0->general_int_mask);
1389 /* XGXS Interrupts */
1390 if (mask & (TX_XGXS_INTR | RX_XGXS_INTR)) {
1391 val64 = TXXGXS_INT_M | RXXGXS_INT_M;
1392 if (flag == ENABLE_INTRS) {
1393 temp64 = readq(&bar0->general_int_mask);
1394 temp64 &= ~((u64) val64);
1395 writeq(temp64, &bar0->general_int_mask);
1397 * All XGXS block error interrupts are disabled for now
1400 writeq(DISABLE_ALL_INTRS, &bar0->xgxs_int_mask);
1401 } else if (flag == DISABLE_INTRS) {
/* NOTE(review): comment says "MC Intrs" but this branch masks the
 * XGXS block -- looks like a copy-paste comment; confirm. */
1403 * Disable MC Intrs in the general intr mask register
1405 writeq(DISABLE_ALL_INTRS, &bar0->xgxs_int_mask);
1406 temp64 = readq(&bar0->general_int_mask);
1408 writeq(val64, &bar0->general_int_mask);
1412 /* Memory Controller(MC) interrupts */
1413 if (mask & MC_INTR) {
/* NOTE(review): the assignment of val64 for the MC block (original
 * line 1414) is elided from this listing. */
1415 if (flag == ENABLE_INTRS) {
1416 temp64 = readq(&bar0->general_int_mask);
1417 temp64 &= ~((u64) val64);
1418 writeq(temp64, &bar0->general_int_mask);
1420 * Enable all MC Intrs.
1422 writeq(0x0, &bar0->mc_int_mask);
1423 writeq(0x0, &bar0->mc_err_mask);
1424 } else if (flag == DISABLE_INTRS) {
1426 * Disable MC Intrs in the general intr mask register
1428 writeq(DISABLE_ALL_INTRS, &bar0->mc_int_mask);
1429 temp64 = readq(&bar0->general_int_mask);
1431 writeq(val64, &bar0->general_int_mask);
1436 /* Tx traffic interrupts */
1437 if (mask & TX_TRAFFIC_INTR) {
1438 val64 = TXTRAFFIC_INT_M;
1439 if (flag == ENABLE_INTRS) {
1440 temp64 = readq(&bar0->general_int_mask);
1441 temp64 &= ~((u64) val64);
1442 writeq(temp64, &bar0->general_int_mask);
1444 * Enable all the Tx side interrupts
1445 * writing 0 Enables all 64 TX interrupt levels
1447 writeq(0x0, &bar0->tx_traffic_mask);
1448 } else if (flag == DISABLE_INTRS) {
1450 * Disable Tx Traffic Intrs in the general intr mask
1453 writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
1454 temp64 = readq(&bar0->general_int_mask);
1456 writeq(val64, &bar0->general_int_mask);
1460 /* Rx traffic interrupts */
1461 if (mask & RX_TRAFFIC_INTR) {
1462 val64 = RXTRAFFIC_INT_M;
1463 if (flag == ENABLE_INTRS) {
1464 temp64 = readq(&bar0->general_int_mask);
1465 temp64 &= ~((u64) val64);
1466 writeq(temp64, &bar0->general_int_mask);
1467 /* writing 0 Enables all 8 RX interrupt levels */
1468 writeq(0x0, &bar0->rx_traffic_mask);
1469 } else if (flag == DISABLE_INTRS) {
1471 * Disable Rx Traffic Intrs in the general intr mask
1474 writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
1475 temp64 = readq(&bar0->general_int_mask);
1477 writeq(val64, &bar0->general_int_mask);
/*
 * NOTE(review): numbered partial listing; return statements, braces, and
 * (presumably) the rev_id-based branch selection are elided -- the four
 * visible condition groups likely correspond to different silicon
 * revisions. TODO confirm against the full source.
 *
 * Purpose (visible): decide whether the PCC/PRC units are in the
 * expected state given the adapter status word `val64`.  When
 * `flag == FALSE` (adapter enable never written) the PCC must NOT be
 * idle and PRC must be quiescent; otherwise PCC idle is required and
 * PRC quiescence is accepted either way.
 */
1482 static int check_prc_pcc_state(u64 val64, int flag, int rev_id)
1486 if (flag == FALSE) {
1488 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) &&
1489 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1490 ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
1494 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) &&
1495 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1496 ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
1502 if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
1503 ADAPTER_STATUS_RMAC_PCC_IDLE) &&
1504 (!(val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ||
1505 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1506 ADAPTER_STATUS_RC_PRC_QUIESCENT))) {
1510 if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
1511 ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) &&
1512 (!(val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ||
1513 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1514 ADAPTER_STATUS_RC_PRC_QUIESCENT))) {
/*
 * NOTE(review): numbered partial listing -- the `if` head that applies
 * the complemented status `tmp64` against the READY-bit mask, and the
 * return, are elided.  Visible logic: complement the adapter status,
 * test the block-ready / PLL-lock bits, then delegate the PCC/PRC
 * check to check_prc_pcc_state() with the chip revision.
 */
1523 * verify_xena_quiescence - Checks whether the H/W is ready
1524 * @val64 : Value read from adapter status register.
1525 * @flag : indicates if the adapter enable bit was ever written once
1527 * Description: Returns whether the H/W is ready to go or not. Depending
1528 * on whether adapter enable bit was written or not the comparison
1529 * differs and the calling function passes the input argument flag to
1531 * Return: 1 If xena is quiescence
1532 * 0 If Xena is not quiescence
1535 static int verify_xena_quiescence(nic_t *sp, u64 val64, int flag)
1538 u64 tmp64 = ~((u64) val64);
1539 int rev_id = get_xena_rev_id(sp->pdev);
1543 (ADAPTER_STATUS_TDMA_READY | ADAPTER_STATUS_RDMA_READY |
1544 ADAPTER_STATUS_PFC_READY | ADAPTER_STATUS_TMAC_BUF_EMPTY |
1545 ADAPTER_STATUS_PIC_QUIESCENT | ADAPTER_STATUS_MC_DRAM_READY |
1546 ADAPTER_STATUS_MC_QUEUES_READY | ADAPTER_STATUS_M_PLL_LOCK |
1547 ADAPTER_STATUS_P_PLL_LOCK))) {
1548 ret = check_prc_pcc_state(val64, flag, rev_id);
/*
 * Purpose (visible): stream the fix_mac[] magic sequence into the
 * gpio_control register (terminated by END_SIGN), reading the register
 * back after each write -- a workaround for MAC-address read problems
 * on Alpha.  NOTE(review): the readback's purpose (posting-flush of
 * the MMIO write, presumably) is not stated in the visible lines.
 */
1555 * fix_mac_address - Fix for Mac addr problem on Alpha platforms
1556 * @sp: Pointer to device specifc structure
1558 * New procedure to clear mac address reading problems on Alpha platforms
1562 void fix_mac_address(nic_t * sp)
1564 XENA_dev_config_t __iomem *bar0 = sp->bar0;
1568 while (fix_mac[i] != END_SIGN) {
1569 writeq(fix_mac[i++], &bar0->gpio_control);
1571 val64 = readq(&bar0->gpio_control);
/*
 * NOTE(review): numbered partial listing -- #else/#endif lines, error
 * returns and some statements are elided.  Visible flow: program PRC
 * ring base addresses and enable each receive ring, optionally enable
 * 2-buffer mode, bring up MC-RLDRAM, clear stale link-state interrupts,
 * verify quiescence, enable select interrupts, turn the laser on, and
 * kick the link-state task.
 */
1576 * start_nic - Turns the device on
1577 * @nic : device private variable.
1579 * This function actually turns the device on. Before this function is
1580 * called,all Registers are configured from their reset states
1581 * and shared memory is allocated but the NIC is still quiescent. On
1582 * calling this function, the device interrupts are cleared and the NIC is
1583 * literally switched on by writing into the adapter control register.
1585 * SUCCESS on success and -1 on failure.
1588 static int start_nic(struct s2io_nic *nic)
1590 XENA_dev_config_t __iomem *bar0 = nic->bar0;
1591 struct net_device *dev = nic->dev;
1592 register u64 val64 = 0;
1595 mac_info_t *mac_control;
1596 struct config_param *config;
1598 mac_control = &nic->mac_control;
1599 config = &nic->config;
1601 /* PRC Initialization and configuration */
1602 for (i = 0; i < config->rx_ring_num; i++) {
1603 writeq((u64) mac_control->rings[i].rx_blocks[0].block_dma_addr,
1604 &bar0->prc_rxd0_n[i]);
1606 val64 = readq(&bar0->prc_ctrl_n[i]);
1607 #ifndef CONFIG_2BUFF_MODE
1608 val64 |= PRC_CTRL_RC_ENABLED;
1610 val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
1612 writeq(val64, &bar0->prc_ctrl_n[i]);
1615 #ifdef CONFIG_2BUFF_MODE
1616 /* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
1617 val64 = readq(&bar0->rx_pa_cfg);
1618 val64 |= RX_PA_CFG_IGNORE_L2_ERR;
1619 writeq(val64, &bar0->rx_pa_cfg);
1623 * Enabling MC-RLDRAM. After enabling the device, we timeout
1624 * for around 100ms, which is approximately the time required
1625 * for the device to be ready for operation.
1627 val64 = readq(&bar0->mc_rldram_mrs);
1628 val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
1629 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
1630 val64 = readq(&bar0->mc_rldram_mrs);
1632 msleep(100); /* Delay by around 100 ms. */
/* NOTE(review): the comment says "Enabling" but the code CLEARS
 * ADAPTER_ECC_EN; either the comment or the mask polarity is
 * misleading -- verify against the Xena register specification. */
1634 /* Enabling ECC Protection. */
1635 val64 = readq(&bar0->adapter_control);
1636 val64 &= ~ADAPTER_ECC_EN;
1637 writeq(val64, &bar0->adapter_control);
1640 * Clearing any possible Link state change interrupts that
1641 * could have popped up just before Enabling the card.
/* Read/write-back clears the write-1-to-clear error bits. */
1643 val64 = readq(&bar0->mac_rmac_err_reg);
1645 writeq(val64, &bar0->mac_rmac_err_reg);
1648 * Verify if the device is ready to be enabled, if so enable
1651 val64 = readq(&bar0->adapter_status);
1652 if (!verify_xena_quiescence(nic, val64, nic->device_enabled_once)) {
1653 DBG_PRINT(ERR_DBG, "%s: device is not ready, ", dev->name);
1654 DBG_PRINT(ERR_DBG, "Adapter status reads: 0x%llx\n",
1655 (unsigned long long) val64);
1659 /* Enable select interrupts */
1660 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR | TX_MAC_INTR |
1661 RX_MAC_INTR | MC_INTR;
1662 en_dis_able_nic_intrs(nic, interruptible, ENABLE_INTRS);
1665 * With some switches, link might be already up at this point.
1666 * Because of this weird behavior, when we enable laser,
1667 * we may not get link. We need to handle this. We cannot
1668 * figure out which switch is misbehaving. So we are forced to
1669 * make a global change.
1672 /* Enabling Laser. */
1673 val64 = readq(&bar0->adapter_control);
1674 val64 |= ADAPTER_EOI_TX_ON;
1675 writeq(val64, &bar0->adapter_control);
1677 /* SXE-002: Initialize link and activity LED */
1678 subid = nic->pdev->subsystem_device;
1679 if ((subid & 0xFF) >= 0x07) {
1680 val64 = readq(&bar0->gpio_control);
1681 val64 |= 0x0000800000000000ULL;
1682 writeq(val64, &bar0->gpio_control);
/* Magic LED programming at raw BAR offset 0x2700 (errata SXE-002). */
1683 val64 = 0x0411040400000000ULL;
1684 writeq(val64, (void __iomem *) ((u8 *) bar0 + 0x2700));
1688 * Don't see link state interrupts on certain switches, so
1689 * directly scheduling a link state task from here.
1691 schedule_work(&nic->set_link_task);
/*
 * NOTE(review): numbered partial listing -- the skb NULL check, the
 * dev_kfree_skb() call, and the counter increment between the two
 * memset()s are elided.  Visible logic: walk every descriptor of every
 * Tx FIFO, free any skb still attached via Host_Control, zero the
 * descriptor, report the forced-free count, and reset the FIFO's
 * get/put offsets.
 */
1697 * free_tx_buffers - Free all queued Tx buffers
1698 * @nic : device private variable.
1700 * Free all queued Tx buffers.
1701 * Return Value: void
1704 static void free_tx_buffers(struct s2io_nic *nic)
1706 struct net_device *dev = nic->dev;
1707 struct sk_buff *skb;
1710 mac_info_t *mac_control;
1711 struct config_param *config;
1714 mac_control = &nic->mac_control;
1715 config = &nic->config;
1717 for (i = 0; i < config->tx_fifo_num; i++) {
1718 for (j = 0; j < config->tx_cfg[i].fifo_len - 1; j++) {
1719 txdp = (TxD_t *) mac_control->fifos[i].list_info[j].
1722 (struct sk_buff *) ((unsigned long) txdp->
1725 memset(txdp, 0, sizeof(TxD_t));
1729 memset(txdp, 0, sizeof(TxD_t));
1733 "%s:forcibly freeing %d skbs on FIFO%d\n",
1735 mac_control->fifos[i].tx_curr_get_info.offset = 0;
1736 mac_control->fifos[i].tx_curr_put_info.offset = 0;
/*
 * Purpose (visible): inverse of start_nic() -- disable the same set of
 * interrupt blocks that start_nic() enabled, then clear the
 * PRC_CTRL_RC_ENABLED bit on every receive ring so the PRC stops
 * fetching descriptors.
 */
1741 * stop_nic - To stop the nic
1742 * @nic ; device private variable.
1744 * This function does exactly the opposite of what the start_nic()
1745 * function does. This function is called to stop the device.
1750 static void stop_nic(struct s2io_nic *nic)
1752 XENA_dev_config_t __iomem *bar0 = nic->bar0;
1753 register u64 val64 = 0;
1754 u16 interruptible, i;
1755 mac_info_t *mac_control;
1756 struct config_param *config;
1758 mac_control = &nic->mac_control;
1759 config = &nic->config;
1761 /* Disable all interrupts */
1762 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR | TX_MAC_INTR |
1763 RX_MAC_INTR | MC_INTR;
1764 en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
/* Stop the PRC on each ring; same mask bit start_nic() set. */
1767 for (i = 0; i < config->rx_ring_num; i++) {
1768 val64 = readq(&bar0->prc_ctrl_n[i]);
1769 val64 &= ~((u64) PRC_CTRL_RC_ENABLED);
1770 writeq(val64, &bar0->prc_ctrl_n[i]);
/*
 * NOTE(review): numbered partial listing -- several declarations,
 * #else/#endif pairs, `break`s, and block-index increments are elided,
 * so the exact loop bookkeeping below cannot be fully verified here.
 * Visible logic: replenish ring `ring_no` with skbs until the ring's
 * pkt_cnt is reached; handle end-of-block wraparound; DMA-map each
 * buffer and hand ownership to the NIC via RXD_OWN_XENA.  Compile-time
 * CONFIG_2BUFF_MODE selects 1-buffer vs 2-buffer descriptor layout.
 */
1775 * fill_rx_buffers - Allocates the Rx side skbs
1776 * @nic: device private variable
1777 * @ring_no: ring number
1779 * The function allocates Rx side skbs and puts the physical
1780 * address of these buffers into the RxD buffer pointers, so that the NIC
1781 * can DMA the received frame into these locations.
1782 * The NIC supports 3 receive modes, viz
1784 * 2. three buffer and
1785 * 3. Five buffer modes.
1786 * Each mode defines how many fragments the received frame will be split
1787 * up into by the NIC. The frame is split into L3 header, L4 Header,
1788 * L4 payload in three buffer mode and in 5 buffer mode, L4 payload itself
1789 * is split into 3 fragments. As of now only single buffer mode is
1792 * SUCCESS on success or an appropriate -ve value on failure.
1795 int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
1797 struct net_device *dev = nic->dev;
1798 struct sk_buff *skb;
1800 int off, off1, size, block_no, block_no1;
1801 int offset, offset1;
1804 mac_info_t *mac_control;
1805 struct config_param *config;
1806 #ifdef CONFIG_2BUFF_MODE
1811 dma_addr_t rxdpphys;
1813 #ifndef CONFIG_S2IO_NAPI
1814 unsigned long flags;
1817 mac_control = &nic->mac_control;
1818 config = &nic->config;
/* How many descriptors need fresh buffers right now. */
1819 alloc_cnt = mac_control->rings[ring_no].pkt_cnt -
1820 atomic_read(&nic->rx_bufs_left[ring_no]);
1821 size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
1822 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
1824 while (alloc_tab < alloc_cnt) {
1825 block_no = mac_control->rings[ring_no].rx_curr_put_info.
1827 block_no1 = mac_control->rings[ring_no].rx_curr_get_info.
1829 off = mac_control->rings[ring_no].rx_curr_put_info.offset;
1830 off1 = mac_control->rings[ring_no].rx_curr_get_info.offset;
1831 #ifndef CONFIG_2BUFF_MODE
1832 offset = block_no * (MAX_RXDS_PER_BLOCK + 1) + off;
1833 offset1 = block_no1 * (MAX_RXDS_PER_BLOCK + 1) + off1;
1835 offset = block_no * (MAX_RXDS_PER_BLOCK) + off;
1836 offset1 = block_no1 * (MAX_RXDS_PER_BLOCK) + off1;
1839 rxdp = mac_control->rings[ring_no].rx_blocks[block_no].
1840 block_virt_addr + off;
/* Put caught up with get while descriptor still owned: ring full. */
1841 if ((offset == offset1) && (rxdp->Host_Control)) {
1842 DBG_PRINT(INTR_DBG, "%s: Get and Put", dev->name);
1843 DBG_PRINT(INTR_DBG, " info equated\n");
1846 #ifndef CONFIG_2BUFF_MODE
/* 1-buffer mode: END_OF_BLOCK sentinel chains to the next block
 * via Control_2 (virtual address of next block's first RxD). */
1847 if (rxdp->Control_1 == END_OF_BLOCK) {
1848 mac_control->rings[ring_no].rx_curr_put_info.
1850 mac_control->rings[ring_no].rx_curr_put_info.
1851 block_index %= mac_control->rings[ring_no].block_count;
1852 block_no = mac_control->rings[ring_no].rx_curr_put_info.
1855 off %= (MAX_RXDS_PER_BLOCK + 1);
1856 mac_control->rings[ring_no].rx_curr_put_info.offset =
1858 rxdp = (RxD_t *) ((unsigned long) rxdp->Control_2);
1859 DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
1862 #ifndef CONFIG_S2IO_NAPI
/* put_pos is shared with the ISR path; guard with put_lock. */
1863 spin_lock_irqsave(&nic->put_lock, flags);
1864 mac_control->rings[ring_no].put_pos =
1865 (block_no * (MAX_RXDS_PER_BLOCK + 1)) + off;
1866 spin_unlock_irqrestore(&nic->put_lock, flags);
1869 if (rxdp->Host_Control == END_OF_BLOCK) {
1870 mac_control->rings[ring_no].rx_curr_put_info.
1872 mac_control->rings[ring_no].rx_curr_put_info.block_index
1873 %= mac_control->rings[ring_no].block_count;
1874 block_no = mac_control->rings[ring_no].rx_curr_put_info
1877 DBG_PRINT(INTR_DBG, "%s: block%d at: 0x%llx\n",
1878 dev->name, block_no,
1879 (unsigned long long) rxdp->Control_1);
1880 mac_control->rings[ring_no].rx_curr_put_info.offset =
1882 rxdp = mac_control->rings[ring_no].rx_blocks[block_no].
1885 #ifndef CONFIG_S2IO_NAPI
1886 spin_lock_irqsave(&nic->put_lock, flags);
1887 mac_control->rings[ring_no].put_pos = (block_no *
1888 (MAX_RXDS_PER_BLOCK + 1)) + off;
1889 spin_unlock_irqrestore(&nic->put_lock, flags);
1893 #ifndef CONFIG_2BUFF_MODE
/* Descriptor still owned by the NIC or marked busy: stop here. */
1894 if (rxdp->Control_1 & RXD_OWN_XENA)
1896 if (rxdp->Control_2 & BIT(0))
1899 mac_control->rings[ring_no].rx_curr_put_info.
1903 #ifdef CONFIG_2BUFF_MODE
1905 * RxDs Spanning cache lines will be replenished only
1906 * if the succeeding RxD is also owned by Host. It
1907 * will always be the ((8*i)+3) and ((8*i)+6)
1908 * descriptors for the 48 byte descriptor. The offending
1909 * decsriptor is of-course the 3rd descriptor.
1911 rxdpphys = mac_control->rings[ring_no].rx_blocks[block_no].
1912 block_dma_addr + (off * sizeof(RxD_t));
1913 if (((u64) (rxdpphys)) % 128 > 80) {
1914 rxdpnext = mac_control->rings[ring_no].rx_blocks[block_no].
1915 block_virt_addr + (off + 1);
1916 if (rxdpnext->Host_Control == END_OF_BLOCK) {
1917 nextblk = (block_no + 1) %
1918 (mac_control->rings[ring_no].block_count);
1919 rxdpnext = mac_control->rings[ring_no].rx_blocks
1920 [nextblk].block_virt_addr;
1922 if (rxdpnext->Control_2 & BIT(0))
1927 #ifndef CONFIG_2BUFF_MODE
1928 skb = dev_alloc_skb(size + NET_IP_ALIGN);
1930 skb = dev_alloc_skb(dev->mtu + ALIGN_SIZE + BUF0_LEN + 4);
1933 DBG_PRINT(ERR_DBG, "%s: Out of ", dev->name);
1934 DBG_PRINT(ERR_DBG, "memory to allocate SKBs\n");
1937 #ifndef CONFIG_2BUFF_MODE
1938 skb_reserve(skb, NET_IP_ALIGN);
1939 memset(rxdp, 0, sizeof(RxD_t));
1940 rxdp->Buffer0_ptr = pci_map_single
1941 (nic->pdev, skb->data, size, PCI_DMA_FROMDEVICE);
1942 rxdp->Control_2 &= (~MASK_BUFFER0_SIZE);
1943 rxdp->Control_2 |= SET_BUFFER0_SIZE(size);
/* Stash the skb pointer so the Rx handler can retrieve it, then
 * hand the descriptor to the NIC. */
1944 rxdp->Host_Control = (unsigned long) (skb);
1945 rxdp->Control_1 |= RXD_OWN_XENA;
1947 off %= (MAX_RXDS_PER_BLOCK + 1);
1948 mac_control->rings[ring_no].rx_curr_put_info.offset = off;
1950 ba = &mac_control->rings[ring_no].ba[block_no][off];
1951 skb_reserve(skb, BUF0_LEN);
1952 tmp = ((unsigned long) skb->data & ALIGN_SIZE);
1954 skb_reserve(skb, (ALIGN_SIZE + 1) - tmp);
1956 memset(rxdp, 0, sizeof(RxD_t));
1957 rxdp->Buffer2_ptr = pci_map_single
1958 (nic->pdev, skb->data, dev->mtu + BUF0_LEN + 4,
1959 PCI_DMA_FROMDEVICE);
1961 pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN,
1962 PCI_DMA_FROMDEVICE);
1964 pci_map_single(nic->pdev, ba->ba_1, BUF1_LEN,
1965 PCI_DMA_FROMDEVICE);
1967 rxdp->Control_2 = SET_BUFFER2_SIZE(dev->mtu + 4);
1968 rxdp->Control_2 |= SET_BUFFER0_SIZE(BUF0_LEN);
1969 rxdp->Control_2 |= SET_BUFFER1_SIZE(1); /* dummy. */
1970 rxdp->Control_2 |= BIT(0); /* Set Buffer_Empty bit. */
1971 rxdp->Host_Control = (u64) ((unsigned long) (skb));
1972 rxdp->Control_1 |= RXD_OWN_XENA;
1974 mac_control->rings[ring_no].rx_curr_put_info.offset = off;
1976 rxdp->Control_2 |= SET_RXD_MARKER;
1978 atomic_inc(&nic->rx_bufs_left[ring_no]);
/*
 * NOTE(review): numbered partial listing -- the block-index increments
 * on END_OF_BLOCK, the dev_kfree_skb() call, and some unmap arguments
 * are elided.  Visible logic: for every ring, walk all descriptors,
 * unmap the DMA buffers (1-buffer or 2-buffer layout per
 * CONFIG_2BUFF_MODE), free the attached skbs, zero each descriptor,
 * then reset the ring's get/put indices and the rx_bufs_left counter.
 */
1987 * free_rx_buffers - Frees all Rx buffers
1988 * @sp: device private variable.
1990 * This function will free all Rx buffers allocated by host.
1995 static void free_rx_buffers(struct s2io_nic *sp)
1997 struct net_device *dev = sp->dev;
1998 int i, j, blk = 0, off, buf_cnt = 0;
2000 struct sk_buff *skb;
2001 mac_info_t *mac_control;
2002 struct config_param *config;
2003 #ifdef CONFIG_2BUFF_MODE
2007 mac_control = &sp->mac_control;
2008 config = &sp->config;
2010 for (i = 0; i < config->rx_ring_num; i++) {
2011 for (j = 0, blk = 0; j < config->rx_cfg[i].num_rxd; j++) {
2012 off = j % (MAX_RXDS_PER_BLOCK + 1);
2013 rxdp = mac_control->rings[i].rx_blocks[blk].
2014 block_virt_addr + off;
2016 #ifndef CONFIG_2BUFF_MODE
2017 if (rxdp->Control_1 == END_OF_BLOCK) {
2019 (RxD_t *) ((unsigned long) rxdp->
2025 if (rxdp->Host_Control == END_OF_BLOCK) {
/* Descriptor not owned by NIC means no pending buffer: just clear. */
2031 if (!(rxdp->Control_1 & RXD_OWN_XENA)) {
2032 memset(rxdp, 0, sizeof(RxD_t));
2037 (struct sk_buff *) ((unsigned long) rxdp->
2040 #ifndef CONFIG_2BUFF_MODE
2041 pci_unmap_single(sp->pdev, (dma_addr_t)
/* Unmap size mirrors the `size` computed in fill_rx_buffers(). */
2044 HEADER_ETHERNET_II_802_3_SIZE
2045 + HEADER_802_2_SIZE +
2047 PCI_DMA_FROMDEVICE);
2049 ba = &mac_control->rings[i].ba[blk][off];
2050 pci_unmap_single(sp->pdev, (dma_addr_t)
2053 PCI_DMA_FROMDEVICE);
2054 pci_unmap_single(sp->pdev, (dma_addr_t)
2057 PCI_DMA_FROMDEVICE);
2058 pci_unmap_single(sp->pdev, (dma_addr_t)
2060 dev->mtu + BUF0_LEN + 4,
2061 PCI_DMA_FROMDEVICE);
2064 atomic_dec(&sp->rx_bufs_left[i]);
2067 memset(rxdp, 0, sizeof(RxD_t));
2069 mac_control->rings[i].rx_curr_put_info.block_index = 0;
2070 mac_control->rings[i].rx_curr_get_info.block_index = 0;
2071 mac_control->rings[i].rx_curr_put_info.offset = 0;
2072 mac_control->rings[i].rx_curr_get_info.offset = 0;
2073 atomic_set(&sp->rx_bufs_left[i], 0);
2074 DBG_PRINT(INIT_DBG, "%s:Freed 0x%x Rx Buffers on ring%d\n",
2075 dev->name, buf_cnt, i);
/*
 * NOTE(review): numbered partial listing -- the returns after each
 * branch and the "no-quota-left" label structure are elided.  Visible
 * flow (2.6-era NAPI): clamp budget to dev->quota, ack rx_traffic_int,
 * run rx_intr_handler() per ring until the quota is consumed; when all
 * work is done, netif_rx_complete(), replenish buffers, and re-enable
 * Rx interrupts; otherwise replenish and stay on the poll list.
 */
2080 * s2io_poll - Rx interrupt handler for NAPI support
2081 * @dev : pointer to the device structure.
2082 * @budget : The number of packets that were budgeted to be processed
2083 * during one pass through the 'Poll" function.
2085 * Comes into picture only if NAPI support has been incorporated. It does
2086 * the same thing that rx_intr_handler does, but not in a interrupt context
2087 * also It will process only a given number of packets.
2089 * 0 on success and 1 if there are No Rx packets to be processed.
2092 #if defined(CONFIG_S2IO_NAPI)
2093 static int s2io_poll(struct net_device *dev, int *budget)
2095 nic_t *nic = dev->priv;
2096 int pkt_cnt = 0, org_pkts_to_process;
2097 mac_info_t *mac_control;
2098 struct config_param *config;
2099 XENA_dev_config_t *bar0 = (XENA_dev_config_t *) nic->bar0;
2103 atomic_inc(&nic->isr_cnt);
2104 mac_control = &nic->mac_control;
2105 config = &nic->config;
2107 nic->pkts_to_process = *budget;
2108 if (nic->pkts_to_process > dev->quota)
2109 nic->pkts_to_process = dev->quota;
2110 org_pkts_to_process = nic->pkts_to_process;
/* Read/write-back acks the R1 (write-1-to-clear) Rx traffic bits. */
2112 val64 = readq(&bar0->rx_traffic_int);
2113 writeq(val64, &bar0->rx_traffic_int);
2115 for (i = 0; i < config->rx_ring_num; i++) {
2116 rx_intr_handler(&mac_control->rings[i]);
2117 pkt_cnt = org_pkts_to_process - nic->pkts_to_process;
2118 if (!nic->pkts_to_process) {
2119 /* Quota for the current iteration has been met */
2126 dev->quota -= pkt_cnt;
2128 netif_rx_complete(dev);
2130 for (i = 0; i < config->rx_ring_num; i++) {
2131 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2132 DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
2133 DBG_PRINT(ERR_DBG, " in Rx Poll!!\n");
2137 /* Re enable the Rx interrupts. */
2138 en_dis_able_nic_intrs(nic, RX_TRAFFIC_INTR, ENABLE_INTRS);
2139 atomic_dec(&nic->isr_cnt);
/* Quota exhausted: account for the packets done and replenish, but
 * leave interrupts off and remain scheduled for another poll. */
2143 dev->quota -= pkt_cnt;
2146 for (i = 0; i < config->rx_ring_num; i++) {
2147 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2148 DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
2149 DBG_PRINT(ERR_DBG, " in Rx Poll!!\n");
2153 atomic_dec(&nic->isr_cnt);
/*
 * NOTE(review): numbered partial listing; some lines are elided.
 * Visible flow: walk descriptors from the last get position to the put
 * position, unmap each buffer, pass the descriptor to rx_osm_handler(),
 * and advance across block boundaries; stop at the NAPI quota or
 * indicate_max_pkts.
 */
2159 * rx_intr_handler - Rx interrupt handler
2160 * @nic: device private variable.
2162 * If the interrupt is because of a received frame or if the
2163 * receive ring contains fresh as yet un-processed frames,this function is
2164 * called. It picks out the RxD at which place the last Rx processing had
2165 * stopped and sends the skb to the OSM's Rx handler and then increments
2170 static void rx_intr_handler(ring_info_t *ring_data)
2172 nic_t *nic = ring_data->nic;
2173 struct net_device *dev = (struct net_device *) nic->dev;
2174 XENA_dev_config_t __iomem *bar0 = nic->bar0;
2175 int get_block, get_offset, put_block, put_offset, ring_bufs;
2176 rx_curr_get_info_t get_info, put_info;
2178 struct sk_buff *skb;
2179 #ifndef CONFIG_S2IO_NAPI
2184 spin_lock(&nic->rx_lock);
2185 if (atomic_read(&nic->card_state) == CARD_DOWN) {
2186 DBG_PRINT(ERR_DBG, "%s: %s going down for reset\n",
2187 __FUNCTION__, dev->name);
2188 spin_unlock(&nic->rx_lock);
2192 * rx_traffic_int reg is an R1 register, hence we read and write
2193 * back the same value in the register to clear it
/* BUG?(review): the comment above says rx_traffic_int, but the code
 * acks &bar0->TX_traffic_int -- in an Rx handler this looks like a
 * copy-paste defect (cannot fix safely from this partial listing;
 * verify against the register map and full source). */
2195 val64 = readq(&bar0->tx_traffic_int);
2196 writeq(val64, &bar0->tx_traffic_int);
2198 get_info = ring_data->rx_curr_get_info;
2199 get_block = get_info.block_index;
2200 put_info = ring_data->rx_curr_put_info;
2201 put_block = put_info.block_index;
2202 ring_bufs = get_info.ring_len+1;
2203 rxdp = ring_data->rx_blocks[get_block].block_virt_addr +
2205 get_offset = (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
2207 #ifndef CONFIG_S2IO_NAPI
/* Non-NAPI: put_pos is updated by fill_rx_buffers() under put_lock. */
2208 spin_lock(&nic->put_lock);
2209 put_offset = ring_data->put_pos;
2210 spin_unlock(&nic->put_lock);
2212 put_offset = (put_block * (MAX_RXDS_PER_BLOCK + 1)) +
2215 while (RXD_IS_UP2DT(rxdp) &&
2216 (((get_offset + 1) % ring_bufs) != put_offset)) {
2217 skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control);
2219 DBG_PRINT(ERR_DBG, "%s: The skb is ",
2221 DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
2222 spin_unlock(&nic->rx_lock);
2225 #ifndef CONFIG_2BUFF_MODE
2226 pci_unmap_single(nic->pdev, (dma_addr_t)
2229 HEADER_ETHERNET_II_802_3_SIZE +
2232 PCI_DMA_FROMDEVICE);
2234 pci_unmap_single(nic->pdev, (dma_addr_t)
2236 BUF0_LEN, PCI_DMA_FROMDEVICE);
2237 pci_unmap_single(nic->pdev, (dma_addr_t)
2239 BUF1_LEN, PCI_DMA_FROMDEVICE);
2240 pci_unmap_single(nic->pdev, (dma_addr_t)
2242 dev->mtu + BUF0_LEN + 4,
2243 PCI_DMA_FROMDEVICE);
2245 rx_osm_handler(ring_data, rxdp);
2247 ring_data->rx_curr_get_info.offset =
2249 rxdp = ring_data->rx_blocks[get_block].block_virt_addr +
/* End-of-block wrap: advance to the next rx block. */
2251 if (get_info.offset &&
2252 (!(get_info.offset % MAX_RXDS_PER_BLOCK))) {
2253 get_info.offset = 0;
2254 ring_data->rx_curr_get_info.offset
2257 get_block %= ring_data->block_count;
2258 ring_data->rx_curr_get_info.block_index
2260 rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
2263 get_offset = (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
2265 #ifdef CONFIG_S2IO_NAPI
2266 nic->pkts_to_process -= 1;
2267 if (!nic->pkts_to_process)
2271 if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
2275 spin_unlock(&nic->rx_lock);
/*
 * NOTE(review): numbered partial listing -- frag unmap arguments,
 * Host_Control clearing, and the get-offset increment are elided.
 * Visible flow: ack tx_traffic_int, walk completed TxDs from the get
 * pointer toward the put pointer, report T_CODE errors, unmap the
 * head buffer and each fragment page, free the skb, account tx_bytes,
 * and wake the queue if it was stopped.
 */
2279 * tx_intr_handler - Transmit interrupt handler
2280 * @nic : device private variable
2282 * If an interrupt was raised to indicate DMA complete of the
2283 * Tx packet, this function is called. It identifies the last TxD
2284 * whose buffer was freed and frees all skbs whose data have already
2285 * DMA'ed into the NICs internal memory.
2290 static void tx_intr_handler(fifo_info_t *fifo_data)
2292 nic_t *nic = fifo_data->nic;
2293 XENA_dev_config_t __iomem *bar0 = nic->bar0;
2294 struct net_device *dev = (struct net_device *) nic->dev;
2295 tx_curr_get_info_t get_info, put_info;
2296 struct sk_buff *skb;
2299 register u64 val64 = 0;
2302 * tx_traffic_int reg is an R1 register, hence we read and write
2303 * back the same value in the register to clear it
2305 val64 = readq(&bar0->tx_traffic_int);
2306 writeq(val64, &bar0->tx_traffic_int);
2308 get_info = fifo_data->tx_curr_get_info;
2309 put_info = fifo_data->tx_curr_put_info;
2310 txdlp = (TxD_t *) fifo_data->list_info[get_info.offset].
/* Process only descriptors the NIC has released (OWN bit clear). */
2312 while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
2313 (get_info.offset != put_info.offset) &&
2314 (txdlp->Host_Control)) {
2315 /* Check for TxD errors */
2316 if (txdlp->Control_1 & TXD_T_CODE) {
2317 unsigned long long err;
2318 err = txdlp->Control_1 & TXD_T_CODE;
2319 DBG_PRINT(ERR_DBG, "***TxD error %llx\n",
2323 skb = (struct sk_buff *) ((unsigned long)
2324 txdlp->Host_Control);
2326 DBG_PRINT(ERR_DBG, "%s: Null skb ",
2328 DBG_PRINT(ERR_DBG, "in Tx Free Intr\n");
2332 frg_cnt = skb_shinfo(skb)->nr_frags;
2333 nic->tx_pkt_count++;
/* Unmap the linear part; fragments are unmapped in the loop below. */
2335 pci_unmap_single(nic->pdev, (dma_addr_t)
2336 txdlp->Buffer_Pointer,
2337 skb->len - skb->data_len,
2343 for (j = 0; j < frg_cnt; j++, txdlp++) {
2345 &skb_shinfo(skb)->frags[j];
2346 pci_unmap_page(nic->pdev,
2356 (sizeof(TxD_t) * fifo_data->max_txds));
2358 /* Updating the statistics block */
2359 nic->stats.tx_bytes += skb->len;
2360 dev_kfree_skb_irq(skb);
2363 get_info.offset %= get_info.fifo_len + 1;
2364 txdlp = (TxD_t *) fifo_data->list_info
2365 [get_info.offset].list_virt_addr;
2366 fifo_data->tx_curr_get_info.offset =
2370 spin_lock(&nic->tx_lock);
2371 if (netif_queue_stopped(dev))
2372 netif_wake_queue(dev);
2373 spin_unlock(&nic->tx_lock);
/*
 * alarm_intr_handler - service non-traffic (alarm) interrupts.
 * Handles link-state changes, single/double ECC errors, serious
 * (serr_source) errors and the PCC_FB_ECC errata recycle path.
 * NOTE(review): fragmentary listing; some statements are not visible.
 */
2377 * alarm_intr_handler - Alarm Interrrupt handler
2378 * @nic: device private variable
2379 * Description: If the interrupt was neither because of Rx packet or Tx
2380 * complete, this function is called. If the interrupt was to indicate
2381 * a loss of link, the OSM link status handler is invoked for any other
2382 * alarm interrupt the block that raised the interrupt is displayed
2383 * and a H/W reset is issued.
2388 static void alarm_intr_handler(struct s2io_nic *nic)
2390 struct net_device *dev = (struct net_device *) nic->dev;
2391 XENA_dev_config_t __iomem *bar0 = nic->bar0;
2392 register u64 val64 = 0, err_reg = 0;
2394 /* Handling link status change error Intr */
2395 err_reg = readq(&bar0->mac_rmac_err_reg);
/* Write-back clears the latched bits (read-to-clear-by-write reg) */
2396 writeq(err_reg, &bar0->mac_rmac_err_reg);
2397 if (err_reg & RMAC_LINK_STATE_CHANGE_INT) {
2398 schedule_work(&nic->set_link_task);
2401 /* Handling Ecc errors */
2402 val64 = readq(&bar0->mc_err_reg);
2403 writeq(val64, &bar0->mc_err_reg);
2404 if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) {
2405 if (val64 & MC_ERR_REG_ECC_ALL_DBL) {
/* Double-bit ECC is unrecoverable: stop Tx and schedule a reset */
2406 nic->mac_control.stats_info->sw_stat.
2408 DBG_PRINT(ERR_DBG, "%s: Device indicates ",
2410 DBG_PRINT(ERR_DBG, "double ECC error!!\n");
2411 netif_stop_queue(dev);
2412 schedule_work(&nic->rst_timer_task);
2414 nic->mac_control.stats_info->sw_stat.
2419 /* In case of a serious error, the device will be Reset. */
2420 val64 = readq(&bar0->serr_source);
2421 if (val64 & SERR_SOURCE_ANY) {
2422 DBG_PRINT(ERR_DBG, "%s: Device indicates ", dev->name);
2423 DBG_PRINT(ERR_DBG, "serious error!!\n");
2424 netif_stop_queue(dev);
2425 schedule_work(&nic->rst_timer_task);
2429 * Also as mentioned in the latest Errata sheets if the PCC_FB_ECC
2430 * Error occurs, the adapter will be recycled by disabling the
2431 * adapter enable bit and enabling it again after the device
2432 * becomes Quiescent.
2434 val64 = readq(&bar0->pcc_err_reg);
2435 writeq(val64, &bar0->pcc_err_reg);
2436 if (val64 & PCC_FB_ECC_DB_ERR) {
/* Clear ADAPTER_CNTL_EN; set_link_task re-enables when quiescent */
2437 u64 ac = readq(&bar0->adapter_control);
2438 ac &= ~(ADAPTER_CNTL_EN);
2439 writeq(ac, &bar0->adapter_control);
/* Read-back flushes the posted write before scheduling the task */
2440 ac = readq(&bar0->adapter_control);
2441 schedule_work(&nic->set_link_task);
2444 /* Other type of interrupts are not being handled now, TODO */
/*
 * wait_for_cmd_complete - poll until an RMAC ADDR command finishes.
 * Returns SUCCESS when the STROBE_CMD_EXECUTING bit clears, FAILURE
 * otherwise.
 * NOTE(review): the polling loop body / delay / return statements are
 * not visible in this fragmentary listing — only the register read and
 * the completion test are shown.
 */
2448 * wait_for_cmd_complete - waits for a command to complete.
2449 * @sp : private member of the device structure, which is a pointer to the
2450 * s2io_nic structure.
2451 * Description: Function that waits for a command to Write into RMAC
2452 * ADDR DATA registers to be completed and returns either success or
2453 * error depending on whether the command was complete or not.
2455 * SUCCESS on success and FAILURE on failure.
2458 int wait_for_cmd_complete(nic_t * sp)
2460 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2461 int ret = FAILURE, cnt = 0;
2465 val64 = readq(&bar0->rmac_addr_cmd_mem);
2466 if (!(val64 & RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING)) {
/*
 * s2io_reset - full software reset of the adapter.
 * Issues SW_RESET_ALL, waits for the posted PCI write to land,
 * restores PCI config space, re-sets the swapper, clears error
 * status bits and turns off the LEDs on affected subsystems.
 * NOTE(review): fragmentary listing; the delay call and some
 * declarations are not visible.
 */
2479 * s2io_reset - Resets the card.
2480 * @sp : private member of the device structure.
2481 * Description: Function to Reset the card. This function then also
2482 * restores the previously saved PCI configuration space registers as
2483 * the card reset also resets the configuration space.
2488 void s2io_reset(nic_t * sp)
2490 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2494 val64 = SW_RESET_ALL;
2495 writeq(val64, &bar0->sw_reset);
2498 * At this stage, if the PCI write is indeed completed, the
2499 * card is reset and so is the PCI Config space of the device.
2500 * So a read cannot be issued at this stage on any of the
2501 * registers to ensure the write into "sw_reset" register
2503 * Question: Is there any system call that will explicitly force
2504 * all the write commands still pending on the bus to be pushed
2506 * As of now I'am just giving a 250ms delay and hoping that the
2507 * PCI write to sw_reset register is done by this time.
2511 /* Restore the PCI state saved during initializarion. */
2512 pci_restore_state(sp->pdev);
2518 /* Set swapper to enable I/O register access */
2519 s2io_set_swapper(sp);
2521 /* Clear certain PCI/PCI-X fields after reset */
2522 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
2523 pci_cmd &= 0x7FFF; /* Clear parity err detect bit */
2524 pci_write_config_word(sp->pdev, PCI_COMMAND, pci_cmd);
2526 val64 = readq(&bar0->txpic_int_reg);
2527 val64 &= ~BIT(62); /* Clearing PCI_STATUS error reflected here */
2528 writeq(val64, &bar0->txpic_int_reg);
2530 /* Clearing PCIX Ecc status register */
2531 pci_write_config_dword(sp->pdev, 0x68, 0);
2533 /* Reset device statistics maintained by OS */
2534 memset(&sp->stats, 0, sizeof (struct net_device_stats));
2536 /* SXE-002: Configure link and activity LED to turn it off */
/* Subsystem IDs with low byte >= 0x07 need the GPIO/LED workaround */
2537 subid = sp->pdev->subsystem_device;
2538 if ((subid & 0xFF) >= 0x07) {
2539 val64 = readq(&bar0->gpio_control);
2540 val64 |= 0x0000800000000000ULL;
2541 writeq(val64, &bar0->gpio_control);
2542 val64 = 0x0411040400000000ULL;
/* 0x2700 is a raw BAR0 offset without a named register field */
2543 writeq(val64, (void __iomem *) ((u8 *) bar0 + 0x2700));
2546 sp->device_enabled_once = FALSE;
/*
 * s2io_set_swapper - program the byte-swapper for host endianness.
 * Tries candidate swapper_ctrl values until the PIF feedback register
 * reads the expected 0x0123456789ABCDEF pattern, verifies writes via
 * the xmsi_address scratch register, then enables the final set of
 * FE/SE swap bits. Returns SUCCESS/FAILURE (return paths not all
 * visible in this fragmentary listing).
 */
2550 * s2io_set_swapper - to set the swapper controle on the card
2551 * @sp : private member of the device structure,
2552 * pointer to the s2io_nic structure.
2553 * Description: Function to set the swapper control on the card
2554 * correctly depending on the 'endianness' of the system.
2556 * SUCCESS on success and FAILURE on failure.
2559 int s2io_set_swapper(nic_t * sp)
2561 struct net_device *dev = sp->dev;
2562 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2563 u64 val64, valt, valr;
2566 * Set proper endian settings and verify the same by reading
2567 * the PIF Feed-back register.
2570 val64 = readq(&bar0->pif_rd_swapper_fb);
2571 if (val64 != 0x0123456789ABCDEFULL) {
/* Probe each candidate until the feedback pattern matches */
2573 u64 value[] = { 0xC30000C3C30000C3ULL, /* FE=1, SE=1 */
2574 0x8100008181000081ULL, /* FE=1, SE=0 */
2575 0x4200004242000042ULL, /* FE=0, SE=1 */
2576 0}; /* FE=0, SE=0 */
2579 writeq(value[i], &bar0->swapper_ctrl);
2580 val64 = readq(&bar0->pif_rd_swapper_fb);
2581 if (val64 == 0x0123456789ABCDEFULL)
2586 DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
2588 DBG_PRINT(ERR_DBG, "feedback read %llx\n",
2589 (unsigned long long) val64);
/* Keep the read-path bits found above while testing the write path */
2594 valr = readq(&bar0->swapper_ctrl);
/* xmsi_address is used as a scratch register: write a known pattern
 * and read it back to validate the write-swapper setting */
2597 valt = 0x0123456789ABCDEFULL;
2598 writeq(valt, &bar0->xmsi_address);
2599 val64 = readq(&bar0->xmsi_address);
2603 u64 value[] = { 0x00C3C30000C3C300ULL, /* FE=1, SE=1 */
2604 0x0081810000818100ULL, /* FE=1, SE=0 */
2605 0x0042420000424200ULL, /* FE=0, SE=1 */
2606 0}; /* FE=0, SE=0 */
2609 writeq((value[i] | valr), &bar0->swapper_ctrl);
2610 writeq(valt, &bar0->xmsi_address);
2611 val64 = readq(&bar0->xmsi_address);
2617 unsigned long long x = val64;
2618 DBG_PRINT(ERR_DBG, "Write failed, Xmsi_addr ");
2619 DBG_PRINT(ERR_DBG, "reads:0x%llx\n", x);
/* Preserve only the top 16 bits before OR-ing in the swap enables */
2623 val64 = readq(&bar0->swapper_ctrl);
2624 val64 &= 0xFFFF000000000000ULL;
2628 * The device by default set to a big endian format, so a
2629 * big endian driver need not set anything.
2631 val64 |= (SWAPPER_CTRL_TXP_FE |
2632 SWAPPER_CTRL_TXP_SE |
2633 SWAPPER_CTRL_TXD_R_FE |
2634 SWAPPER_CTRL_TXD_W_FE |
2635 SWAPPER_CTRL_TXF_R_FE |
2636 SWAPPER_CTRL_RXD_R_FE |
2637 SWAPPER_CTRL_RXD_W_FE |
2638 SWAPPER_CTRL_RXF_W_FE |
2639 SWAPPER_CTRL_XMSI_FE |
2640 SWAPPER_CTRL_XMSI_SE |
2641 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
2642 writeq(val64, &bar0->swapper_ctrl);
2645 * Initially we enable all bits to make it accessible by the
2646 * driver, then we selectively enable only those bits that
/* Second variant additionally sets the _SE bits for TXD/RXD paths;
 * presumably this branch is the little-endian configuration —
 * the #ifdef that selects between the two is not visible here. */
2649 val64 |= (SWAPPER_CTRL_TXP_FE |
2650 SWAPPER_CTRL_TXP_SE |
2651 SWAPPER_CTRL_TXD_R_FE |
2652 SWAPPER_CTRL_TXD_R_SE |
2653 SWAPPER_CTRL_TXD_W_FE |
2654 SWAPPER_CTRL_TXD_W_SE |
2655 SWAPPER_CTRL_TXF_R_FE |
2656 SWAPPER_CTRL_RXD_R_FE |
2657 SWAPPER_CTRL_RXD_R_SE |
2658 SWAPPER_CTRL_RXD_W_FE |
2659 SWAPPER_CTRL_RXD_W_SE |
2660 SWAPPER_CTRL_RXF_W_FE |
2661 SWAPPER_CTRL_XMSI_FE |
2662 SWAPPER_CTRL_XMSI_SE |
2663 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
2664 writeq(val64, &bar0->swapper_ctrl);
2666 val64 = readq(&bar0->swapper_ctrl);
2669 * Verifying if endian settings are accurate by reading a
2670 * feedback register.
2672 val64 = readq(&bar0->pif_rd_swapper_fb);
2673 if (val64 != 0x0123456789ABCDEFULL) {
2674 /* Endian settings are incorrect, calls for another dekko. */
2675 DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
2677 DBG_PRINT(ERR_DBG, "feedback read %llx\n",
2678 (unsigned long long) val64);
2685 /* ********************************************************* *
2686 * Functions defined below concern the OS part of the driver *
2687 * ********************************************************* */
/*
 * s2io_open - net_device open entry point.
 * Brings the card up, registers the shared ISR, programs the MAC
 * address and starts the Tx queue. Unwinds via goto labels on
 * failure. NOTE(review): fragmentary listing; the return statements
 * and some error-path lines are not visible.
 */
2690 * s2io_open - open entry point of the driver
2691 * @dev : pointer to the device structure.
2693 * This function is the open entry point of the driver. It mainly calls a
2694 * function to allocate Rx buffers and inserts them into the buffer
2695 * descriptors and then enables the Rx part of the NIC.
2697 * 0 on success and an appropriate (-)ve integer as defined in errno.h
2701 int s2io_open(struct net_device *dev)
2703 nic_t *sp = dev->priv;
2707 * Make sure you have link off by default every time
2708 * Nic is initialized
2710 netif_carrier_off(dev);
2711 sp->last_link_state = 0; /* Unkown link state */
2713 /* Initialize H/W and enable interrupts */
2714 if (s2io_card_up(sp)) {
2715 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
2718 goto hw_init_failed;
2721 /* After proper initialization of H/W, register ISR */
/* SA_SHIRQ: the IRQ line may be shared with other devices */
2722 err = request_irq((int) sp->pdev->irq, s2io_isr, SA_SHIRQ,
2725 DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
2727 goto isr_registration_failed;
2730 if (s2io_set_mac_addr(dev, dev->dev_addr) == FAILURE) {
2731 DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
2733 goto setting_mac_address_failed;
2736 netif_start_queue(dev);
/* Error unwinding: free the IRQ if MAC programming failed */
2739 setting_mac_address_failed:
2740 free_irq(sp->pdev->irq, dev);
2741 isr_registration_failed:
/*
 * s2io_close - net_device stop entry point.
 * Flushes deferred work, stops the queue, frees the IRQ and marks
 * the device closed. The card-down call itself is not visible in
 * this fragmentary listing.
 */
2748 * s2io_close -close entry point of the driver
2749 * @dev : device pointer.
2751 * This is the stop entry point of the driver. It needs to undo exactly
2752 * whatever was done by the open entry point,thus it's usually referred to
2753 * as the close function.Among other things this function mainly stops the
2754 * Rx side of the NIC and frees all the Rx buffers in the Rx rings.
2756 * 0 on success and an appropriate (-)ve integer as defined in errno.h
2760 int s2io_close(struct net_device *dev)
2762 nic_t *sp = dev->priv;
/* Ensure no set_link/reset work item runs after we tear down */
2763 flush_scheduled_work();
2764 netif_stop_queue(dev);
2765 /* Reset card, kill tasklet and free Tx and Rx buffers. */
2768 free_irq(sp->pdev->irq, dev);
2769 sp->device_close_flag = TRUE; /* Device is shut down. */
/*
 * s2io_xmit - hard_start_xmit entry point.
 * Maps the skb (linear part plus any page fragments) into a Tx
 * descriptor list, fills in LSO/checksum offload bits, hands the
 * list to the FIFO and advances the ring "put" pointer under
 * sp->tx_lock. Stops the queue when the ring is full.
 * NOTE(review): fragmentary listing; some declarations, the queue
 * selection and the return statements are not visible.
 */
2774 * s2io_xmit - Tx entry point of te driver
2775 * @skb : the socket buffer containing the Tx data.
2776 * @dev : device pointer.
2778 * This function is the Tx entry point of the driver. S2IO NIC supports
2779 * certain protocol assist features on Tx side, namely CSO, S/G, LSO.
2780 * NOTE: when device cant queue the pkt,just the trans_start variable will
2783 * 0 on success & 1 on failure.
2786 int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
2788 nic_t *sp = dev->priv;
2789 u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
2792 TxFIFO_element_t __iomem *tx_fifo;
2793 unsigned long flags;
2797 mac_info_t *mac_control;
2798 struct config_param *config;
2799 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2801 mac_control = &sp->mac_control;
2802 config = &sp->config;
2804 DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name);
2805 spin_lock_irqsave(&sp->tx_lock, flags);
/* Refuse to queue while a reset is in progress */
2806 if (atomic_read(&sp->card_state) == CARD_DOWN) {
2807 DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
2809 spin_unlock_irqrestore(&sp->tx_lock, flags);
2816 put_off = (u16) mac_control->fifos[queue].tx_curr_put_info.offset;
2817 get_off = (u16) mac_control->fifos[queue].tx_curr_get_info.offset;
2818 txdp = (TxD_t *) mac_control->fifos[queue].list_info[put_off].
2821 queue_len = mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1;
2822 /* Avoid "put" pointer going beyond "get" pointer */
2823 if (txdp->Host_Control || (((put_off + 1) % queue_len) == get_off)) {
2824 DBG_PRINT(ERR_DBG, "Error in xmit, No free TXDs.\n");
2825 netif_stop_queue(dev);
2827 spin_unlock_irqrestore(&sp->tx_lock, flags);
/* TSO: program the MSS into the first descriptor when requested */
2831 mss = skb_shinfo(skb)->tso_size;
2833 txdp->Control_1 |= TXD_TCP_LSO_EN;
2834 txdp->Control_1 |= TXD_TCP_LSO_MSS(mss);
2838 frg_cnt = skb_shinfo(skb)->nr_frags;
2839 frg_len = skb->len - skb->data_len;
/* Map the linear part; stash the skb pointer for tx_intr_handler */
2841 txdp->Buffer_Pointer = pci_map_single
2842 (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE);
2843 txdp->Host_Control = (unsigned long) skb;
2844 if (skb->ip_summed == CHECKSUM_HW) {
2846 (TXD_TX_CKO_IPV4_EN | TXD_TX_CKO_TCP_EN |
2850 txdp->Control_2 |= config->tx_intr_type;
2852 txdp->Control_1 |= (TXD_BUFFER0_SIZE(frg_len) |
2853 TXD_GATHER_CODE_FIRST);
2854 txdp->Control_1 |= TXD_LIST_OWN_XENA;
2856 /* For fragmented SKB. */
2857 for (i = 0; i < frg_cnt; i++) {
2858 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2860 txdp->Buffer_Pointer = (u64) pci_map_page
2861 (sp->pdev, frag->page, frag->page_offset,
2862 frag->size, PCI_DMA_TODEVICE);
2863 txdp->Control_1 |= TXD_BUFFER0_SIZE(frag->size);
2865 txdp->Control_1 |= TXD_GATHER_CODE_LAST;
/* Hand the descriptor list address to the FIFO doorbell */
2867 tx_fifo = mac_control->tx_FIFO_start[queue];
2868 val64 = mac_control->fifos[queue].list_info[put_off].list_phy_addr;
2869 writeq(val64, &tx_fifo->TxDL_Pointer);
2871 val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
2876 val64 |= TX_FIFO_SPECIAL_FUNC;
2878 writeq(val64, &tx_fifo->List_Control);
2880 /* Perform a PCI read to flush previous writes */
2881 val64 = readq(&bar0->general_int_status);
2884 put_off %= mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1;
2885 mac_control->fifos[queue].tx_curr_put_info.offset = put_off;
2887 /* Avoid "put" pointer going beyond "get" pointer */
2888 if (((put_off + 1) % queue_len) == get_off) {
2890 "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
2892 netif_stop_queue(dev);
2895 dev->trans_start = jiffies;
2896 spin_unlock_irqrestore(&sp->tx_lock, flags);
/*
 * s2io_isr - top-level interrupt service routine.
 * Reads general_int_status to classify the interrupt, dispatches to
 * the alarm / Rx / Tx handlers, and (non-NAPI build) refills Rx
 * buffers from IRQ context when they drop to panic level.
 * NOTE(review): fragmentary listing; some declarations, braces and
 * return statements are not visible.
 */
2902 * s2io_isr - ISR handler of the device .
2903 * @irq: the irq of the device.
2904 * @dev_id: a void pointer to the dev structure of the NIC.
2905 * @pt_regs: pointer to the registers pushed on the stack.
2906 * Description: This function is the ISR handler of the device. It
2907 * identifies the reason for the interrupt and calls the relevant
2908 * service routines. As a contongency measure, this ISR allocates the
2909 * recv buffers, if their numbers are below the panic value which is
2910 * presently set to 25% of the original number of rcv buffers allocated.
2912 * IRQ_HANDLED: will be returned if IRQ was handled by this routine
2913 * IRQ_NONE: will be returned if interrupt is not from our device
2915 static irqreturn_t s2io_isr(int irq, void *dev_id, struct pt_regs *regs)
2917 struct net_device *dev = (struct net_device *) dev_id;
2918 nic_t *sp = dev->priv;
2919 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2922 mac_info_t *mac_control;
2923 struct config_param *config;
/* isr_cnt lets other paths know an ISR is in flight */
2925 atomic_inc(&sp->isr_cnt);
2926 mac_control = &sp->mac_control;
2927 config = &sp->config;
2930 * Identify the cause for interrupt and call the appropriate
2931 * interrupt handler. Causes for the interrupt could be;
2935 * 4. Error in any functional blocks of the NIC.
2937 reason = readq(&bar0->general_int_status);
2940 /* The interrupt was not raised by Xena. */
2941 atomic_dec(&sp->isr_cnt);
2945 if (reason & (GEN_ERROR_INTR))
2946 alarm_intr_handler(sp);
2948 #ifdef CONFIG_S2IO_NAPI
/* NAPI path: mask Rx interrupts and schedule the poll routine */
2949 if (reason & GEN_INTR_RXTRAFFIC) {
2950 if (netif_rx_schedule_prep(dev)) {
2951 en_dis_able_nic_intrs(sp, RX_TRAFFIC_INTR,
2953 __netif_rx_schedule(dev);
2957 /* If Intr is because of Rx Traffic */
2958 if (reason & GEN_INTR_RXTRAFFIC) {
2959 for (i = 0; i < config->rx_ring_num; i++) {
2960 rx_intr_handler(&mac_control->rings[i]);
2965 /* If Intr is because of Tx Traffic */
2966 if (reason & GEN_INTR_TXTRAFFIC) {
2967 for (i = 0; i < config->tx_fifo_num; i++)
2968 tx_intr_handler(&mac_control->fifos[i]);
2972 * If the Rx buffer count is below the panic threshold then
2973 * reallocate the buffers from the interrupt handler itself,
2974 * else schedule a tasklet to reallocate the buffers.
2976 #ifndef CONFIG_S2IO_NAPI
2977 for (i = 0; i < config->rx_ring_num; i++) {
2979 int rxb_size = atomic_read(&sp->rx_bufs_left[i]);
2980 int level = rx_buffer_level(sp, rxb_size, i);
2982 if ((level == PANIC) && (!TASKLET_IN_USE)) {
2983 DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", dev->name);
2984 DBG_PRINT(INTR_DBG, "PANIC levels\n");
2985 if ((ret = fill_rx_buffers(sp, i)) == -ENOMEM) {
2986 DBG_PRINT(ERR_DBG, "%s:Out of memory",
2988 DBG_PRINT(ERR_DBG, " in ISR!!\n");
2989 clear_bit(0, (&sp->tasklet_status));
2990 atomic_dec(&sp->isr_cnt);
2993 clear_bit(0, (&sp->tasklet_status));
/* LOW (but not PANIC): defer the refill to the tasklet */
2994 } else if (level == LOW) {
2995 tasklet_schedule(&sp->task);
3000 atomic_dec(&sp->isr_cnt);
/*
 * s2io_updt_stats - trigger a one-shot hardware statistics DMA.
 * Only acts while the card is up: writes a one-shot stat_cfg command
 * and polls bit 0 for completion. The poll-loop bound and delay are
 * not visible in this fragmentary listing.
 */
3007 static void s2io_updt_stats(nic_t *sp)
3009 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3013 if (atomic_read(&sp->card_state) == CARD_UP) {
3014 /* Apprx 30us on a 133 MHz bus */
3015 val64 = SET_UPDT_CLICKS(10) |
3016 STAT_CFG_ONE_SHOT_EN | STAT_CFG_STAT_EN;
3017 writeq(val64, &bar0->stat_cfg);
3020 val64 = readq(&bar0->stat_cfg);
3021 if (!(val64 & BIT(0)))
3025 break; /* Updt failed */
/*
 * s2io_get_stats - net_device get_stats entry point.
 * Forces a fresh hardware stats update, then copies selected MAC
 * counters (little-endian in the stats block) into sp->stats.
 */
3031 * s2io_get_stats - Updates the device statistics structure.
3032 * @dev : pointer to the device structure.
3034 * This function updates the device statistics structure in the s2io_nic
3035 * structure and returns a pointer to the same.
3037 * pointer to the updated net_device_stats structure.
3040 struct net_device_stats *s2io_get_stats(struct net_device *dev)
3042 nic_t *sp = dev->priv;
3043 mac_info_t *mac_control;
3044 struct config_param *config;
3047 mac_control = &sp->mac_control;
3048 config = &sp->config;
3050 /* Configure Stats for immediate updt */
3051 s2io_updt_stats(sp);
/* Hardware counters are little-endian; convert for the host */
3053 sp->stats.tx_packets =
3054 le32_to_cpu(mac_control->stats_info->tmac_frms);
3055 sp->stats.tx_errors =
3056 le32_to_cpu(mac_control->stats_info->tmac_any_err_frms);
3057 sp->stats.rx_errors =
3058 le32_to_cpu(mac_control->stats_info->rmac_drop_frms);
3059 sp->stats.multicast =
3060 le32_to_cpu(mac_control->stats_info->rmac_vld_mcst_frms);
3061 sp->stats.rx_length_errors =
3062 le32_to_cpu(mac_control->stats_info->rmac_long_frms);
3064 return (&sp->stats);
/*
 * s2io_set_multicast - program multicast / promiscuous Rx filtering.
 * Four independent transitions based on dev->flags vs. cached driver
 * state: enable/disable all-multicast, enter/leave promiscuous mode,
 * then rewrite the individual multicast address list in the RMAC
 * filter memory. Each RMAC write is a data0/data1 + command sequence
 * followed by wait_for_cmd_complete().
 * NOTE(review): fragmentary listing; some declarations and flag
 * updates are not visible.
 */
3068 * s2io_set_multicast - entry point for multicast address enable/disable.
3069 * @dev : pointer to the device structure
3071 * This function is a driver entry point which gets called by the kernel
3072 * whenever multicast addresses must be enabled/disabled. This also gets
3073 * called to set/reset promiscuous mode. Depending on the deivce flag, we
3074 * determine, if multicast address must be enabled or if promiscuous mode
3075 * is to be disabled etc.
3080 static void s2io_set_multicast(struct net_device *dev)
3083 struct dev_mc_list *mclist;
3084 nic_t *sp = dev->priv;
3085 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3086 u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
3088 u64 dis_addr = 0xffffffffffffULL, mac_addr = 0;
3091 if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) {
3092 /* Enable all Multicast addresses */
3093 writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac),
3094 &bar0->rmac_addr_data0_mem);
3095 writeq(RMAC_ADDR_DATA1_MEM_MASK(mask),
3096 &bar0->rmac_addr_data1_mem);
3097 val64 = RMAC_ADDR_CMD_MEM_WE |
3098 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3099 RMAC_ADDR_CMD_MEM_OFFSET(MAC_MC_ALL_MC_ADDR_OFFSET);
3100 writeq(val64, &bar0->rmac_addr_cmd_mem);
3101 /* Wait till command completes */
3102 wait_for_cmd_complete(sp);
3105 sp->all_multi_pos = MAC_MC_ALL_MC_ADDR_OFFSET;
3106 } else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) {
3107 /* Disable all Multicast addresses */
/* dis_addr (all-ones MAC) with mask 0 effectively disables the slot */
3108 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
3109 &bar0->rmac_addr_data0_mem);
3110 writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0),
3111 &bar0->rmac_addr_data1_mem);
3112 val64 = RMAC_ADDR_CMD_MEM_WE |
3113 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3114 RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
3115 writeq(val64, &bar0->rmac_addr_cmd_mem);
3116 /* Wait till command completes */
3117 wait_for_cmd_complete(sp);
3120 sp->all_multi_pos = 0;
3123 if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) {
3124 /* Put the NIC into promiscuous mode */
3125 add = &bar0->mac_cfg;
3126 val64 = readq(&bar0->mac_cfg);
3127 val64 |= MAC_CFG_RMAC_PROM_ENABLE;
/* mac_cfg is key-protected: rewrite the key before each 32-bit half */
3129 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
3130 writel((u32) val64, add);
3131 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
3132 writel((u32) (val64 >> 32), (add + 4));
3134 val64 = readq(&bar0->mac_cfg);
3135 sp->promisc_flg = 1;
3136 DBG_PRINT(ERR_DBG, "%s: entered promiscuous mode\n",
3138 } else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
3139 /* Remove the NIC from promiscuous mode */
3140 add = &bar0->mac_cfg;
3141 val64 = readq(&bar0->mac_cfg);
3142 val64 &= ~MAC_CFG_RMAC_PROM_ENABLE;
3144 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
3145 writel((u32) val64, add);
3146 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
3147 writel((u32) (val64 >> 32), (add + 4));
3149 val64 = readq(&bar0->mac_cfg);
3150 sp->promisc_flg = 0;
3151 DBG_PRINT(ERR_DBG, "%s: left promiscuous mode\n",
3155 /* Update individual M_CAST address list */
3156 if ((!sp->m_cast_flg) && dev->mc_count) {
/* Refuse lists larger than the RMAC filter memory can hold */
3158 (MAX_ADDRS_SUPPORTED - MAC_MC_ADDR_START_OFFSET - 1)) {
3159 DBG_PRINT(ERR_DBG, "%s: No more Rx filters ",
3161 DBG_PRINT(ERR_DBG, "can be added, please enable ");
3162 DBG_PRINT(ERR_DBG, "ALL_MULTI instead\n");
3166 prev_cnt = sp->mc_addr_count;
3167 sp->mc_addr_count = dev->mc_count;
3169 /* Clear out the previous list of Mc in the H/W. */
3170 for (i = 0; i < prev_cnt; i++) {
3171 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
3172 &bar0->rmac_addr_data0_mem);
3173 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
3174 &bar0->rmac_addr_data1_mem);
3175 val64 = RMAC_ADDR_CMD_MEM_WE |
3176 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3177 RMAC_ADDR_CMD_MEM_OFFSET
3178 (MAC_MC_ADDR_START_OFFSET + i);
3179 writeq(val64, &bar0->rmac_addr_cmd_mem);
3181 /* Wait for command completes */
3182 if (wait_for_cmd_complete(sp)) {
3183 DBG_PRINT(ERR_DBG, "%s: Adding ",
3185 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
3190 /* Create the new Rx filter list and update the same in H/W. */
3191 for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
3192 i++, mclist = mclist->next) {
3193 memcpy(sp->usr_addrs[i].addr, mclist->dmi_addr,
/* Pack the 6-byte MAC into a u64 (shift not visible in listing) */
3195 for (j = 0; j < ETH_ALEN; j++) {
3196 mac_addr |= mclist->dmi_addr[j];
3200 writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
3201 &bar0->rmac_addr_data0_mem);
3202 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
3203 &bar0->rmac_addr_data1_mem);
3204 val64 = RMAC_ADDR_CMD_MEM_WE |
3205 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3206 RMAC_ADDR_CMD_MEM_OFFSET
3207 (i + MAC_MC_ADDR_START_OFFSET);
3208 writeq(val64, &bar0->rmac_addr_cmd_mem);
3210 /* Wait for command completes */
3211 if (wait_for_cmd_complete(sp)) {
3212 DBG_PRINT(ERR_DBG, "%s: Adding ",
3214 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
/*
 * s2io_set_mac_addr - program the unicast MAC address into offset 0
 * of the RMAC address filter memory, then wait for completion.
 */
3222 * s2io_set_mac_addr - Programs the Xframe mac address
3223 * @dev : pointer to the device structure.
3224 * @addr: a uchar pointer to the new mac address which is to be set.
3225 * Description : This procedure will program the Xframe to receive
3226 * frames with new Mac Address
3227 * Return value: SUCCESS on success and an appropriate (-)ve integer
3228 * as defined in errno.h file on failure.
3231 int s2io_set_mac_addr(struct net_device *dev, u8 * addr)
3233 nic_t *sp = dev->priv;
3234 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3235 register u64 val64, mac_addr = 0;
3239 * Set the new MAC address as the new unicast filter and reflect this
3240 * change on the device address registered with the OS. It will be
/* Pack the 6-byte address into a u64 (shift not visible in listing) */
3243 for (i = 0; i < ETH_ALEN; i++) {
3245 mac_addr |= addr[i];
3248 writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
3249 &bar0->rmac_addr_data0_mem);
/* Offset 0 is the unicast entry in the RMAC address memory */
3252 RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3253 RMAC_ADDR_CMD_MEM_OFFSET(0);
3254 writeq(val64, &bar0->rmac_addr_cmd_mem);
3255 /* Wait till command completes */
3256 if (wait_for_cmd_complete(sp)) {
3257 DBG_PRINT(ERR_DBG, "%s: set_mac_addr failed\n", dev->name);
/*
 * s2io_ethtool_sset - ethtool set_settings handler.
 * The link is fixed 10G full-duplex with no autoneg, so anything
 * else is rejected; otherwise the device is bounced (close/open —
 * the reopen call is not visible in this fragmentary listing).
 */
3265 * s2io_ethtool_sset - Sets different link parameters.
3266 * @sp : private member of the device structure, which is a pointer to the * s2io_nic structure.
3267 * @info: pointer to the structure with parameters given by ethtool to set
3270 * The function sets different link parameters provided by the user onto
3276 static int s2io_ethtool_sset(struct net_device *dev,
3277 struct ethtool_cmd *info)
3279 nic_t *sp = dev->priv;
3280 if ((info->autoneg == AUTONEG_ENABLE) ||
3281 (info->speed != SPEED_10000) || (info->duplex != DUPLEX_FULL))
3284 s2io_close(sp->dev);
/*
 * s2io_ethtool_gset - ethtool get_settings handler.
 * Reports fixed 10GBase fibre capabilities; speed/duplex are valid
 * only while the carrier is up, autoneg is always off.
 */
3292 * s2io_ethtol_gset - Return link specific information.
3293 * @sp : private member of the device structure, pointer to the
3294 * s2io_nic structure.
3295 * @info : pointer to the structure with parameters given by ethtool
3296 * to return link information.
3298 * Returns link specific information like speed, duplex etc.. to ethtool.
3300 * return 0 on success.
3303 static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
3305 nic_t *sp = dev->priv;
3306 info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
3307 info->advertising = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
3308 info->port = PORT_FIBRE;
3309 /* info->transceiver?? TODO */
3311 if (netif_carrier_ok(sp->dev)) {
3312 info->speed = 10000;
3313 info->duplex = DUPLEX_FULL;
3319 info->autoneg = AUTONEG_DISABLE;
/*
 * s2io_ethtool_gdrvinfo - ethtool get_drvinfo handler.
 * Fills in driver name/version, bus info and the sizes of the
 * register, EEPROM, self-test and statistics dumps.
 */
3324 * s2io_ethtool_gdrvinfo - Returns driver specific information.
3325 * @sp : private member of the device structure, which is a pointer to the
3326 * s2io_nic structure.
3327 * @info : pointer to the structure with parameters given by ethtool to
3328 * return driver information.
3330 * Returns driver specefic information like name, version etc.. to ethtool.
3335 static void s2io_ethtool_gdrvinfo(struct net_device *dev,
3336 struct ethtool_drvinfo *info)
3338 nic_t *sp = dev->priv;
3340 strncpy(info->driver, s2io_driver_name, sizeof(s2io_driver_name));
3341 strncpy(info->version, s2io_driver_version,
3342 sizeof(s2io_driver_version));
/* No firmware version to report for this device */
3343 strncpy(info->fw_version, "", 32);
3344 strncpy(info->bus_info, pci_name(sp->pdev), 32);
3345 info->regdump_len = XENA_REG_SPACE;
3346 info->eedump_len = XENA_EEPROM_SPACE;
3347 info->testinfo_len = S2IO_TEST_LEN;
3348 info->n_stats = S2IO_STAT_LEN;
/*
 * s2io_ethtool_gregs - ethtool get_regs handler.
 * Copies the whole BAR0 register space into the caller's buffer,
 * 8 bytes (one readq) at a time.
 */
3352 * s2io_ethtool_gregs - dumps the entire space of Xfame into the buffer.
3353 * @sp: private member of the device structure, which is a pointer to the
3354 * s2io_nic structure.
3355 * @regs : pointer to the structure with parameters given by ethtool for
3356 * dumping the registers.
3357 * @reg_space: The input argumnet into which all the registers are dumped.
3359 * Dumps the entire register space of xFrame NIC into the user given
3365 static void s2io_ethtool_gregs(struct net_device *dev,
3366 struct ethtool_regs *regs, void *space)
3370 u8 *reg_space = (u8 *) space;
3371 nic_t *sp = dev->priv;
3373 regs->len = XENA_REG_SPACE;
3374 regs->version = sp->pdev->subsystem_device;
3376 for (i = 0; i < regs->len; i += 8) {
3377 reg = readq(sp->bar0 + i);
3378 memcpy((reg_space + i), &reg, 8);
/*
 * s2io_phy_id - timer callback that toggles the adapter LED.
 * Newer subsystems (low byte >= 0x07) toggle GPIO_CTRL_GPIO_0;
 * older ones toggle ADAPTER_LED_ON. Re-arms itself every HZ/2 so
 * the LED blinks once per second.
 */
3383 * s2io_phy_id - timer function that alternates adapter LED.
3384 * @data : address of the private member of the device structure, which
3385 * is a pointer to the s2io_nic structure, provided as an u32.
3386 * Description: This is actually the timer function that alternates the
3387 * adapter LED bit of the adapter control bit to set/reset every time on
3388 * invocation. The timer is set for 1/2 a second, hence tha NIC blinks
3389 * once every second.
3391 static void s2io_phy_id(unsigned long data)
3393 nic_t *sp = (nic_t *) data;
3394 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3398 subid = sp->pdev->subsystem_device;
3399 if ((subid & 0xFF) >= 0x07) {
3400 val64 = readq(&bar0->gpio_control);
3401 val64 ^= GPIO_CTRL_GPIO_0;
3402 writeq(val64, &bar0->gpio_control);
3404 val64 = readq(&bar0->adapter_control);
3405 val64 ^= ADAPTER_LED_ON;
3406 writeq(val64, &bar0->adapter_control);
3409 mod_timer(&sp->id_timer, jiffies + HZ / 2);
/*
 * s2io_ethtool_idnic - ethtool phys_id handler (blink the LED).
 * Arms the s2io_phy_id timer for @data seconds (or a default when
 * @data is 0 — the branch condition is not fully visible), sleeps
 * interruptibly for the duration, then restores gpio_control on
 * boards with faulty link indicators.
 */
3413 * s2io_ethtool_idnic - To physically identify the nic on the system.
3414 * @sp : private member of the device structure, which is a pointer to the
3415 * s2io_nic structure.
3416 * @id : pointer to the structure with identification parameters given by
3418 * Description: Used to physically identify the NIC on the system.
3419 * The Link LED will blink for a time specified by the user for
3421 * NOTE: The Link has to be Up to be able to blink the LED. Hence
3422 * identification is possible only if it's link is up.
3424 * int , returns 0 on success
3427 static int s2io_ethtool_idnic(struct net_device *dev, u32 data)
3429 u64 val64 = 0, last_gpio_ctrl_val;
3430 nic_t *sp = dev->priv;
3431 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3434 subid = sp->pdev->subsystem_device;
/* Remember gpio_control so it can be restored after blinking */
3435 last_gpio_ctrl_val = readq(&bar0->gpio_control);
3436 if ((subid & 0xFF) < 0x07) {
/* Older boards need the adapter enabled for the LED to work */
3437 val64 = readq(&bar0->adapter_control);
3438 if (!(val64 & ADAPTER_CNTL_EN)) {
3440 "Adapter Link down, cannot blink LED\n");
3444 if (sp->id_timer.function == NULL) {
/* Lazy one-time timer setup on first identify request */
3445 init_timer(&sp->id_timer);
3446 sp->id_timer.function = s2io_phy_id;
3447 sp->id_timer.data = (unsigned long) sp;
3449 mod_timer(&sp->id_timer, jiffies);
3451 msleep_interruptible(data * HZ);
3453 msleep_interruptible(MAX_FLICKER_TIME);
3454 del_timer_sync(&sp->id_timer);
3456 if (CARDS_WITH_FAULTY_LINK_INDICATORS(subid)) {
3457 writeq(last_gpio_ctrl_val, &bar0->gpio_control);
3458 last_gpio_ctrl_val = readq(&bar0->gpio_control);
/*
 * s2io_ethtool_getpause_data - ethtool get_pauseparam handler.
 * Reads rmac_pause_cfg and reports Tx/Rx pause capability;
 * autoneg of pause frames is never supported.
 */
3465 * s2io_ethtool_getpause_data -Pause frame frame generation and reception.
3466 * @sp : private member of the device structure, which is a pointer to the
3467 * s2io_nic structure.
3468 * @ep : pointer to the structure with pause parameters given by ethtool.
3470 * Returns the Pause frame generation and reception capability of the NIC.
3474 static void s2io_ethtool_getpause_data(struct net_device *dev,
3475 struct ethtool_pauseparam *ep)
3478 nic_t *sp = dev->priv;
3479 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3481 val64 = readq(&bar0->rmac_pause_cfg);
3482 if (val64 & RMAC_PAUSE_GEN_ENABLE)
3483 ep->tx_pause = TRUE;
3484 if (val64 & RMAC_PAUSE_RX_ENABLE)
3485 ep->rx_pause = TRUE;
3486 ep->autoneg = FALSE;
/*
 * s2io_ethtool_setpause_data - ethtool set_pauseparam handler.
 * Read-modify-writes rmac_pause_cfg to enable/disable Tx pause
 * generation and Rx pause honoring. (The if-conditions testing
 * ep->tx_pause / ep->rx_pause are not visible in this listing.)
 */
3490 * s2io_ethtool_setpause_data - set/reset pause frame generation.
3491 * @sp : private member of the device structure, which is a pointer to the
3492 * s2io_nic structure.
3493 * @ep : pointer to the structure with pause parameters given by ethtool.
3495 * It can be used to set or reset Pause frame generation or reception
3496 * support of the NIC.
3498 * int, returns 0 on Success
3501 static int s2io_ethtool_setpause_data(struct net_device *dev,
3502 struct ethtool_pauseparam *ep)
3505 nic_t *sp = dev->priv;
3506 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3508 val64 = readq(&bar0->rmac_pause_cfg);
3510 val64 |= RMAC_PAUSE_GEN_ENABLE;
3512 val64 &= ~RMAC_PAUSE_GEN_ENABLE;
3514 val64 |= RMAC_PAUSE_RX_ENABLE;
3516 val64 &= ~RMAC_PAUSE_RX_ENABLE;
3517 writeq(val64, &bar0->rmac_pause_cfg);
/*
 * read_eeprom - read 4 bytes from the serial EEPROM over I2C.
 * Issues an I2C read command through i2c_control and polls (up to 5
 * iterations) for CNTL_END before extracting the data. The delay
 * between polls and the final return are not visible in this
 * fragmentary listing.
 */
3522 * read_eeprom - reads 4 bytes of data from user given offset.
3523 * @sp : private member of the device structure, which is a pointer to the
3524 * s2io_nic structure.
3525 * @off : offset at which the data must be written
3526 * @data : Its an output parameter where the data read at the given
3529 * Will read 4 bytes of data from the user given offset and return the
3531 * NOTE: Will allow to read only part of the EEPROM visible through the
3534 * -1 on failure and 0 on success.
/* Fixed I2C device id of the on-board EEPROM */
3537 #define S2IO_DEV_ID 5
3538 static int read_eeprom(nic_t * sp, int off, u32 * data)
3543 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3545 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
3546 I2C_CONTROL_BYTE_CNT(0x3) | I2C_CONTROL_READ |
3547 I2C_CONTROL_CNTL_START;
3548 SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
3550 while (exit_cnt < 5) {
3551 val64 = readq(&bar0->i2c_control);
3552 if (I2C_CONTROL_CNTL_END(val64)) {
3553 *data = I2C_CONTROL_GET_DATA(val64);
3565 * write_eeprom - actually writes the relevant part of the data value.
3566 * @sp : private member of the device structure, which is a pointer to the
3567 * s2io_nic structure.
3568 * @off : offset at which the data must be written
3569 * @data : The data that is to be written
3570 * @cnt : Number of bytes of the data that are actually to be written into
3571 * the Eeprom. (max of 3)
3573 * Actually writes the relevant part of the data value into the Eeprom
3574 * through the I2C bus.
3576 * 0 on success, -1 on failure.
/* write_eeprom - write up to @cnt bytes of @data to EEPROM offset @off
 * via I2C_CONTROL, then poll (up to 5 tries) for CNTL_END; a NACK bit in
 * the status marks failure.  ret defaults to -1 until success.
 * NOTE(review): fragment - the poll-loop tail and return are missing. */
3579 static int write_eeprom(nic_t * sp, int off, u32 data, int cnt)
3581 int exit_cnt = 0, ret = -1;
3583 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3585 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
3586 I2C_CONTROL_BYTE_CNT(cnt) | I2C_CONTROL_SET_DATA(data) |
3587 I2C_CONTROL_CNTL_START;
3588 SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
3590 while (exit_cnt < 5) {
3591 val64 = readq(&bar0->i2c_control);
3592 if (I2C_CONTROL_CNTL_END(val64)) {
3593 if (!(val64 & I2C_CONTROL_NACK))
3605 * s2io_ethtool_geeprom - reads the value stored in the Eeprom.
3606 * @sp : private member of the device structure, which is a pointer to the * s2io_nic structure.
3607 * @eeprom : pointer to the user level structure provided by ethtool,
3608 * containing all relevant information.
3609 * @data_buf : user defined value to be written into Eeprom.
3610 * Description: Reads the values stored in the Eeprom at given offset
3611 * for a given length. Stores these values int the input argument data
3612 * buffer 'data_buf' and returns these to the caller (ethtool.)
/* s2io_ethtool_geeprom - ethtool get_eeprom hook.  Clamps the requested
 * range to XENA_EEPROM_SPACE, reads 4 bytes at a time with read_eeprom()
 * and copies each word into the caller's buffer.
 * NOTE(review): fragment - the 'valid' byte-swizzle between 'data' and
 * the memcpy is missing from this view; confirm against full source. */
3617 static int s2io_ethtool_geeprom(struct net_device *dev,
3618 struct ethtool_eeprom *eeprom, u8 * data_buf)
3621 nic_t *sp = dev->priv;
3623 eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);
3625 if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE))
3626 eeprom->len = XENA_EEPROM_SPACE - eeprom->offset;
3628 for (i = 0; i < eeprom->len; i += 4) {
3629 if (read_eeprom(sp, (eeprom->offset + i), &data)) {
3630 DBG_PRINT(ERR_DBG, "Read of EEPROM failed\n");
3634 memcpy((data_buf + i), &valid, 4);
3640 * s2io_ethtool_seeprom - tries to write the user provided value in Eeprom
3641 * @sp : private member of the device structure, which is a pointer to the
3642 * s2io_nic structure.
3643 * @eeprom : pointer to the user level structure provided by ethtool,
3644 * containing all relevant information.
3645 * @data_buf ; user defined value to be written into Eeprom.
3647 * Tries to write the user provided value in the Eeprom, at the offset
3648 * given by the user.
3650 * 0 on success, -EFAULT on failure.
/* s2io_ethtool_seeprom - ethtool set_eeprom hook.  Rejects requests whose
 * magic does not match vendor|device<<16, then writes the user buffer one
 * byte at a time (shifted into bits 31:24) via write_eeprom().
 * NOTE(review): fragment - the while-loop header over 'len'/'cnt' and the
 * error-return lines are missing from this view. */
3653 static int s2io_ethtool_seeprom(struct net_device *dev,
3654 struct ethtool_eeprom *eeprom,
3657 int len = eeprom->len, cnt = 0;
3658 u32 valid = 0, data;
3659 nic_t *sp = dev->priv;
3661 if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
3663 "ETHTOOL_WRITE_EEPROM Err: Magic value ");
3664 DBG_PRINT(ERR_DBG, "is wrong, Its not 0x%x\n",
3670 data = (u32) data_buf[cnt] & 0x000000FF;
3672 valid = (u32) (data << 24);
3676 if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) {
3678 "ETHTOOL_WRITE_EEPROM Err: Cannot ");
3680 "write into the specified offset\n");
3691 * s2io_register_test - reads and writes into all clock domains.
3692 * @sp : private member of the device structure, which is a pointer to the
3693 * s2io_nic structure.
3694 * @data : variable that returns the result of each of the test conducted b
3697 * Read and write into all clock domains. The NIC has 3 clock domains,
3698 * see that registers in all the three regions are accessible.
/* s2io_register_test - offline self-test: verify register access in all
 * clock domains.  Four read tests compare fixed-pattern registers against
 * their documented reset values; two write tests bounce 0x5A../0xA5..
 * patterns through xmsi_data.  Failures set *data (lines elided here). */
3703 static int s2io_register_test(nic_t * sp, uint64_t * data)
3705 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3709 val64 = readq(&bar0->pif_rd_swapper_fb);
3710 if (val64 != 0x123456789abcdefULL) {
3712 DBG_PRINT(INFO_DBG, "Read Test level 1 fails\n");
3715 val64 = readq(&bar0->rmac_pause_cfg);
3716 if (val64 != 0xc000ffff00000000ULL) {
3718 DBG_PRINT(INFO_DBG, "Read Test level 2 fails\n");
3721 val64 = readq(&bar0->rx_queue_cfg);
3722 if (val64 != 0x0808080808080808ULL) {
3724 DBG_PRINT(INFO_DBG, "Read Test level 3 fails\n");
3727 val64 = readq(&bar0->xgxs_efifo_cfg);
3728 if (val64 != 0x000000001923141EULL) {
3730 DBG_PRINT(INFO_DBG, "Read Test level 4 fails\n");
3733 val64 = 0x5A5A5A5A5A5A5A5AULL;
3734 writeq(val64, &bar0->xmsi_data);
3735 val64 = readq(&bar0->xmsi_data);
3736 if (val64 != 0x5A5A5A5A5A5A5A5AULL) {
3738 DBG_PRINT(ERR_DBG, "Write Test level 1 fails\n");
3741 val64 = 0xA5A5A5A5A5A5A5A5ULL;
3742 writeq(val64, &bar0->xmsi_data);
3743 val64 = readq(&bar0->xmsi_data);
3744 if (val64 != 0xA5A5A5A5A5A5A5A5ULL) {
3746 DBG_PRINT(ERR_DBG, "Write Test level 2 fails\n");
3754 * s2io_eeprom_test - to verify that EEprom in the xena can be programmed.
3755 * @sp : private member of the device structure, which is a pointer to the
3756 * s2io_nic structure.
3757 * @data:variable that returns the result of each of the test conducted by
3760 * Verify that EEPROM in the xena can be programmed using I2C_CONTROL
/* s2io_eeprom_test - offline self-test of EEPROM programmability.
 * Writes that must FAIL (write-protected offsets: 0x0, 0x7C, 0x80, 0xFC,
 * 0x100, 0x4EC) are checked with '!write_eeprom(...)', i.e. an
 * unexpectedly successful write is the error.  Writes that must SUCCEED
 * (0x4F0, 0x7FC) are written, read back, compared, then restored to
 * 0xFFFFFFFF.  Error-path lines are elided in this fragment. */
3766 static int s2io_eeprom_test(nic_t * sp, uint64_t * data)
3771 /* Test Write Error at offset 0 */
3772 if (!write_eeprom(sp, 0, 0, 3))
3775 /* Test Write at offset 4f0 */
3776 if (write_eeprom(sp, 0x4F0, 0x01234567, 3))
3778 if (read_eeprom(sp, 0x4F0, &ret_data))
3781 if (ret_data != 0x01234567)
3784 /* Reset the EEPROM data go FFFF */
3785 write_eeprom(sp, 0x4F0, 0xFFFFFFFF, 3);
3787 /* Test Write Request Error at offset 0x7c */
3788 if (!write_eeprom(sp, 0x07C, 0, 3))
3791 /* Test Write Request at offset 0x7fc */
3792 if (write_eeprom(sp, 0x7FC, 0x01234567, 3))
3794 if (read_eeprom(sp, 0x7FC, &ret_data))
3797 if (ret_data != 0x01234567)
3800 /* Reset the EEPROM data go FFFF */
3801 write_eeprom(sp, 0x7FC, 0xFFFFFFFF, 3);
3803 /* Test Write Error at offset 0x80 */
3804 if (!write_eeprom(sp, 0x080, 0, 3))
3807 /* Test Write Error at offset 0xfc */
3808 if (!write_eeprom(sp, 0x0FC, 0, 3))
3811 /* Test Write Error at offset 0x100 */
3812 if (!write_eeprom(sp, 0x100, 0, 3))
3815 /* Test Write Error at offset 4ec */
3816 if (!write_eeprom(sp, 0x4EC, 0, 3))
3824 * s2io_bist_test - invokes the MemBist test of the card .
3825 * @sp : private member of the device structure, which is a pointer to the
3826 * s2io_nic structure.
3827 * @data:variable that returns the result of each of the test conducted by
3830 * This invokes the MemBist test of the card. We give around
3831 * 2 secs time for the Test to complete. If it's still not complete
3832 * within this period, we consider that the test failed.
3834 * 0 on success and -1 on failure.
/* s2io_bist_test - start the card's PCI built-in self-test and collect
 * its completion code into *data once PCI_BIST_START clears.
 *
 * BUG FIX: 'bist' is a single byte read with pci_read_config_byte(), but
 * the start bit was written back with pci_write_config_word(), performing
 * a 2-byte (and misaligned, PCI_BIST is at 0x0F) config write.  Use
 * pci_write_config_byte() so the access width matches the 1-byte
 * PCI_BIST register.  (Same fix as applied upstream to s2io.c.)
 *
 * NOTE(review): fragment - the poll-loop header/sleep and return lines
 * are missing from this view. */
3837 static int s2io_bist_test(nic_t * sp, uint64_t * data)
3840 int cnt = 0, ret = -1;
3842 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
3843 bist |= PCI_BIST_START;
3844 pci_write_config_byte(sp->pdev, PCI_BIST, bist);
3847 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
3848 if (!(bist & PCI_BIST_START)) {
3849 *data = (bist & PCI_BIST_CODE_MASK);
3861 * s2io_link_test - verifies the link state of the nic
3862 * @sp ; private member of the device structure, which is a pointer to the
3863 * s2io_nic structure.
3864 * @data: variable that returns the result of each of the test conducted by
3867 * The function verifies the link state of the NIC and updates the input
3868 * argument 'data' appropriately.
/* s2io_link_test - online self-test: report link state into *data based
 * on the RMAC local-fault bit of adapter_status.  The assignment lines
 * following the fault check are elided in this fragment. */
3873 static int s2io_link_test(nic_t * sp, uint64_t * data)
3875 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3878 val64 = readq(&bar0->adapter_status);
3879 if (val64 & ADAPTER_STATUS_RMAC_LOCAL_FAULT)
3886 * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
3887 * @sp - private member of the device structure, which is a pointer to the
3888 * s2io_nic structure.
3889 * @data - variable that returns the result of each of the test
3890 * conducted by the driver.
3892 * This is one of the offline test that tests the read and write
3893 * access to the RldRam chip on the NIC.
/* s2io_rldram_test - offline self-test of the on-board RLDRAM.
 * Disables ECC, enters RLDRAM test mode, programs the MRS, then runs two
 * iterations: write three 64-bit patterns (second iteration uses their
 * upper-48-bit complements via the XOR), trigger a test WRITE followed by
 * a test GO (read/compare), polling up to 5 times for TEST_DONE each
 * phase, and finally checks TEST_PASS.  Loop tails, sleeps, and the
 * pass/fail bookkeeping lines are elided in this fragment. */
3898 static int s2io_rldram_test(nic_t * sp, uint64_t * data)
3900 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3902 int cnt, iteration = 0, test_pass = 0;
3904 val64 = readq(&bar0->adapter_control);
3905 val64 &= ~ADAPTER_ECC_EN;
3906 writeq(val64, &bar0->adapter_control);
3908 val64 = readq(&bar0->mc_rldram_test_ctrl);
3909 val64 |= MC_RLDRAM_TEST_MODE;
3910 writeq(val64, &bar0->mc_rldram_test_ctrl);
3912 val64 = readq(&bar0->mc_rldram_mrs);
3913 val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE;
3914 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
3916 val64 |= MC_RLDRAM_MRS_ENABLE;
3917 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
3919 while (iteration < 2) {
3920 val64 = 0x55555555aaaa0000ULL;
3921 if (iteration == 1) {
3922 val64 ^= 0xFFFFFFFFFFFF0000ULL;
3924 writeq(val64, &bar0->mc_rldram_test_d0);
3926 val64 = 0xaaaa5a5555550000ULL;
3927 if (iteration == 1) {
3928 val64 ^= 0xFFFFFFFFFFFF0000ULL;
3930 writeq(val64, &bar0->mc_rldram_test_d1);
3932 val64 = 0x55aaaaaaaa5a0000ULL;
3933 if (iteration == 1) {
3934 val64 ^= 0xFFFFFFFFFFFF0000ULL;
3936 writeq(val64, &bar0->mc_rldram_test_d2);
3938 val64 = (u64) (0x0000003fffff0000ULL);
3939 writeq(val64, &bar0->mc_rldram_test_add);
3942 val64 = MC_RLDRAM_TEST_MODE;
3943 writeq(val64, &bar0->mc_rldram_test_ctrl);
3946 MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_WRITE |
3948 writeq(val64, &bar0->mc_rldram_test_ctrl);
3950 for (cnt = 0; cnt < 5; cnt++) {
3951 val64 = readq(&bar0->mc_rldram_test_ctrl);
3952 if (val64 & MC_RLDRAM_TEST_DONE)
3960 val64 = MC_RLDRAM_TEST_MODE;
3961 writeq(val64, &bar0->mc_rldram_test_ctrl);
3963 val64 |= MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
3964 writeq(val64, &bar0->mc_rldram_test_ctrl);
3966 for (cnt = 0; cnt < 5; cnt++) {
3967 val64 = readq(&bar0->mc_rldram_test_ctrl);
3968 if (val64 & MC_RLDRAM_TEST_DONE)
3976 val64 = readq(&bar0->mc_rldram_test_ctrl);
3977 if (val64 & MC_RLDRAM_TEST_PASS)
3992 * s2io_ethtool_test - conducts 6 tests to determine the health of card.
3993 * @sp : private member of the device structure, which is a pointer to the
3994 * s2io_nic structure.
3995 * @ethtest : pointer to a ethtool command specific structure that will be
3996 * returned to the user.
3997 * @data : variable that returns the result of each of the test
3998 * conducted by the driver.
4000 * This function conducts 6 tests ( 4 offline and 2 online) to determine
4001 * the health of the card.
/* s2io_ethtool_test - ethtool self_test entry point.  For an offline
 * request the interface is closed (if it was running), the register,
 * rldram, eeprom and bist tests fill data[0,3,1,4] respectively, and any
 * failure sets ETH_TEST_FL_FAILED.  The link test (data[2]) runs in the
 * online path.  Restart-after-test and the online branch header are
 * elided in this fragment. */
4006 static void s2io_ethtool_test(struct net_device *dev,
4007 struct ethtool_test *ethtest,
4010 nic_t *sp = dev->priv;
4011 int orig_state = netif_running(sp->dev);
4013 if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
4014 /* Offline Tests. */
4016 s2io_close(sp->dev);
4018 if (s2io_register_test(sp, &data[0]))
4019 ethtest->flags |= ETH_TEST_FL_FAILED;
4023 if (s2io_rldram_test(sp, &data[3]))
4024 ethtest->flags |= ETH_TEST_FL_FAILED;
4028 if (s2io_eeprom_test(sp, &data[1]))
4029 ethtest->flags |= ETH_TEST_FL_FAILED;
4031 if (s2io_bist_test(sp, &data[4]))
4032 ethtest->flags |= ETH_TEST_FL_FAILED;
4042 "%s: is not up, cannot run test\n",
4051 if (s2io_link_test(sp, &data[2]))
4052 ethtest->flags |= ETH_TEST_FL_FAILED;
/* s2io_get_ethtool_stats - ethtool get_ethtool_stats hook.  Refreshes the
 * hardware statistics block via s2io_updt_stats() and copies each counter
 * (little-endian in the DMA'd stats block) into tmp_stats[] in the same
 * order as ethtool_stats_keys.  The two software ECC counters are
 * appended at the end.  Order here MUST stay in sync with the key table. */
4061 static void s2io_get_ethtool_stats(struct net_device *dev,
4062 struct ethtool_stats *estats,
4066 nic_t *sp = dev->priv;
4067 StatInfo_t *stat_info = sp->mac_control.stats_info;
4069 s2io_updt_stats(sp);
4070 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_frms);
4071 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_data_octets);
4072 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_drop_frms);
4073 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_mcst_frms);
4074 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_bcst_frms);
4075 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_pause_ctrl_frms);
4076 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_any_err_frms);
4077 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_vld_ip_octets);
4078 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_vld_ip);
4079 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_drop_ip);
4080 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_icmp);
4081 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_rst_tcp);
4082 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_tcp);
4083 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_udp);
4084 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_vld_frms);
4085 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_data_octets);
4086 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_fcs_err_frms);
4087 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_drop_frms);
4088 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_vld_mcst_frms);
4089 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_vld_bcst_frms);
4090 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_in_rng_len_err_frms);
4091 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_long_frms);
4092 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_pause_ctrl_frms);
4093 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_discarded_frms);
4094 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_usized_frms);
4095 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_osized_frms);
4096 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_frag_frms);
4097 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_jabber_frms);
4098 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_ip);
4099 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ip_octets);
4100 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_hdr_err_ip);
4101 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_drop_ip);
4102 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_icmp);
4103 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_tcp);
4104 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_udp);
4105 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_err_drp_udp);
4106 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_pause_cnt);
4107 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_accepted_ip);
4108 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_err_tcp);
4110 tmp_stats[i++] = stat_info->sw_stat.single_ecc_errs;
4111 tmp_stats[i++] = stat_info->sw_stat.double_ecc_errs;
/* s2io_ethtool_get_regs_len - size of the register dump (whole BAR0). */
4114 int s2io_ethtool_get_regs_len(struct net_device *dev)
4116 return (XENA_REG_SPACE);
/* s2io_ethtool_get_rx_csum - report whether RX checksum offload is on. */
4120 u32 s2io_ethtool_get_rx_csum(struct net_device * dev)
4122 nic_t *sp = dev->priv;
4124 return (sp->rx_csum);
/* s2io_ethtool_set_rx_csum - enable/disable RX checksum offload.
 * NOTE(review): body lines setting sp->rx_csum are elided here. */
4126 int s2io_ethtool_set_rx_csum(struct net_device *dev, u32 data)
4128 nic_t *sp = dev->priv;
/* s2io_get_eeprom_len - size of the EEPROM window exposed to ethtool. */
4137 int s2io_get_eeprom_len(struct net_device *dev)
4139 return (XENA_EEPROM_SPACE);
/* s2io_ethtool_self_test_count - number of self-test result slots. */
4142 int s2io_ethtool_self_test_count(struct net_device *dev)
4144 return (S2IO_TEST_LEN);
/* s2io_ethtool_get_strings - ethtool get_strings hook: copy out either
 * the self-test names or the statistics key table for @stringset.
 *
 * FIX: '&ethtool_stats_keys' had been corrupted to 'ðtool_stats_keys'
 * ("&eth" swallowed by an HTML-entity pass, leaving U+00F0); restore the
 * address-of expression so the stats-key table is copied.
 *
 * NOTE(review): fragment - the ETH_SS_TEST/ETH_SS_STATS case labels and
 * closing braces are missing from this view. */
4146 void s2io_ethtool_get_strings(struct net_device *dev,
4147 u32 stringset, u8 * data)
4149 switch (stringset) {
4151 memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
4154 memcpy(data, &ethtool_stats_keys,
4155 sizeof(ethtool_stats_keys));
/* s2io_ethtool_get_stats_count - number of stats exported to ethtool;
 * must match the entries filled by s2io_get_ethtool_stats(). */
4158 static int s2io_ethtool_get_stats_count(struct net_device *dev)
4160 return (S2IO_STAT_LEN);
/* s2io_ethtool_op_set_tx_csum - toggle NETIF_F_IP_CSUM on dev->features.
 * NOTE(review): the if/else selecting between the two branches is elided
 * in this fragment. */
4163 int s2io_ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
4166 dev->features |= NETIF_F_IP_CSUM;
4168 dev->features &= ~NETIF_F_IP_CSUM;
/* netdev_ethtool_ops - ethtool operations table wired into the netdev.
 * Mixes driver-specific hooks with generic ethtool_op_* helpers; the
 * closing '};' of the initializer is elided in this fragment. */
4174 static struct ethtool_ops netdev_ethtool_ops = {
4175 .get_settings = s2io_ethtool_gset,
4176 .set_settings = s2io_ethtool_sset,
4177 .get_drvinfo = s2io_ethtool_gdrvinfo,
4178 .get_regs_len = s2io_ethtool_get_regs_len,
4179 .get_regs = s2io_ethtool_gregs,
4180 .get_link = ethtool_op_get_link,
4181 .get_eeprom_len = s2io_get_eeprom_len,
4182 .get_eeprom = s2io_ethtool_geeprom,
4183 .set_eeprom = s2io_ethtool_seeprom,
4184 .get_pauseparam = s2io_ethtool_getpause_data,
4185 .set_pauseparam = s2io_ethtool_setpause_data,
4186 .get_rx_csum = s2io_ethtool_get_rx_csum,
4187 .set_rx_csum = s2io_ethtool_set_rx_csum,
4188 .get_tx_csum = ethtool_op_get_tx_csum,
4189 .set_tx_csum = s2io_ethtool_op_set_tx_csum,
4190 .get_sg = ethtool_op_get_sg,
4191 .set_sg = ethtool_op_set_sg,
4193 .get_tso = ethtool_op_get_tso,
4194 .set_tso = ethtool_op_set_tso,
4196 .self_test_count = s2io_ethtool_self_test_count,
4197 .self_test = s2io_ethtool_test,
4198 .get_strings = s2io_ethtool_get_strings,
4199 .phys_id = s2io_ethtool_idnic,
4200 .get_stats_count = s2io_ethtool_get_stats_count,
4201 .get_ethtool_stats = s2io_get_ethtool_stats
4205 * s2io_ioctl - Entry point for the Ioctl
4206 * @dev : Device pointer.
4207 * @ifr : An IOCTL specefic structure, that can contain a pointer to
4208 * a proprietary structure used to pass information to the driver.
4209 * @cmd : This is used to distinguish between the different commands that
4210 * can be passed to the IOCTL functions.
4212 * Currently there are no special functionality supported in IOCTL, hence
4213 * function always return EOPNOTSUPPORTED
/* s2io_ioctl - ioctl entry point; no private ioctls are supported, the
 * body (elided here) always returns -EOPNOTSUPP per the comment above. */
4216 int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
4222 * s2io_change_mtu - entry point to change MTU size for the device.
4223 * @dev : device pointer.
4224 * @new_mtu : the new MTU size for the device.
4225 * Description: A driver entry point to change MTU size for the device.
4226 * Before changing the MTU the device must be stopped.
4228 * 0 on success and an appropriate (-)ve integer as defined in errno.h
/* s2io_change_mtu - netdev MTU-change hook.  Refuses while the interface
 * is running, validates [MIN_MTU, S2IO_JUMBO_SIZE], then programs the
 * RMAC max-payload register.  The dev->mtu assignment and return are
 * elided in this fragment. */
4232 int s2io_change_mtu(struct net_device *dev, int new_mtu)
4234 nic_t *sp = dev->priv;
4235 XENA_dev_config_t __iomem *bar0 = sp->bar0;
4238 if (netif_running(dev)) {
4239 DBG_PRINT(ERR_DBG, "%s: Must be stopped to ", dev->name);
4240 DBG_PRINT(ERR_DBG, "change its MTU\n");
4244 if ((new_mtu < MIN_MTU) || (new_mtu > S2IO_JUMBO_SIZE)) {
4245 DBG_PRINT(ERR_DBG, "%s: MTU size is invalid.\n",
4250 /* Set the new MTU into the PYLD register of the NIC */
4252 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
4260 * s2io_tasklet - Bottom half of the ISR.
4261 * @dev_adr : address of the device structure in dma_addr_t format.
4263 * This is the tasklet or the bottom half of the ISR. This is
4264 * an extension of the ISR which is scheduled by the scheduler to be run
4265 * when the load on the CPU is low. All low priority tasks of the ISR can
4266 * be pushed into the tasklet. For now the tasklet is used only to
4267 * replenish the Rx buffers in the Rx buffer descriptors.
/* s2io_tasklet - bottom half used solely to replenish RX buffers.  Guarded
 * by the TASKLET_IN_USE test-and-set; tasklet_status bit 0 is cleared on
 * exit.  -ENOMEM aborts, -EFILL just logs a full ring. */
4272 static void s2io_tasklet(unsigned long dev_addr)
4274 struct net_device *dev = (struct net_device *) dev_addr;
4275 nic_t *sp = dev->priv;
4277 mac_info_t *mac_control;
4278 struct config_param *config;
4280 mac_control = &sp->mac_control;
4281 config = &sp->config;
4283 if (!TASKLET_IN_USE) {
4284 for (i = 0; i < config->rx_ring_num; i++) {
4285 ret = fill_rx_buffers(sp, i);
4286 if (ret == -ENOMEM) {
4287 DBG_PRINT(ERR_DBG, "%s: Out of ",
4289 DBG_PRINT(ERR_DBG, "memory in tasklet\n");
4291 } else if (ret == -EFILL) {
4293 "%s: Rx Ring %d is full\n",
4298 clear_bit(0, (&sp->tasklet_status));
4303 * s2io_set_link - Set the LInk status
4304 * @data: long pointer to device private structue
4305 * Description: Sets the link status for the adapter
/* s2io_set_link - deferred link-state handler.  Serializes against reset
 * via link_state bit 0 (test_and_set at entry, clear at exit).  When the
 * adapter is quiescent and the link is up it enables the adapter, works
 * around faulty link LEDs on some subsystem IDs via GPIO_CTRL_GPIO_0,
 * and calls s2io_link(LINK_UP); otherwise LINK_DOWN.  A non-quiescent
 * adapter stops the queue.  Several closing braces are elided here. */
4308 static void s2io_set_link(unsigned long data)
4310 nic_t *nic = (nic_t *) data;
4311 struct net_device *dev = nic->dev;
4312 XENA_dev_config_t __iomem *bar0 = nic->bar0;
4316 if (test_and_set_bit(0, &(nic->link_state))) {
4317 /* The card is being reset, no point doing anything */
4321 subid = nic->pdev->subsystem_device;
4323 * Allow a small delay for the NICs self initiated
4324 * cleanup to complete.
4328 val64 = readq(&bar0->adapter_status);
4329 if (verify_xena_quiescence(nic, val64, nic->device_enabled_once)) {
4330 if (LINK_IS_UP(val64)) {
4331 val64 = readq(&bar0->adapter_control);
4332 val64 |= ADAPTER_CNTL_EN;
4333 writeq(val64, &bar0->adapter_control);
4334 if (CARDS_WITH_FAULTY_LINK_INDICATORS(subid)) {
4335 val64 = readq(&bar0->gpio_control);
4336 val64 |= GPIO_CTRL_GPIO_0;
4337 writeq(val64, &bar0->gpio_control);
4338 val64 = readq(&bar0->gpio_control);
4340 val64 |= ADAPTER_LED_ON;
4341 writeq(val64, &bar0->adapter_control);
4343 val64 = readq(&bar0->adapter_status);
4344 if (!LINK_IS_UP(val64)) {
4345 DBG_PRINT(ERR_DBG, "%s:", dev->name);
4346 DBG_PRINT(ERR_DBG, " Link down");
4347 DBG_PRINT(ERR_DBG, "after ");
4348 DBG_PRINT(ERR_DBG, "enabling ");
4349 DBG_PRINT(ERR_DBG, "device \n");
4351 if (nic->device_enabled_once == FALSE) {
4352 nic->device_enabled_once = TRUE;
4354 s2io_link(nic, LINK_UP);
4356 if (CARDS_WITH_FAULTY_LINK_INDICATORS(subid)) {
4357 val64 = readq(&bar0->gpio_control);
4358 val64 &= ~GPIO_CTRL_GPIO_0;
4359 writeq(val64, &bar0->gpio_control);
4360 val64 = readq(&bar0->gpio_control);
4362 s2io_link(nic, LINK_DOWN);
4364 } else { /* NIC is not Quiescent. */
4365 DBG_PRINT(ERR_DBG, "%s: Error: ", dev->name);
4366 DBG_PRINT(ERR_DBG, "device is not Quiescent\n");
4367 netif_stop_queue(dev);
4369 clear_bit(0, &(nic->link_state));
/* s2io_card_down - quiesce and tear down the adapter: wait for any
 * in-flight s2io_set_link (link_state bit 0), mark CARD_DOWN, stop
 * traffic, kill the RX-refill tasklet, verify quiescence (reset path
 * elided), wait for ISRs to drain, then free TX and RX buffers under
 * their respective locks. */
4372 static void s2io_card_down(nic_t * sp)
4375 XENA_dev_config_t __iomem *bar0 = sp->bar0;
4376 unsigned long flags;
4377 register u64 val64 = 0;
4379 /* If s2io_set_link task is executing, wait till it completes. */
4380 while (test_and_set_bit(0, &(sp->link_state))) {
4383 atomic_set(&sp->card_state, CARD_DOWN);
4385 /* disable Tx and Rx traffic on the NIC */
4389 tasklet_kill(&sp->task);
4391 /* Check if the device is Quiescent and then Reset the NIC */
4393 val64 = readq(&bar0->adapter_status);
4394 if (verify_xena_quiescence(sp, val64, sp->device_enabled_once)) {
4402 "s2io_close:Device not Quiescent ");
4403 DBG_PRINT(ERR_DBG, "adaper status reads 0x%llx\n",
4404 (unsigned long long) val64);
4410 /* Waiting till all Interrupt handlers are complete */
4414 if (!atomic_read(&sp->isr_cnt))
4419 spin_lock_irqsave(&sp->tx_lock, flags);
4420 /* Free all Tx buffers */
4421 free_tx_buffers(sp);
4422 spin_unlock_irqrestore(&sp->tx_lock, flags);
4424 /* Free all Rx buffers */
4425 spin_lock_irqsave(&sp->rx_lock, flags);
4426 free_rx_buffers(sp);
4427 spin_unlock_irqrestore(&sp->rx_lock, flags);
4429 clear_bit(0, &(sp->link_state));
/* s2io_card_up - bring the adapter up: init_nic(), fill every RX ring
 * (unwinding with free_rx_buffers on -ENOMEM), set the multicast/receive
 * mode, arm the RX-refill tasklet, then start_nic(); on start failure the
 * tasklet is killed and IRQ/buffers are released.  Ends by marking the
 * card CARD_UP.  Error-return lines are elided in this fragment. */
4432 static int s2io_card_up(nic_t * sp)
4435 mac_info_t *mac_control;
4436 struct config_param *config;
4437 struct net_device *dev = (struct net_device *) sp->dev;
4439 /* Initialize the H/W I/O registers */
4440 if (init_nic(sp) != 0) {
4441 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
4447 * Initializing the Rx buffers. For now we are considering only 1
4448 * Rx ring and initializing buffers into 30 Rx blocks
4450 mac_control = &sp->mac_control;
4451 config = &sp->config;
4453 for (i = 0; i < config->rx_ring_num; i++) {
4454 if ((ret = fill_rx_buffers(sp, i))) {
4455 DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
4458 free_rx_buffers(sp);
4461 DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
4462 atomic_read(&sp->rx_bufs_left[i]));
4465 /* Setting its receive mode */
4466 s2io_set_multicast(dev);
4468 /* Enable tasklet for the device */
4469 tasklet_init(&sp->task, s2io_tasklet, (unsigned long) dev);
4471 /* Enable Rx Traffic and interrupts on the NIC */
4472 if (start_nic(sp)) {
4473 DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
4474 tasklet_kill(&sp->task);
4476 free_irq(dev->irq, dev);
4477 free_rx_buffers(sp);
4481 atomic_set(&sp->card_state, CARD_UP);
4486 * s2io_restart_nic - Resets the NIC.
4487 * @data : long pointer to the device private structure
4489 * This function is scheduled to be run by the s2io_tx_watchdog
4490 * function after 0.5 secs to reset the NIC. The idea is to reduce
4491 * the run time of the watch dog routine which is run holding a
/* s2io_restart_nic - work item scheduled by the TX watchdog: brings the
 * card back up (the preceding card_down call is elided here) and wakes
 * the TX queue. */
4495 static void s2io_restart_nic(unsigned long data)
4497 struct net_device *dev = (struct net_device *) data;
4498 nic_t *sp = dev->priv;
4501 if (s2io_card_up(sp)) {
4502 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
4505 netif_wake_queue(dev);
4506 DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n",
4512 * s2io_tx_watchdog - Watchdog for transmit side.
4513 * @dev : Pointer to net device structure
4515 * This function is triggered if the Tx Queue is stopped
4516 * for a pre-defined amount of time when the Interface is still up.
4517 * If the Interface is jammed in such a situation, the hardware is
4518 * reset (by s2io_close) and restarted again (by s2io_open) to
4519 * overcome any problem that might have been caused in the hardware.
/* s2io_tx_watchdog - netdev tx_timeout hook: if the carrier is still up,
 * defer the heavy reset to the rst_timer_task workqueue item
 * (s2io_restart_nic) rather than resetting inline. */
4524 static void s2io_tx_watchdog(struct net_device *dev)
4526 nic_t *sp = dev->priv;
4528 if (netif_carrier_ok(dev)) {
4529 schedule_work(&sp->rst_timer_task);
4534 * rx_osm_handler - To perform some OS related operations on SKB.
4535 * @sp: private member of the device structure,pointer to s2io_nic structure.
4536 * @skb : the socket buffer pointer.
4537 * @len : length of the packet
4538 * @cksum : FCS checksum of the frame.
4539 * @ring_no : the ring from which this RxD was extracted.
4541 * This function is called by the Tx interrupt serivce routine to perform
4542 * some OS related operations on the SKB before passing it to the upper
4543 * layers. It mainly checks if the checksum is OK, if so adds it to the
4544 * SKBs cksum variable, increments the Rx packet count and passes the SKB
4545 * to the upper layer. If the checksum is wrong, it increments the Rx
4546 * packet error count, frees the SKB and returns error.
4548 * SUCCESS on success and -1 on failure.
/* rx_osm_handler - per-packet RX completion: validate the RxD T_CODE,
 * update stats, assemble the skb (2-buffer mode copies buffer 0 in front
 * of buffer 2), set ip_summed from the NIC's L3/L4 checksum flags, and
 * hand the skb up via netif_receive_skb (NAPI) or netif_rx (elided).
 *
 * FIX: stray double semicolon after the 'len' initializer ('>> 48);;')
 * removed - the empty statement was harmless but is a typo.
 *
 * NOTE(review): fragment - #else/#endif lines, the netif_rx branch and
 * the return are missing from this view; all other bytes unchanged. */
4550 static int rx_osm_handler(ring_info_t *ring_data, RxD_t * rxdp)
4552 nic_t *sp = ring_data->nic;
4553 struct net_device *dev = (struct net_device *) sp->dev;
4554 struct sk_buff *skb = (struct sk_buff *)
4555 ((unsigned long) rxdp->Host_Control);
4556 int ring_no = ring_data->ring_no;
4557 u16 l3_csum, l4_csum;
4558 #ifdef CONFIG_2BUFF_MODE
4559 int buf0_len = RXD_GET_BUFFER0_SIZE(rxdp->Control_2);
4560 int buf2_len = RXD_GET_BUFFER2_SIZE(rxdp->Control_2);
4561 int get_block = ring_data->rx_curr_get_info.block_index;
4562 int get_off = ring_data->rx_curr_get_info.offset;
4563 buffAdd_t *ba = &ring_data->ba[get_block][get_off];
4564 unsigned char *buff;
4566 u16 len = (u16) ((RXD_GET_BUFFER0_SIZE(rxdp->Control_2)) >> 48);
4569 if (rxdp->Control_1 & RXD_T_CODE) {
4570 unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
4571 DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%llx\n",
4575 /* Updating statistics */
4576 rxdp->Host_Control = 0;
4578 sp->stats.rx_packets++;
4579 #ifndef CONFIG_2BUFF_MODE
4580 sp->stats.rx_bytes += len;
4582 sp->stats.rx_bytes += buf0_len + buf2_len;
4585 #ifndef CONFIG_2BUFF_MODE
4588 buff = skb_push(skb, buf0_len);
4589 memcpy(buff, ba->ba_0, buf0_len);
4590 skb_put(skb, buf2_len);
4593 if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) &&
4595 l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
4596 l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
4597 if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
4599 * NIC verifies if the Checksum of the received
4600 * frame is Ok or not and accordingly returns
4601 * a flag in the RxD.
4603 skb->ip_summed = CHECKSUM_UNNECESSARY;
4606 * Packet with erroneous checksum, let the
4607 * upper layers deal with it.
4609 skb->ip_summed = CHECKSUM_NONE;
4612 skb->ip_summed = CHECKSUM_NONE;
4615 skb->protocol = eth_type_trans(skb, dev);
4616 #ifdef CONFIG_S2IO_NAPI
4617 netif_receive_skb(skb);
4621 dev->last_rx = jiffies;
4622 atomic_dec(&sp->rx_bufs_left[ring_no]);
4627 * s2io_link - stops/starts the Tx queue.
4628 * @sp : private member of the device structure, which is a pointer to the
4629 * s2io_nic structure.
4630 * @link : inidicates whether link is UP/DOWN.
4632 * This function stops/starts the Tx queue depending on whether the link
4633 * status of the NIC is is down or up. This is called by the Alarm
4634 * interrupt handler whenever a link change interrupt comes up.
/* s2io_link - propagate a link change to the net stack: on a state
 * transition, toggle the carrier and log; last_link_state caches the
 * state so repeated notifications are no-ops. */
4639 void s2io_link(nic_t * sp, int link)
4641 struct net_device *dev = (struct net_device *) sp->dev;
4643 if (link != sp->last_link_state) {
4644 if (link == LINK_DOWN) {
4645 DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
4646 netif_carrier_off(dev);
4648 DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name);
4649 netif_carrier_on(dev);
4652 sp->last_link_state = link;
4656 * get_xena_rev_id - to identify revision ID of xena.
4657 * @pdev : PCI Dev structure
4659 * Function to identify the Revision ID of xena.
4661 * returns the revision ID of the device.
/* get_xena_rev_id - read the PCI revision ID of the Xena ASIC.
 * NOTE(review): 'id' is read through a (u8 *) cast; its declaration and
 * the return are elided in this fragment. */
4664 int get_xena_rev_id(struct pci_dev *pdev)
4668 ret = pci_read_config_byte(pdev, PCI_REVISION_ID, (u8 *) & id);
4673 * s2io_init_pci -Initialization of PCI and PCI-X configuration registers .
4674 * @sp : private member of the device structure, which is a pointer to the
4675 * s2io_nic structure.
4677 * This function initializes a few of the PCI and PCI-X configuration registers
4678 * with recommended values.
/* s2io_init_pci - tune PCI/PCI-X config space: enable data-parity-error
 * recovery in the PCI-X command register, set PERR response in the PCI
 * command register, and force relaxed ordering off.  Each write is
 * followed by a read-back; the OR/AND-mask arguments on the elided lines
 * are missing from this fragment. */
4683 static void s2io_init_pci(nic_t * sp)
4685 u16 pci_cmd = 0, pcix_cmd = 0;
4687 /* Enable Data Parity Error Recovery in PCI-X command register. */
4688 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
4690 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
4692 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
4695 /* Set the PErr Response bit in PCI command register. */
4696 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
4697 pci_write_config_word(sp->pdev, PCI_COMMAND,
4698 (pci_cmd | PCI_COMMAND_PARITY));
4699 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
4701 /* Forcibly disabling relaxed ordering capability of the card. */
4703 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
4705 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
/* Module metadata and loadable parameters (perm 0 = not visible in
 * sysfs).  NOTE(review): use_continuous_tx_intrs passes perm '1', which
 * is not a valid sysfs mode bit pattern - presumably meant to be 0;
 * confirm against upstream before changing. */
4709 MODULE_AUTHOR("Raghavendra Koushik <raghavendra.koushik@neterion.com>");
4710 MODULE_LICENSE("GPL");
4711 module_param(tx_fifo_num, int, 0);
4712 module_param(rx_ring_num, int, 0);
4713 module_param_array(tx_fifo_len, uint, NULL, 0);
4714 module_param_array(rx_ring_sz, uint, NULL, 0);
4715 module_param_array(rts_frm_len, uint, NULL, 0);
4716 module_param(use_continuous_tx_intrs, int, 1);
4717 module_param(rmac_pause_time, int, 0);
4718 module_param(mc_pause_threshold_q0q3, int, 0);
4719 module_param(mc_pause_threshold_q4q7, int, 0);
4720 module_param(shared_splits, int, 0);
4721 module_param(tmac_util_period, int, 0);
4722 module_param(rmac_util_period, int, 0);
4723 #ifndef CONFIG_S2IO_NAPI
4724 module_param(indicate_max_pkts, int, 0);
4728 * s2io_init_nic - Initialization of the adapter .
4729 * @pdev : structure containing the PCI related information of the device.
4730 * @pre: List of PCI devices supported by the driver listed in s2io_tbl.
4732 * The function initializes an adapter identified by the pci_dec structure.
4733 * All OS related initialization including memory and device structure and
4734 * initialization of the device private variable is done. Also the swapper
4735 * control register is initialized to enable read and write into the I/O
4736 * registers of the device.
4738 * returns 0 on success and negative on failure.
4741 static int __devinit
4742 s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
/*
 * NOTE(review): this listing appears to have lines elided (declarations of
 * ret/i/j/sp/subid, several braces, `if` guards and the error labels
 * mem_alloc_failed/bar0_remap_failed/bar1_remap_failed/set_swap_failed/
 * register_failed are not visible). Comments below describe only what is
 * shown — verify against the full source before acting on them.
 */
4745 	struct net_device *dev;
4747 	int dma_flag = FALSE;
4748 	u32 mac_up, mac_down;
4749 	u64 val64 = 0, tmp64 = 0;
4750 	XENA_dev_config_t __iomem *bar0 = NULL;
4752 	mac_info_t *mac_control;
4753 	struct config_param *config;
4755 #ifdef CONFIG_S2IO_NAPI
4756 	DBG_PRINT(ERR_DBG, "NAPI support has been enabled\n");
	/* Wake the device's PCI config space before touching any BARs. */
4759 	if ((ret = pci_enable_device(pdev))) {
4761 			  "s2io_init_nic: pci_enable_device failed\n");
	/*
	 * DMA mask negotiation: try 64-bit first (streaming and consistent),
	 * fall back to 32-bit; with neither available the probe must fail.
	 */
4765 	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
4766 		DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 64bit DMA\n");
4768 		if (pci_set_consistent_dma_mask
4769 		    (pdev, DMA_64BIT_MASK)) {
4771 				  "Unable to obtain 64bit DMA for \
4772 					consistent allocations\n");
4773 			pci_disable_device(pdev);
4776 	} else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
4777 		DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 32bit DMA\n");
4779 		pci_disable_device(pdev);
	/*
	 * NOTE(review): the trailing comma after the DBG_PRINT on line 4784 is
	 * the comma operator, not a truncated statement — both calls execute,
	 * but a ';' would be clearer.
	 */
4783 	if (pci_request_regions(pdev, s2io_driver_name)) {
4784 		DBG_PRINT(ERR_DBG, "Request Regions failed\n"),
4785 		pci_disable_device(pdev);
	/* Allocate the net_device with nic_t as its private area. */
4789 	dev = alloc_etherdev(sizeof(nic_t));
4791 		DBG_PRINT(ERR_DBG, "Device allocation failed\n");
4792 		pci_disable_device(pdev);
4793 		pci_release_regions(pdev);
4797 	pci_set_master(pdev);
4798 	pci_set_drvdata(pdev, dev);
4799 	SET_MODULE_OWNER(dev);
4800 	SET_NETDEV_DEV(dev, &pdev->dev);
4802 	/* Private member variable initialized to s2io NIC structure */
	/* NOTE(review): `sp` is presumably dev->priv — assignment not visible here. */
4804 	memset(sp, 0, sizeof(nic_t));
4807 	sp->high_dma_flag = dma_flag;
4808 	sp->device_enabled_once = FALSE;
4810 	/* Initialize some PCI/PCI-X fields of the NIC. */
4814 	 * Setting the device configuration parameters.
4815 	 * Most of these parameters can be specified by the user during
4816 	 * module insertion as they are module loadable parameters. If
4817 	 * these parameters are not specified during load time, they
4818 	 * are initialized with default values.
4820 	mac_control = &sp->mac_control;
4821 	config = &sp->config;
4823 	/* Tx side parameters. */
4824 	tx_fifo_len[0] = DEFAULT_FIFO_LEN;	/* Default value. */
4825 	config->tx_fifo_num = tx_fifo_num;
4826 	for (i = 0; i < MAX_TX_FIFOS; i++) {
4827 		config->tx_cfg[i].fifo_len = tx_fifo_len[i];
4828 		config->tx_cfg[i].fifo_priority = i;
4831 	/* mapping the QoS priority to the configured fifos */
4832 	for (i = 0; i < MAX_TX_FIFOS; i++)
4833 		config->fifo_mapping[i] = fifo_map[config->tx_fifo_num][i];
	/*
	 * Default to utilization-based Tx interrupts; any FIFO shorter than
	 * 65 descriptors forces per-list interrupts instead.
	 */
4835 	config->tx_intr_type = TXD_INT_TYPE_UTILZ;
4836 	for (i = 0; i < config->tx_fifo_num; i++) {
4837 		config->tx_cfg[i].f_no_snoop =
4838 		    (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
4839 		if (config->tx_cfg[i].fifo_len < 65) {
4840 			config->tx_intr_type = TXD_INT_TYPE_PER_LIST;
4844 	config->max_txds = MAX_SKB_FRAGS;
4846 	/* Rx side parameters. */
4847 	rx_ring_sz[0] = SMALL_BLK_CNT;	/* Default value. */
4848 	config->rx_ring_num = rx_ring_num;
4849 	for (i = 0; i < MAX_RX_RINGS; i++) {
4850 		config->rx_cfg[i].num_rxd = rx_ring_sz[i] *
4851 		    (MAX_RXDS_PER_BLOCK + 1);
4852 		config->rx_cfg[i].ring_priority = i;
4855 	for (i = 0; i < rx_ring_num; i++) {
4856 		config->rx_cfg[i].ring_org = RING_ORG_BUFF1;
4857 		config->rx_cfg[i].f_no_snoop =
4858 		    (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
4861 	/* Setting Mac Control parameters */
4862 	mac_control->rmac_pause_time = rmac_pause_time;
4863 	mac_control->mc_pause_threshold_q0q3 = mc_pause_threshold_q0q3;
4864 	mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7;
4867 	/* Initialize Ring buffer parameters. */
4868 	for (i = 0; i < config->rx_ring_num; i++)
4869 		atomic_set(&sp->rx_bufs_left[i], 0);
4871 	/* Initialize the number of ISRs currently running */
4872 	atomic_set(&sp->isr_cnt, 0);
4874 	/* initialize the shared memory used by the NIC and the host */
4875 	if (init_shared_mem(sp)) {
4876 		DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n",
4879 		goto mem_alloc_failed;
	/* Map BAR0 (device register block) and BAR1 (Tx FIFO doorbells). */
4882 	sp->bar0 = ioremap(pci_resource_start(pdev, 0),
4883 			   pci_resource_len(pdev, 0));
4885 		DBG_PRINT(ERR_DBG, "%s: S2IO: cannot remap io mem1\n",
4888 		goto bar0_remap_failed;
4891 	sp->bar1 = ioremap(pci_resource_start(pdev, 2),
4892 			   pci_resource_len(pdev, 2));
4894 		DBG_PRINT(ERR_DBG, "%s: S2IO: cannot remap io mem2\n",
4897 		goto bar1_remap_failed;
4900 	dev->irq = pdev->irq;
4901 	dev->base_addr = (unsigned long) sp->bar0;
4903 	/* Initializing the BAR1 address as the start of the FIFO pointer. */
	/* Each FIFO's doorbell window is a fixed 128KB (0x20000) stride in BAR1. */
4904 	for (j = 0; j < MAX_TX_FIFOS; j++) {
4905 		mac_control->tx_FIFO_start[j] = (TxFIFO_element_t __iomem *)
4906 		    (sp->bar1 + (j * 0x00020000));
4909 	/* Driver entry points */
4910 	dev->open = &s2io_open;
4911 	dev->stop = &s2io_close;
4912 	dev->hard_start_xmit = &s2io_xmit;
4913 	dev->get_stats = &s2io_get_stats;
4914 	dev->set_multicast_list = &s2io_set_multicast;
4915 	dev->do_ioctl = &s2io_ioctl;
4916 	dev->change_mtu = &s2io_change_mtu;
4917 	SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
4920 	 * will use eth_mac_addr() for dev->set_mac_address
4921 	 * mac address will be set every time dev->open() is called
4923 #if defined(CONFIG_S2IO_NAPI)
4924 	dev->poll = s2io_poll;
4928 	dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
4929 	if (sp->high_dma_flag == TRUE)
4930 		dev->features |= NETIF_F_HIGHDMA;
4932 	dev->features |= NETIF_F_TSO;
4935 	dev->tx_timeout = &s2io_tx_watchdog;
4936 	dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
	/* Deferred work: NIC restart after Tx watchdog, and link-state changes. */
4937 	INIT_WORK(&sp->rst_timer_task,
4938 		  (void (*)(void *)) s2io_restart_nic, dev);
4939 	INIT_WORK(&sp->set_link_task,
4940 		  (void (*)(void *)) s2io_set_link, sp);
4942 	pci_save_state(sp->pdev);
4944 	/* Setting swapper control on the NIC, for proper reset operation */
4945 	if (s2io_set_swapper(sp)) {
4946 		DBG_PRINT(ERR_DBG, "%s:swapper settings are wrong\n",
4949 		goto set_swap_failed;
4953 	 * Fix for all "FFs" MAC address problems observed on
4956 	fix_mac_address(sp);
4960 	 * MAC address initialization.
4961 	 * For now only one mac address will be read and used.
	/* Issue a read of MAC address slot 0 from the RMAC address CAM. */
4964 	val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4965 	    RMAC_ADDR_CMD_MEM_OFFSET(0 + MAC_MAC_ADDR_START_OFFSET);
4966 	writeq(val64, &bar0->rmac_addr_cmd_mem);
4967 	wait_for_cmd_complete(sp);
4969 	tmp64 = readq(&bar0->rmac_addr_data0_mem);
4970 	mac_down = (u32) tmp64;
4971 	mac_up = (u32) (tmp64 >> 32);
	/* NOTE(review): sizeof(ETH_ALEN) is the size of an int constant, not 6
	 * bytes — presumably ETH_ALEN was intended; confirm against upstream. */
4973 	memset(sp->def_mac_addr[0].mac_addr, 0, sizeof(ETH_ALEN));
	/* Unpack the 48-bit MAC from the two 32-bit register halves. */
4975 	sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up);
4976 	sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8);
4977 	sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16);
4978 	sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_up >> 24);
4979 	sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
4980 	sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);
4983 		  "DEFAULT MAC ADDR:0x%02x-%02x-%02x-%02x-%02x-%02x\n",
4984 		  sp->def_mac_addr[0].mac_addr[0],
4985 		  sp->def_mac_addr[0].mac_addr[1],
4986 		  sp->def_mac_addr[0].mac_addr[2],
4987 		  sp->def_mac_addr[0].mac_addr[3],
4988 		  sp->def_mac_addr[0].mac_addr[4],
4989 		  sp->def_mac_addr[0].mac_addr[5]);
4991 	/* Set the factory defined MAC address initially */
4992 	dev->addr_len = ETH_ALEN;
4993 	memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);
4996 	 * Initialize the tasklet status and link state flags
4997 	 * and the card state parameter
4999 	atomic_set(&(sp->card_state), 0);
5000 	sp->tasklet_status = 0;
5003 	/* Initialize spinlocks */
5004 	spin_lock_init(&sp->tx_lock);
5005 #ifndef CONFIG_S2IO_NAPI
5006 	spin_lock_init(&sp->put_lock);
5008 	spin_lock_init(&sp->rx_lock);
5011 	 * SXE-002: Configure link and activity LED to init state
5014 	subid = sp->pdev->subsystem_device;
5015 	if ((subid & 0xFF) >= 0x07) {
5016 		val64 = readq(&bar0->gpio_control);
5017 		val64 |= 0x0000800000000000ULL;
5018 		writeq(val64, &bar0->gpio_control);
5019 		val64 = 0x0411040400000000ULL;
5020 		writeq(val64, (void __iomem *) bar0 + 0x2700);
5021 		val64 = readq(&bar0->gpio_control);
5024 	sp->rx_csum = 1;	/* Rx chksum verify enabled by default */
	/* Register with the network stack only after all setup succeeded. */
5026 	if (register_netdev(dev)) {
5027 		DBG_PRINT(ERR_DBG, "Device registration failed\n");
5029 		goto register_failed;
5032 	/* Initialize device name */
5033 	strcpy(sp->name, dev->name);
5034 	strcat(sp->name, ": Neterion Xframe I 10GbE adapter");
5037 	 * Make Link state as off at this point, when the Link change
5038 	 * interrupt comes the state will be automatically changed to
5041 	netif_carrier_off(dev);
	/* Shared tail of the error-unwind path (labels elided in this listing). */
5052 	free_shared_mem(sp);
5053 	pci_disable_device(pdev);
5054 	pci_release_regions(pdev);
5055 	pci_set_drvdata(pdev, NULL);
5062 * s2io_rem_nic - Free the PCI device
5063 * @pdev: structure containing the PCI related information of the device.
5064 * Description: This function is called by the Pci subsystem to release a
5065 * PCI device and free up all resource held up by the device. This could
5066 * be in response to a Hot plug event or when the driver is to be removed
5070 static void __devexit s2io_rem_nic(struct pci_dev *pdev)
5072 struct net_device *dev =
5073 (struct net_device *) pci_get_drvdata(pdev);
5077 DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n");
5082 unregister_netdev(dev);
5084 free_shared_mem(sp);
5087 pci_disable_device(pdev);
5088 pci_release_regions(pdev);
5089 pci_set_drvdata(pdev, NULL);
5094 * s2io_starter - Entry point for the driver
5095 * Description: This function is the entry point for the driver. It verifies
5096 * the module loadable parameters and initializes PCI configuration space.
5099 int __init s2io_starter(void)
5101 return pci_module_init(&s2io_driver);
5105 * s2io_closer - Cleanup routine for the driver
5106 * Description: This function is the cleanup routine for the driver. It unregist * ers the driver.
5109 void s2io_closer(void)
5111 pci_unregister_driver(&s2io_driver);
5112 DBG_PRINT(INIT_DBG, "cleanup done\n");
/* Module entry and exit hooks. */
module_init(s2io_starter);
module_exit(s2io_closer);