1 /************************************************************************
2 * s2io.c: A Linux PCI-X Ethernet driver for S2IO 10GbE Server NIC
3 * Copyright(c) 2002-2005 Neterion Inc.
5 * This software may be used and distributed according to the terms of
6 * the GNU General Public License (GPL), incorporated herein by reference.
7 * Drivers based on or derived from this code fall under the GPL and must
8 * retain the authorship, copyright and license notice. This file is not
9 * a complete program and may only be used when the entire operating
10 * system is licensed under the GPL.
11 * See the file COPYING in this distribution for more information.
14 * Jeff Garzik : For pointing out the improper error condition
15 * check in the s2io_xmit routine and also some
16 * issues in the Tx watch dog function. Also for
17 * patiently answering all those innumerable
18 * questions regarding the 2.6 porting issues.
19 * Stephen Hemminger : Providing proper 2.6 porting mechanism for some
20 * macros available only in 2.6 Kernel.
21 * Francois Romieu : For pointing out all code part that were
22 * deprecated and also styling related comments.
23 * Grant Grundler : For helping me get rid of some Architecture
25 * Christoph Hellwig : Some more 2.6 specific issues in the driver.
27 * The module loadable parameters that are supported by the driver and a brief
28 * explanation of all the variables.
29 * rx_ring_num : This can be used to program the number of receive rings used
31 * rx_ring_len: This defines the number of descriptors each ring can have. This
32 * is also an array of size 8.
33 * tx_fifo_num: This defines the number of Tx FIFOs that are used in the driver.
34 * tx_fifo_len: This too is an array of 8. Each element defines the number of
35 * Tx descriptors that can be associated with each corresponding FIFO.
36 ************************************************************************/
38 #include <linux/config.h>
39 #include <linux/module.h>
40 #include <linux/types.h>
41 #include <linux/errno.h>
42 #include <linux/ioport.h>
43 #include <linux/pci.h>
44 #include <linux/dma-mapping.h>
45 #include <linux/kernel.h>
46 #include <linux/netdevice.h>
47 #include <linux/etherdevice.h>
48 #include <linux/skbuff.h>
49 #include <linux/init.h>
50 #include <linux/delay.h>
51 #include <linux/stddef.h>
52 #include <linux/ioctl.h>
53 #include <linux/timex.h>
54 #include <linux/sched.h>
55 #include <linux/ethtool.h>
56 #include <linux/version.h>
57 #include <linux/workqueue.h>
58 #include <linux/if_vlan.h>
60 #include <asm/system.h>
61 #include <asm/uaccess.h>
66 #include "s2io-regs.h"
68 /* S2io Driver name & version. */
69 static char s2io_driver_name[] = "Neterion";
/* Version string; presumably reported to userspace via ethtool
 * get_drvinfo and the probe-time banner — confirm against callers. */
70 static char s2io_driver_version[] = "Version 2.0.2.0";
72 static inline int RXD_IS_UP2DT(RxD_t *rxdp)
/*
 * Returns non-zero when the Rx descriptor has been handed back to the
 * host: the RXD_OWN_XENA ownership bit in Control_1 is clear AND the
 * marker field in Control_2 no longer matches THE_RXD_MARK.
 */
76 ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
77 (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));
/*
 * Cards with the following subsystem_ids have a link state indication
 * problem: 600B, 600C, 600D, 640B, 640C and 640D.
 * The macro below identifies these cards given the subsystem_id.
 *
 * Evaluates to 1 for an XFRAME I device whose subsystem id falls in one
 * of the two faulty ranges, 0 otherwise.  Both arguments and the whole
 * expansion are parenthesized so the macro is safe inside larger
 * expressions and with non-trivial argument expressions.
 */
#define CARDS_WITH_FAULTY_LINK_INDICATORS(dev_type, subid) \
	(((dev_type) == XFRAME_I_DEVICE) ? \
	 (((((subid) >= 0x600B) && ((subid) <= 0x600D)) || \
	   (((subid) >= 0x640B) && ((subid) <= 0x640D))) ? 1 : 0) : 0)
/*
 * True when neither a remote nor a local RMAC fault bit is set in the
 * adapter-status value.  The argument is parenthesized so expressions
 * such as `a | b` cannot bind incorrectly against the `&` mask.
 */
#define LINK_IS_UP(val64) (!((val64) & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
					ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
/*
 * Atomically claims the tasklet; non-zero return means it was already
 * in use.  NOTE(review): expands using a local `sp` (nic_t *) that must
 * exist in the caller's scope.
 */
#define TASKLET_IN_USE test_and_set_bit(0, (&sp->tasklet_status))
97 static inline int rx_buffer_level(nic_t * sp, int rxb_size, int ring)
/*
 * Classifies how depleted ring `ring` currently is, given `rxb_size`,
 * the number of RxDs still holding buffers.  The visible logic compares
 * the ring's packet budget (pkt_cnt) against rxb_size; the return values
 * for the depleted / low / normal cases lie outside this excerpt.
 */
100 mac_info_t *mac_control;
102 mac_control = &sp->mac_control;
/* More than 16 descriptors short of the budget: buffers are running out. */
103 if ((mac_control->rings[ring].pkt_cnt - rxb_size) > 16) {
105 if (rxb_size <= MAX_RXDS_PER_BLOCK) {
113 /* Ethtool related variables and Macros. */
/* Names of the ethtool self-test entries; the "(offline)"/"(online)"
 * suffix indicates when each test may run. */
114 static char s2io_gstrings[][ETH_GSTRING_LEN] = {
115 "Register test\t(offline)",
116 "Eeprom test\t(offline)",
117 "Link test\t(online)",
118 "RLDRAM test\t(offline)",
119 "BIST Test\t(offline)"
/* Key names reported for `ethtool -S`; the order must match the order in
 * which the statistic values are filled in by the stats handler. */
122 static char ethtool_stats_keys[][ETH_GSTRING_LEN] = {
124 {"tmac_data_octets"},
128 {"tmac_pause_ctrl_frms"},
129 {"tmac_any_err_frms"},
130 {"tmac_vld_ip_octets"},
138 {"rmac_data_octets"},
139 {"rmac_fcs_err_frms"},
141 {"rmac_vld_mcst_frms"},
142 {"rmac_vld_bcst_frms"},
143 {"rmac_in_rng_len_err_frms"},
145 {"rmac_pause_ctrl_frms"},
146 {"rmac_discarded_frms"},
147 {"rmac_usized_frms"},
148 {"rmac_osized_frms"},
150 {"rmac_jabber_frms"},
158 {"rmac_err_drp_udp"},
160 {"rmac_accepted_ip"},
/* Entries below this pseudo-key are maintained by the driver itself,
 * not read from hardware statistics. */
162 {"\n DRIVER STATISTICS"},
163 {"single_bit_ecc_errs"},
164 {"double_bit_ecc_errs"},
/* Number of entries in the ethtool statistics key table. */
#define S2IO_STAT_LEN (sizeof(ethtool_stats_keys) / ETH_GSTRING_LEN)
/* Total byte length of all statistic key strings. */
#define S2IO_STAT_STRINGS_LEN (S2IO_STAT_LEN * ETH_GSTRING_LEN)

/* Number of self-test entries and total byte length of their names.
 * All four expansions are fully parenthesized; the originals were not,
 * so e.g. `x % S2IO_TEST_LEN` expanded to `x % sizeof(...) / LEN`. */
#define S2IO_TEST_LEN (sizeof(s2io_gstrings) / ETH_GSTRING_LEN)
#define S2IO_STRINGS_LEN (S2IO_TEST_LEN * ETH_GSTRING_LEN)
/*
 * Sets up and arms a timer in one statement: initializes `timer`,
 * points it at handler `handle` with argument `arg`, and schedules it
 * to fire `exp` jiffies from now.  Wrapped in do { } while (0) so it
 * behaves as a single statement (safe in an unbraced if/else) and takes
 * the usual trailing semicolon at the call site; the original expanded
 * to four bare statements and would break under `if (x) S2IO_TIMER_CONF(...);`.
 */
#define S2IO_TIMER_CONF(timer, handle, arg, exp)		\
	do {							\
		init_timer(&timer);				\
		timer.function = handle;			\
		timer.data = (unsigned long)(arg);		\
		mod_timer(&timer, (jiffies + (exp)));		\
	} while (0)
180 static void s2io_vlan_rx_register(struct net_device *dev,
181 struct vlan_group *grp)
/* Records the VLAN group for this interface while holding the Tx lock,
 * so the transmit path never sees a partially-installed group. */
183 nic_t *nic = dev->priv;
186 spin_lock_irqsave(&nic->tx_lock, flags);
188 spin_unlock_irqrestore(&nic->tx_lock, flags);
191 /* Unregister the vlan */
192 static void s2io_vlan_rx_kill_vid(struct net_device *dev, unsigned long vid)
194 nic_t *nic = dev->priv;
/* Clear the per-VID device pointer under the Tx lock so the transmit
 * path cannot race against VLAN teardown. */
197 spin_lock_irqsave(&nic->tx_lock, flags);
199 nic->vlgrp->vlan_devices[vid] = NULL;
200 spin_unlock_irqrestore(&nic->tx_lock, flags);
204 * Constants to be programmed into the Xena's registers, to configure
/* Sentinel value that tells the register-programming loop in init_nic()
 * to switch from one target register to the other. */
208 #define SWITCH_SIGN 0xA5A5A5A5A5A5A5A5ULL
/* DTX configuration values for the XFRAME II (Herc) device, written in
 * sequence to bar0->dtx_control by init_nic().  The table is
 * END_SIGN-terminated (terminator outside this excerpt). */
211 static u64 herc_act_dtx_cfg[] = {
213 0x80000515BA750000ULL, 0x80000515BA7500E0ULL,
215 0x80000515BA750004ULL, 0x80000515BA7500E4ULL,
217 0x80010515003F0000ULL, 0x80010515003F00E0ULL,
219 0x80010515003F0004ULL, 0x80010515003F00E4ULL,
221 0x80020515F2100000ULL, 0x80020515F21000E0ULL,
223 0x80020515F2100004ULL, 0x80020515F21000E4ULL,
/* MDIO configuration sequence for XFRAME I (Xena), written to
 * bar0->mdio_control by init_nic() until END_SIGN is reached. */
228 static u64 xena_mdio_cfg[] = {
230 0xC001010000000000ULL, 0xC0010100000000E0ULL,
231 0xC0010100008000E4ULL,
232 /* Remove Reset from PMA PLL */
233 0xC001010000000000ULL, 0xC0010100000000E0ULL,
234 0xC0010100000000E4ULL,
/* DTX (XAUI) configuration sequence for XFRAME I (Xena), written to
 * bar0->dtx_control by init_nic(); contains SWITCH_SIGN/END_SIGN markers
 * (terminator outside this excerpt). */
238 static u64 xena_dtx_cfg[] = {
239 0x8000051500000000ULL, 0x80000515000000E0ULL,
240 0x80000515D93500E4ULL, 0x8001051500000000ULL,
241 0x80010515000000E0ULL, 0x80010515001E00E4ULL,
242 0x8002051500000000ULL, 0x80020515000000E0ULL,
243 0x80020515F21000E4ULL,
244 /* Set PADLOOPBACKN */
245 0x8002051500000000ULL, 0x80020515000000E0ULL,
246 0x80020515B20000E4ULL, 0x8003051500000000ULL,
247 0x80030515000000E0ULL, 0x80030515B20000E4ULL,
248 0x8004051500000000ULL, 0x80040515000000E0ULL,
249 0x80040515B20000E4ULL, 0x8005051500000000ULL,
250 0x80050515000000E0ULL, 0x80050515B20000E4ULL,
252 /* Remove PADLOOPBACKN */
253 0x8002051500000000ULL, 0x80020515000000E0ULL,
254 0x80020515F20000E4ULL, 0x8003051500000000ULL,
255 0x80030515000000E0ULL, 0x80030515F20000E4ULL,
256 0x8004051500000000ULL, 0x80040515000000E0ULL,
257 0x80040515F20000E4ULL, 0x8005051500000000ULL,
258 0x80050515000000E0ULL, 0x80050515F20000E4ULL,
263 * Constants for Fixing the MacAddress problem seen mostly on
/* Register write sequence used as a workaround for the MAC-address
 * read problem; terminated by END_SIGN outside this excerpt. */
266 static u64 fix_mac[] = {
267 0x0060000000000000ULL, 0x0060600000000000ULL,
268 0x0040600000000000ULL, 0x0000600000000000ULL,
269 0x0020600000000000ULL, 0x0060600000000000ULL,
270 0x0020600000000000ULL, 0x0060600000000000ULL,
271 0x0020600000000000ULL, 0x0060600000000000ULL,
272 0x0020600000000000ULL, 0x0060600000000000ULL,
273 0x0020600000000000ULL, 0x0060600000000000ULL,
274 0x0020600000000000ULL, 0x0060600000000000ULL,
275 0x0020600000000000ULL, 0x0060600000000000ULL,
276 0x0020600000000000ULL, 0x0060600000000000ULL,
277 0x0020600000000000ULL, 0x0060600000000000ULL,
278 0x0020600000000000ULL, 0x0060600000000000ULL,
279 0x0020600000000000ULL, 0x0000600000000000ULL,
280 0x0040600000000000ULL, 0x0060600000000000ULL,
284 /* Module Loadable parameters. */
/* Number of Tx FIFOs used by the driver (see file header for details). */
285 static unsigned int tx_fifo_num = 1;
/* Per-FIFO Tx descriptor counts; 0 means "use the driver default". */
286 static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
287 {[0 ...(MAX_TX_FIFOS - 1)] = 0 };
/* Number of Rx rings and per-ring descriptor counts (0 = default). */
288 static unsigned int rx_ring_num = 1;
289 static unsigned int rx_ring_sz[MAX_RX_RINGS] =
290 {[0 ...(MAX_RX_RINGS - 1)] = 0 };
/* Per-ring frame-length steering values; 0 disables steering (see
 * init_nic(), which only programs rts_frm_len_n for non-zero entries). */
291 static unsigned int rts_frm_len[MAX_RX_RINGS] =
292 {[0 ...(MAX_RX_RINGS - 1)] = 0 };
293 static unsigned int use_continuous_tx_intrs = 1;
/* Pause-frame time and per-queue pause thresholds programmed into the MAC. */
294 static unsigned int rmac_pause_time = 65535;
295 static unsigned int mc_pause_threshold_q0q3 = 187;
296 static unsigned int mc_pause_threshold_q4q7 = 187;
297 static unsigned int shared_splits;
/* Sampling periods for the link-utilization calculation (see the
 * mac_link_util programming in init_nic()). */
298 static unsigned int tmac_util_period = 5;
299 static unsigned int rmac_util_period = 5;
300 static unsigned int bimodal = 0;
301 #ifndef CONFIG_S2IO_NAPI
/* Only meaningful when NAPI is disabled. */
302 static unsigned int indicate_max_pkts;
307 * This table lists all the devices that this driver supports.
/* PCI id table: XFRAME I (S2IO_WIN/UNI) and XFRAME II (HERC_WIN/UNI)
 * devices; the {0,} terminator lies outside this excerpt. */
309 static struct pci_device_id s2io_tbl[] __devinitdata = {
310 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
311 PCI_ANY_ID, PCI_ANY_ID},
312 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
313 PCI_ANY_ID, PCI_ANY_ID},
314 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
315 PCI_ANY_ID, PCI_ANY_ID},
316 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
317 PCI_ANY_ID, PCI_ANY_ID},
321 MODULE_DEVICE_TABLE(pci, s2io_tbl);
/* PCI driver descriptor registered with the PCI core; probe/remove point
 * at s2io_init_nic()/s2io_rem_nic() (defined elsewhere in this file). */
323 static struct pci_driver s2io_driver = {
325 .id_table = s2io_tbl,
326 .probe = s2io_init_nic,
327 .remove = __devexit_p(s2io_rem_nic),
/* A simplifier macro used both by init and free shared_mem Fns(). */
/*
 * Number of page-sized chunks needed to hold `len` items at `per_each`
 * items per chunk — i.e. the ceiling of len / per_each.  Both arguments
 * are parenthesized; the original `(len+per_each - 1) / per_each`
 * misparses when `per_each` is an expression (e.g. `2 * 2`).
 */
#define TXD_MEM_PAGE_CNT(len, per_each) \
	(((len) + (per_each) - 1) / (per_each))
334 * init_shared_mem - Allocation and Initialization of Memory
335 * @nic: Device private variable.
336 * Description: The function allocates all the memory areas shared
337 * between the NIC and the driver. This includes Tx descriptors,
338 * Rx descriptors and the statistics block.
341 static int init_shared_mem(struct s2io_nic *nic)
344 void *tmp_v_addr, *tmp_v_addr_next;
345 dma_addr_t tmp_p_addr, tmp_p_addr_next;
346 RxD_block_t *pre_rxd_blk = NULL;
347 int i, j, blk_cnt, rx_sz, tx_sz;
348 int lst_size, lst_per_page;
349 struct net_device *dev = nic->dev;
350 #ifdef CONFIG_2BUFF_MODE
355 mac_info_t *mac_control;
356 struct config_param *config;
358 mac_control = &nic->mac_control;
359 config = &nic->config;
362 /* Allocation and initialization of TXDLs in FIFOs */
/* Sum the requested descriptor count over all Tx FIFOs and reject
 * configurations beyond the hardware maximum. */
364 for (i = 0; i < config->tx_fifo_num; i++) {
365 size += config->tx_cfg[i].fifo_len;
367 if (size > MAX_AVAILABLE_TXDS) {
368 DBG_PRINT(ERR_DBG, "%s: Requested TxDs too high, ",
370 DBG_PRINT(ERR_DBG, "Requested: %d, max supported: 8192\n", size);
/* One TxDL is max_txds descriptors; lst_per_page TxDLs fit in a page. */
374 lst_size = (sizeof(TxD_t) * config->max_txds);
375 tx_sz = lst_size * size;
376 lst_per_page = PAGE_SIZE / lst_size;
/* Allocate the per-FIFO bookkeeping array (one entry per TxDL). */
378 for (i = 0; i < config->tx_fifo_num; i++) {
379 int fifo_len = config->tx_cfg[i].fifo_len;
380 int list_holder_size = fifo_len * sizeof(list_info_hold_t);
381 mac_control->fifos[i].list_info = kmalloc(list_holder_size,
383 if (!mac_control->fifos[i].list_info) {
385 "Malloc failed for list_info\n");
388 memset(mac_control->fifos[i].list_info, 0, list_holder_size);
/* Initialize per-FIFO put/get cursors and allocate page-sized DMA
 * regions, carving each page into lst_per_page TxDLs. */
390 for (i = 0; i < config->tx_fifo_num; i++) {
391 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
393 mac_control->fifos[i].tx_curr_put_info.offset = 0;
394 mac_control->fifos[i].tx_curr_put_info.fifo_len =
395 config->tx_cfg[i].fifo_len - 1;
396 mac_control->fifos[i].tx_curr_get_info.offset = 0;
397 mac_control->fifos[i].tx_curr_get_info.fifo_len =
398 config->tx_cfg[i].fifo_len - 1;
399 mac_control->fifos[i].fifo_no = i;
400 mac_control->fifos[i].nic = nic;
401 mac_control->fifos[i].max_txds = MAX_SKB_FRAGS;
403 for (j = 0; j < page_num; j++) {
407 tmp_v = pci_alloc_consistent(nic->pdev,
411 "pci_alloc_consistent ");
412 DBG_PRINT(ERR_DBG, "failed for TxDL\n");
/* Record the virtual/physical address of every TxDL in the page. */
415 while (k < lst_per_page) {
416 int l = (j * lst_per_page) + k;
417 if (l == config->tx_cfg[i].fifo_len)
419 mac_control->fifos[i].list_info[l].list_virt_addr =
420 tmp_v + (k * lst_size);
421 mac_control->fifos[i].list_info[l].list_phy_addr =
422 tmp_p + (k * lst_size);
428 /* Allocation and initialization of RXDs in Rings */
/* Each Rx block holds MAX_RXDS_PER_BLOCK usable RxDs plus one link RxD;
 * the configured count must divide evenly into blocks. */
430 for (i = 0; i < config->rx_ring_num; i++) {
431 if (config->rx_cfg[i].num_rxd % (MAX_RXDS_PER_BLOCK + 1)) {
432 DBG_PRINT(ERR_DBG, "%s: RxD count of ", dev->name);
433 DBG_PRINT(ERR_DBG, "Ring%d is not a multiple of ",
435 DBG_PRINT(ERR_DBG, "RxDs per Block");
438 size += config->rx_cfg[i].num_rxd;
439 mac_control->rings[i].block_count =
440 config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
/* pkt_cnt excludes the one link descriptor consumed per block. */
441 mac_control->rings[i].pkt_cnt =
442 config->rx_cfg[i].num_rxd - mac_control->rings[i].block_count;
444 size = (size * (sizeof(RxD_t)));
/* Initialize per-ring cursors and allocate the DMA-coherent Rx blocks. */
447 for (i = 0; i < config->rx_ring_num; i++) {
448 mac_control->rings[i].rx_curr_get_info.block_index = 0;
449 mac_control->rings[i].rx_curr_get_info.offset = 0;
450 mac_control->rings[i].rx_curr_get_info.ring_len =
451 config->rx_cfg[i].num_rxd - 1;
452 mac_control->rings[i].rx_curr_put_info.block_index = 0;
453 mac_control->rings[i].rx_curr_put_info.offset = 0;
454 mac_control->rings[i].rx_curr_put_info.ring_len =
455 config->rx_cfg[i].num_rxd - 1;
456 mac_control->rings[i].nic = nic;
457 mac_control->rings[i].ring_no = i;
460 config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
461 /* Allocating all the Rx blocks */
462 for (j = 0; j < blk_cnt; j++) {
463 #ifndef CONFIG_2BUFF_MODE
464 size = (MAX_RXDS_PER_BLOCK + 1) * (sizeof(RxD_t));
466 size = SIZE_OF_BLOCK;
468 tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
470 if (tmp_v_addr == NULL) {
472 * In case of failure, free_shared_mem()
473 * is called, which should free any
474 * memory that was alloced till the
477 mac_control->rings[i].rx_blocks[j].block_virt_addr =
481 memset(tmp_v_addr, 0, size);
482 mac_control->rings[i].rx_blocks[j].block_virt_addr =
484 mac_control->rings[i].rx_blocks[j].block_dma_addr =
487 /* Interlinking all Rx Blocks */
/* Link each block to the next (circularly, via modulo) by both virtual
 * and DMA address so the NIC can chase the chain. */
488 for (j = 0; j < blk_cnt; j++) {
490 mac_control->rings[i].rx_blocks[j].block_virt_addr;
492 mac_control->rings[i].rx_blocks[(j + 1) %
493 blk_cnt].block_virt_addr;
495 mac_control->rings[i].rx_blocks[j].block_dma_addr;
497 mac_control->rings[i].rx_blocks[(j + 1) %
498 blk_cnt].block_dma_addr;
500 pre_rxd_blk = (RxD_block_t *) tmp_v_addr;
501 pre_rxd_blk->reserved_1 = END_OF_BLOCK; /* last RxD
504 #ifndef CONFIG_2BUFF_MODE
505 pre_rxd_blk->reserved_2_pNext_RxD_block =
506 (unsigned long) tmp_v_addr_next;
508 pre_rxd_blk->pNext_RxD_Blk_physical =
509 (u64) tmp_p_addr_next;
513 #ifdef CONFIG_2BUFF_MODE
515 * Allocation of Storages for buffer addresses in 2BUFF mode
516 * and the buffers as well.
518 for (i = 0; i < config->rx_ring_num; i++) {
520 config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
521 mac_control->rings[i].ba = kmalloc((sizeof(buffAdd_t *) * blk_cnt),
523 if (!mac_control->rings[i].ba)
525 for (j = 0; j < blk_cnt; j++) {
527 mac_control->rings[i].ba[j] = kmalloc((sizeof(buffAdd_t) *
528 (MAX_RXDS_PER_BLOCK + 1)),
530 if (!mac_control->rings[i].ba[j])
532 while (k != MAX_RXDS_PER_BLOCK) {
533 ba = &mac_control->rings[i].ba[j][k];
/* Over-allocate by ALIGN_SIZE, then round the usable pointer
 * (ba_0/ba_1) up to an ALIGN_SIZE boundary; the original
 * allocation is kept in ba_*_org for kfree(). */
535 ba->ba_0_org = (void *) kmalloc
536 (BUF0_LEN + ALIGN_SIZE, GFP_KERNEL);
539 tmp = (u64) ba->ba_0_org;
541 tmp &= ~((u64) ALIGN_SIZE);
542 ba->ba_0 = (void *) tmp;
544 ba->ba_1_org = (void *) kmalloc
545 (BUF1_LEN + ALIGN_SIZE, GFP_KERNEL);
548 tmp = (u64) ba->ba_1_org;
550 tmp &= ~((u64) ALIGN_SIZE);
551 ba->ba_1 = (void *) tmp;
558 /* Allocation and initialization of Statistics block */
559 size = sizeof(StatInfo_t);
560 mac_control->stats_mem = pci_alloc_consistent
561 (nic->pdev, size, &mac_control->stats_mem_phy);
563 if (!mac_control->stats_mem) {
565 * In case of failure, free_shared_mem() is called, which
566 * should free any memory that was alloced till the
571 mac_control->stats_mem_sz = size;
573 tmp_v_addr = mac_control->stats_mem;
574 mac_control->stats_info = (StatInfo_t *) tmp_v_addr;
575 memset(tmp_v_addr, 0, size);
576 DBG_PRINT(INIT_DBG, "%s:Ring Mem PHY: 0x%llx\n", dev->name,
577 (unsigned long long) tmp_p_addr);
583 * free_shared_mem - Free the allocated Memory
584 * @nic: Device private variable.
585 * Description: This function is to free all memory locations allocated by
586 * the init_shared_mem() function and return it to the kernel.
589 static void free_shared_mem(struct s2io_nic *nic)
591 int i, j, blk_cnt, size;
593 dma_addr_t tmp_p_addr;
594 mac_info_t *mac_control;
595 struct config_param *config;
596 int lst_size, lst_per_page;
602 mac_control = &nic->mac_control;
603 config = &nic->config;
/* Mirror the TxDL sizing arithmetic from init_shared_mem(). */
605 lst_size = (sizeof(TxD_t) * config->max_txds);
606 lst_per_page = PAGE_SIZE / lst_size;
/* Free each page-sized TxDL region, then the per-FIFO list_info array.
 * Tolerates partial allocation (list_info or an entry may be NULL). */
608 for (i = 0; i < config->tx_fifo_num; i++) {
609 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
611 for (j = 0; j < page_num; j++) {
612 int mem_blks = (j * lst_per_page);
613 if ((!mac_control->fifos[i].list_info) ||
614 (!mac_control->fifos[i].list_info[mem_blks].
617 pci_free_consistent(nic->pdev, PAGE_SIZE,
618 mac_control->fifos[i].
621 mac_control->fifos[i].
625 kfree(mac_control->fifos[i].list_info);
628 #ifndef CONFIG_2BUFF_MODE
629 size = (MAX_RXDS_PER_BLOCK + 1) * (sizeof(RxD_t));
631 size = SIZE_OF_BLOCK;
/* Free every DMA-coherent Rx block in every ring; NULL entries mark
 * blocks that were never allocated. */
633 for (i = 0; i < config->rx_ring_num; i++) {
634 blk_cnt = mac_control->rings[i].block_count;
635 for (j = 0; j < blk_cnt; j++) {
636 tmp_v_addr = mac_control->rings[i].rx_blocks[j].
638 tmp_p_addr = mac_control->rings[i].rx_blocks[j].
640 if (tmp_v_addr == NULL)
642 pci_free_consistent(nic->pdev, size,
643 tmp_v_addr, tmp_p_addr);
647 #ifdef CONFIG_2BUFF_MODE
648 /* Freeing buffer storage addresses in 2BUFF mode. */
649 for (i = 0; i < config->rx_ring_num; i++) {
651 config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
652 for (j = 0; j < blk_cnt; j++) {
654 if (!mac_control->rings[i].ba[j])
656 while (k != MAX_RXDS_PER_BLOCK) {
657 buffAdd_t *ba = &mac_control->rings[i].ba[j][k];
662 kfree(mac_control->rings[i].ba[j]);
/* NOTE(review): the NULL guard is redundant — kfree(NULL) is a no-op. */
664 if (mac_control->rings[i].ba)
665 kfree(mac_control->rings[i].ba);
/* Finally release the DMA-coherent statistics block, if allocated. */
669 if (mac_control->stats_mem) {
670 pci_free_consistent(nic->pdev,
671 mac_control->stats_mem_sz,
672 mac_control->stats_mem,
673 mac_control->stats_mem_phy);
678 * s2io_verify_pci_mode -
/*
 * Reads the device's pci_mode register and returns -1 if the hardware
 * reports an unknown PCI/PCI-X mode; otherwise the decoded mode is
 * returned (return path outside this excerpt).
 */
681 static int s2io_verify_pci_mode(nic_t *nic)
683 XENA_dev_config_t *bar0 = (XENA_dev_config_t *) nic->bar0;
684 register u64 val64 = 0;
687 val64 = readq(&bar0->pci_mode);
688 mode = (u8)GET_PCI_MODE(val64);
690 if ( val64 & PCI_MODE_UNKNOWN_MODE)
691 return -1; /* Unknown PCI mode */
697 * s2io_print_pci_mode -
/*
 * Decodes the pci_mode register, logs the bus width and clock rate, and
 * records the effective bus speed (MHz) in config->bus_speed.  Returns
 * -1 for an unknown mode or unsupported speed.  Note that several logged
 * rates are double the nominal mode frequency because Herc doubles the
 * clock (see the PCIX(M1)/PCIX(M2) cases below).
 */
699 static int s2io_print_pci_mode(nic_t *nic)
701 XENA_dev_config_t *bar0 = (XENA_dev_config_t *) nic->bar0;
702 register u64 val64 = 0;
704 struct config_param *config = &nic->config;
706 val64 = readq(&bar0->pci_mode);
707 mode = (u8)GET_PCI_MODE(val64);
709 if ( val64 & PCI_MODE_UNKNOWN_MODE)
710 return -1; /* Unknown PCI mode */
712 if (val64 & PCI_MODE_32_BITS) {
713 DBG_PRINT(ERR_DBG, "%s: Device is on 32 bit ", nic->dev->name);
715 DBG_PRINT(ERR_DBG, "%s: Device is on 64 bit ", nic->dev->name);
719 case PCI_MODE_PCI_33:
720 DBG_PRINT(ERR_DBG, "33MHz PCI bus\n");
721 config->bus_speed = 33;
723 case PCI_MODE_PCI_66:
724 DBG_PRINT(ERR_DBG, "66MHz PCI bus\n");
725 config->bus_speed = 133;
727 case PCI_MODE_PCIX_M1_66:
728 DBG_PRINT(ERR_DBG, "66MHz PCIX(M1) bus\n");
729 config->bus_speed = 133; /* Herc doubles the clock rate */
731 case PCI_MODE_PCIX_M1_100:
732 DBG_PRINT(ERR_DBG, "100MHz PCIX(M1) bus\n");
733 config->bus_speed = 200;
735 case PCI_MODE_PCIX_M1_133:
736 DBG_PRINT(ERR_DBG, "133MHz PCIX(M1) bus\n");
737 config->bus_speed = 266;
739 case PCI_MODE_PCIX_M2_66:
740 DBG_PRINT(ERR_DBG, "133MHz PCIX(M2) bus\n");
741 config->bus_speed = 133;
743 case PCI_MODE_PCIX_M2_100:
744 DBG_PRINT(ERR_DBG, "200MHz PCIX(M2) bus\n");
745 config->bus_speed = 200;
747 case PCI_MODE_PCIX_M2_133:
748 DBG_PRINT(ERR_DBG, "266MHz PCIX(M2) bus\n");
749 config->bus_speed = 266;
752 return -1; /* Unsupported bus speed */
759 * init_nic - Initialization of hardware
760 * @nic: device peivate variable
761 * Description: The function sequentially configures every block
762 * of the H/W from their reset values.
763 * Return Value: SUCCESS on success and
764 * '-1' on failure (endian settings incorrect).
767 static int init_nic(struct s2io_nic *nic)
769 XENA_dev_config_t __iomem *bar0 = nic->bar0;
770 struct net_device *dev = nic->dev;
771 register u64 val64 = 0;
775 mac_info_t *mac_control;
776 struct config_param *config;
777 int mdio_cnt = 0, dtx_cnt = 0;
778 unsigned long long mem_share;
781 mac_control = &nic->mac_control;
782 config = &nic->config;
784 /* to set the swapper controle on the card */
785 if(s2io_set_swapper(nic)) {
786 DBG_PRINT(ERR_DBG,"ERROR: Setting Swapper failed\n");
791 * Herc requires EOI to be removed from reset before XGXS, so..
793 if (nic->device_type & XFRAME_II_DEVICE) {
794 val64 = 0xA500000000ULL;
795 writeq(val64, &bar0->sw_reset);
797 val64 = readq(&bar0->sw_reset);
800 /* Remove XGXS from reset state */
802 writeq(val64, &bar0->sw_reset);
804 val64 = readq(&bar0->sw_reset);
806 /* Enable Receiving broadcasts */
807 add = &bar0->mac_cfg;
808 val64 = readq(&bar0->mac_cfg);
809 val64 |= MAC_RMAC_BCAST_ENABLE;
810 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
811 writel((u32) val64, add);
812 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
813 writel((u32) (val64 >> 32), (add + 4));
815 /* Read registers in all blocks */
816 val64 = readq(&bar0->mac_int_mask);
817 val64 = readq(&bar0->mc_int_mask);
818 val64 = readq(&bar0->xgxs_int_mask);
822 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
825 * Configuring the XAUI Interface of Xena.
826 * ***************************************
827 * To Configure the Xena's XAUI, one has to write a series
828 * of 64 bit values into two registers in a particular
829 * sequence. Hence a macro 'SWITCH_SIGN' has been defined
830 * which will be defined in the array of configuration values
831 * (xena_dtx_cfg & xena_mdio_cfg) at appropriate places
832 * to switch writing from one regsiter to another. We continue
833 * writing these values until we encounter the 'END_SIGN' macro.
834 * For example, After making a series of 21 writes into
835 * dtx_control register the 'SWITCH_SIGN' appears and hence we
836 * start writing into mdio_control until we encounter END_SIGN.
838 if (nic->device_type & XFRAME_II_DEVICE) {
839 while (herc_act_dtx_cfg[dtx_cnt] != END_SIGN) {
840 SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
841 &bar0->dtx_control, UF);
843 msleep(1); /* Necessary!! */
849 while (xena_dtx_cfg[dtx_cnt] != END_SIGN) {
850 if (xena_dtx_cfg[dtx_cnt] == SWITCH_SIGN) {
854 SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
855 &bar0->dtx_control, UF);
856 val64 = readq(&bar0->dtx_control);
860 while (xena_mdio_cfg[mdio_cnt] != END_SIGN) {
861 if (xena_mdio_cfg[mdio_cnt] == SWITCH_SIGN) {
865 SPECIAL_REG_WRITE(xena_mdio_cfg[mdio_cnt],
866 &bar0->mdio_control, UF);
867 val64 = readq(&bar0->mdio_control);
870 if ((xena_dtx_cfg[dtx_cnt] == END_SIGN) &&
871 (xena_mdio_cfg[mdio_cnt] == END_SIGN)) {
879 /* Tx DMA Initialization */
881 writeq(val64, &bar0->tx_fifo_partition_0);
882 writeq(val64, &bar0->tx_fifo_partition_1);
883 writeq(val64, &bar0->tx_fifo_partition_2);
884 writeq(val64, &bar0->tx_fifo_partition_3);
887 for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
889 vBIT(config->tx_cfg[i].fifo_len - 1, ((i * 32) + 19),
890 13) | vBIT(config->tx_cfg[i].fifo_priority,
893 if (i == (config->tx_fifo_num - 1)) {
900 writeq(val64, &bar0->tx_fifo_partition_0);
904 writeq(val64, &bar0->tx_fifo_partition_1);
908 writeq(val64, &bar0->tx_fifo_partition_2);
912 writeq(val64, &bar0->tx_fifo_partition_3);
917 /* Enable Tx FIFO partition 0. */
918 val64 = readq(&bar0->tx_fifo_partition_0);
919 val64 |= BIT(0); /* To enable the FIFO partition. */
920 writeq(val64, &bar0->tx_fifo_partition_0);
923 * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
924 * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
926 if ((nic->device_type == XFRAME_I_DEVICE) &&
927 (get_xena_rev_id(nic->pdev) < 4))
928 writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);
930 val64 = readq(&bar0->tx_fifo_partition_0);
931 DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
932 &bar0->tx_fifo_partition_0, (unsigned long long) val64);
935 * Initialization of Tx_PA_CONFIG register to ignore packet
936 * integrity checking.
938 val64 = readq(&bar0->tx_pa_cfg);
939 val64 |= TX_PA_CFG_IGNORE_FRM_ERR | TX_PA_CFG_IGNORE_SNAP_OUI |
940 TX_PA_CFG_IGNORE_LLC_CTRL | TX_PA_CFG_IGNORE_L2_ERR;
941 writeq(val64, &bar0->tx_pa_cfg);
943 /* Rx DMA intialization. */
945 for (i = 0; i < config->rx_ring_num; i++) {
947 vBIT(config->rx_cfg[i].ring_priority, (5 + (i * 8)),
950 writeq(val64, &bar0->rx_queue_priority);
953 * Allocating equal share of memory to all the
957 if (nic->device_type & XFRAME_II_DEVICE)
962 for (i = 0; i < config->rx_ring_num; i++) {
965 mem_share = (mem_size / config->rx_ring_num +
966 mem_size % config->rx_ring_num);
967 val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
970 mem_share = (mem_size / config->rx_ring_num);
971 val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
974 mem_share = (mem_size / config->rx_ring_num);
975 val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
978 mem_share = (mem_size / config->rx_ring_num);
979 val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
982 mem_share = (mem_size / config->rx_ring_num);
983 val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
986 mem_share = (mem_size / config->rx_ring_num);
987 val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
990 mem_share = (mem_size / config->rx_ring_num);
991 val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
994 mem_share = (mem_size / config->rx_ring_num);
995 val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
999 writeq(val64, &bar0->rx_queue_cfg);
1002 * Filling Tx round robin registers
1003 * as per the number of FIFOs
1005 switch (config->tx_fifo_num) {
1007 val64 = 0x0000000000000000ULL;
1008 writeq(val64, &bar0->tx_w_round_robin_0);
1009 writeq(val64, &bar0->tx_w_round_robin_1);
1010 writeq(val64, &bar0->tx_w_round_robin_2);
1011 writeq(val64, &bar0->tx_w_round_robin_3);
1012 writeq(val64, &bar0->tx_w_round_robin_4);
1015 val64 = 0x0000010000010000ULL;
1016 writeq(val64, &bar0->tx_w_round_robin_0);
1017 val64 = 0x0100000100000100ULL;
1018 writeq(val64, &bar0->tx_w_round_robin_1);
1019 val64 = 0x0001000001000001ULL;
1020 writeq(val64, &bar0->tx_w_round_robin_2);
1021 val64 = 0x0000010000010000ULL;
1022 writeq(val64, &bar0->tx_w_round_robin_3);
1023 val64 = 0x0100000000000000ULL;
1024 writeq(val64, &bar0->tx_w_round_robin_4);
1027 val64 = 0x0001000102000001ULL;
1028 writeq(val64, &bar0->tx_w_round_robin_0);
1029 val64 = 0x0001020000010001ULL;
1030 writeq(val64, &bar0->tx_w_round_robin_1);
1031 val64 = 0x0200000100010200ULL;
1032 writeq(val64, &bar0->tx_w_round_robin_2);
1033 val64 = 0x0001000102000001ULL;
1034 writeq(val64, &bar0->tx_w_round_robin_3);
1035 val64 = 0x0001020000000000ULL;
1036 writeq(val64, &bar0->tx_w_round_robin_4);
1039 val64 = 0x0001020300010200ULL;
1040 writeq(val64, &bar0->tx_w_round_robin_0);
1041 val64 = 0x0100000102030001ULL;
1042 writeq(val64, &bar0->tx_w_round_robin_1);
1043 val64 = 0x0200010000010203ULL;
1044 writeq(val64, &bar0->tx_w_round_robin_2);
1045 val64 = 0x0001020001000001ULL;
1046 writeq(val64, &bar0->tx_w_round_robin_3);
1047 val64 = 0x0203000100000000ULL;
1048 writeq(val64, &bar0->tx_w_round_robin_4);
1051 val64 = 0x0001000203000102ULL;
1052 writeq(val64, &bar0->tx_w_round_robin_0);
1053 val64 = 0x0001020001030004ULL;
1054 writeq(val64, &bar0->tx_w_round_robin_1);
1055 val64 = 0x0001000203000102ULL;
1056 writeq(val64, &bar0->tx_w_round_robin_2);
1057 val64 = 0x0001020001030004ULL;
1058 writeq(val64, &bar0->tx_w_round_robin_3);
1059 val64 = 0x0001000000000000ULL;
1060 writeq(val64, &bar0->tx_w_round_robin_4);
1063 val64 = 0x0001020304000102ULL;
1064 writeq(val64, &bar0->tx_w_round_robin_0);
1065 val64 = 0x0304050001020001ULL;
1066 writeq(val64, &bar0->tx_w_round_robin_1);
1067 val64 = 0x0203000100000102ULL;
1068 writeq(val64, &bar0->tx_w_round_robin_2);
1069 val64 = 0x0304000102030405ULL;
1070 writeq(val64, &bar0->tx_w_round_robin_3);
1071 val64 = 0x0001000200000000ULL;
1072 writeq(val64, &bar0->tx_w_round_robin_4);
1075 val64 = 0x0001020001020300ULL;
1076 writeq(val64, &bar0->tx_w_round_robin_0);
1077 val64 = 0x0102030400010203ULL;
1078 writeq(val64, &bar0->tx_w_round_robin_1);
1079 val64 = 0x0405060001020001ULL;
1080 writeq(val64, &bar0->tx_w_round_robin_2);
1081 val64 = 0x0304050000010200ULL;
1082 writeq(val64, &bar0->tx_w_round_robin_3);
1083 val64 = 0x0102030000000000ULL;
1084 writeq(val64, &bar0->tx_w_round_robin_4);
1087 val64 = 0x0001020300040105ULL;
1088 writeq(val64, &bar0->tx_w_round_robin_0);
1089 val64 = 0x0200030106000204ULL;
1090 writeq(val64, &bar0->tx_w_round_robin_1);
1091 val64 = 0x0103000502010007ULL;
1092 writeq(val64, &bar0->tx_w_round_robin_2);
1093 val64 = 0x0304010002060500ULL;
1094 writeq(val64, &bar0->tx_w_round_robin_3);
1095 val64 = 0x0103020400000000ULL;
1096 writeq(val64, &bar0->tx_w_round_robin_4);
1100 /* Filling the Rx round robin registers as per the
1101 * number of Rings and steering based on QoS.
1103 switch (config->rx_ring_num) {
1105 val64 = 0x8080808080808080ULL;
1106 writeq(val64, &bar0->rts_qos_steering);
1109 val64 = 0x0000010000010000ULL;
1110 writeq(val64, &bar0->rx_w_round_robin_0);
1111 val64 = 0x0100000100000100ULL;
1112 writeq(val64, &bar0->rx_w_round_robin_1);
1113 val64 = 0x0001000001000001ULL;
1114 writeq(val64, &bar0->rx_w_round_robin_2);
1115 val64 = 0x0000010000010000ULL;
1116 writeq(val64, &bar0->rx_w_round_robin_3);
1117 val64 = 0x0100000000000000ULL;
1118 writeq(val64, &bar0->rx_w_round_robin_4);
1120 val64 = 0x8080808040404040ULL;
1121 writeq(val64, &bar0->rts_qos_steering);
1124 val64 = 0x0001000102000001ULL;
1125 writeq(val64, &bar0->rx_w_round_robin_0);
1126 val64 = 0x0001020000010001ULL;
1127 writeq(val64, &bar0->rx_w_round_robin_1);
1128 val64 = 0x0200000100010200ULL;
1129 writeq(val64, &bar0->rx_w_round_robin_2);
1130 val64 = 0x0001000102000001ULL;
1131 writeq(val64, &bar0->rx_w_round_robin_3);
1132 val64 = 0x0001020000000000ULL;
1133 writeq(val64, &bar0->rx_w_round_robin_4);
1135 val64 = 0x8080804040402020ULL;
1136 writeq(val64, &bar0->rts_qos_steering);
1139 val64 = 0x0001020300010200ULL;
1140 writeq(val64, &bar0->rx_w_round_robin_0);
1141 val64 = 0x0100000102030001ULL;
1142 writeq(val64, &bar0->rx_w_round_robin_1);
1143 val64 = 0x0200010000010203ULL;
1144 writeq(val64, &bar0->rx_w_round_robin_2);
1145 val64 = 0x0001020001000001ULL;
1146 writeq(val64, &bar0->rx_w_round_robin_3);
1147 val64 = 0x0203000100000000ULL;
1148 writeq(val64, &bar0->rx_w_round_robin_4);
1150 val64 = 0x8080404020201010ULL;
1151 writeq(val64, &bar0->rts_qos_steering);
1154 val64 = 0x0001000203000102ULL;
1155 writeq(val64, &bar0->rx_w_round_robin_0);
1156 val64 = 0x0001020001030004ULL;
1157 writeq(val64, &bar0->rx_w_round_robin_1);
1158 val64 = 0x0001000203000102ULL;
1159 writeq(val64, &bar0->rx_w_round_robin_2);
1160 val64 = 0x0001020001030004ULL;
1161 writeq(val64, &bar0->rx_w_round_robin_3);
1162 val64 = 0x0001000000000000ULL;
1163 writeq(val64, &bar0->rx_w_round_robin_4);
1165 val64 = 0x8080404020201008ULL;
1166 writeq(val64, &bar0->rts_qos_steering);
1169 val64 = 0x0001020304000102ULL;
1170 writeq(val64, &bar0->rx_w_round_robin_0);
1171 val64 = 0x0304050001020001ULL;
1172 writeq(val64, &bar0->rx_w_round_robin_1);
1173 val64 = 0x0203000100000102ULL;
1174 writeq(val64, &bar0->rx_w_round_robin_2);
1175 val64 = 0x0304000102030405ULL;
1176 writeq(val64, &bar0->rx_w_round_robin_3);
1177 val64 = 0x0001000200000000ULL;
1178 writeq(val64, &bar0->rx_w_round_robin_4);
1180 val64 = 0x8080404020100804ULL;
1181 writeq(val64, &bar0->rts_qos_steering);
1184 val64 = 0x0001020001020300ULL;
1185 writeq(val64, &bar0->rx_w_round_robin_0);
1186 val64 = 0x0102030400010203ULL;
1187 writeq(val64, &bar0->rx_w_round_robin_1);
1188 val64 = 0x0405060001020001ULL;
1189 writeq(val64, &bar0->rx_w_round_robin_2);
1190 val64 = 0x0304050000010200ULL;
1191 writeq(val64, &bar0->rx_w_round_robin_3);
1192 val64 = 0x0102030000000000ULL;
1193 writeq(val64, &bar0->rx_w_round_robin_4);
1195 val64 = 0x8080402010080402ULL;
1196 writeq(val64, &bar0->rts_qos_steering);
1199 val64 = 0x0001020300040105ULL;
1200 writeq(val64, &bar0->rx_w_round_robin_0);
1201 val64 = 0x0200030106000204ULL;
1202 writeq(val64, &bar0->rx_w_round_robin_1);
1203 val64 = 0x0103000502010007ULL;
1204 writeq(val64, &bar0->rx_w_round_robin_2);
1205 val64 = 0x0304010002060500ULL;
1206 writeq(val64, &bar0->rx_w_round_robin_3);
1207 val64 = 0x0103020400000000ULL;
1208 writeq(val64, &bar0->rx_w_round_robin_4);
1210 val64 = 0x8040201008040201ULL;
1211 writeq(val64, &bar0->rts_qos_steering);
1217 for (i = 0; i < 8; i++)
1218 writeq(val64, &bar0->rts_frm_len_n[i]);
1220 /* Set the default rts frame length for the rings configured */
1221 val64 = MAC_RTS_FRM_LEN_SET(dev->mtu+22);
1222 for (i = 0 ; i < config->rx_ring_num ; i++)
1223 writeq(val64, &bar0->rts_frm_len_n[i]);
1225 /* Set the frame length for the configured rings
1226 * desired by the user
1228 for (i = 0; i < config->rx_ring_num; i++) {
1229 /* If rts_frm_len[i] == 0 then it is assumed that user not
1230 * specified frame length steering.
1231 * If the user provides the frame length then program
1232 * the rts_frm_len register for those values or else
1233 * leave it as it is.
1235 if (rts_frm_len[i] != 0) {
1236 writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
1237 &bar0->rts_frm_len_n[i]);
1241 /* Program statistics memory */
1242 writeq(mac_control->stats_mem_phy, &bar0->stat_addr);
1244 if (nic->device_type == XFRAME_II_DEVICE) {
1245 val64 = STAT_BC(0x320);
1246 writeq(val64, &bar0->stat_byte_cnt);
1250 * Initializing the sampling rate for the device to calculate the
1251 * bandwidth utilization.
1253 val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
1254 MAC_RX_LINK_UTIL_VAL(rmac_util_period);
1255 writeq(val64, &bar0->mac_link_util);
1259 * Initializing the Transmit and Receive Traffic Interrupt
1263 * TTI Initialization. Default Tx timer gets us about
1264 * 250 interrupts per sec. Continuous interrupts are enabled
1267 if (nic->device_type == XFRAME_II_DEVICE) {
1268 int count = (nic->config.bus_speed * 125)/2;
1269 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(count);
1272 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078);
1274 val64 |= TTI_DATA1_MEM_TX_URNG_A(0xA) |
1275 TTI_DATA1_MEM_TX_URNG_B(0x10) |
1276 TTI_DATA1_MEM_TX_URNG_C(0x30) | TTI_DATA1_MEM_TX_TIMER_AC_EN;
1277 if (use_continuous_tx_intrs)
1278 val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
1279 writeq(val64, &bar0->tti_data1_mem);
1281 val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
1282 TTI_DATA2_MEM_TX_UFC_B(0x20) |
1283 TTI_DATA2_MEM_TX_UFC_C(0x70) | TTI_DATA2_MEM_TX_UFC_D(0x80);
1284 writeq(val64, &bar0->tti_data2_mem);
1286 val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
1287 writeq(val64, &bar0->tti_command_mem);
1290 * Once the operation completes, the Strobe bit of the command
1291 * register will be reset. We poll for this particular condition
1292 * We wait for a maximum of 500ms for the operation to complete,
1293 * if it's not complete by then we return error.
1297 val64 = readq(&bar0->tti_command_mem);
1298 if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
1302 DBG_PRINT(ERR_DBG, "%s: TTI init Failed\n",
1310 if (nic->config.bimodal) {
1312 for (k = 0; k < config->rx_ring_num; k++) {
1313 val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
1314 val64 |= TTI_CMD_MEM_OFFSET(0x38+k);
1315 writeq(val64, &bar0->tti_command_mem);
1318 * Once the operation completes, the Strobe bit of the command
1319 * register will be reset. We poll for this particular condition
1320 * We wait for a maximum of 500ms for the operation to complete,
1321 * if it's not complete by then we return error.
1325 val64 = readq(&bar0->tti_command_mem);
1326 if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
1331 "%s: TTI init Failed\n",
1341 /* RTI Initialization */
1342 if (nic->device_type == XFRAME_II_DEVICE) {
1344 * Programmed to generate Apprx 500 Intrs per
1347 int count = (nic->config.bus_speed * 125)/4;
1348 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(count);
1350 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF);
1352 val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) |
1353 RTI_DATA1_MEM_RX_URNG_B(0x10) |
1354 RTI_DATA1_MEM_RX_URNG_C(0x30) | RTI_DATA1_MEM_RX_TIMER_AC_EN;
1356 writeq(val64, &bar0->rti_data1_mem);
1358 val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
1359 RTI_DATA2_MEM_RX_UFC_B(0x2) |
1360 RTI_DATA2_MEM_RX_UFC_C(0x40) | RTI_DATA2_MEM_RX_UFC_D(0x80);
1361 writeq(val64, &bar0->rti_data2_mem);
1363 for (i = 0; i < config->rx_ring_num; i++) {
1364 val64 = RTI_CMD_MEM_WE | RTI_CMD_MEM_STROBE_NEW_CMD
1365 | RTI_CMD_MEM_OFFSET(i);
1366 writeq(val64, &bar0->rti_command_mem);
1369 * Once the operation completes, the Strobe bit of the
1370 * command register will be reset. We poll for this
1371 * particular condition. We wait for a maximum of 500ms
1372 * for the operation to complete, if it's not complete
1373 * by then we return error.
1377 val64 = readq(&bar0->rti_command_mem);
1378 if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD)) {
1382 DBG_PRINT(ERR_DBG, "%s: RTI init Failed\n",
1393 * Initializing proper values as Pause threshold into all
1394 * the 8 Queues on Rx side.
1396 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
1397 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);
1399 /* Disable RMAC PAD STRIPPING */
1400 add = (void *) &bar0->mac_cfg;
1401 val64 = readq(&bar0->mac_cfg);
1402 val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
1403 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1404 writel((u32) (val64), add);
1405 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1406 writel((u32) (val64 >> 32), (add + 4));
1407 val64 = readq(&bar0->mac_cfg);
1410 * Set the time value to be inserted in the pause frame
1411 * generated by xena.
1413 val64 = readq(&bar0->rmac_pause_cfg);
1414 val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
1415 val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
1416 writeq(val64, &bar0->rmac_pause_cfg);
1419 * Set the Threshold Limit for Generating the pause frame
1420 * If the amount of data in any Queue exceeds ratio of
1421 * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256
1422 * pause frame is generated
1425 for (i = 0; i < 4; i++) {
1427 (((u64) 0xFF00 | nic->mac_control.
1428 mc_pause_threshold_q0q3)
1431 writeq(val64, &bar0->mc_pause_thresh_q0q3);
1434 for (i = 0; i < 4; i++) {
1436 (((u64) 0xFF00 | nic->mac_control.
1437 mc_pause_threshold_q4q7)
1440 writeq(val64, &bar0->mc_pause_thresh_q4q7);
1443 * TxDMA will stop Read request if the number of read split has
1444 * exceeded the limit pointed by shared_splits
1446 val64 = readq(&bar0->pic_control);
1447 val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
1448 writeq(val64, &bar0->pic_control);
1451 * Programming the Herc to split every write transaction
1452 * that does not start on an ADB to reduce disconnects.
1454 if (nic->device_type == XFRAME_II_DEVICE) {
1455 val64 = WREQ_SPLIT_MASK_SET_MASK(255);
1456 writeq(val64, &bar0->wreq_split_mask);
1459 /* Setting Link stability period to 64 ms */
1460 if (nic->device_type == XFRAME_II_DEVICE) {
1461 val64 = MISC_LINK_STABILITY_PRD(3);
1462 writeq(val64, &bar0->misc_control);
/*
 * Link-fault detection strategy selectors:
 *  LINK_UP_DOWN_INTERRUPT - detect link changes via the GPIO interrupt.
 *  MAC_RMAC_ERR_TIMER     - detect link changes by polling from a timer.
 */
1467 #define LINK_UP_DOWN_INTERRUPT 1
1468 #define MAC_RMAC_ERR_TIMER 2
/* Under MSI/MSI-X the timer method is forced at compile time. */
1470 #if defined(CONFIG_MSI_MODE) || defined(CONFIG_MSIX_MODE)
1471 #define s2io_link_fault_indication(x) MAC_RMAC_ERR_TIMER
/*
 * INTA case: Xframe-II can use the link up/down interrupt; Xframe-I
 * falls back to the RMAC error timer.
 * NOTE(review): the #else/#endif and braces around this function are
 * not visible in this excerpt -- confirm against the full source.
 */
1473 int s2io_link_fault_indication(nic_t *nic)
1475 if (nic->device_type == XFRAME_II_DEVICE)
1476 return LINK_UP_DOWN_INTERRUPT;
1478 return MAC_RMAC_ERR_TIMER;
1483 * en_dis_able_nic_intrs - Enable or Disable the interrupts
1484 * @nic: device private variable,
1485 * @mask: A mask indicating which Intr block must be modified and,
1486 * @flag: A flag indicating whether to enable or disable the Intrs.
1487 * Description: This function will either disable or enable the interrupts
1488 * depending on the flag argument. The mask argument can be used to
1489 * enable/disable any Intr block.
1490 * Return Value: NONE.
/*
 * Pattern used throughout: read-modify-write general_int_mask to gate a
 * top-level interrupt class, then program the per-block mask register.
 * NOTE(review): this excerpt omits interleaved source lines (braces and
 * comment delimiters); code below is kept verbatim.
 */
1493 static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
1495 XENA_dev_config_t __iomem *bar0 = nic->bar0;
1496 register u64 val64 = 0, temp64 = 0;
1498 /* Top level interrupt classification */
1499 /* PIC Interrupts */
1500 if ((mask & (TX_PIC_INTR | RX_PIC_INTR))) {
1501 /* Enable PIC Intrs in the general intr mask register */
1502 val64 = TXPIC_INT_M | PIC_RX_INT_M;
1503 if (flag == ENABLE_INTRS) {
/* Clearing bits in general_int_mask un-masks (enables) the class. */
1504 temp64 = readq(&bar0->general_int_mask);
1505 temp64 &= ~((u64) val64);
1506 writeq(temp64, &bar0->general_int_mask);
1508 * If Hercules adapter enable GPIO otherwise
1509 * disabled all PCIX, Flash, MDIO, IIC and GPIO
1510 * interrupts for now.
1513 if (s2io_link_fault_indication(nic) ==
1514 LINK_UP_DOWN_INTERRUPT ) {
/* GPIO link-up interrupt path: un-mask only the GPIO source. */
1515 temp64 = readq(&bar0->pic_int_mask);
1516 temp64 &= ~((u64) PIC_INT_GPIO);
1517 writeq(temp64, &bar0->pic_int_mask);
1518 temp64 = readq(&bar0->gpio_int_mask);
1519 temp64 &= ~((u64) GPIO_INT_MASK_LINK_UP);
1520 writeq(temp64, &bar0->gpio_int_mask);
1522 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
1525 * No MSI Support is available presently, so TTI and
1526 * RTI interrupts are also disabled.
1528 } else if (flag == DISABLE_INTRS) {
1530 * Disable PIC Intrs in the general
1531 * intr mask register
1533 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
1534 temp64 = readq(&bar0->general_int_mask);
1536 writeq(val64, &bar0->general_int_mask);
1540 /* DMA Interrupts */
1541 /* Enabling/Disabling Tx DMA interrupts */
1542 if (mask & TX_DMA_INTR) {
1543 /* Enable TxDMA Intrs in the general intr mask register */
1544 val64 = TXDMA_INT_M;
1545 if (flag == ENABLE_INTRS) {
1546 temp64 = readq(&bar0->general_int_mask);
1547 temp64 &= ~((u64) val64);
1548 writeq(temp64, &bar0->general_int_mask);
1550 * Keep all interrupts other than PFC interrupt
1551 * and PCC interrupt disabled in DMA level.
1553 val64 = DISABLE_ALL_INTRS & ~(TXDMA_PFC_INT_M |
1555 writeq(val64, &bar0->txdma_int_mask);
1557 * Enable only the MISC error 1 interrupt in PFC block
1559 val64 = DISABLE_ALL_INTRS & (~PFC_MISC_ERR_1);
1560 writeq(val64, &bar0->pfc_err_mask);
1562 * Enable only the FB_ECC error interrupt in PCC block
1564 val64 = DISABLE_ALL_INTRS & (~PCC_FB_ECC_ERR);
1565 writeq(val64, &bar0->pcc_err_mask);
1566 } else if (flag == DISABLE_INTRS) {
1568 * Disable TxDMA Intrs in the general intr mask
1571 writeq(DISABLE_ALL_INTRS, &bar0->txdma_int_mask);
1572 writeq(DISABLE_ALL_INTRS, &bar0->pfc_err_mask);
1573 temp64 = readq(&bar0->general_int_mask);
1575 writeq(val64, &bar0->general_int_mask);
1579 /* Enabling/Disabling Rx DMA interrupts */
1580 if (mask & RX_DMA_INTR) {
1581 /* Enable RxDMA Intrs in the general intr mask register */
1582 val64 = RXDMA_INT_M;
1583 if (flag == ENABLE_INTRS) {
1584 temp64 = readq(&bar0->general_int_mask);
1585 temp64 &= ~((u64) val64);
1586 writeq(temp64, &bar0->general_int_mask);
1588 * All RxDMA block interrupts are disabled for now
1591 writeq(DISABLE_ALL_INTRS, &bar0->rxdma_int_mask);
1592 } else if (flag == DISABLE_INTRS) {
1594 * Disable RxDMA Intrs in the general intr mask
1597 writeq(DISABLE_ALL_INTRS, &bar0->rxdma_int_mask);
1598 temp64 = readq(&bar0->general_int_mask);
1600 writeq(val64, &bar0->general_int_mask);
1604 /* MAC Interrupts */
1605 /* Enabling/Disabling MAC interrupts */
1606 if (mask & (TX_MAC_INTR | RX_MAC_INTR)) {
1607 val64 = TXMAC_INT_M | RXMAC_INT_M;
1608 if (flag == ENABLE_INTRS) {
1609 temp64 = readq(&bar0->general_int_mask);
1610 temp64 &= ~((u64) val64);
1611 writeq(temp64, &bar0->general_int_mask);
1613 * All MAC block error interrupts are disabled for now
1616 } else if (flag == DISABLE_INTRS) {
1618 * Disable MAC Intrs in the general intr mask register
1620 writeq(DISABLE_ALL_INTRS, &bar0->mac_int_mask);
1621 writeq(DISABLE_ALL_INTRS,
1622 &bar0->mac_rmac_err_mask);
1624 temp64 = readq(&bar0->general_int_mask);
1626 writeq(val64, &bar0->general_int_mask);
1630 /* XGXS Interrupts */
1631 if (mask & (TX_XGXS_INTR | RX_XGXS_INTR)) {
1632 val64 = TXXGXS_INT_M | RXXGXS_INT_M;
1633 if (flag == ENABLE_INTRS) {
1634 temp64 = readq(&bar0->general_int_mask);
1635 temp64 &= ~((u64) val64);
1636 writeq(temp64, &bar0->general_int_mask);
1638 * All XGXS block error interrupts are disabled for now
1641 writeq(DISABLE_ALL_INTRS, &bar0->xgxs_int_mask);
1642 } else if (flag == DISABLE_INTRS) {
1644 * Disable MC Intrs in the general intr mask register
1646 writeq(DISABLE_ALL_INTRS, &bar0->xgxs_int_mask);
1647 temp64 = readq(&bar0->general_int_mask);
1649 writeq(val64, &bar0->general_int_mask);
1653 /* Memory Controller(MC) interrupts */
1654 if (mask & MC_INTR) {
1656 if (flag == ENABLE_INTRS) {
1657 temp64 = readq(&bar0->general_int_mask);
1658 temp64 &= ~((u64) val64);
1659 writeq(temp64, &bar0->general_int_mask);
1661 * Enable all MC Intrs.
1663 writeq(0x0, &bar0->mc_int_mask);
1664 writeq(0x0, &bar0->mc_err_mask);
1665 } else if (flag == DISABLE_INTRS) {
1667 * Disable MC Intrs in the general intr mask register
1669 writeq(DISABLE_ALL_INTRS, &bar0->mc_int_mask);
1670 temp64 = readq(&bar0->general_int_mask);
1672 writeq(val64, &bar0->general_int_mask);
1677 /* Tx traffic interrupts */
1678 if (mask & TX_TRAFFIC_INTR) {
1679 val64 = TXTRAFFIC_INT_M;
1680 if (flag == ENABLE_INTRS) {
1681 temp64 = readq(&bar0->general_int_mask);
1682 temp64 &= ~((u64) val64);
1683 writeq(temp64, &bar0->general_int_mask);
1685 * Enable all the Tx side interrupts
1686 * writing 0 Enables all 64 TX interrupt levels
1688 writeq(0x0, &bar0->tx_traffic_mask);
1689 } else if (flag == DISABLE_INTRS) {
1691 * Disable Tx Traffic Intrs in the general intr mask
1694 writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
1695 temp64 = readq(&bar0->general_int_mask);
1697 writeq(val64, &bar0->general_int_mask);
1701 /* Rx traffic interrupts */
1702 if (mask & RX_TRAFFIC_INTR) {
1703 val64 = RXTRAFFIC_INT_M;
1704 if (flag == ENABLE_INTRS) {
1705 temp64 = readq(&bar0->general_int_mask);
1706 temp64 &= ~((u64) val64);
1707 writeq(temp64, &bar0->general_int_mask);
1708 /* writing 0 Enables all 8 RX interrupt levels */
1709 writeq(0x0, &bar0->rx_traffic_mask);
1710 } else if (flag == DISABLE_INTRS) {
1712 * Disable Rx Traffic Intrs in the general intr mask
1715 writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
1716 temp64 = readq(&bar0->general_int_mask);
1718 writeq(val64, &bar0->general_int_mask);
/*
 * check_prc_pcc_state - check RMAC PCC / RC PRC quiescence bits in the
 * adapter status value.
 * @val64:  value read from the adapter status register.
 * @flag:   whether the adapter-enable bit was ever written (TRUE/FALSE);
 *          the expected polarity of the PCC bits differs per case.
 * @rev_id: Xframe-I revision id (PCC_IDLE vs PCC_FOUR_IDLE layout
 *          changed at rev 4).
 * @herc:   non-zero for Xframe-II (Hercules), which uses the new layout.
 * NOTE(review): the return statements inside each branch are not visible
 * in this excerpt -- presumably 1 on the matching condition, else 0;
 * confirm against the full source.
 */
1723 static int check_prc_pcc_state(u64 val64, int flag, int rev_id, int herc)
1727 if (flag == FALSE) {
1728 if ((!herc && (rev_id >= 4)) || herc) {
1729 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) &&
1730 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1731 ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
1735 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) &&
1736 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1737 ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
1742 if ((!herc && (rev_id >= 4)) || herc) {
1743 if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
1744 ADAPTER_STATUS_RMAC_PCC_IDLE) &&
1745 (!(val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ||
1746 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1747 ADAPTER_STATUS_RC_PRC_QUIESCENT))) {
1751 if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
1752 ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) &&
1753 (!(val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ||
1754 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1755 ADAPTER_STATUS_RC_PRC_QUIESCENT))) {
1764 * verify_xena_quiescence - Checks whether the H/W is ready
1765 * @val64 : Value read from adapter status register.
1766 * @flag : indicates if the adapter enable bit was ever written once
1768 * Description: Returns whether the H/W is ready to go or not. Depending
1769 * on whether adapter enable bit was written or not the comparison
1770 * differs and the calling function passes the input argument flag to
1772 * Return: 1 If xena is quiescence
1773 * 0 If Xena is not quiescence
/*
 * Checks all READY/QUIESCENT/PLL-lock bits at once via the complement
 * (tmp64), then defers the revision-dependent PCC/PRC check to
 * check_prc_pcc_state().
 */
1776 static int verify_xena_quiescence(nic_t *sp, u64 val64, int flag)
1779 u64 tmp64 = ~((u64) val64);
1780 int rev_id = get_xena_rev_id(sp->pdev);
1782 herc = (sp->device_type == XFRAME_II_DEVICE);
/* All of these status bits must be set for the adapter to be ready. */
1785 (ADAPTER_STATUS_TDMA_READY | ADAPTER_STATUS_RDMA_READY |
1786 ADAPTER_STATUS_PFC_READY | ADAPTER_STATUS_TMAC_BUF_EMPTY |
1787 ADAPTER_STATUS_PIC_QUIESCENT | ADAPTER_STATUS_MC_DRAM_READY |
1788 ADAPTER_STATUS_MC_QUEUES_READY | ADAPTER_STATUS_M_PLL_LOCK |
1789 ADAPTER_STATUS_P_PLL_LOCK))) {
1790 ret = check_prc_pcc_state(val64, flag, rev_id, herc);
1797 * fix_mac_address - Fix for Mac addr problem on Alpha platforms
1798 * @sp: Pointer to device specifc structure
1800 * New procedure to clear mac address reading problems on Alpha platforms
/*
 * Writes the END_SIGN-terminated fix_mac[] sequence to gpio_control,
 * reading the register back after each write (the readq presumably
 * flushes the posted write -- confirm).
 */
1804 void fix_mac_address(nic_t * sp)
1806 XENA_dev_config_t __iomem *bar0 = sp->bar0;
1810 while (fix_mac[i] != END_SIGN) {
1811 writeq(fix_mac[i++], &bar0->gpio_control);
1813 val64 = readq(&bar0->gpio_control);
1818 * start_nic - Turns the device on
1819 * @nic : device private variable.
1821 * This function actually turns the device on. Before this function is
1822 * called,all Registers are configured from their reset states
1823 * and shared memory is allocated but the NIC is still quiescent. On
1824 * calling this function, the device interrupts are cleared and the NIC is
1825 * literally switched on by writing into the adapter control register.
1827 * SUCCESS on success and -1 on failure.
/* NOTE(review): excerpt omits interleaved lines; code kept verbatim. */
1830 static int start_nic(struct s2io_nic *nic)
1832 XENA_dev_config_t __iomem *bar0 = nic->bar0;
1833 struct net_device *dev = nic->dev;
1834 register u64 val64 = 0;
1837 mac_info_t *mac_control;
1838 struct config_param *config;
1840 mac_control = &nic->mac_control;
1841 config = &nic->config;
1843 /* PRC Initialization and configuration */
1844 for (i = 0; i < config->rx_ring_num; i++) {
/* Point each PRC at the first Rx block's DMA address for its ring. */
1845 writeq((u64) mac_control->rings[i].rx_blocks[0].block_dma_addr,
1846 &bar0->prc_rxd0_n[i]);
1848 val64 = readq(&bar0->prc_ctrl_n[i]);
1849 if (nic->config.bimodal)
1850 val64 |= PRC_CTRL_BIMODAL_INTERRUPT;
1851 #ifndef CONFIG_2BUFF_MODE
1852 val64 |= PRC_CTRL_RC_ENABLED;
1854 val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
1856 writeq(val64, &bar0->prc_ctrl_n[i]);
1859 #ifdef CONFIG_2BUFF_MODE
1860 /* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
1861 val64 = readq(&bar0->rx_pa_cfg);
1862 val64 |= RX_PA_CFG_IGNORE_L2_ERR;
1863 writeq(val64, &bar0->rx_pa_cfg);
1867 * Enabling MC-RLDRAM. After enabling the device, we timeout
1868 * for around 100ms, which is approximately the time required
1869 * for the device to be ready for operation.
1871 val64 = readq(&bar0->mc_rldram_mrs);
1872 val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
1873 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
1874 val64 = readq(&bar0->mc_rldram_mrs);
1876 msleep(100); /* Delay by around 100 ms. */
1878 /* Enabling ECC Protection. */
/* NOTE(review): this CLEARS ADAPTER_ECC_EN despite the comment above
 * saying "Enabling" -- verify the intended polarity against the
 * register documentation. */
1879 val64 = readq(&bar0->adapter_control);
1880 val64 &= ~ADAPTER_ECC_EN;
1881 writeq(val64, &bar0->adapter_control);
1884 * Clearing any possible Link state change interrupts that
1885 * could have popped up just before Enabling the card.
1887 val64 = readq(&bar0->mac_rmac_err_reg);
1889 writeq(val64, &bar0->mac_rmac_err_reg);
1892 * Verify if the device is ready to be enabled, if so enable
1895 val64 = readq(&bar0->adapter_status);
1896 if (!verify_xena_quiescence(nic, val64, nic->device_enabled_once)) {
1897 DBG_PRINT(ERR_DBG, "%s: device is not ready, ", dev->name);
1898 DBG_PRINT(ERR_DBG, "Adapter status reads: 0x%llx\n",
1899 (unsigned long long) val64);
1903 /* Enable select interrupts */
1904 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR | MC_INTR;
1905 interruptible |= TX_PIC_INTR | RX_PIC_INTR;
1906 interruptible |= TX_MAC_INTR | RX_MAC_INTR;
1908 en_dis_able_nic_intrs(nic, interruptible, ENABLE_INTRS);
1911 * With some switches, link might be already up at this point.
1912 * Because of this weird behavior, when we enable laser,
1913 * we may not get link. We need to handle this. We cannot
1914 * figure out which switch is misbehaving. So we are forced to
1915 * make a global change.
1918 /* Enabling Laser. */
1919 val64 = readq(&bar0->adapter_control);
1920 val64 |= ADAPTER_EOI_TX_ON;
1921 writeq(val64, &bar0->adapter_control);
1923 /* SXE-002: Initialize link and activity LED */
1924 subid = nic->pdev->subsystem_device;
1925 if (((subid & 0xFF) >= 0x07) &&
1926 (nic->device_type == XFRAME_I_DEVICE)) {
1927 val64 = readq(&bar0->gpio_control);
1928 val64 |= 0x0000800000000000ULL;
1929 writeq(val64, &bar0->gpio_control);
1930 val64 = 0x0411040400000000ULL;
/* Magic LED init written at raw BAR offset 0x2700 (SXE-002 errata). */
1931 writeq(val64, (void __iomem *) ((u8 *) bar0 + 0x2700));
1935 * Don't see link state interrupts on certain switches, so
1936 * directly scheduling a link state task from here.
1938 schedule_work(&nic->set_link_task);
1944 * free_tx_buffers - Free all queued Tx buffers
1945 * @nic : device private variable.
1947 * Free all queued Tx buffers.
1948 * Return Value: void
/*
 * Walks every FIFO and every TxD, unmaps the DMA buffers (single
 * mapping for the linear part, page mappings for each fragment),
 * clears the descriptors and resets the put/get offsets.
 * NOTE(review): the dev_kfree_skb / cnt++ lines are not visible in
 * this excerpt.
 */
1951 static void free_tx_buffers(struct s2io_nic *nic)
1953 struct net_device *dev = nic->dev;
1954 struct sk_buff *skb;
1957 mac_info_t *mac_control;
1958 struct config_param *config;
1959 int cnt = 0, frg_cnt;
1961 mac_control = &nic->mac_control;
1962 config = &nic->config;
1964 for (i = 0; i < config->tx_fifo_num; i++) {
1965 for (j = 0; j < config->tx_cfg[i].fifo_len - 1; j++) {
1966 txdp = (TxD_t *) mac_control->fifos[i].list_info[j].
1969 (struct sk_buff *) ((unsigned long) txdp->
1972 memset(txdp, 0, sizeof(TxD_t) *
1976 frg_cnt = skb_shinfo(skb)->nr_frags;
/* skb->len - skb->data_len is the length of the linear region. */
1977 pci_unmap_single(nic->pdev, (dma_addr_t)
1978 txdp->Buffer_Pointer,
1979 skb->len - skb->data_len,
1985 for (j = 0; j < frg_cnt; j++, txdp++) {
1987 &skb_shinfo(skb)->frags[j];
1988 pci_unmap_page(nic->pdev,
1998 memset(txdp, 0, sizeof(TxD_t) * config->max_txds);
2002 "%s:forcibly freeing %d skbs on FIFO%d\n",
2004 mac_control->fifos[i].tx_curr_get_info.offset = 0;
2005 mac_control->fifos[i].tx_curr_put_info.offset = 0;
2010 * stop_nic - To stop the nic
2011 * @nic ; device private variable.
2013 * This function does exactly the opposite of what the start_nic()
2014 * function does. This function is called to stop the device.
/*
 * Masks all interrupt classes via en_dis_able_nic_intrs(), then clears
 * the RC-enable bit in every ring's PRC control register so the
 * receive engines stop fetching descriptors.
 */
2019 static void stop_nic(struct s2io_nic *nic)
2021 XENA_dev_config_t __iomem *bar0 = nic->bar0;
2022 register u64 val64 = 0;
2023 u16 interruptible, i;
2024 mac_info_t *mac_control;
2025 struct config_param *config;
2027 mac_control = &nic->mac_control;
2028 config = &nic->config;
2030 /* Disable all interrupts */
2031 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR | MC_INTR;
2032 interruptible |= TX_PIC_INTR | RX_PIC_INTR;
2033 interruptible |= TX_MAC_INTR | RX_MAC_INTR;
2034 en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
2037 for (i = 0; i < config->rx_ring_num; i++) {
2038 val64 = readq(&bar0->prc_ctrl_n[i]);
2039 val64 &= ~((u64) PRC_CTRL_RC_ENABLED);
2040 writeq(val64, &bar0->prc_ctrl_n[i]);
2045 * fill_rx_buffers - Allocates the Rx side skbs
2046 * @nic: device private variable
2047 * @ring_no: ring number
2049 * The function allocates Rx side skbs and puts the physical
2050 * address of these buffers into the RxD buffer pointers, so that the NIC
2051 * can DMA the received frame into these locations.
2052 * The NIC supports 3 receive modes, viz
2054 * 2. three buffer and
2055 * 3. Five buffer modes.
2056 * Each mode defines how many fragments the received frame will be split
2057 * up into by the NIC. The frame is split into L3 header, L4 Header,
2058 * L4 payload in three buffer mode and in 5 buffer mode, L4 payload itself
2059 * is split into 3 fragments. As of now only single buffer mode is
2062 * SUCCESS on success or an appropriate -ve value on failure.
/* NOTE(review): excerpt omits interleaved lines; code kept verbatim. */
2065 int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
2067 struct net_device *dev = nic->dev;
2068 struct sk_buff *skb;
2070 int off, off1, size, block_no, block_no1;
2071 int offset, offset1;
2074 mac_info_t *mac_control;
2075 struct config_param *config;
2076 #ifdef CONFIG_2BUFF_MODE
2081 dma_addr_t rxdpphys;
2083 #ifndef CONFIG_S2IO_NAPI
2084 unsigned long flags;
2087 mac_control = &nic->mac_control;
2088 config = &nic->config;
/* Number of buffers to replenish = ring capacity minus what is posted. */
2089 alloc_cnt = mac_control->rings[ring_no].pkt_cnt -
2090 atomic_read(&nic->rx_bufs_left[ring_no]);
2091 size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
2092 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
2094 while (alloc_tab < alloc_cnt) {
2095 block_no = mac_control->rings[ring_no].rx_curr_put_info.
2097 block_no1 = mac_control->rings[ring_no].rx_curr_get_info.
2099 off = mac_control->rings[ring_no].rx_curr_put_info.offset;
2100 off1 = mac_control->rings[ring_no].rx_curr_get_info.offset;
2101 #ifndef CONFIG_2BUFF_MODE
2102 offset = block_no * (MAX_RXDS_PER_BLOCK + 1) + off;
2103 offset1 = block_no1 * (MAX_RXDS_PER_BLOCK + 1) + off1;
2105 offset = block_no * (MAX_RXDS_PER_BLOCK) + off;
2106 offset1 = block_no1 * (MAX_RXDS_PER_BLOCK) + off1;
2109 rxdp = mac_control->rings[ring_no].rx_blocks[block_no].
2110 block_virt_addr + off;
/* Put pointer caught up with get pointer: ring is full, stop filling. */
2111 if ((offset == offset1) && (rxdp->Host_Control)) {
2112 DBG_PRINT(INTR_DBG, "%s: Get and Put", dev->name);
2113 DBG_PRINT(INTR_DBG, " info equated\n");
2116 #ifndef CONFIG_2BUFF_MODE
/* End-of-block sentinel: advance to the next Rx block. */
2117 if (rxdp->Control_1 == END_OF_BLOCK) {
2118 mac_control->rings[ring_no].rx_curr_put_info.
2120 mac_control->rings[ring_no].rx_curr_put_info.
2121 block_index %= mac_control->rings[ring_no].block_count;
2122 block_no = mac_control->rings[ring_no].rx_curr_put_info.
2125 off %= (MAX_RXDS_PER_BLOCK + 1);
2126 mac_control->rings[ring_no].rx_curr_put_info.offset =
2128 rxdp = (RxD_t *) ((unsigned long) rxdp->Control_2);
2129 DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
2132 #ifndef CONFIG_S2IO_NAPI
2133 spin_lock_irqsave(&nic->put_lock, flags);
2134 mac_control->rings[ring_no].put_pos =
2135 (block_no * (MAX_RXDS_PER_BLOCK + 1)) + off;
2136 spin_unlock_irqrestore(&nic->put_lock, flags);
2139 if (rxdp->Host_Control == END_OF_BLOCK) {
2140 mac_control->rings[ring_no].rx_curr_put_info.
2142 mac_control->rings[ring_no].rx_curr_put_info.block_index
2143 %= mac_control->rings[ring_no].block_count;
2144 block_no = mac_control->rings[ring_no].rx_curr_put_info
2147 DBG_PRINT(INTR_DBG, "%s: block%d at: 0x%llx\n",
2148 dev->name, block_no,
2149 (unsigned long long) rxdp->Control_1);
2150 mac_control->rings[ring_no].rx_curr_put_info.offset =
2152 rxdp = mac_control->rings[ring_no].rx_blocks[block_no].
2155 #ifndef CONFIG_S2IO_NAPI
2156 spin_lock_irqsave(&nic->put_lock, flags);
2157 mac_control->rings[ring_no].put_pos = (block_no *
2158 (MAX_RXDS_PER_BLOCK + 1)) + off;
2159 spin_unlock_irqrestore(&nic->put_lock, flags);
2163 #ifndef CONFIG_2BUFF_MODE
/* Descriptor still owned by the NIC: nothing to replenish here. */
2164 if (rxdp->Control_1 & RXD_OWN_XENA)
2166 if (rxdp->Control_2 & BIT(0))
2169 mac_control->rings[ring_no].rx_curr_put_info.
2173 #ifdef CONFIG_2BUFF_MODE
2175 * RxDs Spanning cache lines will be replenished only
2176 * if the succeeding RxD is also owned by Host. It
2177 * will always be the ((8*i)+3) and ((8*i)+6)
2178 * descriptors for the 48 byte descriptor. The offending
2179 * decsriptor is of-course the 3rd descriptor.
2181 rxdpphys = mac_control->rings[ring_no].rx_blocks[block_no].
2182 block_dma_addr + (off * sizeof(RxD_t));
2183 if (((u64) (rxdpphys)) % 128 > 80) {
2184 rxdpnext = mac_control->rings[ring_no].rx_blocks[block_no].
2185 block_virt_addr + (off + 1);
2186 if (rxdpnext->Host_Control == END_OF_BLOCK) {
2187 nextblk = (block_no + 1) %
2188 (mac_control->rings[ring_no].block_count);
2189 rxdpnext = mac_control->rings[ring_no].rx_blocks
2190 [nextblk].block_virt_addr;
2192 if (rxdpnext->Control_2 & BIT(0))
2197 #ifndef CONFIG_2BUFF_MODE
2198 skb = dev_alloc_skb(size + NET_IP_ALIGN);
2200 skb = dev_alloc_skb(dev->mtu + ALIGN_SIZE + BUF0_LEN + 4);
2203 DBG_PRINT(ERR_DBG, "%s: Out of ", dev->name);
2204 DBG_PRINT(ERR_DBG, "memory to allocate SKBs\n");
2207 #ifndef CONFIG_2BUFF_MODE
2208 skb_reserve(skb, NET_IP_ALIGN);
2209 memset(rxdp, 0, sizeof(RxD_t));
2210 rxdp->Buffer0_ptr = pci_map_single
2211 (nic->pdev, skb->data, size, PCI_DMA_FROMDEVICE);
2212 rxdp->Control_2 &= (~MASK_BUFFER0_SIZE);
2213 rxdp->Control_2 |= SET_BUFFER0_SIZE(size);
/* Stash the skb pointer so the Rx handler can retrieve it later. */
2214 rxdp->Host_Control = (unsigned long) (skb);
/* Hand ownership of the descriptor back to the NIC -- last step. */
2215 rxdp->Control_1 |= RXD_OWN_XENA;
2217 off %= (MAX_RXDS_PER_BLOCK + 1);
2218 mac_control->rings[ring_no].rx_curr_put_info.offset = off;
2220 ba = &mac_control->rings[ring_no].ba[block_no][off];
2221 skb_reserve(skb, BUF0_LEN);
2222 tmp = ((unsigned long) skb->data & ALIGN_SIZE);
2224 skb_reserve(skb, (ALIGN_SIZE + 1) - tmp);
2226 memset(rxdp, 0, sizeof(RxD_t));
2227 rxdp->Buffer2_ptr = pci_map_single
2228 (nic->pdev, skb->data, dev->mtu + BUF0_LEN + 4,
2229 PCI_DMA_FROMDEVICE);
2231 pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN,
2232 PCI_DMA_FROMDEVICE);
2234 pci_map_single(nic->pdev, ba->ba_1, BUF1_LEN,
2235 PCI_DMA_FROMDEVICE);
2237 rxdp->Control_2 = SET_BUFFER2_SIZE(dev->mtu + 4);
2238 rxdp->Control_2 |= SET_BUFFER0_SIZE(BUF0_LEN);
2239 rxdp->Control_2 |= SET_BUFFER1_SIZE(1); /* dummy. */
2240 rxdp->Control_2 |= BIT(0); /* Set Buffer_Empty bit. */
2241 rxdp->Host_Control = (u64) ((unsigned long) (skb));
2242 rxdp->Control_1 |= RXD_OWN_XENA;
2244 mac_control->rings[ring_no].rx_curr_put_info.offset = off;
2246 rxdp->Control_2 |= SET_RXD_MARKER;
2248 atomic_inc(&nic->rx_bufs_left[ring_no]);
2257 * free_rx_buffers - Frees all Rx buffers
2258 * @sp: device private variable.
2260 * This function will free all Rx buffers allocated by host.
/*
 * Walks every ring / descriptor, unmaps the DMA buffers (one mapping
 * in 1-buffer mode, three in 2-buffer mode), clears each RxD and
 * resets the ring's put/get bookkeeping and buffer counter.
 * NOTE(review): excerpt omits interleaved lines (dev_kfree_skb, blk
 * advance, brace lines); code kept verbatim.
 */
2265 static void free_rx_buffers(struct s2io_nic *sp)
2267 struct net_device *dev = sp->dev;
2268 int i, j, blk = 0, off, buf_cnt = 0;
2270 struct sk_buff *skb;
2271 mac_info_t *mac_control;
2272 struct config_param *config;
2273 #ifdef CONFIG_2BUFF_MODE
2277 mac_control = &sp->mac_control;
2278 config = &sp->config;
2280 for (i = 0; i < config->rx_ring_num; i++) {
2281 for (j = 0, blk = 0; j < config->rx_cfg[i].num_rxd; j++) {
2282 off = j % (MAX_RXDS_PER_BLOCK + 1);
2283 rxdp = mac_control->rings[i].rx_blocks[blk].
2284 block_virt_addr + off;
2286 #ifndef CONFIG_2BUFF_MODE
2287 if (rxdp->Control_1 == END_OF_BLOCK) {
2289 (RxD_t *) ((unsigned long) rxdp->
2295 if (rxdp->Host_Control == END_OF_BLOCK) {
2301 if (!(rxdp->Control_1 & RXD_OWN_XENA)) {
2302 memset(rxdp, 0, sizeof(RxD_t));
2307 (struct sk_buff *) ((unsigned long) rxdp->
2310 #ifndef CONFIG_2BUFF_MODE
2311 pci_unmap_single(sp->pdev, (dma_addr_t)
2314 HEADER_ETHERNET_II_802_3_SIZE
2315 + HEADER_802_2_SIZE +
2317 PCI_DMA_FROMDEVICE);
2319 ba = &mac_control->rings[i].ba[blk][off];
2320 pci_unmap_single(sp->pdev, (dma_addr_t)
2323 PCI_DMA_FROMDEVICE);
2324 pci_unmap_single(sp->pdev, (dma_addr_t)
2327 PCI_DMA_FROMDEVICE);
2328 pci_unmap_single(sp->pdev, (dma_addr_t)
2330 dev->mtu + BUF0_LEN + 4,
2331 PCI_DMA_FROMDEVICE);
2334 atomic_dec(&sp->rx_bufs_left[i]);
2337 memset(rxdp, 0, sizeof(RxD_t));
2339 mac_control->rings[i].rx_curr_put_info.block_index = 0;
2340 mac_control->rings[i].rx_curr_get_info.block_index = 0;
2341 mac_control->rings[i].rx_curr_put_info.offset = 0;
2342 mac_control->rings[i].rx_curr_get_info.offset = 0;
2343 atomic_set(&sp->rx_bufs_left[i], 0);
2344 DBG_PRINT(INIT_DBG, "%s:Freed 0x%x Rx Buffers on ring%d\n",
2345 dev->name, buf_cnt, i);
2350 * s2io_poll - Rx interrupt handler for NAPI support
2351 * @dev : pointer to the device structure.
2352 * @budget : The number of packets that were budgeted to be processed
2353 * during one pass through the 'Poll" function.
2355 * Comes into picture only if NAPI support has been incorporated. It does
2356 * the same thing that rx_intr_handler does, but not in a interrupt context
2357 * also It will process only a given number of packets.
2359 * 0 on success and 1 if there are No Rx packets to be processed.
/* NOTE(review): excerpt omits interleaved lines; code kept verbatim. */
2362 #if defined(CONFIG_S2IO_NAPI)
2363 static int s2io_poll(struct net_device *dev, int *budget)
2365 nic_t *nic = dev->priv;
2366 int pkt_cnt = 0, org_pkts_to_process;
2367 mac_info_t *mac_control;
2368 struct config_param *config;
2369 XENA_dev_config_t *bar0 = (XENA_dev_config_t *) nic->bar0;
2373 atomic_inc(&nic->isr_cnt);
2374 mac_control = &nic->mac_control;
2375 config = &nic->config;
/* Clamp the per-poll packet quota to both *budget and dev->quota. */
2377 nic->pkts_to_process = *budget;
2378 if (nic->pkts_to_process > dev->quota)
2379 nic->pkts_to_process = dev->quota;
2380 org_pkts_to_process = nic->pkts_to_process;
/* Ack pending Rx traffic interrupts by writing the value back. */
2382 val64 = readq(&bar0->rx_traffic_int);
2383 writeq(val64, &bar0->rx_traffic_int);
2385 for (i = 0; i < config->rx_ring_num; i++) {
2386 rx_intr_handler(&mac_control->rings[i]);
2387 pkt_cnt = org_pkts_to_process - nic->pkts_to_process;
2388 if (!nic->pkts_to_process) {
2389 /* Quota for the current iteration has been met */
2396 dev->quota -= pkt_cnt;
2398 netif_rx_complete(dev);
/* Replenish Rx buffers consumed during this poll pass. */
2400 for (i = 0; i < config->rx_ring_num; i++) {
2401 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2402 DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
2403 DBG_PRINT(ERR_DBG, " in Rx Poll!!\n");
2407 /* Re enable the Rx interrupts. */
2408 en_dis_able_nic_intrs(nic, RX_TRAFFIC_INTR, ENABLE_INTRS);
2409 atomic_dec(&nic->isr_cnt);
/* Quota-exhausted path: stay on the poll list, do not re-enable IRQs. */
2413 dev->quota -= pkt_cnt;
2416 for (i = 0; i < config->rx_ring_num; i++) {
2417 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2418 DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
2419 DBG_PRINT(ERR_DBG, " in Rx Poll!!\n");
2423 atomic_dec(&nic->isr_cnt);
2429 * rx_intr_handler - Rx interrupt handler
2430 * @nic: device private variable.
2432 * If the interrupt is because of a received frame or if the
2433 * receive ring contains fresh as yet un-processed frames,this function is
2434 * called. It picks out the RxD at which place the last Rx processing had
2435 * stopped and sends the skb to the OSM's Rx handler and then increments
/* NOTE(review): elided listing (embedded line numbers skip); declarations of
 * 'rxdp' and the non-NAPI 'pkt_cnt', several braces and returns are not
 * visible here.
 */
2440 static void rx_intr_handler(ring_info_t *ring_data)
2442 nic_t *nic = ring_data->nic;
2443 struct net_device *dev = (struct net_device *) nic->dev;
2444 int get_block, get_offset, put_block, put_offset, ring_bufs;
2445 rx_curr_get_info_t get_info, put_info;
2447 struct sk_buff *skb;
2448 #ifndef CONFIG_S2IO_NAPI
/* Bail out early if the adapter is being reset; rx_lock guards the ring. */
2451 spin_lock(&nic->rx_lock);
2452 if (atomic_read(&nic->card_state) == CARD_DOWN) {
2453 DBG_PRINT(ERR_DBG, "%s: %s going down for reset\n",
2454 __FUNCTION__, dev->name);
2455 spin_unlock(&nic->rx_lock);
2458 get_info = ring_data->rx_curr_get_info;
2459 get_block = get_info.block_index;
2460 put_info = ring_data->rx_curr_put_info;
2461 put_block = put_info.block_index;
2462 ring_bufs = get_info.ring_len+1;
2463 rxdp = ring_data->rx_blocks[get_block].block_virt_addr +
2465 get_offset = (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
2467 #ifndef CONFIG_S2IO_NAPI
2468 spin_lock(&nic->put_lock);
2469 put_offset = ring_data->put_pos;
2470 spin_unlock(&nic->put_lock);
2472 put_offset = (put_block * (MAX_RXDS_PER_BLOCK + 1)) +
/* Walk descriptors the NIC has handed back, stopping before the put pointer. */
2475 while (RXD_IS_UP2DT(rxdp) &&
2476 (((get_offset + 1) % ring_bufs) != put_offset)) {
2477 skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control);
2479 DBG_PRINT(ERR_DBG, "%s: The skb is ",
2481 DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
2482 spin_unlock(&nic->rx_lock);
/* Unmap DMA for the received buffer(s): one mapping in 1-buffer mode,
 * three (BUF0/BUF1/payload) in 2-buffer mode. */
2485 #ifndef CONFIG_2BUFF_MODE
2486 pci_unmap_single(nic->pdev, (dma_addr_t)
2489 HEADER_ETHERNET_II_802_3_SIZE +
2492 PCI_DMA_FROMDEVICE);
2494 pci_unmap_single(nic->pdev, (dma_addr_t)
2496 BUF0_LEN, PCI_DMA_FROMDEVICE);
2497 pci_unmap_single(nic->pdev, (dma_addr_t)
2499 BUF1_LEN, PCI_DMA_FROMDEVICE);
2500 pci_unmap_single(nic->pdev, (dma_addr_t)
2502 dev->mtu + BUF0_LEN + 4,
2503 PCI_DMA_FROMDEVICE);
2505 rx_osm_handler(ring_data, rxdp);
2507 ring_data->rx_curr_get_info.offset =
2509 rxdp = ring_data->rx_blocks[get_block].block_virt_addr +
/* Crossed a block boundary: wrap offset and advance to the next block. */
2511 if (get_info.offset &&
2512 (!(get_info.offset % MAX_RXDS_PER_BLOCK))) {
2513 get_info.offset = 0;
2514 ring_data->rx_curr_get_info.offset
2517 get_block %= ring_data->block_count;
2518 ring_data->rx_curr_get_info.block_index
2520 rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
2523 get_offset = (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
2525 #ifdef CONFIG_S2IO_NAPI
2526 nic->pkts_to_process -= 1;
2527 if (!nic->pkts_to_process)
2531 if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
2535 spin_unlock(&nic->rx_lock);
2539 * tx_intr_handler - Transmit interrupt handler
2540 * @nic : device private variable
2542 * If an interrupt was raised to indicate DMA complete of the
2543 * Tx packet, this function is called. It identifies the last TxD
2544 * whose buffer was freed and frees all skbs whose data have already
2545 * DMA'ed into the NICs internal memory.
/* NOTE(review): elided listing; declarations of 'txdlp', 'frg_cnt', 'j',
 * 'frag' and several braces/continues are missing from view.
 */
2550 static void tx_intr_handler(fifo_info_t *fifo_data)
2552 nic_t *nic = fifo_data->nic;
2553 struct net_device *dev = (struct net_device *) nic->dev;
2554 tx_curr_get_info_t get_info, put_info;
2555 struct sk_buff *skb;
2559 get_info = fifo_data->tx_curr_get_info;
2560 put_info = fifo_data->tx_curr_put_info;
2561 txdlp = (TxD_t *) fifo_data->list_info[get_info.offset].
/* Reap TxD lists the NIC has released (ownership bit clear) up to 'put'. */
2563 while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
2564 (get_info.offset != put_info.offset) &&
2565 (txdlp->Host_Control)) {
2566 /* Check for TxD errors */
2567 if (txdlp->Control_1 & TXD_T_CODE) {
2568 unsigned long long err;
2569 err = txdlp->Control_1 & TXD_T_CODE;
2570 DBG_PRINT(ERR_DBG, "***TxD error %llx\n",
2574 skb = (struct sk_buff *) ((unsigned long)
2575 txdlp->Host_Control);
2577 DBG_PRINT(ERR_DBG, "%s: Null skb ",
2579 DBG_PRINT(ERR_DBG, "in Tx Free Intr\n");
2583 frg_cnt = skb_shinfo(skb)->nr_frags;
2584 nic->tx_pkt_count++;
/* Unmap the linear part, then every page fragment of the skb. */
2586 pci_unmap_single(nic->pdev, (dma_addr_t)
2587 txdlp->Buffer_Pointer,
2588 skb->len - skb->data_len,
2594 for (j = 0; j < frg_cnt; j++, txdlp++) {
2596 &skb_shinfo(skb)->frags[j];
2597 if (!txdlp->Buffer_Pointer)
2599 pci_unmap_page(nic->pdev,
2609 (sizeof(TxD_t) * fifo_data->max_txds));
2611 /* Updating the statistics block */
2612 nic->stats.tx_bytes += skb->len;
/* Safe to free from interrupt context. */
2613 dev_kfree_skb_irq(skb);
2616 get_info.offset %= get_info.fifo_len + 1;
2617 txdlp = (TxD_t *) fifo_data->list_info
2618 [get_info.offset].list_virt_addr;
2619 fifo_data->tx_curr_get_info.offset =
/* Descriptors were reclaimed, so the queue can be restarted if stopped. */
2623 spin_lock(&nic->tx_lock);
2624 if (netif_queue_stopped(dev))
2625 netif_wake_queue(dev);
2626 spin_unlock(&nic->tx_lock);
2630 * alarm_intr_handler - Alarm Interrupt handler
2631 * @nic: device private variable
2632 * Description: If the interrupt was neither because of Rx packet or Tx
2633 * complete, this function is called. If the interrupt was to indicate
2634 * a loss of link, the OSM link status handler is invoked for any other
2635 * alarm interrupt the block that raised the interrupt is displayed
2636 * and a H/W reset is issued.
/* NOTE(review): elided listing; some closing braces and the single-ECC
 * statistics increment line are missing from view.
 */
2641 static void alarm_intr_handler(struct s2io_nic *nic)
2643 struct net_device *dev = (struct net_device *) nic->dev;
2644 XENA_dev_config_t __iomem *bar0 = nic->bar0;
2645 register u64 val64 = 0, err_reg = 0;
2647 /* Handling link status change error Intr */
2648 if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
2649 err_reg = readq(&bar0->mac_rmac_err_reg);
2650 writeq(err_reg, &bar0->mac_rmac_err_reg);
2651 if (err_reg & RMAC_LINK_STATE_CHANGE_INT) {
2652 schedule_work(&nic->set_link_task);
2656 /* Handling Ecc errors */
2657 val64 = readq(&bar0->mc_err_reg);
2658 writeq(val64, &bar0->mc_err_reg);
2659 if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) {
2660 if (val64 & MC_ERR_REG_ECC_ALL_DBL) {
2661 nic->mac_control.stats_info->sw_stat.
/* Double-bit ECC is unrecoverable: stop Tx and schedule a reset. */
2663 DBG_PRINT(ERR_DBG, "%s: Device indicates ",
2665 DBG_PRINT(ERR_DBG, "double ECC error!!\n");
2666 netif_stop_queue(dev);
2667 schedule_work(&nic->rst_timer_task);
2669 nic->mac_control.stats_info->sw_stat.
2674 /* In case of a serious error, the device will be Reset. */
2675 val64 = readq(&bar0->serr_source);
2676 if (val64 & SERR_SOURCE_ANY) {
2677 DBG_PRINT(ERR_DBG, "%s: Device indicates ", dev->name);
2678 DBG_PRINT(ERR_DBG, "serious error!!\n");
2679 netif_stop_queue(dev);
2680 schedule_work(&nic->rst_timer_task);
2684 * Also as mentioned in the latest Errata sheets if the PCC_FB_ECC
2685 * Error occurs, the adapter will be recycled by disabling the
2686 * adapter enable bit and enabling it again after the device
2687 * becomes Quiescent.
2689 val64 = readq(&bar0->pcc_err_reg);
2690 writeq(val64, &bar0->pcc_err_reg);
2691 if (val64 & PCC_FB_ECC_DB_ERR) {
2692 u64 ac = readq(&bar0->adapter_control);
2693 ac &= ~(ADAPTER_CNTL_EN);
2694 writeq(ac, &bar0->adapter_control);
2695 ac = readq(&bar0->adapter_control);
2696 schedule_work(&nic->set_link_task);
2699 /* Other type of interrupts are not being handled now, TODO */
2703 * wait_for_cmd_complete - waits for a command to complete.
2704 * @sp : private member of the device structure, which is a pointer to the
2705 * s2io_nic structure.
2706 * Description: Function that waits for a command to Write into RMAC
2707 * ADDR DATA registers to be completed and returns either success or
2708 * error depending on whether the command was complete or not.
2710 * SUCCESS on success and FAILURE on failure.
/* NOTE(review): elided listing; the polling loop body (delay, counter
 * increment, loop bound) and the final return are missing from view.
 */
2713 int wait_for_cmd_complete(nic_t * sp)
2715 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2716 int ret = FAILURE, cnt = 0;
/* Poll rmac_addr_cmd_mem until the STROBE/EXECUTING bit clears. */
2720 val64 = readq(&bar0->rmac_addr_cmd_mem);
2721 if (!(val64 & RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING)) {
2734 * s2io_reset - Resets the card.
2735 * @sp : private member of the device structure.
2736 * Description: Function to Reset the card. This function then also
2737 * restores the previously saved PCI configuration space registers as
2738 * the card reset also resets the configuration space.
/* NOTE(review): elided listing; declarations of 'val64', 'pci_cmd', 'subid'
 * and several msleep()/brace lines are missing from view.
 */
2743 void s2io_reset(nic_t * sp)
2745 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2749 /* Back up the PCI-X CMD reg, don't want to lose MMRBC, OST settings */
2750 if (sp->device_type == XFRAME_I_DEVICE)
2751 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd));
/* Writing SW_RESET_ALL resets the card AND its PCI config space. */
2753 val64 = SW_RESET_ALL;
2754 writeq(val64, &bar0->sw_reset);
2757 * At this stage, if the PCI write is indeed completed, the
2758 * card is reset and so is the PCI Config space of the device.
2759 * So a read cannot be issued at this stage on any of the
2760 * registers to ensure the write into "sw_reset" register
2762 * Question: Is there any system call that will explicitly force
2763 * all the write commands still pending on the bus to be pushed
2765 * As of now I am just giving a 250ms delay and hoping that the
2766 * PCI write to sw_reset register is done by this time.
2770 if (!(sp->device_type & XFRAME_II_DEVICE)) {
2771 /* Restore the PCI state saved during initialization. */
2772 pci_restore_state(sp->pdev);
2773 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
2776 pci_set_master(sp->pdev);
2782 /* Set swapper to enable I/O register access */
2783 s2io_set_swapper(sp);
2785 /* Clear certain PCI/PCI-X fields after reset */
2786 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
2787 pci_cmd &= 0x7FFF; /* Clear parity err detect bit */
2788 pci_write_config_word(sp->pdev, PCI_COMMAND, pci_cmd);
2790 val64 = readq(&bar0->txpic_int_reg);
2791 val64 &= ~BIT(62); /* Clearing PCI_STATUS error reflected here */
2792 writeq(val64, &bar0->txpic_int_reg);
2794 /* Clearing PCIX Ecc status register */
2795 pci_write_config_dword(sp->pdev, 0x68, 0);
2797 /* Reset device statistics maintained by OS */
2798 memset(&sp->stats, 0, sizeof (struct net_device_stats));
2800 /* SXE-002: Configure link and activity LED to turn it off */
2801 subid = sp->pdev->subsystem_device;
2802 if (((subid & 0xFF) >= 0x07) &&
2803 (sp->device_type == XFRAME_I_DEVICE)) {
2804 val64 = readq(&bar0->gpio_control);
2805 val64 |= 0x0000800000000000ULL;
2806 writeq(val64, &bar0->gpio_control);
2807 val64 = 0x0411040400000000ULL;
2808 writeq(val64, (void __iomem *) ((u8 *) bar0 + 0x2700));
2812 * Clear spurious ECC interrupts that would have occurred on
2813 * XFRAME II cards after reset.
2815 if (sp->device_type == XFRAME_II_DEVICE) {
2816 val64 = readq(&bar0->pcc_err_reg);
2817 writeq(val64, &bar0->pcc_err_reg);
2820 sp->device_enabled_once = FALSE;
2824 * s2io_set_swapper - to set the swapper control on the card
2825 * @sp : private member of the device structure,
2826 * pointer to the s2io_nic structure.
2827 * Description: Function to set the swapper control on the card
2828 * correctly depending on the 'endianness' of the system.
2830 * SUCCESS on success and FAILURE on failure.
/* NOTE(review): elided listing; loop headers over value[], #ifdef
 * __BIG_ENDIAN/else branches, the 'i' declaration and the returns are
 * missing from view.
 */
2833 int s2io_set_swapper(nic_t * sp)
2835 struct net_device *dev = sp->dev;
2836 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2837 u64 val64, valt, valr;
2840 * Set proper endian settings and verify the same by reading
2841 * the PIF Feed-back register.
/* The feedback register must read back the canonical test pattern. */
2844 val64 = readq(&bar0->pif_rd_swapper_fb);
2845 if (val64 != 0x0123456789ABCDEFULL) {
2847 u64 value[] = { 0xC30000C3C30000C3ULL, /* FE=1, SE=1 */
2848 0x8100008181000081ULL, /* FE=1, SE=0 */
2849 0x4200004242000042ULL, /* FE=0, SE=1 */
2850 0}; /* FE=0, SE=0 */
/* Try each swapper setting until the feedback pattern matches. */
2853 writeq(value[i], &bar0->swapper_ctrl);
2854 val64 = readq(&bar0->pif_rd_swapper_fb);
2855 if (val64 == 0x0123456789ABCDEFULL)
2860 DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
2862 DBG_PRINT(ERR_DBG, "feedback read %llx\n",
2863 (unsigned long long) val64);
2868 valr = readq(&bar0->swapper_ctrl);
/* Verify write-path swapping via the xmsi_address scratch register. */
2871 valt = 0x0123456789ABCDEFULL;
2872 writeq(valt, &bar0->xmsi_address);
2873 val64 = readq(&bar0->xmsi_address);
2877 u64 value[] = { 0x00C3C30000C3C300ULL, /* FE=1, SE=1 */
2878 0x0081810000818100ULL, /* FE=1, SE=0 */
2879 0x0042420000424200ULL, /* FE=0, SE=1 */
2880 0}; /* FE=0, SE=0 */
2883 writeq((value[i] | valr), &bar0->swapper_ctrl);
2884 writeq(valt, &bar0->xmsi_address);
2885 val64 = readq(&bar0->xmsi_address);
2891 unsigned long long x = val64;
2892 DBG_PRINT(ERR_DBG, "Write failed, Xmsi_addr ");
2893 DBG_PRINT(ERR_DBG, "reads:0x%llx\n", x);
2897 val64 = readq(&bar0->swapper_ctrl);
2898 val64 &= 0xFFFF000000000000ULL;
2902 * The device by default set to a big endian format, so a
2903 * big endian driver need not set anything.
2905 val64 |= (SWAPPER_CTRL_TXP_FE |
2906 SWAPPER_CTRL_TXP_SE |
2907 SWAPPER_CTRL_TXD_R_FE |
2908 SWAPPER_CTRL_TXD_W_FE |
2909 SWAPPER_CTRL_TXF_R_FE |
2910 SWAPPER_CTRL_RXD_R_FE |
2911 SWAPPER_CTRL_RXD_W_FE |
2912 SWAPPER_CTRL_RXF_W_FE |
2913 SWAPPER_CTRL_XMSI_FE |
2914 SWAPPER_CTRL_XMSI_SE |
2915 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
2916 writeq(val64, &bar0->swapper_ctrl);
2919 * Initially we enable all bits to make it accessible by the
2920 * driver, then we selectively enable only those bits that
2923 val64 |= (SWAPPER_CTRL_TXP_FE |
2924 SWAPPER_CTRL_TXP_SE |
2925 SWAPPER_CTRL_TXD_R_FE |
2926 SWAPPER_CTRL_TXD_R_SE |
2927 SWAPPER_CTRL_TXD_W_FE |
2928 SWAPPER_CTRL_TXD_W_SE |
2929 SWAPPER_CTRL_TXF_R_FE |
2930 SWAPPER_CTRL_RXD_R_FE |
2931 SWAPPER_CTRL_RXD_R_SE |
2932 SWAPPER_CTRL_RXD_W_FE |
2933 SWAPPER_CTRL_RXD_W_SE |
2934 SWAPPER_CTRL_RXF_W_FE |
2935 SWAPPER_CTRL_XMSI_FE |
2936 SWAPPER_CTRL_XMSI_SE |
2937 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
2938 writeq(val64, &bar0->swapper_ctrl);
2940 val64 = readq(&bar0->swapper_ctrl);
2943 * Verifying if endian settings are accurate by reading a
2944 * feedback register.
2946 val64 = readq(&bar0->pif_rd_swapper_fb);
2947 if (val64 != 0x0123456789ABCDEFULL) {
2948 /* Endian settings are incorrect, calls for another dekko. */
2949 DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
2951 DBG_PRINT(ERR_DBG, "feedback read %llx\n",
2952 (unsigned long long) val64);
2959 /* ********************************************************* *
2960 * Functions defined below concern the OS part of the driver *
2961 * ********************************************************* */
2964 * s2io_open - open entry point of the driver
2965 * @dev : pointer to the device structure.
2967 * This function is the open entry point of the driver. It mainly calls a
2968 * function to allocate Rx buffers and inserts them into the buffer
2969 * descriptors and then enables the Rx part of the NIC.
2971 * 0 on success and an appropriate (-)ve integer as defined in errno.h
/* NOTE(review): elided listing; the 'err' declaration, success return and
 * the hw_init_failed label/return are missing from view.
 */
2975 int s2io_open(struct net_device *dev)
2977 nic_t *sp = dev->priv;
2981 * Make sure you have link off by default every time
2982 * Nic is initialized
2984 netif_carrier_off(dev);
2985 sp->last_link_state = 0;
2987 /* Initialize H/W and enable interrupts */
2988 if (s2io_card_up(sp)) {
2989 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
2992 goto hw_init_failed;
2995 /* After proper initialization of H/W, register ISR */
2996 err = request_irq((int) sp->pdev->irq, s2io_isr, SA_SHIRQ,
2999 DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
3001 goto isr_registration_failed;
3004 if (s2io_set_mac_addr(dev, dev->dev_addr) == FAILURE) {
3005 DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
3007 goto setting_mac_address_failed;
3010 netif_start_queue(dev);
/* Unwind in reverse order of acquisition (goto-cleanup pattern). */
3013 setting_mac_address_failed:
3014 free_irq(sp->pdev->irq, dev);
3015 isr_registration_failed:
3016 del_timer_sync(&sp->alarm_timer);
3023 * s2io_close -close entry point of the driver
3024 * @dev : device pointer.
3026 * This is the stop entry point of the driver. It needs to undo exactly
3027 * whatever was done by the open entry point,thus it's usually referred to
3028 * as the close function.Among other things this function mainly stops the
3029 * Rx side of the NIC and frees all the Rx buffers in the Rx rings.
3031 * 0 on success and an appropriate (-)ve integer as defined in errno.h
/* NOTE(review): elided listing; the s2io_card_down() call and return are
 * missing from view.
 */
3035 int s2io_close(struct net_device *dev)
3037 nic_t *sp = dev->priv;
/* Wait for deferred work (link/reset tasks) before tearing down. */
3038 flush_scheduled_work();
3039 netif_stop_queue(dev);
3040 /* Reset card, kill tasklet and free Tx and Rx buffers. */
3043 free_irq(sp->pdev->irq, dev);
3044 sp->device_close_flag = TRUE; /* Device is shut down. */
3049 * s2io_xmit - Tx entry point of the driver
3050 * @skb : the socket buffer containing the Tx data.
3051 * @dev : device pointer.
3053 * This function is the Tx entry point of the driver. S2IO NIC supports
3054 * certain protocol assist features on Tx side, namely CSO, S/G, LSO.
3055 * NOTE: when device cant queue the pkt,just the trans_start variable will
3058 * 0 on success & 1 on failure.
/* NOTE(review): elided listing; declarations of 'txdp', 'val64', 'mss',
 * 'vlan_tag', several kfree/return statements and closing braces are
 * missing from view.
 */
3061 int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
3063 nic_t *sp = dev->priv;
3064 u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
3067 TxFIFO_element_t __iomem *tx_fifo;
3068 unsigned long flags;
3073 int vlan_priority = 0;
3074 mac_info_t *mac_control;
3075 struct config_param *config;
3077 mac_control = &sp->mac_control;
3078 config = &sp->config;
3080 DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name);
3081 spin_lock_irqsave(&sp->tx_lock, flags);
3082 if (atomic_read(&sp->card_state) == CARD_DOWN) {
3083 DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
3085 spin_unlock_irqrestore(&sp->tx_lock, flags);
3092 /* Get Fifo number to Transmit based on vlan priority */
3093 if (sp->vlgrp && vlan_tx_tag_present(skb)) {
3094 vlan_tag = vlan_tx_tag_get(skb);
/* 802.1p priority lives in the top 3 bits of the VLAN tag. */
3095 vlan_priority = vlan_tag >> 13;
3096 queue = config->fifo_mapping[vlan_priority];
3099 put_off = (u16) mac_control->fifos[queue].tx_curr_put_info.offset;
3100 get_off = (u16) mac_control->fifos[queue].tx_curr_get_info.offset;
3101 txdp = (TxD_t *) mac_control->fifos[queue].list_info[put_off].
3104 queue_len = mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1;
3105 /* Avoid "put" pointer going beyond "get" pointer */
3106 if (txdp->Host_Control || (((put_off + 1) % queue_len) == get_off)) {
3107 DBG_PRINT(ERR_DBG, "Error in xmit, No free TXDs.\n");
3108 netif_stop_queue(dev);
3110 spin_unlock_irqrestore(&sp->tx_lock, flags);
3114 /* A buffer with no data will be dropped */
3116 DBG_PRINT(TX_DBG, "%s:Buffer has no data..\n", dev->name);
3118 spin_unlock_irqrestore(&sp->tx_lock, flags);
3123 mss = skb_shinfo(skb)->tso_size;
3125 txdp->Control_1 |= TXD_TCP_LSO_EN;
3126 txdp->Control_1 |= TXD_TCP_LSO_MSS(mss);
3130 frg_cnt = skb_shinfo(skb)->nr_frags;
3131 frg_len = skb->len - skb->data_len;
/* Map the linear portion; Host_Control keeps the skb for tx_intr_handler. */
3133 txdp->Buffer_Pointer = pci_map_single
3134 (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE);
3135 txdp->Host_Control = (unsigned long) skb;
3136 if (skb->ip_summed == CHECKSUM_HW) {
3138 (TXD_TX_CKO_IPV4_EN | TXD_TX_CKO_TCP_EN |
3142 txdp->Control_2 |= config->tx_intr_type;
3144 if (sp->vlgrp && vlan_tx_tag_present(skb)) {
3145 txdp->Control_2 |= TXD_VLAN_ENABLE;
3146 txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag);
3149 txdp->Control_1 |= (TXD_BUFFER0_SIZE(frg_len) |
3150 TXD_GATHER_CODE_FIRST);
3151 txdp->Control_1 |= TXD_LIST_OWN_XENA;
3153 /* For fragmented SKB. */
3154 for (i = 0; i < frg_cnt; i++) {
3155 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3156 /* A '0' length fragment will be ignored */
3160 txdp->Buffer_Pointer = (u64) pci_map_page
3161 (sp->pdev, frag->page, frag->page_offset,
3162 frag->size, PCI_DMA_TODEVICE);
3163 txdp->Control_1 |= TXD_BUFFER0_SIZE(frag->size);
3165 txdp->Control_1 |= TXD_GATHER_CODE_LAST;
/* Hand the descriptor list to the FIFO doorbell registers. */
3167 tx_fifo = mac_control->tx_FIFO_start[queue];
3168 val64 = mac_control->fifos[queue].list_info[put_off].list_phy_addr;
3169 writeq(val64, &tx_fifo->TxDL_Pointer);
3173 val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
3178 val64 |= TX_FIFO_SPECIAL_FUNC;
3180 writeq(val64, &tx_fifo->List_Control);
3183 put_off %= mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1;
3184 mac_control->fifos[queue].tx_curr_put_info.offset = put_off;
3186 /* Avoid "put" pointer going beyond "get" pointer */
3187 if (((put_off + 1) % queue_len) == get_off) {
3189 "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
3191 netif_stop_queue(dev);
3194 dev->trans_start = jiffies;
3195 spin_unlock_irqrestore(&sp->tx_lock, flags);
/* Timer callback: run the alarm handler, then re-arm for ~500ms later.
 * NOTE(review): the return-type line of this definition is elided from
 * the listing.
 */
3201 s2io_alarm_handle(unsigned long data)
3203 nic_t *sp = (nic_t *)data;
3205 alarm_intr_handler(sp);
3206 mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
/* NOTE(review): elided listing; the 'val64' declaration, else keywords and
 * closing braces are missing from view.
 */
3209 static void s2io_txpic_intr_handle(nic_t *sp)
3211 XENA_dev_config_t *bar0 = (XENA_dev_config_t *) sp->bar0;
3214 val64 = readq(&bar0->pic_int_status);
3215 if (val64 & PIC_INT_GPIO) {
3216 val64 = readq(&bar0->gpio_int_reg);
/* Both link-up and link-down latched at once: just ack both bits. */
3217 if ((val64 & GPIO_INT_REG_LINK_DOWN) &&
3218 (val64 & GPIO_INT_REG_LINK_UP)) {
3219 val64 |= GPIO_INT_REG_LINK_DOWN;
3220 val64 |= GPIO_INT_REG_LINK_UP;
3221 writeq(val64, &bar0->gpio_int_reg);
/* A real link-state transition: mask both GPIO link irqs and run
 * the link-state task. */
3225 if (((sp->last_link_state == LINK_UP) &&
3226 (val64 & GPIO_INT_REG_LINK_DOWN)) ||
3227 ((sp->last_link_state == LINK_DOWN) &&
3228 (val64 & GPIO_INT_REG_LINK_UP))) {
3229 val64 = readq(&bar0->gpio_int_mask);
3230 val64 |= GPIO_INT_MASK_LINK_DOWN;
3231 val64 |= GPIO_INT_MASK_LINK_UP;
3232 writeq(val64, &bar0->gpio_int_mask);
3233 s2io_set_link((unsigned long)sp);
3236 if (sp->last_link_state == LINK_UP) {
3237 /*enable down interrupt */
3238 val64 = readq(&bar0->gpio_int_mask);
3239 /* unmasks link down intr */
3240 val64 &= ~GPIO_INT_MASK_LINK_DOWN;
3241 /* masks link up intr */
3242 val64 |= GPIO_INT_MASK_LINK_UP;
3243 writeq(val64, &bar0->gpio_int_mask);
3245 /*enable UP Interrupt */
3246 val64 = readq(&bar0->gpio_int_mask);
3247 /* unmasks link up interrupt */
3248 val64 &= ~GPIO_INT_MASK_LINK_UP;
3249 /* masks link down interrupt */
3250 val64 |= GPIO_INT_MASK_LINK_DOWN;
3251 writeq(val64, &bar0->gpio_int_mask);
3257 * s2io_isr - ISR handler of the device .
3258 * @irq: the irq of the device.
3259 * @dev_id: a void pointer to the dev structure of the NIC.
3260 * @pt_regs: pointer to the registers pushed on the stack.
3261 * Description: This function is the ISR handler of the device. It
3262 * identifies the reason for the interrupt and calls the relevant
3263 * service routines. As a contingency measure, this ISR allocates the
3264 * recv buffers, if their numbers are below the panic value which is
3265 * presently set to 25% of the original number of rcv buffers allocated.
3267 * IRQ_HANDLED: will be returned if IRQ was handled by this routine
3268 * IRQ_NONE: will be returned if interrupt is not from our device
/* NOTE(review): elided listing; declarations of 'i'/'ret', the IRQ_NONE and
 * IRQ_HANDLED returns, and several #endif/braces are missing from view.
 */
3270 static irqreturn_t s2io_isr(int irq, void *dev_id, struct pt_regs *regs)
3272 struct net_device *dev = (struct net_device *) dev_id;
3273 nic_t *sp = dev->priv;
3274 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3276 u64 reason = 0, val64;
3277 mac_info_t *mac_control;
3278 struct config_param *config;
3280 atomic_inc(&sp->isr_cnt);
3281 mac_control = &sp->mac_control;
3282 config = &sp->config;
3285 * Identify the cause for interrupt and call the appropriate
3286 * interrupt handler. Causes for the interrupt could be;
3290 * 4. Error in any functional blocks of the NIC.
3292 reason = readq(&bar0->general_int_status);
3295 /* The interrupt was not raised by Xena. */
3296 atomic_dec(&sp->isr_cnt);
/* NAPI path: disable Rx interrupts and defer processing to s2io_poll(). */
3300 #ifdef CONFIG_S2IO_NAPI
3301 if (reason & GEN_INTR_RXTRAFFIC) {
3302 if (netif_rx_schedule_prep(dev)) {
3303 en_dis_able_nic_intrs(sp, RX_TRAFFIC_INTR,
3305 __netif_rx_schedule(dev);
3309 /* If Intr is because of Rx Traffic */
3310 if (reason & GEN_INTR_RXTRAFFIC) {
3312 * rx_traffic_int reg is an R1 register, writing all 1's
3313 * will ensure that the actual interrupt causing bit get's
3314 * cleared and hence a read can be avoided.
3316 val64 = 0xFFFFFFFFFFFFFFFFULL;
3317 writeq(val64, &bar0->rx_traffic_int);
3318 for (i = 0; i < config->rx_ring_num; i++) {
3319 rx_intr_handler(&mac_control->rings[i]);
3324 /* If Intr is because of Tx Traffic */
3325 if (reason & GEN_INTR_TXTRAFFIC) {
3327 * tx_traffic_int reg is an R1 register, writing all 1's
3328 * will ensure that the actual interrupt causing bit get's
3329 * cleared and hence a read can be avoided.
3331 val64 = 0xFFFFFFFFFFFFFFFFULL;
3332 writeq(val64, &bar0->tx_traffic_int);
3334 for (i = 0; i < config->tx_fifo_num; i++)
3335 tx_intr_handler(&mac_control->fifos[i]);
3338 if (reason & GEN_INTR_TXPIC)
3339 s2io_txpic_intr_handle(sp);
3341 * If the Rx buffer count is below the panic threshold then
3342 * reallocate the buffers from the interrupt handler itself,
3343 * else schedule a tasklet to reallocate the buffers.
3345 #ifndef CONFIG_S2IO_NAPI
3346 for (i = 0; i < config->rx_ring_num; i++) {
3348 int rxb_size = atomic_read(&sp->rx_bufs_left[i]);
3349 int level = rx_buffer_level(sp, rxb_size, i);
3351 if ((level == PANIC) && (!TASKLET_IN_USE)) {
3352 DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", dev->name);
3353 DBG_PRINT(INTR_DBG, "PANIC levels\n");
3354 if ((ret = fill_rx_buffers(sp, i)) == -ENOMEM) {
3355 DBG_PRINT(ERR_DBG, "%s:Out of memory",
3357 DBG_PRINT(ERR_DBG, " in ISR!!\n");
3358 clear_bit(0, (&sp->tasklet_status));
3359 atomic_dec(&sp->isr_cnt);
3362 clear_bit(0, (&sp->tasklet_status));
3363 } else if (level == LOW) {
3364 tasklet_schedule(&sp->task);
3369 atomic_dec(&sp->isr_cnt);
/* NOTE(review): elided listing; the 'val64'/'cnt' declarations, the polling
 * loop header and the udelay/counter lines are missing from view.
 */
3376 static void s2io_updt_stats(nic_t *sp)
3378 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3382 if (atomic_read(&sp->card_state) == CARD_UP) {
3383 /* Apprx 30us on a 133 MHz bus */
/* Trigger a one-shot hardware statistics DMA into the stats block. */
3384 val64 = SET_UPDT_CLICKS(10) |
3385 STAT_CFG_ONE_SHOT_EN | STAT_CFG_STAT_EN;
3386 writeq(val64, &bar0->stat_cfg);
3389 val64 = readq(&bar0->stat_cfg);
3390 if (!(val64 & BIT(0)))
3394 break; /* Updt failed */
3400 * s2io_get_stats - Updates the device statistics structure.
3401 * @dev : pointer to the device structure.
3403 * This function updates the device statistics structure in the s2io_nic
3404 * structure and returns a pointer to the same.
3406 * pointer to the updated net_device_stats structure.
/* NOTE(review): elided listing; the opening brace line is missing from view. */
3409 struct net_device_stats *s2io_get_stats(struct net_device *dev)
3411 nic_t *sp = dev->priv;
3412 mac_info_t *mac_control;
3413 struct config_param *config;
3416 mac_control = &sp->mac_control;
3417 config = &sp->config;
3419 /* Configure Stats for immediate updt */
3420 s2io_updt_stats(sp);
/* Copy the hardware counters (little-endian) into the OS stats struct. */
3422 sp->stats.tx_packets =
3423 le32_to_cpu(mac_control->stats_info->tmac_frms);
3424 sp->stats.tx_errors =
3425 le32_to_cpu(mac_control->stats_info->tmac_any_err_frms);
3426 sp->stats.rx_errors =
3427 le32_to_cpu(mac_control->stats_info->rmac_drop_frms);
3428 sp->stats.multicast =
3429 le32_to_cpu(mac_control->stats_info->rmac_vld_mcst_frms);
3430 sp->stats.rx_length_errors =
3431 le32_to_cpu(mac_control->stats_info->rmac_long_frms);
3433 return (&sp->stats);
3437 * s2io_set_multicast - entry point for multicast address enable/disable.
3438 * @dev : pointer to the device structure
3440 * This function is a driver entry point which gets called by the kernel
3441 * whenever multicast addresses must be enabled/disabled. This also gets
3442 * called to set/reset promiscuous mode. Depending on the device flag, we
3443 * determine, if multicast address must be enabled or if promiscuous mode
3444 * is to be disabled etc.
/* NOTE(review): elided listing; declarations of 'add', 'i', 'j', 'prev_cnt',
 * the m_cast_flg assignments, shift operators in the MAC-byte loops, and
 * several returns/braces are missing from view.
 */
3449 static void s2io_set_multicast(struct net_device *dev)
3452 struct dev_mc_list *mclist;
3453 nic_t *sp = dev->priv;
3454 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3455 u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
3457 u64 dis_addr = 0xffffffffffffULL, mac_addr = 0;
3460 if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) {
3461 /* Enable all Multicast addresses */
3462 writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac),
3463 &bar0->rmac_addr_data0_mem);
3464 writeq(RMAC_ADDR_DATA1_MEM_MASK(mask),
3465 &bar0->rmac_addr_data1_mem);
3466 val64 = RMAC_ADDR_CMD_MEM_WE |
3467 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3468 RMAC_ADDR_CMD_MEM_OFFSET(MAC_MC_ALL_MC_ADDR_OFFSET);
3469 writeq(val64, &bar0->rmac_addr_cmd_mem);
3470 /* Wait till command completes */
3471 wait_for_cmd_complete(sp);
3474 sp->all_multi_pos = MAC_MC_ALL_MC_ADDR_OFFSET;
3475 } else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) {
3476 /* Disable all Multicast addresses */
3477 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
3478 &bar0->rmac_addr_data0_mem);
3479 writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0),
3480 &bar0->rmac_addr_data1_mem);
3481 val64 = RMAC_ADDR_CMD_MEM_WE |
3482 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3483 RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
3484 writeq(val64, &bar0->rmac_addr_cmd_mem);
3485 /* Wait till command completes */
3486 wait_for_cmd_complete(sp);
3489 sp->all_multi_pos = 0;
3492 if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) {
3493 /* Put the NIC into promiscuous mode */
3494 add = &bar0->mac_cfg;
3495 val64 = readq(&bar0->mac_cfg);
3496 val64 |= MAC_CFG_RMAC_PROM_ENABLE;
/* mac_cfg is key-protected: write the key before each 32-bit half. */
3498 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
3499 writel((u32) val64, add);
3500 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
3501 writel((u32) (val64 >> 32), (add + 4));
3503 val64 = readq(&bar0->mac_cfg);
3504 sp->promisc_flg = 1;
3505 DBG_PRINT(ERR_DBG, "%s: entered promiscuous mode\n",
3507 } else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
3508 /* Remove the NIC from promiscuous mode */
3509 add = &bar0->mac_cfg;
3510 val64 = readq(&bar0->mac_cfg);
3511 val64 &= ~MAC_CFG_RMAC_PROM_ENABLE;
3513 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
3514 writel((u32) val64, add);
3515 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
3516 writel((u32) (val64 >> 32), (add + 4));
3518 val64 = readq(&bar0->mac_cfg);
3519 sp->promisc_flg = 0;
3520 DBG_PRINT(ERR_DBG, "%s: left promiscuous mode\n",
3524 /* Update individual M_CAST address list */
3525 if ((!sp->m_cast_flg) && dev->mc_count) {
3527 (MAX_ADDRS_SUPPORTED - MAC_MC_ADDR_START_OFFSET - 1)) {
3528 DBG_PRINT(ERR_DBG, "%s: No more Rx filters ",
3530 DBG_PRINT(ERR_DBG, "can be added, please enable ");
3531 DBG_PRINT(ERR_DBG, "ALL_MULTI instead\n");
3535 prev_cnt = sp->mc_addr_count;
3536 sp->mc_addr_count = dev->mc_count;
3538 /* Clear out the previous list of Mc in the H/W. */
3539 for (i = 0; i < prev_cnt; i++) {
3540 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
3541 &bar0->rmac_addr_data0_mem);
3542 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
3543 &bar0->rmac_addr_data1_mem);
3544 val64 = RMAC_ADDR_CMD_MEM_WE |
3545 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3546 RMAC_ADDR_CMD_MEM_OFFSET
3547 (MAC_MC_ADDR_START_OFFSET + i);
3548 writeq(val64, &bar0->rmac_addr_cmd_mem);
3550 /* Wait for command completes */
3551 if (wait_for_cmd_complete(sp)) {
3552 DBG_PRINT(ERR_DBG, "%s: Adding ",
3554 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
3559 /* Create the new Rx filter list and update the same in H/W. */
3560 for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
3561 i++, mclist = mclist->next) {
3562 memcpy(sp->usr_addrs[i].addr, mclist->dmi_addr,
/* Pack the 6 MAC bytes into the low 48 bits of mac_addr. */
3564 for (j = 0; j < ETH_ALEN; j++) {
3565 mac_addr |= mclist->dmi_addr[j];
3569 writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
3570 &bar0->rmac_addr_data0_mem);
3571 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
3572 &bar0->rmac_addr_data1_mem);
3573 val64 = RMAC_ADDR_CMD_MEM_WE |
3574 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3575 RMAC_ADDR_CMD_MEM_OFFSET
3576 (i + MAC_MC_ADDR_START_OFFSET);
3577 writeq(val64, &bar0->rmac_addr_cmd_mem);
3579 /* Wait for command completes */
3580 if (wait_for_cmd_complete(sp)) {
3581 DBG_PRINT(ERR_DBG, "%s: Adding ",
3583 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
3591  * s2io_set_mac_addr - Programs the Xframe mac address
3592  * @dev : pointer to the device structure.
3593  * @addr: a uchar pointer to the new mac address which is to be set.
3594  * Description : This procedure will program the Xframe to receive
3595  * frames with new Mac Address
3596  * Return value: SUCCESS on success and an appropriate (-)ve integer
3597  * as defined in errno.h file on failure.
3600 int s2io_set_mac_addr(struct net_device *dev, u8 * addr)
3602 nic_t *sp = dev->priv;
3603 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3604 register u64 val64, mac_addr = 0;
3608  * Set the new MAC address as the new unicast filter and reflect this
3609  * change on the device address registered with the OS. It will be
/* Accumulate the 6 address bytes into the low bits of mac_addr
 * (the shift between iterations is on a line not shown here). */
3612 for (i = 0; i < ETH_ALEN; i++) {
3614 mac_addr |= addr[i];
/* Program the packed address as unicast RMAC filter entry 0. */
3617 writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
3618 &bar0->rmac_addr_data0_mem);
3621 RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3622 RMAC_ADDR_CMD_MEM_OFFSET(0);
3623 writeq(val64, &bar0->rmac_addr_cmd_mem);
3624 /* Wait till command completes */
3625 if (wait_for_cmd_complete(sp)) {
3626 DBG_PRINT(ERR_DBG, "%s: set_mac_addr failed\n", dev->name);
3634  * s2io_ethtool_sset - Sets different link parameters.
3635  * @sp : private member of the device structure, which is a pointer to the * s2io_nic structure.
3636  * @info: pointer to the structure with parameters given by ethtool to set
3639  * The function sets different link parameters provided by the user onto
3645 static int s2io_ethtool_sset(struct net_device *dev,
3646 struct ethtool_cmd *info)
3648 nic_t *sp = dev->priv;
/* The Xframe is a fixed 10Gbps full-duplex device: reject any request
 * for autonegotiation or for a different speed/duplex. */
3649 if ((info->autoneg == AUTONEG_ENABLE) ||
3650 (info->speed != SPEED_10000) || (info->duplex != DUPLEX_FULL))
/* Apply the (unchanged) settings by bouncing the interface. */
3653 s2io_close(sp->dev);
3661 * s2io_ethtol_gset - Return link specific information.
3662 * @sp : private member of the device structure, pointer to the
3663 * s2io_nic structure.
3664 * @info : pointer to the structure with parameters given by ethtool
3665 * to return link information.
3667 * Returns link specific information like speed, duplex etc.. to ethtool.
3669 * return 0 on success.
3672 static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
3674 nic_t *sp = dev->priv;
3675 info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
3676 info->advertising = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
3677 info->port = PORT_FIBRE;
3678 /* info->transceiver?? TODO */
3680 if (netif_carrier_ok(sp->dev)) {
3681 info->speed = 10000;
3682 info->duplex = DUPLEX_FULL;
3688 info->autoneg = AUTONEG_DISABLE;
3693 * s2io_ethtool_gdrvinfo - Returns driver specific information.
3694 * @sp : private member of the device structure, which is a pointer to the
3695 * s2io_nic structure.
3696 * @info : pointer to the structure with parameters given by ethtool to
3697 * return driver information.
3699 * Returns driver specefic information like name, version etc.. to ethtool.
3704 static void s2io_ethtool_gdrvinfo(struct net_device *dev,
3705 struct ethtool_drvinfo *info)
3707 nic_t *sp = dev->priv;
3709 strncpy(info->driver, s2io_driver_name, sizeof(s2io_driver_name));
3710 strncpy(info->version, s2io_driver_version,
3711 sizeof(s2io_driver_version));
3712 strncpy(info->fw_version, "", 32);
3713 strncpy(info->bus_info, pci_name(sp->pdev), 32);
3714 info->regdump_len = XENA_REG_SPACE;
3715 info->eedump_len = XENA_EEPROM_SPACE;
3716 info->testinfo_len = S2IO_TEST_LEN;
3717 info->n_stats = S2IO_STAT_LEN;
3721 * s2io_ethtool_gregs - dumps the entire space of Xfame into the buffer.
3722 * @sp: private member of the device structure, which is a pointer to the
3723 * s2io_nic structure.
3724 * @regs : pointer to the structure with parameters given by ethtool for
3725 * dumping the registers.
3726 * @reg_space: The input argumnet into which all the registers are dumped.
3728 * Dumps the entire register space of xFrame NIC into the user given
3734 static void s2io_ethtool_gregs(struct net_device *dev,
3735 struct ethtool_regs *regs, void *space)
3739 u8 *reg_space = (u8 *) space;
3740 nic_t *sp = dev->priv;
3742 regs->len = XENA_REG_SPACE;
3743 regs->version = sp->pdev->subsystem_device;
3745 for (i = 0; i < regs->len; i += 8) {
3746 reg = readq(sp->bar0 + i);
3747 memcpy((reg_space + i), ®, 8);
3752  * s2io_phy_id - timer function that alternates adapter LED.
3753  * @data : address of the private member of the device structure, which
3754  * is a pointer to the s2io_nic structure, provided as an u32.
3755  * Description: This is actually the timer function that alternates the
3756  * adapter LED bit of the adapter control bit to set/reset every time on
3757  * invocation. The timer is set for 1/2 a second, hence tha NIC blinks
3758  * once every second.
3760 static void s2io_phy_id(unsigned long data)
3762 nic_t *sp = (nic_t *) data;
3763 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3767 subid = sp->pdev->subsystem_device;
/* Xframe-II (and Xframe-I boards with subsystem id >= 0x07) drive the
 * LED through GPIO 0; older boards toggle the adapter_control LED bit. */
3768 if ((sp->device_type == XFRAME_II_DEVICE) ||
3769 ((subid & 0xFF) >= 0x07)) {
3770 val64 = readq(&bar0->gpio_control);
3771 val64 ^= GPIO_CTRL_GPIO_0;
3772 writeq(val64, &bar0->gpio_control);
3774 val64 = readq(&bar0->adapter_control);
3775 val64 ^= ADAPTER_LED_ON;
3776 writeq(val64, &bar0->adapter_control);
/* Re-arm for the next half-second toggle. */
3779 mod_timer(&sp->id_timer, jiffies + HZ / 2);
3783  * s2io_ethtool_idnic - To physically identify the nic on the system.
3784  * @sp : private member of the device structure, which is a pointer to the
3785  * s2io_nic structure.
3786  * @id : pointer to the structure with identification parameters given by
3788  * Description: Used to physically identify the NIC on the system.
3789  * The Link LED will blink for a time specified by the user for
3791  * NOTE: The Link has to be Up to be able to blink the LED. Hence
3792  * identification is possible only if it's link is up.
3794  * int , returns 0 on success
3797 static int s2io_ethtool_idnic(struct net_device *dev, u32 data)
3799 u64 val64 = 0, last_gpio_ctrl_val;
3800 nic_t *sp = dev->priv;
3801 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3804 subid = sp->pdev->subsystem_device;
/* Remember the GPIO state so it can be restored after blinking. */
3805 last_gpio_ctrl_val = readq(&bar0->gpio_control);
/* Old Xframe-I boards (subsystem id < 0x07) can only blink while the
 * adapter is enabled, i.e. while the link is up. */
3806 if ((sp->device_type == XFRAME_I_DEVICE) &&
3807 ((subid & 0xFF) < 0x07)) {
3808 val64 = readq(&bar0->adapter_control);
3809 if (!(val64 & ADAPTER_CNTL_EN)) {
3811 "Adapter Link down, cannot blink LED\n");
/* Lazily set up the blink timer on first use. */
3815 if (sp->id_timer.function == NULL) {
3816 init_timer(&sp->id_timer);
3817 sp->id_timer.function = s2io_phy_id;
3818 sp->id_timer.data = (unsigned long) sp;
3820 mod_timer(&sp->id_timer, jiffies);
/* Blink for the requested seconds, or the default flicker time. */
3822 msleep_interruptible(data * HZ);
3824 msleep_interruptible(MAX_FLICKER_TIME);
3825 del_timer_sync(&sp->id_timer);
/* Restore the saved GPIO state on cards with faulty link LEDs. */
3827 if (CARDS_WITH_FAULTY_LINK_INDICATORS(sp->device_type, subid)) {
3828 writeq(last_gpio_ctrl_val, &bar0->gpio_control);
3829 last_gpio_ctrl_val = readq(&bar0->gpio_control);
3836  * s2io_ethtool_getpause_data -Pause frame frame generation and reception.
3837  * @sp : private member of the device structure, which is a pointer to the
3838  * s2io_nic structure.
3839  * @ep : pointer to the structure with pause parameters given by ethtool.
3841  * Returns the Pause frame generation and reception capability of the NIC.
3845 static void s2io_ethtool_getpause_data(struct net_device *dev,
3846 struct ethtool_pauseparam *ep)
3849 nic_t *sp = dev->priv;
3850 XENA_dev_config_t __iomem *bar0 = sp->bar0;
/* Derive current pause settings straight from the RMAC config register. */
3852 val64 = readq(&bar0->rmac_pause_cfg);
3853 if (val64 & RMAC_PAUSE_GEN_ENABLE)
3854 ep->tx_pause = TRUE;
3855 if (val64 & RMAC_PAUSE_RX_ENABLE)
3856 ep->rx_pause = TRUE;
/* The link never autonegotiates, so neither does flow control. */
3857 ep->autoneg = FALSE;
3861  * s2io_ethtool_setpause_data - set/reset pause frame generation.
3862  * @sp : private member of the device structure, which is a pointer to the
3863  * s2io_nic structure.
3864  * @ep : pointer to the structure with pause parameters given by ethtool.
3866  * It can be used to set or reset Pause frame generation or reception
3867  * support of the NIC.
3869  * int, returns 0 on Success
3872 static int s2io_ethtool_setpause_data(struct net_device *dev,
3873 struct ethtool_pauseparam *ep)
3876 nic_t *sp = dev->priv;
3877 XENA_dev_config_t __iomem *bar0 = sp->bar0;
/* Read-modify-write: touch only the pause gen/rx enable bits and
 * preserve the rest of the RMAC pause configuration. */
3879 val64 = readq(&bar0->rmac_pause_cfg);
3881 val64 |= RMAC_PAUSE_GEN_ENABLE;
3883 val64 &= ~RMAC_PAUSE_GEN_ENABLE;
3885 val64 |= RMAC_PAUSE_RX_ENABLE;
3887 val64 &= ~RMAC_PAUSE_RX_ENABLE;
3888 writeq(val64, &bar0->rmac_pause_cfg);
3893  * read_eeprom - reads 4 bytes of data from user given offset.
3894  * @sp : private member of the device structure, which is a pointer to the
3895  * s2io_nic structure.
3896  * @off : offset at which the data must be written
3897  * @data : Its an output parameter where the data read at the given
3900  * Will read 4 bytes of data from the user given offset and return the
3902  * NOTE: Will allow to read only part of the EEPROM visible through the
3905  * -1 on failure and 0 on success.
/* I2C device id of the on-board serial EEPROM. */
3908 #define S2IO_DEV_ID 5
3909 static int read_eeprom(nic_t * sp, int off, u32 * data)
3914 XENA_dev_config_t __iomem *bar0 = sp->bar0;
/* Kick off a 4-byte I2C read transaction at EEPROM offset 'off'. */
3916 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
3917 I2C_CONTROL_BYTE_CNT(0x3) | I2C_CONTROL_READ |
3918 I2C_CONTROL_CNTL_START;
3919 SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
/* Poll (bounded to 5 attempts) for the controller to signal completion. */
3921 while (exit_cnt < 5) {
3922 val64 = readq(&bar0->i2c_control);
3923 if (I2C_CONTROL_CNTL_END(val64)) {
3924 *data = I2C_CONTROL_GET_DATA(val64);
3936  * write_eeprom - actually writes the relevant part of the data value.
3937  * @sp : private member of the device structure, which is a pointer to the
3938  * s2io_nic structure.
3939  * @off : offset at which the data must be written
3940  * @data : The data that is to be written
3941  * @cnt : Number of bytes of the data that are actually to be written into
3942  * the Eeprom. (max of 3)
3944  * Actually writes the relevant part of the data value into the Eeprom
3945  * through the I2C bus.
3947  * 0 on success, -1 on failure.
3950 static int write_eeprom(nic_t * sp, int off, u32 data, int cnt)
3952 int exit_cnt = 0, ret = -1;
3954 XENA_dev_config_t __iomem *bar0 = sp->bar0;
/* Start an I2C write of 'cnt' bytes of 'data' at EEPROM offset 'off'. */
3956 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
3957 I2C_CONTROL_BYTE_CNT(cnt) | I2C_CONTROL_SET_DATA(data) |
3958 I2C_CONTROL_CNTL_START;
3959 SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
/* Poll (bounded to 5 attempts) for completion; a NACK from the device
 * means the write was rejected (e.g. a write-protected offset). */
3961 while (exit_cnt < 5) {
3962 val64 = readq(&bar0->i2c_control);
3963 if (I2C_CONTROL_CNTL_END(val64)) {
3964 if (!(val64 & I2C_CONTROL_NACK))
3976  * s2io_ethtool_geeprom - reads the value stored in the Eeprom.
3977  * @sp : private member of the device structure, which is a pointer to the * s2io_nic structure.
3978  * @eeprom : pointer to the user level structure provided by ethtool,
3979  * containing all relevant information.
3980  * @data_buf : user defined value to be written into Eeprom.
3981  * Description: Reads the values stored in the Eeprom at given offset
3982  * for a given length. Stores these values int the input argument data
3983  * buffer 'data_buf' and returns these to the caller (ethtool.)
3988 static int s2io_ethtool_geeprom(struct net_device *dev,
3989 struct ethtool_eeprom *eeprom, u8 * data_buf)
3992 nic_t *sp = dev->priv;
/* Magic identifies this adapter: vendor id low, device id high. */
3994 eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);
/* Clamp the request so it stays within the visible EEPROM window. */
3996 if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE))
3997 eeprom->len = XENA_EEPROM_SPACE - eeprom->offset;
/* 4 bytes per I2C transaction. */
3999 for (i = 0; i < eeprom->len; i += 4) {
4000 if (read_eeprom(sp, (eeprom->offset + i), &data)) {
4001 DBG_PRINT(ERR_DBG, "Read of EEPROM failed\n");
/* NOTE(review): 'valid' presumably holds a converted copy of 'data'
 * (the conversion is on lines not shown here) — TODO confirm. */
4005 memcpy((data_buf + i), &valid, 4);
4011  * s2io_ethtool_seeprom - tries to write the user provided value in Eeprom
4012  * @sp : private member of the device structure, which is a pointer to the
4013  * s2io_nic structure.
4014  * @eeprom : pointer to the user level structure provided by ethtool,
4015  * containing all relevant information.
4016  * @data_buf ; user defined value to be written into Eeprom.
4018  * Tries to write the user provided value in the Eeprom, at the offset
4019  * given by the user.
4021  * 0 on success, -EFAULT on failure.
4024 static int s2io_ethtool_seeprom(struct net_device *dev,
4025 struct ethtool_eeprom *eeprom,
4028 int len = eeprom->len, cnt = 0;
4029 u32 valid = 0, data;
4030 nic_t *sp = dev->priv;
/* Refuse the write unless the caller echoes back this device's magic. */
4032 if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
4034 "ETHTOOL_WRITE_EEPROM Err: Magic value ");
4035 DBG_PRINT(ERR_DBG, "is wrong, Its not 0x%x\n",
/* One byte per transaction, positioned in the top byte of 'valid'. */
4041 data = (u32) data_buf[cnt] & 0x000000FF;
4043 valid = (u32) (data << 24);
4047 if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) {
4049 "ETHTOOL_WRITE_EEPROM Err: Cannot ");
4051 "write into the specified offset\n");
4062  * s2io_register_test - reads and writes into all clock domains.
4063  * @sp : private member of the device structure, which is a pointer to the
4064  * s2io_nic structure.
4065  * @data : variable that returns the result of each of the test conducted b
4068  * Read and write into all clock domains. The NIC has 3 clock domains,
4069  * see that registers in all the three regions are accessible.
4074 static int s2io_register_test(nic_t * sp, uint64_t * data)
4076 XENA_dev_config_t __iomem *bar0 = sp->bar0;
/* Read tests: each register below is compared against its fixed,
 * architecturally defined power-on value. */
4080 val64 = readq(&bar0->pif_rd_swapper_fb);
4081 if (val64 != 0x123456789abcdefULL) {
4083 DBG_PRINT(INFO_DBG, "Read Test level 1 fails\n");
4086 val64 = readq(&bar0->rmac_pause_cfg);
4087 if (val64 != 0xc000ffff00000000ULL) {
4089 DBG_PRINT(INFO_DBG, "Read Test level 2 fails\n");
4092 val64 = readq(&bar0->rx_queue_cfg);
4093 if (val64 != 0x0808080808080808ULL) {
4095 DBG_PRINT(INFO_DBG, "Read Test level 3 fails\n");
4098 val64 = readq(&bar0->xgxs_efifo_cfg);
4099 if (val64 != 0x000000001923141EULL) {
4101 DBG_PRINT(INFO_DBG, "Read Test level 4 fails\n");
/* Write tests: a pattern written to xmsi_data must read back intact. */
4104 val64 = 0x5A5A5A5A5A5A5A5AULL;
4105 writeq(val64, &bar0->xmsi_data);
4106 val64 = readq(&bar0->xmsi_data);
4107 if (val64 != 0x5A5A5A5A5A5A5A5AULL) {
4109 DBG_PRINT(ERR_DBG, "Write Test level 1 fails\n");
/* Repeat with the inverted pattern to toggle every bit. */
4112 val64 = 0xA5A5A5A5A5A5A5A5ULL;
4113 writeq(val64, &bar0->xmsi_data);
4114 val64 = readq(&bar0->xmsi_data);
4115 if (val64 != 0xA5A5A5A5A5A5A5A5ULL) {
4117 DBG_PRINT(ERR_DBG, "Write Test level 2 fails\n");
4125  * s2io_eeprom_test - to verify that EEprom in the xena can be programmed.
4126  * @sp : private member of the device structure, which is a pointer to the
4127  * s2io_nic structure.
4128  * @data:variable that returns the result of each of the test conducted by
4131  * Verify that EEPROM in the xena can be programmed using I2C_CONTROL
4137 static int s2io_eeprom_test(nic_t * sp, uint64_t * data)
/* Writes to the protected offsets (0, 0x7C, 0x80, 0xFC, 0x100, 0x4EC)
 * are expected to FAIL, so a write that succeeds there is a test
 * failure; offsets 0x4F0 and 0x7FC are writable and must round-trip. */
4142 /* Test Write Error at offset 0 */
4143 if (!write_eeprom(sp, 0, 0, 3))
4146 /* Test Write at offset 4f0 */
4147 if (write_eeprom(sp, 0x4F0, 0x01234567, 3))
4149 if (read_eeprom(sp, 0x4F0, &ret_data))
4152 if (ret_data != 0x01234567)
4155 /* Reset the EEPROM data go FFFF */
4156 write_eeprom(sp, 0x4F0, 0xFFFFFFFF, 3);
4158 /* Test Write Request Error at offset 0x7c */
4159 if (!write_eeprom(sp, 0x07C, 0, 3))
4162 /* Test Write Request at offset 0x7fc */
4163 if (write_eeprom(sp, 0x7FC, 0x01234567, 3))
4165 if (read_eeprom(sp, 0x7FC, &ret_data))
4168 if (ret_data != 0x01234567)
4171 /* Reset the EEPROM data go FFFF */
4172 write_eeprom(sp, 0x7FC, 0xFFFFFFFF, 3);
4174 /* Test Write Error at offset 0x80 */
4175 if (!write_eeprom(sp, 0x080, 0, 3))
4178 /* Test Write Error at offset 0xfc */
4179 if (!write_eeprom(sp, 0x0FC, 0, 3))
4182 /* Test Write Error at offset 0x100 */
4183 if (!write_eeprom(sp, 0x100, 0, 3))
4186 /* Test Write Error at offset 4ec */
4187 if (!write_eeprom(sp, 0x4EC, 0, 3))
4195 * s2io_bist_test - invokes the MemBist test of the card .
4196 * @sp : private member of the device structure, which is a pointer to the
4197 * s2io_nic structure.
4198 * @data:variable that returns the result of each of the test conducted by
4201 * This invokes the MemBist test of the card. We give around
4202 * 2 secs time for the Test to complete. If it's still not complete
4203 * within this peiod, we consider that the test failed.
4205 * 0 on success and -1 on failure.
4208 static int s2io_bist_test(nic_t * sp, uint64_t * data)
4211 int cnt = 0, ret = -1;
4213 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
4214 bist |= PCI_BIST_START;
4215 pci_write_config_word(sp->pdev, PCI_BIST, bist);
4218 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
4219 if (!(bist & PCI_BIST_START)) {
4220 *data = (bist & PCI_BIST_CODE_MASK);
4232  * s2io-link_test - verifies the link state of the nic
4233  * @sp ; private member of the device structure, which is a pointer to the
4234  * s2io_nic structure.
4235  * @data: variable that returns the result of each of the test conducted by
4238  * The function verifies the link state of the NIC and updates the input
4239  * argument 'data' appropriately.
4244 static int s2io_link_test(nic_t * sp, uint64_t * data)
4246 XENA_dev_config_t __iomem *bar0 = sp->bar0;
4249 val64 = readq(&bar0->adapter_status);
/* An RMAC local fault in the adapter status means the link is down. */
4250 if (val64 & ADAPTER_STATUS_RMAC_LOCAL_FAULT)
4257  * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
4258  * @sp - private member of the device structure, which is a pointer to the
4259  * s2io_nic structure.
4260  * @data - variable that returns the result of each of the test
4261  * conducted by the driver.
4263  * This is one of the offline test that tests the read and write
4264  * access to the RldRam chip on the NIC.
4269 static int s2io_rldram_test(nic_t * sp, uint64_t * data)
4271 XENA_dev_config_t __iomem *bar0 = sp->bar0;
4273 int cnt, iteration = 0, test_pass = 0;
/* ECC would trip on the deliberate test patterns, so disable it first. */
4275 val64 = readq(&bar0->adapter_control);
4276 val64 &= ~ADAPTER_ECC_EN;
4277 writeq(val64, &bar0->adapter_control);
/* Put the memory controller into RLDRAM test mode. */
4279 val64 = readq(&bar0->mc_rldram_test_ctrl);
4280 val64 |= MC_RLDRAM_TEST_MODE;
4281 writeq(val64, &bar0->mc_rldram_test_ctrl);
4283 val64 = readq(&bar0->mc_rldram_mrs);
4284 val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE;
4285 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
4287 val64 |= MC_RLDRAM_MRS_ENABLE;
4288 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
/* Two passes: the second inverts the upper bits of every pattern. */
4290 while (iteration < 2) {
4291 val64 = 0x55555555aaaa0000ULL;
4292 if (iteration == 1) {
4293 val64 ^= 0xFFFFFFFFFFFF0000ULL;
4295 writeq(val64, &bar0->mc_rldram_test_d0);
4297 val64 = 0xaaaa5a5555550000ULL;
4298 if (iteration == 1) {
4299 val64 ^= 0xFFFFFFFFFFFF0000ULL;
4301 writeq(val64, &bar0->mc_rldram_test_d1);
4303 val64 = 0x55aaaaaaaa5a0000ULL;
4304 if (iteration == 1) {
4305 val64 ^= 0xFFFFFFFFFFFF0000ULL;
4307 writeq(val64, &bar0->mc_rldram_test_d2);
4309 val64 = (u64) (0x0000003fffff0000ULL);
4310 writeq(val64, &bar0->mc_rldram_test_add);
/* Kick off the write phase, then poll (bounded) for completion. */
4313 val64 = MC_RLDRAM_TEST_MODE;
4314 writeq(val64, &bar0->mc_rldram_test_ctrl);
4317 MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_WRITE |
4319 writeq(val64, &bar0->mc_rldram_test_ctrl);
4321 for (cnt = 0; cnt < 5; cnt++) {
4322 val64 = readq(&bar0->mc_rldram_test_ctrl);
4323 if (val64 & MC_RLDRAM_TEST_DONE)
/* Kick off the read-back phase and poll again. */
4331 val64 = MC_RLDRAM_TEST_MODE;
4332 writeq(val64, &bar0->mc_rldram_test_ctrl);
4334 val64 |= MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
4335 writeq(val64, &bar0->mc_rldram_test_ctrl);
4337 for (cnt = 0; cnt < 5; cnt++) {
4338 val64 = readq(&bar0->mc_rldram_test_ctrl);
4339 if (val64 & MC_RLDRAM_TEST_DONE)
/* Hardware sets the PASS bit when the read-back matched the patterns. */
4347 val64 = readq(&bar0->mc_rldram_test_ctrl);
4348 if (val64 & MC_RLDRAM_TEST_PASS)
4363  * s2io_ethtool_test - conducts 6 tsets to determine the health of card.
4364  * @sp : private member of the device structure, which is a pointer to the
4365  * s2io_nic structure.
4366  * @ethtest : pointer to a ethtool command specific structure that will be
4367  * returned to the user.
4368  * @data : variable that returns the result of each of the test
4369  * conducted by the driver.
4371  * This function conducts 6 tests ( 4 offline and 2 online) to determine
4372  * the health of the card.
4377 static void s2io_ethtool_test(struct net_device *dev,
4378 struct ethtool_test *ethtest,
4381 nic_t *sp = dev->priv;
4382 int orig_state = netif_running(sp->dev);
/* Offline tests need the NIC quiesced: close it first if it was up. */
4384 if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
4385 /* Offline Tests. */
4387 s2io_close(sp->dev);
/* data[0]=registers, data[3]=RLDRAM, data[1]=EEPROM, data[4]=BIST. */
4389 if (s2io_register_test(sp, &data[0]))
4390 ethtest->flags |= ETH_TEST_FL_FAILED;
4394 if (s2io_rldram_test(sp, &data[3]))
4395 ethtest->flags |= ETH_TEST_FL_FAILED;
4399 if (s2io_eeprom_test(sp, &data[1]))
4400 ethtest->flags |= ETH_TEST_FL_FAILED;
4402 if (s2io_bist_test(sp, &data[4]))
4403 ethtest->flags |= ETH_TEST_FL_FAILED;
4413 "%s: is not up, cannot run test\n",
/* Online test: the link check (data[2]) runs on a live interface. */
4422 if (s2io_link_test(sp, &data[2]))
4423 ethtest->flags |= ETH_TEST_FL_FAILED;
/* s2io_get_ethtool_stats - fills tmp_stats[] with the hardware MAC
 * statistics for 'ethtool -S'.  Each 32-bit counter is merged with its
 * overflow register into a single 64-bit value; 64-bit counters are
 * converted straight from the little-endian statistics block. */
4432 static void s2io_get_ethtool_stats(struct net_device *dev,
4433 struct ethtool_stats *estats,
4437 nic_t *sp = dev->priv;
4438 StatInfo_t *stat_info = sp->mac_control.stats_info;
/* Pull a fresh statistics snapshot from the hardware first. */
4440 s2io_updt_stats(sp);
4442 (u64)le32_to_cpu(stat_info->tmac_frms_oflow) << 32 |
4443 le32_to_cpu(stat_info->tmac_frms);
4445 (u64)le32_to_cpu(stat_info->tmac_data_octets_oflow) << 32 |
4446 le32_to_cpu(stat_info->tmac_data_octets);
4447 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_drop_frms);
4449 (u64)le32_to_cpu(stat_info->tmac_mcst_frms_oflow) << 32 |
4450 le32_to_cpu(stat_info->tmac_mcst_frms);
4452 (u64)le32_to_cpu(stat_info->tmac_bcst_frms_oflow) << 32 |
4453 le32_to_cpu(stat_info->tmac_bcst_frms);
4454 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_pause_ctrl_frms);
4456 (u64)le32_to_cpu(stat_info->tmac_any_err_frms_oflow) << 32 |
4457 le32_to_cpu(stat_info->tmac_any_err_frms);
4458 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_vld_ip_octets);
4460 (u64)le32_to_cpu(stat_info->tmac_vld_ip_oflow) << 32 |
4461 le32_to_cpu(stat_info->tmac_vld_ip);
4463 (u64)le32_to_cpu(stat_info->tmac_drop_ip_oflow) << 32 |
4464 le32_to_cpu(stat_info->tmac_drop_ip);
4466 (u64)le32_to_cpu(stat_info->tmac_icmp_oflow) << 32 |
4467 le32_to_cpu(stat_info->tmac_icmp);
4469 (u64)le32_to_cpu(stat_info->tmac_rst_tcp_oflow) << 32 |
4470 le32_to_cpu(stat_info->tmac_rst_tcp);
4471 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_tcp);
4472 tmp_stats[i++] = (u64)le32_to_cpu(stat_info->tmac_udp_oflow) << 32 |
4473 le32_to_cpu(stat_info->tmac_udp);
4475 (u64)le32_to_cpu(stat_info->rmac_vld_frms_oflow) << 32 |
4476 le32_to_cpu(stat_info->rmac_vld_frms);
4478 (u64)le32_to_cpu(stat_info->rmac_data_octets_oflow) << 32 |
4479 le32_to_cpu(stat_info->rmac_data_octets);
4480 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_fcs_err_frms);
4481 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_drop_frms);
4483 (u64)le32_to_cpu(stat_info->rmac_vld_mcst_frms_oflow) << 32 |
4484 le32_to_cpu(stat_info->rmac_vld_mcst_frms);
4486 (u64)le32_to_cpu(stat_info->rmac_vld_bcst_frms_oflow) << 32 |
4487 le32_to_cpu(stat_info->rmac_vld_bcst_frms);
4488 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_in_rng_len_err_frms);
4489 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_long_frms);
4490 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_pause_ctrl_frms);
4492 (u64)le32_to_cpu(stat_info->rmac_discarded_frms_oflow) << 32 |
4493 le32_to_cpu(stat_info->rmac_discarded_frms);
4495 (u64)le32_to_cpu(stat_info->rmac_usized_frms_oflow) << 32 |
4496 le32_to_cpu(stat_info->rmac_usized_frms);
4498 (u64)le32_to_cpu(stat_info->rmac_osized_frms_oflow) << 32 |
4499 le32_to_cpu(stat_info->rmac_osized_frms);
4501 (u64)le32_to_cpu(stat_info->rmac_frag_frms_oflow) << 32 |
4502 le32_to_cpu(stat_info->rmac_frag_frms);
4504 (u64)le32_to_cpu(stat_info->rmac_jabber_frms_oflow) << 32 |
4505 le32_to_cpu(stat_info->rmac_jabber_frms);
4506 tmp_stats[i++] = (u64)le32_to_cpu(stat_info->rmac_ip_oflow) << 32 |
4507 le32_to_cpu(stat_info->rmac_ip);
4508 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ip_octets);
4509 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_hdr_err_ip);
4510 tmp_stats[i++] = (u64)le32_to_cpu(stat_info->rmac_drop_ip_oflow) << 32 |
4511 le32_to_cpu(stat_info->rmac_drop_ip);
4512 tmp_stats[i++] = (u64)le32_to_cpu(stat_info->rmac_icmp_oflow) << 32 |
4513 le32_to_cpu(stat_info->rmac_icmp);
4514 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_tcp);
4515 tmp_stats[i++] = (u64)le32_to_cpu(stat_info->rmac_udp_oflow) << 32 |
4516 le32_to_cpu(stat_info->rmac_udp);
4518 (u64)le32_to_cpu(stat_info->rmac_err_drp_udp_oflow) << 32 |
4519 le32_to_cpu(stat_info->rmac_err_drp_udp);
4521 (u64)le32_to_cpu(stat_info->rmac_pause_cnt_oflow) << 32 |
4522 le32_to_cpu(stat_info->rmac_pause_cnt);
4524 (u64)le32_to_cpu(stat_info->rmac_accepted_ip_oflow) << 32 |
4525 le32_to_cpu(stat_info->rmac_accepted_ip);
4526 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_err_tcp);
/* Software-maintained ECC error counters (host byte order). */
4528 tmp_stats[i++] = stat_info->sw_stat.single_ecc_errs;
4529 tmp_stats[i++] = stat_info->sw_stat.double_ecc_errs;
4532 int s2io_ethtool_get_regs_len(struct net_device *dev)
4534 return (XENA_REG_SPACE);
4538 u32 s2io_ethtool_get_rx_csum(struct net_device * dev)
4540 nic_t *sp = dev->priv;
4542 return (sp->rx_csum);
/* Enable/disable Rx checksum offload; presumably stores @data into
 * sp->rx_csum on lines not shown here — TODO confirm. */
4544 int s2io_ethtool_set_rx_csum(struct net_device *dev, u32 data)
4546 nic_t *sp = dev->priv;
4555 int s2io_get_eeprom_len(struct net_device *dev)
4557 return (XENA_EEPROM_SPACE);
4560 int s2io_ethtool_self_test_count(struct net_device *dev)
4562 return (S2IO_TEST_LEN);
4564 void s2io_ethtool_get_strings(struct net_device *dev,
4565 u32 stringset, u8 * data)
4567 switch (stringset) {
4569 memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
4572 memcpy(data, ðtool_stats_keys,
4573 sizeof(ethtool_stats_keys));
4576 static int s2io_ethtool_get_stats_count(struct net_device *dev)
4578 return (S2IO_STAT_LEN);
4581 int s2io_ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
4584 dev->features |= NETIF_F_IP_CSUM;
4586 dev->features &= ~NETIF_F_IP_CSUM;
/* ethtool entry points exported by this driver. */
4592 static struct ethtool_ops netdev_ethtool_ops = {
4593 .get_settings = s2io_ethtool_gset,
4594 .set_settings = s2io_ethtool_sset,
4595 .get_drvinfo = s2io_ethtool_gdrvinfo,
4596 .get_regs_len = s2io_ethtool_get_regs_len,
4597 .get_regs = s2io_ethtool_gregs,
4598 .get_link = ethtool_op_get_link,
4599 .get_eeprom_len = s2io_get_eeprom_len,
4600 .get_eeprom = s2io_ethtool_geeprom,
4601 .set_eeprom = s2io_ethtool_seeprom,
4602 .get_pauseparam = s2io_ethtool_getpause_data,
4603 .set_pauseparam = s2io_ethtool_setpause_data,
4604 .get_rx_csum = s2io_ethtool_get_rx_csum,
4605 .set_rx_csum = s2io_ethtool_set_rx_csum,
4606 .get_tx_csum = ethtool_op_get_tx_csum,
4607 .set_tx_csum = s2io_ethtool_op_set_tx_csum,
4608 .get_sg = ethtool_op_get_sg,
4609 .set_sg = ethtool_op_set_sg,
4611 .get_tso = ethtool_op_get_tso,
4612 .set_tso = ethtool_op_set_tso,
4614 .self_test_count = s2io_ethtool_self_test_count,
4615 .self_test = s2io_ethtool_test,
4616 .get_strings = s2io_ethtool_get_strings,
4617 .phys_id = s2io_ethtool_idnic,
4618 .get_stats_count = s2io_ethtool_get_stats_count,
4619 .get_ethtool_stats = s2io_get_ethtool_stats
4623  * s2io_ioctl - Entry point for the Ioctl
4624  * @dev : Device pointer.
4625  * @ifr : An IOCTL specefic structure, that can contain a pointer to
4626  * a proprietary structure used to pass information to the driver.
4627  * @cmd : This is used to distinguish between the different commands that
4628  * can be passed to the IOCTL functions.
4630  * Currently there are no special functionality supported in IOCTL, hence
4631  * function always return EOPNOTSUPPORTED
/* No private ioctls are implemented by this driver. */
4634 int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
4640  * s2io_change_mtu - entry point to change MTU size for the device.
4641  * @dev : device pointer.
4642  * @new_mtu : the new MTU size for the device.
4643  * Description: A driver entry point to change MTU size for the device.
4644  * Before changing the MTU the device must be stopped.
4646  * 0 on success and an appropriate (-)ve integer as defined in errno.h
4650 int s2io_change_mtu(struct net_device *dev, int new_mtu)
4652 nic_t *sp = dev->priv;
/* Reject MTUs outside the supported [MIN_MTU, S2IO_JUMBO_SIZE] range. */
4654 if ((new_mtu < MIN_MTU) || (new_mtu > S2IO_JUMBO_SIZE)) {
4655 DBG_PRINT(ERR_DBG, "%s: MTU size is invalid.\n",
/* If the interface is up, bounce the card so the new MTU takes effect. */
4661 if (netif_running(dev)) {
4663 netif_stop_queue(dev);
4664 if (s2io_card_up(sp)) {
4665 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
4668 if (netif_queue_stopped(dev))
4669 netif_wake_queue(dev);
4670 } else { /* Device is down */
4671 XENA_dev_config_t __iomem *bar0 = sp->bar0;
4672 u64 val64 = new_mtu;
/* Program the RMAC maximum payload length register directly. */
4674 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
4681  * s2io_tasklet - Bottom half of the ISR.
4682  * @dev_adr : address of the device structure in dma_addr_t format.
4684  * This is the tasklet or the bottom half of the ISR. This is
4685  * an extension of the ISR which is scheduled by the scheduler to be run
4686  * when the load on the CPU is low. All low priority tasks of the ISR can
4687  * be pushed into the tasklet. For now the tasklet is used only to
4688  * replenish the Rx buffers in the Rx buffer descriptors.
4693 static void s2io_tasklet(unsigned long dev_addr)
4695 struct net_device *dev = (struct net_device *) dev_addr;
4696 nic_t *sp = dev->priv;
4698 mac_info_t *mac_control;
4699 struct config_param *config;
4701 mac_control = &sp->mac_control;
4702 config = &sp->config;
/* Skip the refill entirely if another instance is already running. */
4704 if (!TASKLET_IN_USE) {
4705 for (i = 0; i < config->rx_ring_num; i++) {
4706 ret = fill_rx_buffers(sp, i);
4707 if (ret == -ENOMEM) {
4708 DBG_PRINT(ERR_DBG, "%s: Out of ",
4710 DBG_PRINT(ERR_DBG, "memory in tasklet\n");
4712 } else if (ret == -EFILL) {
4714 "%s: Rx Ring %d is full\n",
/* Release the in-use flag taken by TASKLET_IN_USE above. */
4719 clear_bit(0, (&sp->tasklet_status));
4724  * s2io_set_link - Set the LInk status
4725  * @data: long pointer to device private structue
4726  * Description: Sets the link status for the adapter
4729 static void s2io_set_link(unsigned long data)
4731 nic_t *nic = (nic_t *) data;
4732 struct net_device *dev = nic->dev;
4733 XENA_dev_config_t __iomem *bar0 = nic->bar0;
/* Bit 0 of link_state doubles as a "reset in progress" lock. */
4737 if (test_and_set_bit(0, &(nic->link_state))) {
4738 /* The card is being reset, no point doing anything */
4742 subid = nic->pdev->subsystem_device;
4743 if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
4745  * Allow a small delay for the NICs self initiated
4746  * cleanup to complete.
4751 val64 = readq(&bar0->adapter_status);
/* Only touch adapter_control once the device reports quiescence. */
4752 if (verify_xena_quiescence(nic, val64, nic->device_enabled_once)) {
4753 if (LINK_IS_UP(val64)) {
4754 val64 = readq(&bar0->adapter_control);
4755 val64 |= ADAPTER_CNTL_EN;
4756 writeq(val64, &bar0->adapter_control);
/* Cards with faulty link LEDs drive the indicator via GPIO 0. */
4757 if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
4759 val64 = readq(&bar0->gpio_control);
4760 val64 |= GPIO_CTRL_GPIO_0;
4761 writeq(val64, &bar0->gpio_control);
4762 val64 = readq(&bar0->gpio_control);
4764 val64 |= ADAPTER_LED_ON;
4765 writeq(val64, &bar0->adapter_control);
4767 if (s2io_link_fault_indication(nic) ==
4768 MAC_RMAC_ERR_TIMER) {
/* Re-check: the link may have dropped while we enabled the device. */
4769 val64 = readq(&bar0->adapter_status);
4770 if (!LINK_IS_UP(val64)) {
4771 DBG_PRINT(ERR_DBG, "%s:", dev->name);
4772 DBG_PRINT(ERR_DBG, " Link down");
4773 DBG_PRINT(ERR_DBG, "after ");
4774 DBG_PRINT(ERR_DBG, "enabling ");
4775 DBG_PRINT(ERR_DBG, "device \n");
4778 if (nic->device_enabled_once == FALSE) {
4779 nic->device_enabled_once = TRUE;
4781 s2io_link(nic, LINK_UP);
4783 if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
4785 val64 = readq(&bar0->gpio_control);
4786 val64 &= ~GPIO_CTRL_GPIO_0;
4787 writeq(val64, &bar0->gpio_control);
4788 val64 = readq(&bar0->gpio_control);
4790 s2io_link(nic, LINK_DOWN);
4792 } else { /* NIC is not Quiescent. */
4793 DBG_PRINT(ERR_DBG, "%s: Error: ", dev->name);
4794 DBG_PRINT(ERR_DBG, "device is not Quiescent\n");
4795 netif_stop_queue(dev);
/* Drop the reset-in-progress lock taken at entry. */
4797 clear_bit(0, &(nic->link_state));
/* s2io_card_down - shuts the adapter down: stops timers and the tasklet,
 * quiesces the hardware, then reclaims all Tx/Rx buffers. */
4800 static void s2io_card_down(nic_t * sp)
4803 XENA_dev_config_t __iomem *bar0 = sp->bar0;
4804 unsigned long flags;
4805 register u64 val64 = 0;
4807 del_timer_sync(&sp->alarm_timer);
4808 /* If s2io_set_link task is executing, wait till it completes. */
4809 while (test_and_set_bit(0, &(sp->link_state))) {
4812 atomic_set(&sp->card_state, CARD_DOWN);
4814 /* disable Tx and Rx traffic on the NIC */
4818 tasklet_kill(&sp->task);
4820 /* Check if the device is Quiescent and then Reset the NIC */
4822 val64 = readq(&bar0->adapter_status);
4823 if (verify_xena_quiescence(sp, val64, sp->device_enabled_once)) {
4831 "s2io_close:Device not Quiescent ");
4832 DBG_PRINT(ERR_DBG, "adaper status reads 0x%llx\n",
4833 (unsigned long long) val64);
4839 /* Waiting till all Interrupt handlers are complete */
4843 if (!atomic_read(&sp->isr_cnt))
/* Tx and Rx buffers are reclaimed under their respective locks. */
4848 spin_lock_irqsave(&sp->tx_lock, flags);
4849 /* Free all Tx buffers */
4850 free_tx_buffers(sp);
4851 spin_unlock_irqrestore(&sp->tx_lock, flags);
4853 /* Free all Rx buffers */
4854 spin_lock_irqsave(&sp->rx_lock, flags);
4855 free_rx_buffers(sp);
4856 spin_unlock_irqrestore(&sp->rx_lock, flags);
/* Release the link_state lock taken above. */
4858 clear_bit(0, &(sp->link_state));
/*
 * s2io_card_up - Bring the NIC up.
 * @sp: private member of the device structure (s2io_nic).
 *
 * Programs the H/W registers via init_nic(), replenishes every
 * configured Rx ring, sets the receive/multicast mode, arms the
 * tasklet and the 0.5 s alarm timer, and starts the NIC.  On failure
 * the already-posted Rx buffers are freed.
 */
4861 static int s2io_card_up(nic_t * sp)
4864 mac_info_t *mac_control;
4865 struct config_param *config;
4866 struct net_device *dev = (struct net_device *) sp->dev;
4868 /* Initialize the H/W I/O registers */
4869 if (init_nic(sp) != 0) {
4870 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
4876 * Initializing the Rx buffers. For now we are considering only 1
4877 * Rx ring and initializing buffers into 30 Rx blocks
4879 mac_control = &sp->mac_control;
4880 config = &sp->config;
/* Fill every configured Rx ring; on ENOMEM free all rings and bail. */
4882 for (i = 0; i < config->rx_ring_num; i++) {
4883 if ((ret = fill_rx_buffers(sp, i))) {
4884 DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
4887 free_rx_buffers(sp);
4890 DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
4891 atomic_read(&sp->rx_bufs_left[i]));
4894 /* Setting its receive mode */
4895 s2io_set_multicast(dev);
4897 /* Enable tasklet for the device */
4898 tasklet_init(&sp->task, s2io_tasklet, (unsigned long) dev);
4900 /* Enable Rx Traffic and interrupts on the NIC */
4901 if (start_nic(sp)) {
4902 DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
4903 tasklet_kill(&sp->task);
4905 free_irq(dev->irq, dev);
4906 free_rx_buffers(sp);
/* Arm the alarm timer to fire every HZ/2 (0.5 s). */
4910 S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ/2));
4912 atomic_set(&sp->card_state, CARD_UP);
4917 * s2io_restart_nic - Resets the NIC.
4918 * @data : long pointer to the device private structure
4920 * This function is scheduled to be run by the s2io_tx_watchdog
4921 * function after 0.5 secs to reset the NIC. The idea is to reduce
4922 * the run time of the watch dog routine which is run holding a
/*
 * s2io_restart_nic - Work-queue callback that resets the NIC.
 * @data: long-cast pointer to the net_device.
 *
 * Scheduled from s2io_tx_watchdog via sp->rst_timer_task; brings the
 * card back up with s2io_card_up() and re-wakes the Tx queue.
 */
4926 static void s2io_restart_nic(unsigned long data)
4928 struct net_device *dev = (struct net_device *) data;
4929 nic_t *sp = dev->priv;
4932 if (s2io_card_up(sp)) {
4933 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
4936 netif_wake_queue(dev);
4937 DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n",
4943 * s2io_tx_watchdog - Watchdog for transmit side.
4944 * @dev : Pointer to net device structure
4946 * This function is triggered if the Tx Queue is stopped
4947 * for a pre-defined amount of time when the Interface is still up.
4948 * If the Interface is jammed in such a situation, the hardware is
4949 * reset (by s2io_close) and restarted again (by s2io_open) to
4950 * overcome any problem that might have been caused in the hardware.
/*
 * s2io_tx_watchdog - Watchdog for the transmit side.
 * @dev: net device whose Tx queue has been stalled too long.
 *
 * Called by the network stack on Tx timeout.  If the carrier is still
 * up, schedules rst_timer_task (s2io_restart_nic) to reset the card;
 * the actual reset runs later so this handler stays short.
 */
4955 static void s2io_tx_watchdog(struct net_device *dev)
4957 nic_t *sp = dev->priv;
4959 if (netif_carrier_ok(dev)) {
4960 schedule_work(&sp->rst_timer_task);
4965 * rx_osm_handler - To perform some OS related operations on SKB.
4966 * @sp: private member of the device structure,pointer to s2io_nic structure.
4967 * @skb : the socket buffer pointer.
4968 * @len : length of the packet
4969 * @cksum : FCS checksum of the frame.
4970 * @ring_no : the ring from which this RxD was extracted.
4972 * This function is called by the Rx interrupt service routine to perform
4973 * some OS related operations on the SKB before passing it to the upper
4974 * layers. It mainly checks if the checksum is OK, if so adds it to the
4975 * SKBs cksum variable, increments the Rx packet count and passes the SKB
4976 * to the upper layer. If the checksum is wrong, it increments the Rx
4977 * packet error count, frees the SKB and returns error.
4979 * SUCCESS on success and -1 on failure.
/*
 * rx_osm_handler - OS-side completion handling for one received frame.
 * @ring_data: per-ring info; supplies the nic pointer, ring number and
 *             (in 2-buffer mode) the current get block/offset.
 * @rxdp: the RxD just completed by the hardware.
 *
 * Checks the RxD T_CODE for hardware-reported errors, updates the rx
 * statistics, fixes up the skb (checksum status, protocol, VLAN tag)
 * and hands it to the stack via the NAPI or non-NAPI receive path.
 */
4981 static int rx_osm_handler(ring_info_t *ring_data, RxD_t * rxdp)
4983 nic_t *sp = ring_data->nic;
4984 struct net_device *dev = (struct net_device *) sp->dev;
/* The skb pointer was stashed in Host_Control when the RxD was posted. */
4985 struct sk_buff *skb = (struct sk_buff *)
4986 ((unsigned long) rxdp->Host_Control);
4987 int ring_no = ring_data->ring_no;
4988 u16 l3_csum, l4_csum;
4989 #ifdef CONFIG_2BUFF_MODE
4990 int buf0_len = RXD_GET_BUFFER0_SIZE(rxdp->Control_2);
4991 int buf2_len = RXD_GET_BUFFER2_SIZE(rxdp->Control_2);
4992 int get_block = ring_data->rx_curr_get_info.block_index;
4993 int get_off = ring_data->rx_curr_get_info.offset;
4994 buffAdd_t *ba = &ring_data->ba[get_block][get_off];
4995 unsigned char *buff;
/* NOTE(review): stray second semicolon below -- harmless, should be removed. */
4997 u16 len = (u16) ((RXD_GET_BUFFER0_SIZE(rxdp->Control_2)) >> 48);;
/* A non-zero T_CODE means the hardware flagged a receive error. */
5000 if (rxdp->Control_1 & RXD_T_CODE) {
5001 unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
5002 DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%llx\n",
5005 sp->stats.rx_crc_errors++;
5006 atomic_dec(&sp->rx_bufs_left[ring_no]);
5007 rxdp->Host_Control = 0;
5011 /* Updating statistics */
5012 rxdp->Host_Control = 0;
5014 sp->stats.rx_packets++;
5015 #ifndef CONFIG_2BUFF_MODE
5016 sp->stats.rx_bytes += len;
5018 sp->stats.rx_bytes += buf0_len + buf2_len;
5021 #ifndef CONFIG_2BUFF_MODE
/* 2-buffer mode: copy the small buffer-0 header in front of the payload. */
5024 buff = skb_push(skb, buf0_len);
5025 memcpy(buff, ba->ba_0, buf0_len);
5026 skb_put(skb, buf2_len);
/* Hardware checksum offload is trusted only for TCP/UDP frames. */
5029 if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) &&
5031 l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
5032 l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
5033 if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
5035 * NIC verifies if the Checksum of the received
5036 * frame is Ok or not and accordingly returns
5037 * a flag in the RxD.
5039 skb->ip_summed = CHECKSUM_UNNECESSARY;
5042 * Packet with erroneous checksum, let the
5043 * upper layers deal with it.
5045 skb->ip_summed = CHECKSUM_NONE;
5048 skb->ip_summed = CHECKSUM_NONE;
5051 skb->protocol = eth_type_trans(skb, dev);
5052 #ifdef CONFIG_S2IO_NAPI
/* NAPI path: VLAN-tagged frames go through the accelerated receive hook. */
5053 if (sp->vlgrp && RXD_GET_VLAN_TAG(rxdp->Control_2)) {
5054 /* Queueing the vlan frame to the upper layer */
5055 vlan_hwaccel_receive_skb(skb, sp->vlgrp,
5056 RXD_GET_VLAN_TAG(rxdp->Control_2));
5058 netif_receive_skb(skb);
/* Non-NAPI path. */
5061 if (sp->vlgrp && RXD_GET_VLAN_TAG(rxdp->Control_2)) {
5062 /* Queueing the vlan frame to the upper layer */
5063 vlan_hwaccel_rx(skb, sp->vlgrp,
5064 RXD_GET_VLAN_TAG(rxdp->Control_2));
5069 dev->last_rx = jiffies;
/* One fewer outstanding Rx buffer on this ring. */
5070 atomic_dec(&sp->rx_bufs_left[ring_no]);
5075 * s2io_link - stops/starts the Tx queue.
5076 * @sp : private member of the device structure, which is a pointer to the
5077 * s2io_nic structure.
5078 * @link : indicates whether link is UP/DOWN.
5080 * This function stops/starts the Tx queue depending on whether the link
5081 * status of the NIC is down or up. This is called by the Alarm
5082 * interrupt handler whenever a link change interrupt comes up.
/*
 * s2io_link - Propagate a link state change to the network stack.
 * @sp: private member of the device structure (s2io_nic).
 * @link: LINK_UP or LINK_DOWN.
 *
 * On a real transition, toggles the netif carrier state and logs it,
 * then caches the new state in sp->last_link_state so that repeated
 * notifications for the same state are ignored.
 */
5087 void s2io_link(nic_t * sp, int link)
5089 struct net_device *dev = (struct net_device *) sp->dev;
/* Only act on an actual change of state. */
5091 if (link != sp->last_link_state) {
5092 if (link == LINK_DOWN) {
5093 DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
5094 netif_carrier_off(dev);
5096 DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name);
5097 netif_carrier_on(dev);
5100 sp->last_link_state = link;
5104 * get_xena_rev_id - to identify revision ID of xena.
5105 * @pdev : PCI Dev structure
5107 * Function to identify the Revision ID of xena.
5109 * returns the revision ID of the device.
/*
 * get_xena_rev_id - Read the PCI revision ID of the Xena adapter.
 * @pdev: PCI device structure.
 * Return: the revision ID read from PCI config space.
 */
5112 int get_xena_rev_id(struct pci_dev *pdev)
5116 ret = pci_read_config_byte(pdev, PCI_REVISION_ID, (u8 *) & id);
5121 * s2io_init_pci -Initialization of PCI and PCI-X configuration registers .
5122 * @sp : private member of the device structure, which is a pointer to the
5123 * s2io_nic structure.
5125 * This function initializes a few of the PCI and PCI-X configuration registers
5126 * with recommended values.
/*
 * s2io_init_pci - Program recommended PCI / PCI-X configuration values.
 * @sp: private member of the device structure (s2io_nic).
 *
 * Enables data-parity-error recovery in the PCI-X command register,
 * sets the PErr response bit in the PCI command register and forcibly
 * disables the card's relaxed-ordering capability.  Each write is
 * followed by a read-back of the same register.
 */
5131 static void s2io_init_pci(nic_t * sp)
5133 u16 pci_cmd = 0, pcix_cmd = 0;
5135 /* Enable Data Parity Error Recovery in PCI-X command register. */
5136 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
5138 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
5140 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
5143 /* Set the PErr Response bit in PCI command register. */
5144 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
5145 pci_write_config_word(sp->pdev, PCI_COMMAND,
5146 (pci_cmd | PCI_COMMAND_PARITY));
5147 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
5149 /* Forcibly disabling relaxed ordering capability of the card. */
5151 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
5153 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
/*
 * Module metadata and loadable parameters.  A perm of 0 means the
 * parameter gets no sysfs entry; it can only be set at load time.
 */
5157 MODULE_AUTHOR("Raghavendra Koushik <raghavendra.koushik@neterion.com>");
5158 MODULE_LICENSE("GPL");
5159 module_param(tx_fifo_num, int, 0);
5160 module_param(rx_ring_num, int, 0);
5161 module_param_array(tx_fifo_len, uint, NULL, 0);
5162 module_param_array(rx_ring_sz, uint, NULL, 0);
5163 module_param_array(rts_frm_len, uint, NULL, 0);
/*
 * Fix: the perm argument here was 1, which is not a valid sysfs mode
 * (module_param permissions are octal read/write bits; exec bits are
 * rejected).  Use 0 -- no sysfs entry -- like every other parameter.
 */
5164 module_param(use_continuous_tx_intrs, int, 0);
5165 module_param(rmac_pause_time, int, 0);
5166 module_param(mc_pause_threshold_q0q3, int, 0);
5167 module_param(mc_pause_threshold_q4q7, int, 0);
5168 module_param(shared_splits, int, 0);
5169 module_param(tmac_util_period, int, 0);
5170 module_param(rmac_util_period, int, 0);
5171 module_param(bimodal, bool, 0);
5172 #ifndef CONFIG_S2IO_NAPI
5173 module_param(indicate_max_pkts, int, 0);
5177 * s2io_init_nic - Initialization of the adapter .
5178 * @pdev : structure containing the PCI related information of the device.
5179 * @pre: List of PCI devices supported by the driver listed in s2io_tbl.
5181 * The function initializes an adapter identified by the pci_dev structure.
5182 * All OS related initialization including memory and device structure and
5183 * initialization of the device private variable is done. Also the swapper
5184 * control register is initialized to enable read and write into the I/O
5185 * registers of the device.
5187 * returns 0 on success and negative on failure.
/*
 * s2io_init_nic - PCI probe: allocate and initialise one adapter.
 * @pdev: PCI device being probed.
 * @pre: matching entry from the driver's PCI id table (s2io_tbl).
 *
 * Enables the PCI device, negotiates a 64-/32-bit DMA mask, allocates
 * the net_device, maps BAR0 (registers) and BAR1 (Tx FIFO pointers),
 * fills in the netdev entry points, reads the factory MAC address from
 * the card and registers the netdev.  Returns 0 on success and a
 * negative value on failure; error paths unwind via the *_failed
 * labels at the bottom.
 */
5190 static int __devinit
5191 s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
5194 struct net_device *dev;
5196 int dma_flag = FALSE;
5197 u32 mac_up, mac_down;
5198 u64 val64 = 0, tmp64 = 0;
5199 XENA_dev_config_t __iomem *bar0 = NULL;
5201 mac_info_t *mac_control;
5202 struct config_param *config;
5205 #ifdef CONFIG_S2IO_NAPI
5206 DBG_PRINT(ERR_DBG, "NAPI support has been enabled\n");
5209 if ((ret = pci_enable_device(pdev))) {
5211 "s2io_init_nic: pci_enable_device failed\n");
/* Prefer 64-bit DMA; fall back to 32-bit, otherwise give up. */
5215 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
5216 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 64bit DMA\n");
5218 if (pci_set_consistent_dma_mask
5219 (pdev, DMA_64BIT_MASK)) {
/* NOTE(review): the backslash continuation embeds the next line's
 * leading whitespace in the message string; join it into one literal. */
5221 "Unable to obtain 64bit DMA for \
5222 consistent allocations\n");
5223 pci_disable_device(pdev);
5226 } else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
5227 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 32bit DMA\n");
5229 pci_disable_device(pdev);
5233 if (pci_request_regions(pdev, s2io_driver_name)) {
5234 DBG_PRINT(ERR_DBG, "Request Regions failed\n"),
5235 pci_disable_device(pdev);
5239 dev = alloc_etherdev(sizeof(nic_t));
5241 DBG_PRINT(ERR_DBG, "Device allocation failed\n");
5242 pci_disable_device(pdev);
5243 pci_release_regions(pdev);
5247 pci_set_master(pdev);
5248 pci_set_drvdata(pdev, dev);
5249 SET_MODULE_OWNER(dev);
5250 SET_NETDEV_DEV(dev, &pdev->dev);
5252 /* Private member variable initialized to s2io NIC structure */
5254 memset(sp, 0, sizeof(nic_t));
5257 sp->high_dma_flag = dma_flag;
5258 sp->device_enabled_once = FALSE;
/* Distinguish Xframe II (Herc) from Xframe I by PCI device id. */
5260 if ((pdev->device == PCI_DEVICE_ID_HERC_WIN) ||
5261 (pdev->device == PCI_DEVICE_ID_HERC_UNI))
5262 sp->device_type = XFRAME_II_DEVICE;
5264 sp->device_type = XFRAME_I_DEVICE;
5266 /* Initialize some PCI/PCI-X fields of the NIC. */
5270 * Setting the device configuration parameters.
5271 * Most of these parameters can be specified by the user during
5272 * module insertion as they are module loadable parameters. If
5273 * these parameters are not not specified during load time, they
5274 * are initialized with default values.
5276 mac_control = &sp->mac_control;
5277 config = &sp->config;
5279 /* Tx side parameters. */
5280 if (tx_fifo_len[0] == 0)
5281 tx_fifo_len[0] = DEFAULT_FIFO_LEN; /* Default value. */
5282 config->tx_fifo_num = tx_fifo_num;
5283 for (i = 0; i < MAX_TX_FIFOS; i++) {
5284 config->tx_cfg[i].fifo_len = tx_fifo_len[i];
5285 config->tx_cfg[i].fifo_priority = i;
5288 /* mapping the QoS priority to the configured fifos */
5289 for (i = 0; i < MAX_TX_FIFOS; i++)
5290 config->fifo_mapping[i] = fifo_map[config->tx_fifo_num][i];
/* A small FIFO (< 65 TxDs) forces per-list Tx interrupts instead of
 * utilisation-based ones. */
5292 config->tx_intr_type = TXD_INT_TYPE_UTILZ;
5293 for (i = 0; i < config->tx_fifo_num; i++) {
5294 config->tx_cfg[i].f_no_snoop =
5295 (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
5296 if (config->tx_cfg[i].fifo_len < 65) {
5297 config->tx_intr_type = TXD_INT_TYPE_PER_LIST;
5301 config->max_txds = MAX_SKB_FRAGS;
5303 /* Rx side parameters. */
5304 if (rx_ring_sz[0] == 0)
5305 rx_ring_sz[0] = SMALL_BLK_CNT; /* Default value. */
5306 config->rx_ring_num = rx_ring_num;
5307 for (i = 0; i < MAX_RX_RINGS; i++) {
5308 config->rx_cfg[i].num_rxd = rx_ring_sz[i] *
5309 (MAX_RXDS_PER_BLOCK + 1);
5310 config->rx_cfg[i].ring_priority = i;
5313 for (i = 0; i < rx_ring_num; i++) {
5314 config->rx_cfg[i].ring_org = RING_ORG_BUFF1;
5315 config->rx_cfg[i].f_no_snoop =
5316 (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
5319 /* Setting Mac Control parameters */
5320 mac_control->rmac_pause_time = rmac_pause_time;
5321 mac_control->mc_pause_threshold_q0q3 = mc_pause_threshold_q0q3;
5322 mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7;
5325 /* Initialize Ring buffer parameters. */
5326 for (i = 0; i < config->rx_ring_num; i++)
5327 atomic_set(&sp->rx_bufs_left[i], 0);
5329 /* Initialize the number of ISRs currently running */
5330 atomic_set(&sp->isr_cnt, 0);
5332 /* initialize the shared memory used by the NIC and the host */
5333 if (init_shared_mem(sp)) {
5334 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n",
5337 goto mem_alloc_failed;
/* Map BAR0 (device registers) and BAR1 (Tx FIFO doorbell area). */
5340 sp->bar0 = ioremap(pci_resource_start(pdev, 0),
5341 pci_resource_len(pdev, 0));
5343 DBG_PRINT(ERR_DBG, "%s: S2IO: cannot remap io mem1\n",
5346 goto bar0_remap_failed;
5349 sp->bar1 = ioremap(pci_resource_start(pdev, 2),
5350 pci_resource_len(pdev, 2));
5352 DBG_PRINT(ERR_DBG, "%s: S2IO: cannot remap io mem2\n",
5355 goto bar1_remap_failed;
5358 dev->irq = pdev->irq;
5359 dev->base_addr = (unsigned long) sp->bar0;
5361 /* Initializing the BAR1 address as the start of the FIFO pointer. */
5362 for (j = 0; j < MAX_TX_FIFOS; j++) {
5363 mac_control->tx_FIFO_start[j] = (TxFIFO_element_t __iomem *)
5364 (sp->bar1 + (j * 0x00020000));
5367 /* Driver entry points */
5368 dev->open = &s2io_open;
5369 dev->stop = &s2io_close;
5370 dev->hard_start_xmit = &s2io_xmit;
5371 dev->get_stats = &s2io_get_stats;
5372 dev->set_multicast_list = &s2io_set_multicast;
5373 dev->do_ioctl = &s2io_ioctl;
5374 dev->change_mtu = &s2io_change_mtu;
5375 SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
5376 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
5377 dev->vlan_rx_register = s2io_vlan_rx_register;
5378 dev->vlan_rx_kill_vid = (void *)s2io_vlan_rx_kill_vid;
5381 * will use eth_mac_addr() for dev->set_mac_address
5382 * mac address will be set every time dev->open() is called
5384 #if defined(CONFIG_S2IO_NAPI)
5385 dev->poll = s2io_poll;
5389 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
5390 if (sp->high_dma_flag == TRUE)
5391 dev->features |= NETIF_F_HIGHDMA;
5393 dev->features |= NETIF_F_TSO;
5396 dev->tx_timeout = &s2io_tx_watchdog;
5397 dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
/* Deferred work: card reset (Tx watchdog) and link-change handling. */
5398 INIT_WORK(&sp->rst_timer_task,
5399 (void (*)(void *)) s2io_restart_nic, dev);
5400 INIT_WORK(&sp->set_link_task,
5401 (void (*)(void *)) s2io_set_link, sp);
5403 if (!(sp->device_type & XFRAME_II_DEVICE)) {
5404 pci_save_state(sp->pdev);
5407 /* Setting swapper control on the NIC, for proper reset operation */
5408 if (s2io_set_swapper(sp)) {
5409 DBG_PRINT(ERR_DBG, "%s:swapper settings are wrong\n",
5412 goto set_swap_failed;
5415 /* Verify if the Herc works on the slot its placed into */
5416 if (sp->device_type & XFRAME_II_DEVICE) {
5417 mode = s2io_verify_pci_mode(sp);
5419 DBG_PRINT(ERR_DBG, "%s: ", __FUNCTION__);
5420 DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
5422 goto set_swap_failed;
5426 /* Not needed for Herc */
5427 if (sp->device_type & XFRAME_I_DEVICE) {
5429 * Fix for all "FFs" MAC address problems observed on
5432 fix_mac_address(sp);
5437 * MAC address initialization.
5438 * For now only one mac address will be read and used.
5441 val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5442 RMAC_ADDR_CMD_MEM_OFFSET(0 + MAC_MAC_ADDR_START_OFFSET);
5443 writeq(val64, &bar0->rmac_addr_cmd_mem);
5444 wait_for_cmd_complete(sp);
5446 tmp64 = readq(&bar0->rmac_addr_data0_mem);
5447 mac_down = (u32) tmp64;
5448 mac_up = (u32) (tmp64 >> 32);
/* FIXME(review): sizeof(ETH_ALEN) is sizeof(int), not ETH_ALEN (6)
 * bytes; harmless only because all six bytes are assigned just below,
 * but the argument should be plain ETH_ALEN. */
5450 memset(sp->def_mac_addr[0].mac_addr, 0, sizeof(ETH_ALEN));
5452 sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up);
5453 sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8);
5454 sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16);
5455 sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_up >> 24);
5456 sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
5457 sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);
5459 /* Set the factory defined MAC address initially */
5460 dev->addr_len = ETH_ALEN;
5461 memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);
5464 * Initialize the tasklet status and link state flags
5465 * and the card state parameter
5467 atomic_set(&(sp->card_state), 0);
5468 sp->tasklet_status = 0;
5471 /* Initialize spinlocks */
5472 spin_lock_init(&sp->tx_lock);
5473 #ifndef CONFIG_S2IO_NAPI
5474 spin_lock_init(&sp->put_lock);
5476 spin_lock_init(&sp->rx_lock);
5479 * SXE-002: Configure link and activity LED to init state
5482 subid = sp->pdev->subsystem_device;
5483 if ((subid & 0xFF) >= 0x07) {
5484 val64 = readq(&bar0->gpio_control);
5485 val64 |= 0x0000800000000000ULL;
5486 writeq(val64, &bar0->gpio_control);
5487 val64 = 0x0411040400000000ULL;
5488 writeq(val64, (void __iomem *) bar0 + 0x2700);
5489 val64 = readq(&bar0->gpio_control);
5492 sp->rx_csum = 1; /* Rx chksum verify enabled by default */
5494 if (register_netdev(dev)) {
5495 DBG_PRINT(ERR_DBG, "Device registration failed\n");
5497 goto register_failed;
/* Boot banner: device type, revision, driver version and MAC address. */
5500 if (sp->device_type & XFRAME_II_DEVICE) {
5501 DBG_PRINT(ERR_DBG, "%s: Neterion Xframe II 10GbE adapter ",
5503 DBG_PRINT(ERR_DBG, "(rev %d), Driver %s\n",
5504 get_xena_rev_id(sp->pdev),
5505 s2io_driver_version);
5506 DBG_PRINT(ERR_DBG, "MAC ADDR: %02x:%02x:%02x:%02x:%02x:%02x\n",
5507 sp->def_mac_addr[0].mac_addr[0],
5508 sp->def_mac_addr[0].mac_addr[1],
5509 sp->def_mac_addr[0].mac_addr[2],
5510 sp->def_mac_addr[0].mac_addr[3],
5511 sp->def_mac_addr[0].mac_addr[4],
5512 sp->def_mac_addr[0].mac_addr[5]);
5513 mode = s2io_print_pci_mode(sp);
5515 DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode ");
5517 goto set_swap_failed;
5520 DBG_PRINT(ERR_DBG, "%s: Neterion Xframe I 10GbE adapter ",
5522 DBG_PRINT(ERR_DBG, "(rev %d), Driver %s\n",
5523 get_xena_rev_id(sp->pdev),
5524 s2io_driver_version);
5525 DBG_PRINT(ERR_DBG, "MAC ADDR: %02x:%02x:%02x:%02x:%02x:%02x\n",
5526 sp->def_mac_addr[0].mac_addr[0],
5527 sp->def_mac_addr[0].mac_addr[1],
5528 sp->def_mac_addr[0].mac_addr[2],
5529 sp->def_mac_addr[0].mac_addr[3],
5530 sp->def_mac_addr[0].mac_addr[4],
5531 sp->def_mac_addr[0].mac_addr[5]);
5534 /* Initialize device name */
5535 strcpy(sp->name, dev->name);
5536 if (sp->device_type & XFRAME_II_DEVICE)
5537 strcat(sp->name, ": Neterion Xframe II 10GbE adapter");
5539 strcat(sp->name, ": Neterion Xframe I 10GbE adapter");
5541 /* Initialize bimodal Interrupts */
5542 sp->config.bimodal = bimodal;
5543 if (!(sp->device_type & XFRAME_II_DEVICE) && bimodal) {
5544 sp->config.bimodal = 0;
5545 DBG_PRINT(ERR_DBG,"%s:Bimodal intr not supported by Xframe I\n",
5550 * Make Link state as off at this point, when the Link change
5551 * interrupt comes the state will be automatically changed to
5554 netif_carrier_off(dev);
/* Error unwind: release shared memory, PCI regions and drvdata. */
5565 free_shared_mem(sp);
5566 pci_disable_device(pdev);
5567 pci_release_regions(pdev);
5568 pci_set_drvdata(pdev, NULL);
5575 * s2io_rem_nic - Free the PCI device
5576 * @pdev: structure containing the PCI related information of the device.
5577 * Description: This function is called by the Pci subsystem to release a
5578 * PCI device and free up all resource held up by the device. This could
5579 * be in response to a Hot plug event or when the driver is to be removed
/*
 * s2io_rem_nic - PCI remove: tear down one adapter.
 * @pdev: PCI device being removed (hot-unplug or driver unload).
 *
 * Unregisters the netdev, frees the shared host/NIC memory and
 * releases the PCI resources acquired by s2io_init_nic.
 */
5583 static void __devexit s2io_rem_nic(struct pci_dev *pdev)
5585 struct net_device *dev =
5586 (struct net_device *) pci_get_drvdata(pdev);
/* Nothing to do if probe never stored a net_device here. */
5590 DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n");
5595 unregister_netdev(dev);
5597 free_shared_mem(sp);
5600 pci_disable_device(pdev);
5601 pci_release_regions(pdev);
5602 pci_set_drvdata(pdev, NULL);
5607 * s2io_starter - Entry point for the driver
5608 * Description: This function is the entry point for the driver. It verifies
5609 * the module loadable parameters and initializes PCI configuration space.
/*
 * s2io_starter - Module entry point: register the PCI driver.
 * Return: result of pci_module_init() (0 on success, negative on error).
 */
5612 int __init s2io_starter(void)
5614 return pci_module_init(&s2io_driver);
5618 * s2io_closer - Cleanup routine for the driver
5619 * Description: This function is the cleanup routine for the driver. It unregisters the driver.
/*
 * s2io_closer - Module exit point: unregister the PCI driver.
 */
5622 void s2io_closer(void)
5624 pci_unregister_driver(&s2io_driver);
5625 DBG_PRINT(INIT_DBG, "cleanup done\n");
/* Module load/unload hooks. */
5628 module_init(s2io_starter);
5629 module_exit(s2io_closer);