1 /************************************************************************
2  * s2io.c: A Linux PCI-X Ethernet driver for S2IO 10GbE Server NIC
3  * Copyright(c) 2002-2005 Neterion Inc.
4
5  * This software may be used and distributed according to the terms of
6  * the GNU General Public License (GPL), incorporated herein by reference.
7  * Drivers based on or derived from this code fall under the GPL and must
8  * retain the authorship, copyright and license notice.  This file is not
9  * a complete program and may only be used when the entire operating
10  * system is licensed under the GPL.
11  * See the file COPYING in this distribution for more information.
12  *
13  * Credits:
14  * Jeff Garzik          : For pointing out the improper error condition
15  *                        check in the s2io_xmit routine and also some
16  *                        issues in the Tx watch dog function. Also for
17  *                        patiently answering all those innumerable
18  *                        questions regarding the 2.6 porting issues.
19  * Stephen Hemminger    : Providing proper 2.6 porting mechanism for some
20  *                        macros available only in 2.6 Kernel.
21  * Francois Romieu      : For pointing out all code parts that were
22  *                        deprecated and also styling related comments.
23  * Grant Grundler       : For helping me get rid of some Architecture
24  *                        dependent code.
25  * Christopher Hellwig  : Some more 2.6 specific issues in the driver.
26  *
27  * The module loadable parameters that are supported by the driver and a brief
28  * explanation of all the variables.
29  * rx_ring_num : This can be used to program the number of receive rings used
30  * in the driver.
31  * rx_ring_len: This defines the number of descriptors each ring can have. This
32  * is also an array of size 8.
33  * tx_fifo_num: This defines the number of Tx FIFOs that are used in the driver.
34  * tx_fifo_len: This too is an array of 8. Each element defines the number of
35  * Tx descriptors that can be associated with each corresponding FIFO.
36  ************************************************************************/
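/*
 * Illustration only: a hypothetical invocation that sets several of the
 * parameters described above, assuming the variables declared later in
 * this file are exported as module parameters under the same names:
 *      modprobe s2io tx_fifo_num=2 tx_fifo_len=512,512 rx_ring_num=2
 */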
37
38 #include <linux/config.h>
39 #include <linux/module.h>
40 #include <linux/types.h>
41 #include <linux/errno.h>
42 #include <linux/ioport.h>
43 #include <linux/pci.h>
44 #include <linux/dma-mapping.h>
45 #include <linux/kernel.h>
46 #include <linux/netdevice.h>
47 #include <linux/etherdevice.h>
48 #include <linux/skbuff.h>
49 #include <linux/init.h>
50 #include <linux/delay.h>
51 #include <linux/stddef.h>
52 #include <linux/ioctl.h>
53 #include <linux/timex.h>
54 #include <linux/sched.h>
55 #include <linux/ethtool.h>
56 #include <linux/version.h>
57 #include <linux/workqueue.h>
58
59 #include <asm/system.h>
60 #include <asm/uaccess.h>
61 #include <asm/io.h>
62
63 /* local include */
64 #include "s2io.h"
65 #include "s2io-regs.h"
66
67 /* S2io Driver name & version. */
68 static char s2io_driver_name[] = "Neterion";
69 static char s2io_driver_version[] = "Version 1.7.7";
70
71 static inline int RXD_IS_UP2DT(RxD_t *rxdp)
72 {
73         int ret;
74
75         ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
76                 (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));
77
78         return ret;
79 }
80
81 /*
82  * Cards with the following subsystem_ids have a link state indication
83  * problem: 600B, 600C, 600D, 640B, 640C and 640D.
84  * The macro below identifies these cards given the subsystem_id.
85  */
86 #define CARDS_WITH_FAULTY_LINK_INDICATORS(subid) \
87                 (((subid >= 0x600B) && (subid <= 0x600D)) || \
88                  ((subid >= 0x640B) && (subid <= 0x640D))) ? 1 : 0
89
90 #define LINK_IS_UP(val64) (!(val64 & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
91                                       ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
92 #define TASKLET_IN_USE test_and_set_bit(0, (&sp->tasklet_status))
93 #define PANIC   1
94 #define LOW     2
95 static inline int rx_buffer_level(nic_t * sp, int rxb_size, int ring)
96 {
97         int level = 0;
98         mac_info_t *mac_control;
99
100         mac_control = &sp->mac_control;
101         if ((mac_control->rings[ring].pkt_cnt - rxb_size) > 16) {
102                 level = LOW;
103                 if ((mac_control->rings[ring].pkt_cnt - rxb_size) <
104                                 MAX_RXDS_PER_BLOCK) {
105                         level = PANIC;
106                 }
107         }
108
109         return level;
110 }
111
112 /* Ethtool related variables and Macros. */
113 static char s2io_gstrings[][ETH_GSTRING_LEN] = {
114         "Register test\t(offline)",
115         "Eeprom test\t(offline)",
116         "Link test\t(online)",
117         "RLDRAM test\t(offline)",
118         "BIST Test\t(offline)"
119 };
120
121 static char ethtool_stats_keys[][ETH_GSTRING_LEN] = {
122         {"tmac_frms"},
123         {"tmac_data_octets"},
124         {"tmac_drop_frms"},
125         {"tmac_mcst_frms"},
126         {"tmac_bcst_frms"},
127         {"tmac_pause_ctrl_frms"},
128         {"tmac_any_err_frms"},
129         {"tmac_vld_ip_octets"},
130         {"tmac_vld_ip"},
131         {"tmac_drop_ip"},
132         {"tmac_icmp"},
133         {"tmac_rst_tcp"},
134         {"tmac_tcp"},
135         {"tmac_udp"},
136         {"rmac_vld_frms"},
137         {"rmac_data_octets"},
138         {"rmac_fcs_err_frms"},
139         {"rmac_drop_frms"},
140         {"rmac_vld_mcst_frms"},
141         {"rmac_vld_bcst_frms"},
142         {"rmac_in_rng_len_err_frms"},
143         {"rmac_long_frms"},
144         {"rmac_pause_ctrl_frms"},
145         {"rmac_discarded_frms"},
146         {"rmac_usized_frms"},
147         {"rmac_osized_frms"},
148         {"rmac_frag_frms"},
149         {"rmac_jabber_frms"},
150         {"rmac_ip"},
151         {"rmac_ip_octets"},
152         {"rmac_hdr_err_ip"},
153         {"rmac_drop_ip"},
154         {"rmac_icmp"},
155         {"rmac_tcp"},
156         {"rmac_udp"},
157         {"rmac_err_drp_udp"},
158         {"rmac_pause_cnt"},
159         {"rmac_accepted_ip"},
160         {"rmac_err_tcp"},
161         {"\n DRIVER STATISTICS"},
162         {"single_bit_ecc_errs"},
163         {"double_bit_ecc_errs"},
164 };
165
166 #define S2IO_STAT_LEN sizeof(ethtool_stats_keys)/ ETH_GSTRING_LEN
167 #define S2IO_STAT_STRINGS_LEN S2IO_STAT_LEN * ETH_GSTRING_LEN
168
169 #define S2IO_TEST_LEN   sizeof(s2io_gstrings) / ETH_GSTRING_LEN
170 #define S2IO_STRINGS_LEN        S2IO_TEST_LEN * ETH_GSTRING_LEN
171
172 /*
173  * Constants to be programmed into the Xena's registers, to configure
174  * the XAUI.
175  */
176
177 #define SWITCH_SIGN     0xA5A5A5A5A5A5A5A5ULL
178 #define END_SIGN        0x0
179
180 static u64 default_mdio_cfg[] = {
181         /* Reset PMA PLL */
182         0xC001010000000000ULL, 0xC0010100000000E0ULL,
183         0xC0010100008000E4ULL,
184         /* Remove Reset from PMA PLL */
185         0xC001010000000000ULL, 0xC0010100000000E0ULL,
186         0xC0010100000000E4ULL,
187         END_SIGN
188 };
189
190 static u64 default_dtx_cfg[] = {
191         0x8000051500000000ULL, 0x80000515000000E0ULL,
192         0x80000515D93500E4ULL, 0x8001051500000000ULL,
193         0x80010515000000E0ULL, 0x80010515001E00E4ULL,
194         0x8002051500000000ULL, 0x80020515000000E0ULL,
195         0x80020515F21000E4ULL,
196         /* Set PADLOOPBACKN */
197         0x8002051500000000ULL, 0x80020515000000E0ULL,
198         0x80020515B20000E4ULL, 0x8003051500000000ULL,
199         0x80030515000000E0ULL, 0x80030515B20000E4ULL,
200         0x8004051500000000ULL, 0x80040515000000E0ULL,
201         0x80040515B20000E4ULL, 0x8005051500000000ULL,
202         0x80050515000000E0ULL, 0x80050515B20000E4ULL,
203         SWITCH_SIGN,
204         /* Remove PADLOOPBACKN */
205         0x8002051500000000ULL, 0x80020515000000E0ULL,
206         0x80020515F20000E4ULL, 0x8003051500000000ULL,
207         0x80030515000000E0ULL, 0x80030515F20000E4ULL,
208         0x8004051500000000ULL, 0x80040515000000E0ULL,
209         0x80040515F20000E4ULL, 0x8005051500000000ULL,
210         0x80050515000000E0ULL, 0x80050515F20000E4ULL,
211         END_SIGN
212 };
213
214 /*
215  * Constants for Fixing the MacAddress problem seen mostly on
216  * Alpha machines.
217  */
218 static u64 fix_mac[] = {
219         0x0060000000000000ULL, 0x0060600000000000ULL,
220         0x0040600000000000ULL, 0x0000600000000000ULL,
221         0x0020600000000000ULL, 0x0060600000000000ULL,
222         0x0020600000000000ULL, 0x0060600000000000ULL,
223         0x0020600000000000ULL, 0x0060600000000000ULL,
224         0x0020600000000000ULL, 0x0060600000000000ULL,
225         0x0020600000000000ULL, 0x0060600000000000ULL,
226         0x0020600000000000ULL, 0x0060600000000000ULL,
227         0x0020600000000000ULL, 0x0060600000000000ULL,
228         0x0020600000000000ULL, 0x0060600000000000ULL,
229         0x0020600000000000ULL, 0x0060600000000000ULL,
230         0x0020600000000000ULL, 0x0060600000000000ULL,
231         0x0020600000000000ULL, 0x0000600000000000ULL,
232         0x0040600000000000ULL, 0x0060600000000000ULL,
233         END_SIGN
234 };
235
236 /* Module Loadable parameters. */
237 static unsigned int tx_fifo_num = 1;
238 static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
239     {[0 ...(MAX_TX_FIFOS - 1)] = 0 };
240 static unsigned int rx_ring_num = 1;
241 static unsigned int rx_ring_sz[MAX_RX_RINGS] =
242     {[0 ...(MAX_RX_RINGS - 1)] = 0 };
243 static unsigned int rts_frm_len[MAX_RX_RINGS] =
244     {[0 ...(MAX_RX_RINGS - 1)] = 0 };
245 static unsigned int use_continuous_tx_intrs = 1;
246 static unsigned int rmac_pause_time = 65535;
247 static unsigned int mc_pause_threshold_q0q3 = 187;
248 static unsigned int mc_pause_threshold_q4q7 = 187;
249 static unsigned int shared_splits;
250 static unsigned int tmac_util_period = 5;
251 static unsigned int rmac_util_period = 5;
252 #ifndef CONFIG_S2IO_NAPI
253 static unsigned int indicate_max_pkts;
254 #endif
255
256 /*
257  * S2IO device table.
258  * This table lists all the devices that this driver supports.
259  */
260 static struct pci_device_id s2io_tbl[] __devinitdata = {
261         {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
262          PCI_ANY_ID, PCI_ANY_ID},
263         {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
264          PCI_ANY_ID, PCI_ANY_ID},
265         {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
266          PCI_ANY_ID, PCI_ANY_ID},
267         {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
268          PCI_ANY_ID, PCI_ANY_ID},
269         {0,}
270 };
271
272 MODULE_DEVICE_TABLE(pci, s2io_tbl);
273
274 static struct pci_driver s2io_driver = {
275       .name = "S2IO",
276       .id_table = s2io_tbl,
277       .probe = s2io_init_nic,
278       .remove = __devexit_p(s2io_rem_nic),
279 };
280
281 /* A simplifier macro used both by init and free shared_mem Fns(). */
282 #define TXD_MEM_PAGE_CNT(len, per_each) ((len+per_each - 1) / per_each)
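/*
 * Worked example (hypothetical numbers): TXD_MEM_PAGE_CNT() is plain
 * round-up division, so with a FIFO of 100 TxD lists and 32 lists fitting
 * in one page it evaluates to (100 + 32 - 1) / 32 = 4 pages.
 */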
283
284 /**
285  * init_shared_mem - Allocation and Initialization of Memory
286  * @nic: Device private variable.
287  * Description: The function allocates all the memory areas shared
288  * between the NIC and the driver. This includes Tx descriptors,
289  * Rx descriptors and the statistics block.
290  */
291
292 static int init_shared_mem(struct s2io_nic *nic)
293 {
294         u32 size;
295         void *tmp_v_addr, *tmp_v_addr_next;
296         dma_addr_t tmp_p_addr, tmp_p_addr_next;
297         RxD_block_t *pre_rxd_blk = NULL;
298         int i, j, blk_cnt, rx_sz, tx_sz;
299         int lst_size, lst_per_page;
300         struct net_device *dev = nic->dev;
301 #ifdef CONFIG_2BUFF_MODE
302         u64 tmp;
303         buffAdd_t *ba;
304 #endif
305
306         mac_info_t *mac_control;
307         struct config_param *config;
308
309         mac_control = &nic->mac_control;
310         config = &nic->config;
311
312
313         /* Allocation and initialization of TXDLs in FIFOs */
314         size = 0;
315         for (i = 0; i < config->tx_fifo_num; i++) {
316                 size += config->tx_cfg[i].fifo_len;
317         }
318         if (size > MAX_AVAILABLE_TXDS) {
319                 DBG_PRINT(ERR_DBG, "%s: Total number of Tx FIFOs ",
320                           dev->name);
321                 DBG_PRINT(ERR_DBG, "exceeds the maximum value ");
322                 DBG_PRINT(ERR_DBG, "that can be used\n");
323                 return FAILURE;
324         }
325
326         lst_size = (sizeof(TxD_t) * config->max_txds);
327         tx_sz = lst_size * size;
328         lst_per_page = PAGE_SIZE / lst_size;
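        /*
         * Each FIFO's descriptor lists are carved out of PAGE_SIZE chunks
         * of DMA-consistent memory below, lst_per_page lists per page, and
         * list_info[] records the virtual and physical address of every
         * list.
         */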
329
330         for (i = 0; i < config->tx_fifo_num; i++) {
331                 int fifo_len = config->tx_cfg[i].fifo_len;
332                 int list_holder_size = fifo_len * sizeof(list_info_hold_t);
333                 mac_control->fifos[i].list_info = kmalloc(list_holder_size,
334                                                           GFP_KERNEL);
335                 if (!mac_control->fifos[i].list_info) {
336                         DBG_PRINT(ERR_DBG,
337                                   "Malloc failed for list_info\n");
338                         return -ENOMEM;
339                 }
340                 memset(mac_control->fifos[i].list_info, 0, list_holder_size);
341         }
342         for (i = 0; i < config->tx_fifo_num; i++) {
343                 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
344                                                 lst_per_page);
345                 mac_control->fifos[i].tx_curr_put_info.offset = 0;
346                 mac_control->fifos[i].tx_curr_put_info.fifo_len =
347                     config->tx_cfg[i].fifo_len - 1;
348                 mac_control->fifos[i].tx_curr_get_info.offset = 0;
349                 mac_control->fifos[i].tx_curr_get_info.fifo_len =
350                     config->tx_cfg[i].fifo_len - 1;
351                 mac_control->fifos[i].fifo_no = i;
352                 mac_control->fifos[i].nic = nic;
353                 mac_control->fifos[i].max_txds = MAX_SKB_FRAGS;
354
355                 for (j = 0; j < page_num; j++) {
356                         int k = 0;
357                         dma_addr_t tmp_p;
358                         void *tmp_v;
359                         tmp_v = pci_alloc_consistent(nic->pdev,
360                                                      PAGE_SIZE, &tmp_p);
361                         if (!tmp_v) {
362                                 DBG_PRINT(ERR_DBG,
363                                           "pci_alloc_consistent ");
364                                 DBG_PRINT(ERR_DBG, "failed for TxDL\n");
365                                 return -ENOMEM;
366                         }
367                         while (k < lst_per_page) {
368                                 int l = (j * lst_per_page) + k;
369                                 if (l == config->tx_cfg[i].fifo_len)
370                                         break;
371                                 mac_control->fifos[i].list_info[l].list_virt_addr =
372                                     tmp_v + (k * lst_size);
373                                 mac_control->fifos[i].list_info[l].list_phy_addr =
374                                     tmp_p + (k * lst_size);
375                                 k++;
376                         }
377                 }
378         }
379
380         /* Allocation and initialization of RXDs in Rings */
381         size = 0;
382         for (i = 0; i < config->rx_ring_num; i++) {
383                 if (config->rx_cfg[i].num_rxd % (MAX_RXDS_PER_BLOCK + 1)) {
384                         DBG_PRINT(ERR_DBG, "%s: RxD count of ", dev->name);
385                         DBG_PRINT(ERR_DBG, "Ring%d is not a multiple of ",
386                                   i);
387                         DBG_PRINT(ERR_DBG, "RxDs per Block");
388                         return FAILURE;
389                 }
390                 size += config->rx_cfg[i].num_rxd;
391                 mac_control->rings[i].block_count =
392                     config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
393                 mac_control->rings[i].pkt_cnt =
394                     config->rx_cfg[i].num_rxd - mac_control->rings[i].block_count;
395         }
396         size = (size * (sizeof(RxD_t)));
397         rx_sz = size;
398
399         for (i = 0; i < config->rx_ring_num; i++) {
400                 mac_control->rings[i].rx_curr_get_info.block_index = 0;
401                 mac_control->rings[i].rx_curr_get_info.offset = 0;
402                 mac_control->rings[i].rx_curr_get_info.ring_len =
403                     config->rx_cfg[i].num_rxd - 1;
404                 mac_control->rings[i].rx_curr_put_info.block_index = 0;
405                 mac_control->rings[i].rx_curr_put_info.offset = 0;
406                 mac_control->rings[i].rx_curr_put_info.ring_len =
407                     config->rx_cfg[i].num_rxd - 1;
408                 mac_control->rings[i].nic = nic;
409                 mac_control->rings[i].ring_no = i;
410
411                 blk_cnt =
412                     config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
413                 /*  Allocating all the Rx blocks */
414                 for (j = 0; j < blk_cnt; j++) {
415 #ifndef CONFIG_2BUFF_MODE
416                         size = (MAX_RXDS_PER_BLOCK + 1) * (sizeof(RxD_t));
417 #else
418                         size = SIZE_OF_BLOCK;
419 #endif
420                         tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
421                                                           &tmp_p_addr);
422                         if (tmp_v_addr == NULL) {
423                                 /*
424                                  * In case of failure, free_shared_mem()
425                                  * is called, which should free any
426                                  * memory that was alloced till the
427                                  * failure happened.
428                                  */
429                                 mac_control->rings[i].rx_blocks[j].block_virt_addr =
430                                     tmp_v_addr;
431                                 return -ENOMEM;
432                         }
433                         memset(tmp_v_addr, 0, size);
434                         mac_control->rings[i].rx_blocks[j].block_virt_addr =
435                                 tmp_v_addr;
436                         mac_control->rings[i].rx_blocks[j].block_dma_addr =
437                                 tmp_p_addr;
438                 }
439                 /* Interlinking all Rx Blocks */
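                /*
                 * The (j + 1) % blk_cnt arithmetic below makes the chain
                 * circular: the last block's next pointers wrap back to
                 * block 0 of the same ring.
                 */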
440                 for (j = 0; j < blk_cnt; j++) {
441                         tmp_v_addr =
442                                 mac_control->rings[i].rx_blocks[j].block_virt_addr;
443                         tmp_v_addr_next =
444                                 mac_control->rings[i].rx_blocks[(j + 1) %
445                                               blk_cnt].block_virt_addr;
446                         tmp_p_addr =
447                                 mac_control->rings[i].rx_blocks[j].block_dma_addr;
448                         tmp_p_addr_next =
449                                 mac_control->rings[i].rx_blocks[(j + 1) %
450                                               blk_cnt].block_dma_addr;
451
452                         pre_rxd_blk = (RxD_block_t *) tmp_v_addr;
453                         pre_rxd_blk->reserved_1 = END_OF_BLOCK; /* last RxD
454                                                                  * marker.
455                                                                  */
456 #ifndef CONFIG_2BUFF_MODE
457                         pre_rxd_blk->reserved_2_pNext_RxD_block =
458                             (unsigned long) tmp_v_addr_next;
459 #endif
460                         pre_rxd_blk->pNext_RxD_Blk_physical =
461                             (u64) tmp_p_addr_next;
462                 }
463         }
464
465 #ifdef CONFIG_2BUFF_MODE
466         /*
467          * Allocation of Storages for buffer addresses in 2BUFF mode
468          * and the buffers as well.
469          */
470         for (i = 0; i < config->rx_ring_num; i++) {
471                 blk_cnt =
472                     config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
473                 mac_control->rings[i].ba = kmalloc((sizeof(buffAdd_t *) * blk_cnt),
474                                      GFP_KERNEL);
475                 if (!mac_control->rings[i].ba)
476                         return -ENOMEM;
477                 for (j = 0; j < blk_cnt; j++) {
478                         int k = 0;
479                         mac_control->rings[i].ba[j] = kmalloc((sizeof(buffAdd_t) *
480                                                  (MAX_RXDS_PER_BLOCK + 1)),
481                                                 GFP_KERNEL);
482                         if (!mac_control->rings[i].ba[j])
483                                 return -ENOMEM;
484                         while (k != MAX_RXDS_PER_BLOCK) {
485                                 ba = &mac_control->rings[i].ba[j][k];
486
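                                /*
                                 * The two allocations below over-allocate
                                 * by ALIGN_SIZE bytes so the buffer start
                                 * can be rounded up to the next
                                 * (ALIGN_SIZE + 1) byte boundary, assuming
                                 * ALIGN_SIZE is a power-of-two minus one
                                 * used as a mask.
                                 */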
487                                 ba->ba_0_org = (void *) kmalloc
488                                     (BUF0_LEN + ALIGN_SIZE, GFP_KERNEL);
489                                 if (!ba->ba_0_org)
490                                         return -ENOMEM;
491                                 tmp = (u64) ba->ba_0_org;
492                                 tmp += ALIGN_SIZE;
493                                 tmp &= ~((u64) ALIGN_SIZE);
494                                 ba->ba_0 = (void *) tmp;
495
496                                 ba->ba_1_org = (void *) kmalloc
497                                     (BUF1_LEN + ALIGN_SIZE, GFP_KERNEL);
498                                 if (!ba->ba_1_org)
499                                         return -ENOMEM;
500                                 tmp = (u64) ba->ba_1_org;
501                                 tmp += ALIGN_SIZE;
502                                 tmp &= ~((u64) ALIGN_SIZE);
503                                 ba->ba_1 = (void *) tmp;
504                                 k++;
505                         }
506                 }
507         }
508 #endif
509
510         /* Allocation and initialization of Statistics block */
511         size = sizeof(StatInfo_t);
512         mac_control->stats_mem = pci_alloc_consistent
513             (nic->pdev, size, &mac_control->stats_mem_phy);
514
515         if (!mac_control->stats_mem) {
516                 /*
517                  * In case of failure, free_shared_mem() is called, which
518                  * should free any memory that was alloced till the
519                  * failure happened.
520                  */
521                 return -ENOMEM;
522         }
523         mac_control->stats_mem_sz = size;
524
525         tmp_v_addr = mac_control->stats_mem;
526         mac_control->stats_info = (StatInfo_t *) tmp_v_addr;
527         memset(tmp_v_addr, 0, size);
528         DBG_PRINT(INIT_DBG, "%s:Ring Mem PHY: 0x%llx\n", dev->name,
529                   (unsigned long long) tmp_p_addr);
530
531         return SUCCESS;
532 }
533
534 /**
535  * free_shared_mem - Free the allocated Memory
536  * @nic:  Device private variable.
537  * Description: This function frees all memory allocated by the
538  * init_shared_mem() function and returns it to the kernel.
539  */
540
541 static void free_shared_mem(struct s2io_nic *nic)
542 {
543         int i, j, blk_cnt, size;
544         void *tmp_v_addr;
545         dma_addr_t tmp_p_addr;
546         mac_info_t *mac_control;
547         struct config_param *config;
548         int lst_size, lst_per_page;
549
550
551         if (!nic)
552                 return;
553
554         mac_control = &nic->mac_control;
555         config = &nic->config;
556
557         lst_size = (sizeof(TxD_t) * config->max_txds);
558         lst_per_page = PAGE_SIZE / lst_size;
559
560         for (i = 0; i < config->tx_fifo_num; i++) {
561                 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
562                                                 lst_per_page);
563                 for (j = 0; j < page_num; j++) {
564                         int mem_blks = (j * lst_per_page);
565                         if (!mac_control->fifos[i].list_info[mem_blks].
566                             list_virt_addr)
567                                 break;
568                         pci_free_consistent(nic->pdev, PAGE_SIZE,
569                                             mac_control->fifos[i].
570                                             list_info[mem_blks].
571                                             list_virt_addr,
572                                             mac_control->fifos[i].
573                                             list_info[mem_blks].
574                                             list_phy_addr);
575                 }
576                 kfree(mac_control->fifos[i].list_info);
577         }
578
579 #ifndef CONFIG_2BUFF_MODE
580         size = (MAX_RXDS_PER_BLOCK + 1) * (sizeof(RxD_t));
581 #else
582         size = SIZE_OF_BLOCK;
583 #endif
584         for (i = 0; i < config->rx_ring_num; i++) {
585                 blk_cnt = mac_control->rings[i].block_count;
586                 for (j = 0; j < blk_cnt; j++) {
587                         tmp_v_addr = mac_control->rings[i].rx_blocks[j].
588                                 block_virt_addr;
589                         tmp_p_addr = mac_control->rings[i].rx_blocks[j].
590                                 block_dma_addr;
591                         if (tmp_v_addr == NULL)
592                                 break;
593                         pci_free_consistent(nic->pdev, size,
594                                             tmp_v_addr, tmp_p_addr);
595                 }
596         }
597
598 #ifdef CONFIG_2BUFF_MODE
599         /* Freeing buffer storage addresses in 2BUFF mode. */
600         for (i = 0; i < config->rx_ring_num; i++) {
601                 blk_cnt =
602                     config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
603                 for (j = 0; j < blk_cnt; j++) {
604                         int k = 0;
605                         if (!mac_control->rings[i].ba[j])
606                                 continue;
607                         while (k != MAX_RXDS_PER_BLOCK) {
608                                 buffAdd_t *ba = &mac_control->rings[i].ba[j][k];
609                                 kfree(ba->ba_0_org);
610                                 kfree(ba->ba_1_org);
611                                 k++;
612                         }
613                         kfree(mac_control->rings[i].ba[j]);
614                 }
615                 if (mac_control->rings[i].ba)
616                         kfree(mac_control->rings[i].ba);
617         }
618 #endif
619
620         if (mac_control->stats_mem) {
621                 pci_free_consistent(nic->pdev,
622                                     mac_control->stats_mem_sz,
623                                     mac_control->stats_mem,
624                                     mac_control->stats_mem_phy);
625         }
626 }
627
628 /**
629  *  init_nic - Initialization of hardware
630  *  @nic: device private variable
631  *  Description: The function sequentially configures every block
632  *  of the H/W from their reset values.
633  *  Return Value:  SUCCESS on success and
634  *  '-1' on failure (endian settings incorrect).
635  */
636
637 static int init_nic(struct s2io_nic *nic)
638 {
639         XENA_dev_config_t __iomem *bar0 = nic->bar0;
640         struct net_device *dev = nic->dev;
641         register u64 val64 = 0;
642         void __iomem *add;
643         u32 time;
644         int i, j;
645         mac_info_t *mac_control;
646         struct config_param *config;
647         int mdio_cnt = 0, dtx_cnt = 0;
648         unsigned long long mem_share;
649         int mem_size;
650
651         mac_control = &nic->mac_control;
652         config = &nic->config;
653
654         /* Set the swapper control on the card */
655         if(s2io_set_swapper(nic)) {
656                 DBG_PRINT(ERR_DBG,"ERROR: Setting Swapper failed\n");
657                 return -1;
658         }
659
660         /* Remove XGXS from reset state */
661         val64 = 0;
662         writeq(val64, &bar0->sw_reset);
663         msleep(500);
664         val64 = readq(&bar0->sw_reset);
665
666         /*  Enable Receiving broadcasts */
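        /*
         * mac_cfg is written as two 32-bit halves below, and the
         * RMAC_CFG_KEY value is re-armed in rmac_cfg_key before each half,
         * which the hardware appears to require as a write-enable key.
         */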
667         add = &bar0->mac_cfg;
668         val64 = readq(&bar0->mac_cfg);
669         val64 |= MAC_RMAC_BCAST_ENABLE;
670         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
671         writel((u32) val64, add);
672         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
673         writel((u32) (val64 >> 32), (add + 4));
674
675         /* Read registers in all blocks */
676         val64 = readq(&bar0->mac_int_mask);
677         val64 = readq(&bar0->mc_int_mask);
678         val64 = readq(&bar0->xgxs_int_mask);
679
680         /*  Set MTU */
681         val64 = dev->mtu;
682         writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
683
684         /*
685          * Configuring the XAUI Interface of Xena.
686          * ***************************************
687          * To Configure the Xena's XAUI, one has to write a series
688          * of 64 bit values into two registers in a particular
689          * sequence. Hence a macro 'SWITCH_SIGN' has been defined
690  * which is placed in the array of configuration values
691  * (default_dtx_cfg & default_mdio_cfg) at appropriate places
692  * to switch writing from one register to another. We continue
693          * writing these values until we encounter the 'END_SIGN' macro.
694  * For example, after making a series of 21 writes into
695          * dtx_control register the 'SWITCH_SIGN' appears and hence we
696          * start writing into mdio_control until we encounter END_SIGN.
697          */
698         while (1) {
699               dtx_cfg:
700                 while (default_dtx_cfg[dtx_cnt] != END_SIGN) {
701                         if (default_dtx_cfg[dtx_cnt] == SWITCH_SIGN) {
702                                 dtx_cnt++;
703                                 goto mdio_cfg;
704                         }
705                         SPECIAL_REG_WRITE(default_dtx_cfg[dtx_cnt],
706                                           &bar0->dtx_control, UF);
707                         val64 = readq(&bar0->dtx_control);
708                         dtx_cnt++;
709                 }
710               mdio_cfg:
711                 while (default_mdio_cfg[mdio_cnt] != END_SIGN) {
712                         if (default_mdio_cfg[mdio_cnt] == SWITCH_SIGN) {
713                                 mdio_cnt++;
714                                 goto dtx_cfg;
715                         }
716                         SPECIAL_REG_WRITE(default_mdio_cfg[mdio_cnt],
717                                           &bar0->mdio_control, UF);
718                         val64 = readq(&bar0->mdio_control);
719                         mdio_cnt++;
720                 }
721                 if ((default_dtx_cfg[dtx_cnt] == END_SIGN) &&
722                     (default_mdio_cfg[mdio_cnt] == END_SIGN)) {
723                         break;
724                 } else {
725                         goto dtx_cfg;
726                 }
727         }
728
729         /*  Tx DMA Initialization */
730         val64 = 0;
731         writeq(val64, &bar0->tx_fifo_partition_0);
732         writeq(val64, &bar0->tx_fifo_partition_1);
733         writeq(val64, &bar0->tx_fifo_partition_2);
734         writeq(val64, &bar0->tx_fifo_partition_3);
735
736
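        /*
         * Each tx_fifo_partition register covers two FIFOs, so the
         * accumulated value is flushed at every odd FIFO index (cases 1,
         * 3, 5 and 7 below); bumping i for the last FIFO ensures a final
         * write still happens when the configured FIFO count is odd.
         */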
737         for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
738                 val64 |=
739                     vBIT(config->tx_cfg[i].fifo_len - 1, ((i * 32) + 19),
740                          13) | vBIT(config->tx_cfg[i].fifo_priority,
741                                     ((i * 32) + 5), 3);
742
743                 if (i == (config->tx_fifo_num - 1)) {
744                         if (i % 2 == 0)
745                                 i++;
746                 }
747
748                 switch (i) {
749                 case 1:
750                         writeq(val64, &bar0->tx_fifo_partition_0);
751                         val64 = 0;
752                         break;
753                 case 3:
754                         writeq(val64, &bar0->tx_fifo_partition_1);
755                         val64 = 0;
756                         break;
757                 case 5:
758                         writeq(val64, &bar0->tx_fifo_partition_2);
759                         val64 = 0;
760                         break;
761                 case 7:
762                         writeq(val64, &bar0->tx_fifo_partition_3);
763                         break;
764                 }
765         }
766
767         /* Enable Tx FIFO partition 0. */
768         val64 = readq(&bar0->tx_fifo_partition_0);
769         val64 |= BIT(0);        /* To enable the FIFO partition. */
770         writeq(val64, &bar0->tx_fifo_partition_0);
771
772         /*
773          * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
774          * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
775          */
776         if (get_xena_rev_id(nic->pdev) < 4)
777                 writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);
778
779         val64 = readq(&bar0->tx_fifo_partition_0);
780         DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
781                   &bar0->tx_fifo_partition_0, (unsigned long long) val64);
782
783         /*
784          * Initialization of Tx_PA_CONFIG register to ignore packet
785          * integrity checking.
786          */
787         val64 = readq(&bar0->tx_pa_cfg);
788         val64 |= TX_PA_CFG_IGNORE_FRM_ERR | TX_PA_CFG_IGNORE_SNAP_OUI |
789             TX_PA_CFG_IGNORE_LLC_CTRL | TX_PA_CFG_IGNORE_L2_ERR;
790         writeq(val64, &bar0->tx_pa_cfg);
791
792         /* Rx DMA initialization. */
793         val64 = 0;
794         for (i = 0; i < config->rx_ring_num; i++) {
795                 val64 |=
796                     vBIT(config->rx_cfg[i].ring_priority, (5 + (i * 8)),
797                          3);
798         }
799         writeq(val64, &bar0->rx_queue_priority);
800
801         /*
802          * Allocating equal share of memory to all the
803          * configured Rings.
804          */
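        /*
         * For example, with 64 units of queue memory and three configured
         * rings, queue 0 gets 64/3 + 64%3 = 22 units while queues 1 and 2
         * get 21 units each, so all 64 units are consumed.
         */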
805         val64 = 0;
806         mem_size = 64;
807         for (i = 0; i < config->rx_ring_num; i++) {
808                 switch (i) {
809                 case 0:
810                         mem_share = (mem_size / config->rx_ring_num +
811                                      mem_size % config->rx_ring_num);
812                         val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
813                         continue;
814                 case 1:
815                         mem_share = (mem_size / config->rx_ring_num);
816                         val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
817                         continue;
818                 case 2:
819                         mem_share = (mem_size / config->rx_ring_num);
820                         val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
821                         continue;
822                 case 3:
823                         mem_share = (mem_size / config->rx_ring_num);
824                         val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
825                         continue;
826                 case 4:
827                         mem_share = (mem_size / config->rx_ring_num);
828                         val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
829                         continue;
830                 case 5:
831                         mem_share = (mem_size / config->rx_ring_num);
832                         val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
833                         continue;
834                 case 6:
835                         mem_share = (mem_size / config->rx_ring_num);
836                         val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
837                         continue;
838                 case 7:
839                         mem_share = (mem_size / config->rx_ring_num);
840                         val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
841                         continue;
842                 }
843         }
844         writeq(val64, &bar0->rx_queue_cfg);
845
846         /*
847          * Filling Tx round robin registers
848          * as per the number of FIFOs
849          */
850         switch (config->tx_fifo_num) {
851         case 1:
852                 val64 = 0x0000000000000000ULL;
853                 writeq(val64, &bar0->tx_w_round_robin_0);
854                 writeq(val64, &bar0->tx_w_round_robin_1);
855                 writeq(val64, &bar0->tx_w_round_robin_2);
856                 writeq(val64, &bar0->tx_w_round_robin_3);
857                 writeq(val64, &bar0->tx_w_round_robin_4);
858                 break;
859         case 2:
860                 val64 = 0x0000010000010000ULL;
861                 writeq(val64, &bar0->tx_w_round_robin_0);
862                 val64 = 0x0100000100000100ULL;
863                 writeq(val64, &bar0->tx_w_round_robin_1);
864                 val64 = 0x0001000001000001ULL;
865                 writeq(val64, &bar0->tx_w_round_robin_2);
866                 val64 = 0x0000010000010000ULL;
867                 writeq(val64, &bar0->tx_w_round_robin_3);
868                 val64 = 0x0100000000000000ULL;
869                 writeq(val64, &bar0->tx_w_round_robin_4);
870                 break;
871         case 3:
872                 val64 = 0x0001000102000001ULL;
873                 writeq(val64, &bar0->tx_w_round_robin_0);
874                 val64 = 0x0001020000010001ULL;
875                 writeq(val64, &bar0->tx_w_round_robin_1);
876                 val64 = 0x0200000100010200ULL;
877                 writeq(val64, &bar0->tx_w_round_robin_2);
878                 val64 = 0x0001000102000001ULL;
879                 writeq(val64, &bar0->tx_w_round_robin_3);
880                 val64 = 0x0001020000000000ULL;
881                 writeq(val64, &bar0->tx_w_round_robin_4);
882                 break;
883         case 4:
884                 val64 = 0x0001020300010200ULL;
885                 writeq(val64, &bar0->tx_w_round_robin_0);
886                 val64 = 0x0100000102030001ULL;
887                 writeq(val64, &bar0->tx_w_round_robin_1);
888                 val64 = 0x0200010000010203ULL;
889                 writeq(val64, &bar0->tx_w_round_robin_2);
890                 val64 = 0x0001020001000001ULL;
891                 writeq(val64, &bar0->tx_w_round_robin_3);
892                 val64 = 0x0203000100000000ULL;
893                 writeq(val64, &bar0->tx_w_round_robin_4);
894                 break;
895         case 5:
896                 val64 = 0x0001000203000102ULL;
897                 writeq(val64, &bar0->tx_w_round_robin_0);
898                 val64 = 0x0001020001030004ULL;
899                 writeq(val64, &bar0->tx_w_round_robin_1);
900                 val64 = 0x0001000203000102ULL;
901                 writeq(val64, &bar0->tx_w_round_robin_2);
902                 val64 = 0x0001020001030004ULL;
903                 writeq(val64, &bar0->tx_w_round_robin_3);
904                 val64 = 0x0001000000000000ULL;
905                 writeq(val64, &bar0->tx_w_round_robin_4);
906                 break;
907         case 6:
908                 val64 = 0x0001020304000102ULL;
909                 writeq(val64, &bar0->tx_w_round_robin_0);
910                 val64 = 0x0304050001020001ULL;
911                 writeq(val64, &bar0->tx_w_round_robin_1);
912                 val64 = 0x0203000100000102ULL;
913                 writeq(val64, &bar0->tx_w_round_robin_2);
914                 val64 = 0x0304000102030405ULL;
915                 writeq(val64, &bar0->tx_w_round_robin_3);
916                 val64 = 0x0001000200000000ULL;
917                 writeq(val64, &bar0->tx_w_round_robin_4);
918                 break;
919         case 7:
920                 val64 = 0x0001020001020300ULL;
921                 writeq(val64, &bar0->tx_w_round_robin_0);
922                 val64 = 0x0102030400010203ULL;
923                 writeq(val64, &bar0->tx_w_round_robin_1);
924                 val64 = 0x0405060001020001ULL;
925                 writeq(val64, &bar0->tx_w_round_robin_2);
926                 val64 = 0x0304050000010200ULL;
927                 writeq(val64, &bar0->tx_w_round_robin_3);
928                 val64 = 0x0102030000000000ULL;
929                 writeq(val64, &bar0->tx_w_round_robin_4);
930                 break;
931         case 8:
932                 val64 = 0x0001020300040105ULL;
933                 writeq(val64, &bar0->tx_w_round_robin_0);
934                 val64 = 0x0200030106000204ULL;
935                 writeq(val64, &bar0->tx_w_round_robin_1);
936                 val64 = 0x0103000502010007ULL;
937                 writeq(val64, &bar0->tx_w_round_robin_2);
938                 val64 = 0x0304010002060500ULL;
939                 writeq(val64, &bar0->tx_w_round_robin_3);
940                 val64 = 0x0103020400000000ULL;
941                 writeq(val64, &bar0->tx_w_round_robin_4);
942                 break;
943         }
944
945         /* Filling the Rx round robin registers as per the
946          * number of Rings and steering based on QoS.
947          */
948         switch (config->rx_ring_num) {
949         case 1:
950                 val64 = 0x8080808080808080ULL;
951                 writeq(val64, &bar0->rts_qos_steering);
952                 break;
953         case 2:
954                 val64 = 0x0000010000010000ULL;
955                 writeq(val64, &bar0->rx_w_round_robin_0);
956                 val64 = 0x0100000100000100ULL;
957                 writeq(val64, &bar0->rx_w_round_robin_1);
958                 val64 = 0x0001000001000001ULL;
959                 writeq(val64, &bar0->rx_w_round_robin_2);
960                 val64 = 0x0000010000010000ULL;
961                 writeq(val64, &bar0->rx_w_round_robin_3);
962                 val64 = 0x0100000000000000ULL;
963                 writeq(val64, &bar0->rx_w_round_robin_4);
964
965                 val64 = 0x8080808040404040ULL;
966                 writeq(val64, &bar0->rts_qos_steering);
967                 break;
968         case 3:
969                 val64 = 0x0001000102000001ULL;
970                 writeq(val64, &bar0->rx_w_round_robin_0);
971                 val64 = 0x0001020000010001ULL;
972                 writeq(val64, &bar0->rx_w_round_robin_1);
973                 val64 = 0x0200000100010200ULL;
974                 writeq(val64, &bar0->rx_w_round_robin_2);
975                 val64 = 0x0001000102000001ULL;
976                 writeq(val64, &bar0->rx_w_round_robin_3);
977                 val64 = 0x0001020000000000ULL;
978                 writeq(val64, &bar0->rx_w_round_robin_4);
979
980                 val64 = 0x8080804040402020ULL;
981                 writeq(val64, &bar0->rts_qos_steering);
982                 break;
983         case 4:
984                 val64 = 0x0001020300010200ULL;
985                 writeq(val64, &bar0->rx_w_round_robin_0);
986                 val64 = 0x0100000102030001ULL;
987                 writeq(val64, &bar0->rx_w_round_robin_1);
988                 val64 = 0x0200010000010203ULL;
989                 writeq(val64, &bar0->rx_w_round_robin_2);
990                 val64 = 0x0001020001000001ULL;  
991                 writeq(val64, &bar0->rx_w_round_robin_3);
992                 val64 = 0x0203000100000000ULL;
993                 writeq(val64, &bar0->rx_w_round_robin_4);
994
995                 val64 = 0x8080404020201010ULL;
996                 writeq(val64, &bar0->rts_qos_steering);
997                 break;
998         case 5:
999                 val64 = 0x0001000203000102ULL;
1000                 writeq(val64, &bar0->rx_w_round_robin_0);
1001                 val64 = 0x0001020001030004ULL;
1002                 writeq(val64, &bar0->rx_w_round_robin_1);
1003                 val64 = 0x0001000203000102ULL;
1004                 writeq(val64, &bar0->rx_w_round_robin_2);
1005                 val64 = 0x0001020001030004ULL;
1006                 writeq(val64, &bar0->rx_w_round_robin_3);
1007                 val64 = 0x0001000000000000ULL;
1008                 writeq(val64, &bar0->rx_w_round_robin_4);
1009
1010                 val64 = 0x8080404020201008ULL;
1011                 writeq(val64, &bar0->rts_qos_steering);
1012                 break;
1013         case 6:
1014                 val64 = 0x0001020304000102ULL;
1015                 writeq(val64, &bar0->rx_w_round_robin_0);
1016                 val64 = 0x0304050001020001ULL;
1017                 writeq(val64, &bar0->rx_w_round_robin_1);
1018                 val64 = 0x0203000100000102ULL;
1019                 writeq(val64, &bar0->rx_w_round_robin_2);
1020                 val64 = 0x0304000102030405ULL;
1021                 writeq(val64, &bar0->rx_w_round_robin_3);
1022                 val64 = 0x0001000200000000ULL;
1023                 writeq(val64, &bar0->rx_w_round_robin_4);
1024
1025                 val64 = 0x8080404020100804ULL;
1026                 writeq(val64, &bar0->rts_qos_steering);
1027                 break;
1028         case 7:
1029                 val64 = 0x0001020001020300ULL;
1030                 writeq(val64, &bar0->rx_w_round_robin_0);
1031                 val64 = 0x0102030400010203ULL;
1032                 writeq(val64, &bar0->rx_w_round_robin_1);
1033                 val64 = 0x0405060001020001ULL;
1034                 writeq(val64, &bar0->rx_w_round_robin_2);
1035                 val64 = 0x0304050000010200ULL;
1036                 writeq(val64, &bar0->rx_w_round_robin_3);
1037                 val64 = 0x0102030000000000ULL;
1038                 writeq(val64, &bar0->rx_w_round_robin_4);
1039
1040                 val64 = 0x8080402010080402ULL;
1041                 writeq(val64, &bar0->rts_qos_steering);
1042                 break;
1043         case 8:
1044                 val64 = 0x0001020300040105ULL;
1045                 writeq(val64, &bar0->rx_w_round_robin_0);
1046                 val64 = 0x0200030106000204ULL;
1047                 writeq(val64, &bar0->rx_w_round_robin_1);
1048                 val64 = 0x0103000502010007ULL;
1049                 writeq(val64, &bar0->rx_w_round_robin_2);
1050                 val64 = 0x0304010002060500ULL;
1051                 writeq(val64, &bar0->rx_w_round_robin_3);
1052                 val64 = 0x0103020400000000ULL;
1053                 writeq(val64, &bar0->rx_w_round_robin_4);
1054
1055                 val64 = 0x8040201008040201ULL;
1056                 writeq(val64, &bar0->rts_qos_steering);
1057                 break;
1058         }
1059
1060         /* UDP Fix */
1061         val64 = 0;
1062         for (i = 0; i < 8; i++)
1063                 writeq(val64, &bar0->rts_frm_len_n[i]);
1064
1065         /* Set the default rts frame length for the rings configured */
1066         val64 = MAC_RTS_FRM_LEN_SET(dev->mtu+22);
1067         for (i = 0 ; i < config->rx_ring_num ; i++)
1068                 writeq(val64, &bar0->rts_frm_len_n[i]);
1069
1070         /* Set the frame length for the configured rings
1071          * desired by the user
1072          */
1073         for (i = 0; i < config->rx_ring_num; i++) {
1074                 /* If rts_frm_len[i] == 0 then it is assumed that the user has
1075                  * not specified frame length steering.
1076                  * If the user provides a frame length then program
1077                  * the rts_frm_len register with that value, or else
1078                  * leave it as it is.
1079                  */
1080                 if (rts_frm_len[i] != 0) {
1081                         writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
1082                                 &bar0->rts_frm_len_n[i]);
1083                 }
1084         }
1085
1086         /* Program statistics memory */
1087         writeq(mac_control->stats_mem_phy, &bar0->stat_addr);
1088
1089         /*
1090          * Initializing the sampling rate for the device to calculate the
1091          * bandwidth utilization.
1092          */
1093         val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
1094             MAC_RX_LINK_UTIL_VAL(rmac_util_period);
1095         writeq(val64, &bar0->mac_link_util);
1096
1097
1098         /*
1099          * Initializing the Transmit and Receive Traffic Interrupt
1100          * Scheme.
1101          */
1102         /*
1103          * TTI Initialization. Default Tx timer gets us about
1104          * 250 interrupts per sec. Continuous interrupts are enabled
1105          * by default.
1106          */
1107         val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078) |
1108             TTI_DATA1_MEM_TX_URNG_A(0xA) |
1109             TTI_DATA1_MEM_TX_URNG_B(0x10) |
1110             TTI_DATA1_MEM_TX_URNG_C(0x30) | TTI_DATA1_MEM_TX_TIMER_AC_EN;
1111         if (use_continuous_tx_intrs)
1112                 val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
1113         writeq(val64, &bar0->tti_data1_mem);
1114
1115         val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
1116             TTI_DATA2_MEM_TX_UFC_B(0x20) |
1117             TTI_DATA2_MEM_TX_UFC_C(0x70) | TTI_DATA2_MEM_TX_UFC_D(0x80);
1118         writeq(val64, &bar0->tti_data2_mem);
1119
1120         val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
1121         writeq(val64, &bar0->tti_command_mem);
1122
1123         /*
1124          * Once the operation completes, the Strobe bit of the command
1125          * register will be reset. We poll for this particular condition.
1126          * We wait for a maximum of 500ms for the operation to complete;
1127          * if it's not complete by then, we return an error.
1128          */
1129         time = 0;
1130         while (TRUE) {
1131                 val64 = readq(&bar0->tti_command_mem);
1132                 if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
1133                         break;
1134                 }
1135                 if (time > 10) {
1136                         DBG_PRINT(ERR_DBG, "%s: TTI init Failed\n",
1137                                   dev->name);
1138                         return -1;
1139                 }
1140                 msleep(50);
1141                 time++;
1142         }
1143
1144         /* RTI Initialization */
1145         val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF) |
1146             RTI_DATA1_MEM_RX_URNG_A(0xA) |
1147             RTI_DATA1_MEM_RX_URNG_B(0x10) |
1148             RTI_DATA1_MEM_RX_URNG_C(0x30) | RTI_DATA1_MEM_RX_TIMER_AC_EN;
1149
1150         writeq(val64, &bar0->rti_data1_mem);
1151
1152         val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
1153             RTI_DATA2_MEM_RX_UFC_B(0x2) |
1154             RTI_DATA2_MEM_RX_UFC_C(0x40) | RTI_DATA2_MEM_RX_UFC_D(0x80);
1155         writeq(val64, &bar0->rti_data2_mem);
1156
1157         val64 = RTI_CMD_MEM_WE | RTI_CMD_MEM_STROBE_NEW_CMD;
1158         writeq(val64, &bar0->rti_command_mem);
1159
1160         /*
1161          * Once the operation completes, the Strobe bit of the
1162          * command register will be reset. We poll for this
1163          * particular condition. We wait for a maximum of 500ms
1164          * for the operation to complete; if it's not complete
1165          * by then, we return an error.
1166          */
1167         time = 0;
1168         while (TRUE) {
1169                 val64 = readq(&bar0->rti_command_mem);
1170                 if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD)) {
1171                         break;
1172                 }
1173                 if (time > 10) {
1174                         DBG_PRINT(ERR_DBG, "%s: RTI init Failed\n",
1175                                   dev->name);
1176                         return -1;
1177                 }
1178                 time++;
1179                 msleep(50);
1180         }
1181
1182         /*
1183          * Initialize proper Pause threshold values for all
1184          * the 8 Queues on the Rx side.
1185          */
1186         writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
1187         writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);
1188
1189         /* Disable RMAC PAD STRIPPING */
1190         add = (void *) &bar0->mac_cfg;
1191         val64 = readq(&bar0->mac_cfg);
1192         val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
1193         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1194         writel((u32) (val64), add);
1195         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1196         writel((u32) (val64 >> 32), (add + 4));
1197         val64 = readq(&bar0->mac_cfg);
1198
1199         /*
1200          * Set the time value to be inserted in the pause frame
1201          * generated by xena.
1202          */
1203         val64 = readq(&bar0->rmac_pause_cfg);
1204         val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
1205         val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
1206         writeq(val64, &bar0->rmac_pause_cfg);
1207
1208         /*
1209          * Set the threshold limit for generating the pause frame.
1210          * If the amount of data in any Queue exceeds the ratio
1211          * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256,
1212          * a pause frame is generated.
1213          */
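        /*
         * Each Rx queue owns a 16-bit field of the threshold register; the
         * loops below replicate (0xFF00 | threshold) into all four fields,
         * so the default threshold of 187 (0xBB) yields the value
         * 0xFFBBFFBBFFBBFFBB programmed above.
         */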
1214         val64 = 0;
1215         for (i = 0; i < 4; i++) {
1216                 val64 |=
1217                     (((u64) 0xFF00 | nic->mac_control.
1218                       mc_pause_threshold_q0q3)
1219                      << (i * 2 * 8));
1220         }
1221         writeq(val64, &bar0->mc_pause_thresh_q0q3);
1222
1223         val64 = 0;
1224         for (i = 0; i < 4; i++) {
1225                 val64 |=
1226                     (((u64) 0xFF00 | nic->mac_control.
1227                       mc_pause_threshold_q4q7)
1228                      << (i * 2 * 8));
1229         }
1230         writeq(val64, &bar0->mc_pause_thresh_q4q7);
1231
1232         /*
1233          * TxDMA will stop issuing read requests if the number of read
1234          * splits has exceeded the limit set by shared_splits.
1235          */
1236         val64 = readq(&bar0->pic_control);
1237         val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
1238         writeq(val64, &bar0->pic_control);
1239
1240         return SUCCESS;
1241 }
1242
1243 /**
1244  *  en_dis_able_nic_intrs - Enable or Disable the interrupts
1245  *  @nic: device private variable.
1246  *  @mask: A mask indicating which Intr block must be modified.
1247  *  @flag: A flag indicating whether to enable or disable the Intrs.
1248  *  Description: This function will either disable or enable the interrupts
1249  *  depending on the flag argument. The mask argument can be used to
1250  *  enable/disable any Intr block.
1251  *  Return Value: NONE.
1252  */
1253
1254 static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
1255 {
1256         XENA_dev_config_t __iomem *bar0 = nic->bar0;
1257         register u64 val64 = 0, temp64 = 0;
1258
1259         /*  Top level interrupt classification */
1260         /*  PIC Interrupts */
1261         if ((mask & (TX_PIC_INTR | RX_PIC_INTR))) {
1262                 /*  Enable PIC Intrs in the general intr mask register */
1263                 val64 = TXPIC_INT_M | PIC_RX_INT_M;
1264                 if (flag == ENABLE_INTRS) {
1265                         temp64 = readq(&bar0->general_int_mask);
1266                         temp64 &= ~((u64) val64);
1267                         writeq(temp64, &bar0->general_int_mask);
1268                         /*
1269                          * Disabled all PCIX, Flash, MDIO, IIC and GPIO
1270                          * interrupts for now.
1271                          * TODO
1272                          */
1273                         writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
1274                         /*
1275                          * No MSI Support is available presently, so TTI and
1276                          * RTI interrupts are also disabled.
1277                          */
1278                 } else if (flag == DISABLE_INTRS) {
1279                         /*
1280                          * Disable PIC Intrs in the general
1281                          * intr mask register
1282                          */
1283                         writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
1284                         temp64 = readq(&bar0->general_int_mask);
1285                         val64 |= temp64;
1286                         writeq(val64, &bar0->general_int_mask);
1287                 }
1288         }
1289
1290         /*  DMA Interrupts */
1291         /*  Enabling/Disabling Tx DMA interrupts */
1292         if (mask & TX_DMA_INTR) {
1293                 /* Enable TxDMA Intrs in the general intr mask register */
1294                 val64 = TXDMA_INT_M;
1295                 if (flag == ENABLE_INTRS) {
1296                         temp64 = readq(&bar0->general_int_mask);
1297                         temp64 &= ~((u64) val64);
1298                         writeq(temp64, &bar0->general_int_mask);
1299                         /*
1300                          * Keep all interrupts other than PFC interrupt
1301                          * and PCC interrupt disabled in DMA level.
1302                          */
1303                         val64 = DISABLE_ALL_INTRS & ~(TXDMA_PFC_INT_M |
1304                                                       TXDMA_PCC_INT_M);
1305                         writeq(val64, &bar0->txdma_int_mask);
1306                         /*
1307                          * Enable only the MISC error 1 interrupt in PFC block
1308                          */
1309                         val64 = DISABLE_ALL_INTRS & (~PFC_MISC_ERR_1);
1310                         writeq(val64, &bar0->pfc_err_mask);
1311                         /*
1312                          * Enable only the FB_ECC error interrupt in PCC block
1313                          */
1314                         val64 = DISABLE_ALL_INTRS & (~PCC_FB_ECC_ERR);
1315                         writeq(val64, &bar0->pcc_err_mask);
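                             /*
                              * Note on the masking idiom above (assuming
                              * DISABLE_ALL_INTRS is the all-ones mask):
                              * "DISABLE_ALL_INTRS & ~(X)" sets every mask bit
                              * except X, so only that source stays enabled,
                              * here PFC_MISC_ERR_1 and PCC_FB_ECC_ERR.
                              */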
1316                 } else if (flag == DISABLE_INTRS) {
1317                         /*
1318                          * Disable TxDMA Intrs in the general intr mask
1319                          * register
1320                          */
1321                         writeq(DISABLE_ALL_INTRS, &bar0->txdma_int_mask);
1322                         writeq(DISABLE_ALL_INTRS, &bar0->pfc_err_mask);
1323                         temp64 = readq(&bar0->general_int_mask);
1324                         val64 |= temp64;
1325                         writeq(val64, &bar0->general_int_mask);
1326                 }
1327         }
1328
1329         /*  Enabling/Disabling Rx DMA interrupts */
1330         if (mask & RX_DMA_INTR) {
1331                 /*  Enable RxDMA Intrs in the general intr mask register */
1332                 val64 = RXDMA_INT_M;
1333                 if (flag == ENABLE_INTRS) {
1334                         temp64 = readq(&bar0->general_int_mask);
1335                         temp64 &= ~((u64) val64);
1336                         writeq(temp64, &bar0->general_int_mask);
1337                         /*
1338                          * All RxDMA block interrupts are disabled for now
1339                          * TODO
1340                          */
1341                         writeq(DISABLE_ALL_INTRS, &bar0->rxdma_int_mask);
1342                 } else if (flag == DISABLE_INTRS) {
1343                         /*
1344                          * Disable RxDMA Intrs in the general intr mask
1345                          * register
1346                          */
1347                         writeq(DISABLE_ALL_INTRS, &bar0->rxdma_int_mask);
1348                         temp64 = readq(&bar0->general_int_mask);
1349                         val64 |= temp64;
1350                         writeq(val64, &bar0->general_int_mask);
1351                 }
1352         }
1353
1354         /*  MAC Interrupts */
1355         /*  Enabling/Disabling MAC interrupts */
1356         if (mask & (TX_MAC_INTR | RX_MAC_INTR)) {
1357                 val64 = TXMAC_INT_M | RXMAC_INT_M;
1358                 if (flag == ENABLE_INTRS) {
1359                         temp64 = readq(&bar0->general_int_mask);
1360                         temp64 &= ~((u64) val64);
1361                         writeq(temp64, &bar0->general_int_mask);
1362                         /*
1363                          * All MAC block error interrupts are disabled for now
1364                          * except the link status change interrupt.
1365                          * TODO
1366                          */
1367                         val64 = MAC_INT_STATUS_RMAC_INT;
1368                         temp64 = readq(&bar0->mac_int_mask);
1369                         temp64 &= ~((u64) val64);
1370                         writeq(temp64, &bar0->mac_int_mask);
1371
1372                         val64 = readq(&bar0->mac_rmac_err_mask);
1373                         val64 &= ~((u64) RMAC_LINK_STATE_CHANGE_INT);
1374                         writeq(val64, &bar0->mac_rmac_err_mask);
1375                 } else if (flag == DISABLE_INTRS) {
1376                         /*
1377                          * Disable MAC Intrs in the general intr mask register
1378                          */
1379                         writeq(DISABLE_ALL_INTRS, &bar0->mac_int_mask);
1380                         writeq(DISABLE_ALL_INTRS,
1381                                &bar0->mac_rmac_err_mask);
1382
1383                         temp64 = readq(&bar0->general_int_mask);
1384                         val64 |= temp64;
1385                         writeq(val64, &bar0->general_int_mask);
1386                 }
1387         }
1388
1389         /*  XGXS Interrupts */
1390         if (mask & (TX_XGXS_INTR | RX_XGXS_INTR)) {
1391                 val64 = TXXGXS_INT_M | RXXGXS_INT_M;
1392                 if (flag == ENABLE_INTRS) {
1393                         temp64 = readq(&bar0->general_int_mask);
1394                         temp64 &= ~((u64) val64);
1395                         writeq(temp64, &bar0->general_int_mask);
1396                         /*
1397                          * All XGXS block error interrupts are disabled for now
1398                          * TODO
1399                          */
1400                         writeq(DISABLE_ALL_INTRS, &bar0->xgxs_int_mask);
1401                 } else if (flag == DISABLE_INTRS) {
1402                         /*
1403                          * Disable XGXS Intrs in the general intr mask register
1404                          */
1405                         writeq(DISABLE_ALL_INTRS, &bar0->xgxs_int_mask);
1406                         temp64 = readq(&bar0->general_int_mask);
1407                         val64 |= temp64;
1408                         writeq(val64, &bar0->general_int_mask);
1409                 }
1410         }
1411
1412         /*  Memory Controller(MC) interrupts */
1413         if (mask & MC_INTR) {
1414                 val64 = MC_INT_M;
1415                 if (flag == ENABLE_INTRS) {
1416                         temp64 = readq(&bar0->general_int_mask);
1417                         temp64 &= ~((u64) val64);
1418                         writeq(temp64, &bar0->general_int_mask);
1419                         /*
1420                          * Enable all MC Intrs.
1421                          */
1422                         writeq(0x0, &bar0->mc_int_mask);
1423                         writeq(0x0, &bar0->mc_err_mask);
1424                 } else if (flag == DISABLE_INTRS) {
1425                         /*
1426                          * Disable MC Intrs in the general intr mask register
1427                          */
1428                         writeq(DISABLE_ALL_INTRS, &bar0->mc_int_mask);
1429                         temp64 = readq(&bar0->general_int_mask);
1430                         val64 |= temp64;
1431                         writeq(val64, &bar0->general_int_mask);
1432                 }
1433         }
1434
1435
1436         /*  Tx traffic interrupts */
1437         if (mask & TX_TRAFFIC_INTR) {
1438                 val64 = TXTRAFFIC_INT_M;
1439                 if (flag == ENABLE_INTRS) {
1440                         temp64 = readq(&bar0->general_int_mask);
1441                         temp64 &= ~((u64) val64);
1442                         writeq(temp64, &bar0->general_int_mask);
1443                         /*
1444                          * Enable all the Tx side interrupts;
1445                          * writing 0 enables all 64 TX interrupt levels.
1446                          */
1447                         writeq(0x0, &bar0->tx_traffic_mask);
1448                 } else if (flag == DISABLE_INTRS) {
1449                         /*
1450                          * Disable Tx Traffic Intrs in the general intr mask
1451                          * register.
1452                          */
1453                         writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
1454                         temp64 = readq(&bar0->general_int_mask);
1455                         val64 |= temp64;
1456                         writeq(val64, &bar0->general_int_mask);
1457                 }
1458         }
1459
1460         /*  Rx traffic interrupts */
1461         if (mask & RX_TRAFFIC_INTR) {
1462                 val64 = RXTRAFFIC_INT_M;
1463                 if (flag == ENABLE_INTRS) {
1464                         temp64 = readq(&bar0->general_int_mask);
1465                         temp64 &= ~((u64) val64);
1466                         writeq(temp64, &bar0->general_int_mask);
1467                         /* writing 0 enables all 8 RX interrupt levels */
1468                         writeq(0x0, &bar0->rx_traffic_mask);
1469                 } else if (flag == DISABLE_INTRS) {
1470                         /*
1471                          * Disable Rx Traffic Intrs in the general intr mask
1472                          * register.
1473                          */
1474                         writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
1475                         temp64 = readq(&bar0->general_int_mask);
1476                         val64 |= temp64;
1477                         writeq(val64, &bar0->general_int_mask);
1478                 }
1479         }
1480 }
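
     /*
      * Typical usage of en_dis_able_nic_intrs() (a sketch mirroring the calls
      * made in start_nic() and stop_nic() below): build a mask of the
      * interrupt blocks of interest and pass ENABLE_INTRS or DISABLE_INTRS:
      *
      *     u16 mask = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR | TX_MAC_INTR |
      *                RX_MAC_INTR | MC_INTR;
      *     en_dis_able_nic_intrs(nic, mask, ENABLE_INTRS);
      *     ...
      *     en_dis_able_nic_intrs(nic, mask, DISABLE_INTRS);
      */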
1481
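     /*
      * check_prc_pcc_state - helper for verify_xena_quiescence() below.
      * Checks the RMAC PCC and RC PRC bits of the adapter status word. The
      * bits consulted differ for silicon revisions >= 4, and the PCC state
      * expected depends on whether the adapter enable bit has ever been
      * written before (the flag argument). Returns 1 when the state is
      * acceptable, 0 otherwise.
      */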
1482 static int check_prc_pcc_state(u64 val64, int flag, int rev_id)
1483 {
1484         int ret = 0;
1485
1486         if (flag == FALSE) {
1487                 if (rev_id >= 4) {
1488                         if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) &&
1489                             ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1490                              ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
1491                                 ret = 1;
1492                         }
1493                 } else {
1494                         if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) &&
1495                             ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1496                              ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
1497                                 ret = 1;
1498                         }
1499                 }
1500         } else {
1501                 if (rev_id >= 4) {
1502                         if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
1503                              ADAPTER_STATUS_RMAC_PCC_IDLE) &&
1504                             (!(val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ||
1505                              ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1506                               ADAPTER_STATUS_RC_PRC_QUIESCENT))) {
1507                                 ret = 1;
1508                         }
1509                 } else {
1510                         if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
1511                              ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) &&
1512                             (!(val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ||
1513                              ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1514                               ADAPTER_STATUS_RC_PRC_QUIESCENT))) {
1515                                 ret = 1;
1516                         }
1517                 }
1518         }
1519
1520         return ret;
1521 }
1522 /**
1523  *  verify_xena_quiescence - Checks whether the H/W is ready
1524  *  @sp: device private variable.
1525  *  @val64: Value read from adapter status register.
1526  *  @flag: indicates if the adapter enable bit was ever written once before.
1527  *  Description: Returns whether the H/W is ready to go or not. Depending
1528  *  on whether the adapter enable bit was written or not, the comparison
1529  *  differs and the calling function passes the input argument flag to
1530  *  indicate this.
1531  *  Return: 1 if Xena is quiescent
1532  *          0 if Xena is not quiescent
1533  */
1534
1535 static int verify_xena_quiescence(nic_t *sp, u64 val64, int flag)
1536 {
1537         int ret = 0;
1538         u64 tmp64 = ~((u64) val64);
1539         int rev_id = get_xena_rev_id(sp->pdev);
1540
1541         if (!
1542             (tmp64 &
1543              (ADAPTER_STATUS_TDMA_READY | ADAPTER_STATUS_RDMA_READY |
1544               ADAPTER_STATUS_PFC_READY | ADAPTER_STATUS_TMAC_BUF_EMPTY |
1545               ADAPTER_STATUS_PIC_QUIESCENT | ADAPTER_STATUS_MC_DRAM_READY |
1546               ADAPTER_STATUS_MC_QUEUES_READY | ADAPTER_STATUS_M_PLL_LOCK |
1547               ADAPTER_STATUS_P_PLL_LOCK))) {
1548                 ret = check_prc_pcc_state(val64, flag, rev_id);
1549         }
1550
1551         return ret;
1552 }
1553
1554 /**
1555  * fix_mac_address - Fix for MAC address problem on Alpha platforms
1556  * @sp: Pointer to device specific structure
1557  * Description:
1558  * New procedure to clear MAC address reading problems on Alpha platforms.
1559  *
1560  */
1561
1562 void fix_mac_address(nic_t * sp)
1563 {
1564         XENA_dev_config_t __iomem *bar0 = sp->bar0;
1565         u64 val64;
1566         int i = 0;
1567
1568         while (fix_mac[i] != END_SIGN) {
1569                 writeq(fix_mac[i++], &bar0->gpio_control);
1570                 udelay(10);
1571                 val64 = readq(&bar0->gpio_control);
1572         }
1573 }
1574
1575 /**
1576  *  start_nic - Turns the device on
1577  *  @nic : device private variable.
1578  *  Description:
1579  *  This function actually turns the device on. Before this function is
1580  *  called, all registers are configured from their reset states
1581  *  and shared memory is allocated but the NIC is still quiescent. On
1582  *  calling this function, the device interrupts are cleared and the NIC is
1583  *  literally switched on by writing into the adapter control register.
1584  *  Return Value:
1585  *  SUCCESS on success and -1 on failure.
1586  */
1587
1588 static int start_nic(struct s2io_nic *nic)
1589 {
1590         XENA_dev_config_t __iomem *bar0 = nic->bar0;
1591         struct net_device *dev = nic->dev;
1592         register u64 val64 = 0;
1593         u16 interruptible;
1594         u16 subid, i;
1595         mac_info_t *mac_control;
1596         struct config_param *config;
1597
1598         mac_control = &nic->mac_control;
1599         config = &nic->config;
1600
1601         /*  PRC Initialization and configuration */
1602         for (i = 0; i < config->rx_ring_num; i++) {
1603                 writeq((u64) mac_control->rings[i].rx_blocks[0].block_dma_addr,
1604                        &bar0->prc_rxd0_n[i]);
1605
1606                 val64 = readq(&bar0->prc_ctrl_n[i]);
1607 #ifndef CONFIG_2BUFF_MODE
1608                 val64 |= PRC_CTRL_RC_ENABLED;
1609 #else
1610                 val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
1611 #endif
1612                 writeq(val64, &bar0->prc_ctrl_n[i]);
1613         }
1614
1615 #ifdef CONFIG_2BUFF_MODE
1616         /* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
1617         val64 = readq(&bar0->rx_pa_cfg);
1618         val64 |= RX_PA_CFG_IGNORE_L2_ERR;
1619         writeq(val64, &bar0->rx_pa_cfg);
1620 #endif
1621
1622         /*
1623          * Enabling MC-RLDRAM. After enabling the device, we wait
1624          * for around 100 ms, which is approximately the time required
1625          * for the device to be ready for operation.
1626          */
1627         val64 = readq(&bar0->mc_rldram_mrs);
1628         val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
1629         SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
1630         val64 = readq(&bar0->mc_rldram_mrs);
1631
1632         msleep(100);    /* Delay by around 100 ms. */
1633
1634         /* Enabling ECC Protection. */
1635         val64 = readq(&bar0->adapter_control);
1636         val64 &= ~ADAPTER_ECC_EN;
1637         writeq(val64, &bar0->adapter_control);
1638
1639         /*
1640          * Clearing any possible Link state change interrupts that
1641          * could have popped up just before Enabling the card.
1642          */
1643         val64 = readq(&bar0->mac_rmac_err_reg);
1644         if (val64)
1645                 writeq(val64, &bar0->mac_rmac_err_reg);
1646
1647         /*
1648          * Verify if the device is ready to be enabled, if so enable
1649          * it.
1650          */
1651         val64 = readq(&bar0->adapter_status);
1652         if (!verify_xena_quiescence(nic, val64, nic->device_enabled_once)) {
1653                 DBG_PRINT(ERR_DBG, "%s: device is not ready, ", dev->name);
1654                 DBG_PRINT(ERR_DBG, "Adapter status reads: 0x%llx\n",
1655                           (unsigned long long) val64);
1656                 return FAILURE;
1657         }
1658
1659         /*  Enable select interrupts */
1660         interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR | TX_MAC_INTR |
1661             RX_MAC_INTR | MC_INTR;
1662         en_dis_able_nic_intrs(nic, interruptible, ENABLE_INTRS);
1663
1664         /*
1665          * With some switches, link might be already up at this point.
1666          * Because of this weird behavior, when we enable laser,
1667          * we may not get link. We need to handle this. We cannot
1668          * figure out which switch is misbehaving. So we are forced to
1669          * make a global change.
1670          */
1671
1672         /* Enabling Laser. */
1673         val64 = readq(&bar0->adapter_control);
1674         val64 |= ADAPTER_EOI_TX_ON;
1675         writeq(val64, &bar0->adapter_control);
1676
1677         /* SXE-002: Initialize link and activity LED */
1678         subid = nic->pdev->subsystem_device;
1679         if ((subid & 0xFF) >= 0x07) {
1680                 val64 = readq(&bar0->gpio_control);
1681                 val64 |= 0x0000800000000000ULL;
1682                 writeq(val64, &bar0->gpio_control);
1683                 val64 = 0x0411040400000000ULL;
1684                 writeq(val64, (void __iomem *) ((u8 *) bar0 + 0x2700));
1685         }
1686
1687         /*
1688          * Don't see link state interrupts on certain switches, so
1689          * directly scheduling a link state task from here.
1690          */
1691         schedule_work(&nic->set_link_task);
1692
1693         return SUCCESS;
1694 }
1695
1696 /**
1697  *  free_tx_buffers - Free all queued Tx buffers
1698  *  @nic : device private variable.
1699  *  Description:
1700  *  Free all queued Tx buffers.
1701  *  Return Value: void
1702 */
1703
1704 static void free_tx_buffers(struct s2io_nic *nic)
1705 {
1706         struct net_device *dev = nic->dev;
1707         struct sk_buff *skb;
1708         TxD_t *txdp;
1709         int i, j;
1710         mac_info_t *mac_control;
1711         struct config_param *config;
1712         int cnt = 0;
1713
1714         mac_control = &nic->mac_control;
1715         config = &nic->config;
1716
1717         for (i = 0; i < config->tx_fifo_num; i++) {
1718                 for (j = 0; j < config->tx_cfg[i].fifo_len - 1; j++) {
1719                         txdp = (TxD_t *) mac_control->fifos[i].list_info[j].
1720                             list_virt_addr;
1721                         skb =
1722                             (struct sk_buff *) ((unsigned long) txdp->
1723                                                 Host_Control);
1724                         if (skb == NULL) {
1725                                 memset(txdp, 0, sizeof(TxD_t));
1726                                 continue;
1727                         }
1728                         dev_kfree_skb(skb);
1729                         memset(txdp, 0, sizeof(TxD_t));
1730                         cnt++;
1731                 }
1732                 DBG_PRINT(INTR_DBG,
1733                           "%s:forcibly freeing %d skbs on FIFO%d\n",
1734                           dev->name, cnt, i);
1735                 mac_control->fifos[i].tx_curr_get_info.offset = 0;
1736                 mac_control->fifos[i].tx_curr_put_info.offset = 0;
1737         }
1738 }
1739
1740 /**
1741  *   stop_nic - To stop the nic
1742  *   @nic: device private variable.
1743  *   Description:
1744  *   This function does exactly the opposite of what the start_nic()
1745  *   function does. This function is called to stop the device.
1746  *   Return Value:
1747  *   void.
1748  */
1749
1750 static void stop_nic(struct s2io_nic *nic)
1751 {
1752         XENA_dev_config_t __iomem *bar0 = nic->bar0;
1753         register u64 val64 = 0;
1754         u16 interruptible, i;
1755         mac_info_t *mac_control;
1756         struct config_param *config;
1757
1758         mac_control = &nic->mac_control;
1759         config = &nic->config;
1760
1761         /*  Disable all interrupts */
1762         interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR | TX_MAC_INTR |
1763             RX_MAC_INTR | MC_INTR;
1764         en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
1765
1766         /*  Disable PRCs */
1767         for (i = 0; i < config->rx_ring_num; i++) {
1768                 val64 = readq(&bar0->prc_ctrl_n[i]);
1769                 val64 &= ~((u64) PRC_CTRL_RC_ENABLED);
1770                 writeq(val64, &bar0->prc_ctrl_n[i]);
1771         }
1772 }
1773
1774 /**
1775  *  fill_rx_buffers - Allocates the Rx side skbs
1776  *  @nic:  device private variable
1777  *  @ring_no: ring number
1778  *  Description:
1779  *  The function allocates Rx side skbs and puts the physical
1780  *  address of these buffers into the RxD buffer pointers, so that the NIC
1781  *  can DMA the received frame into these locations.
1782  *  The NIC supports 3 receive modes, viz.
1783  *  1. single buffer,
1784  *  2. three buffer and
1785  *  3. five buffer modes.
1786  *  Each mode defines how many fragments the received frame will be split
1787  *  up into by the NIC. The frame is split into L3 header, L4 header and
1788  *  L4 payload in three buffer mode; in five buffer mode, the L4 payload
1789  *  itself is split into 3 fragments. As of now only single buffer mode is
1790  *  supported.
1791  *   Return Value:
1792  *  SUCCESS on success or an appropriate -ve value on failure.
1793  */
1794
1795 int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
1796 {
1797         struct net_device *dev = nic->dev;
1798         struct sk_buff *skb;
1799         RxD_t *rxdp;
1800         int off, off1, size, block_no, block_no1;
1801         int offset, offset1;
1802         u32 alloc_tab = 0;
1803         u32 alloc_cnt;
1804         mac_info_t *mac_control;
1805         struct config_param *config;
1806 #ifdef CONFIG_2BUFF_MODE
1807         RxD_t *rxdpnext;
1808         int nextblk;
1809         u64 tmp;
1810         buffAdd_t *ba;
1811         dma_addr_t rxdpphys;
1812 #endif
1813 #ifndef CONFIG_S2IO_NAPI
1814         unsigned long flags;
1815 #endif
1816
1817         mac_control = &nic->mac_control;
1818         config = &nic->config;
1819         alloc_cnt = mac_control->rings[ring_no].pkt_cnt -
1820             atomic_read(&nic->rx_bufs_left[ring_no]);
1821         size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
1822             HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
1823
1824         while (alloc_tab < alloc_cnt) {
1825                 block_no = mac_control->rings[ring_no].rx_curr_put_info.
1826                     block_index;
1827                 block_no1 = mac_control->rings[ring_no].rx_curr_get_info.
1828                     block_index;
1829                 off = mac_control->rings[ring_no].rx_curr_put_info.offset;
1830                 off1 = mac_control->rings[ring_no].rx_curr_get_info.offset;
1831 #ifndef CONFIG_2BUFF_MODE
1832                 offset = block_no * (MAX_RXDS_PER_BLOCK + 1) + off;
1833                 offset1 = block_no1 * (MAX_RXDS_PER_BLOCK + 1) + off1;
1834 #else
1835                 offset = block_no * (MAX_RXDS_PER_BLOCK) + off;
1836                 offset1 = block_no1 * (MAX_RXDS_PER_BLOCK) + off1;
1837 #endif
1838
1839                 rxdp = mac_control->rings[ring_no].rx_blocks[block_no].
1840                     block_virt_addr + off;
1841                 if ((offset == offset1) && (rxdp->Host_Control)) {
1842                         DBG_PRINT(INTR_DBG, "%s: Get and Put", dev->name);
1843                         DBG_PRINT(INTR_DBG, " info equated\n");
1844                         goto end;
1845                 }
1846 #ifndef CONFIG_2BUFF_MODE
1847                 if (rxdp->Control_1 == END_OF_BLOCK) {
1848                         mac_control->rings[ring_no].rx_curr_put_info.
1849                             block_index++;
1850                         mac_control->rings[ring_no].rx_curr_put_info.
1851                             block_index %= mac_control->rings[ring_no].block_count;
1852                         block_no = mac_control->rings[ring_no].rx_curr_put_info.
1853                                 block_index;
1854                         off++;
1855                         off %= (MAX_RXDS_PER_BLOCK + 1);
1856                         mac_control->rings[ring_no].rx_curr_put_info.offset =
1857                             off;
1858                         rxdp = (RxD_t *) ((unsigned long) rxdp->Control_2);
1859                         DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
1860                                   dev->name, rxdp);
1861                 }
1862 #ifndef CONFIG_S2IO_NAPI
1863                 spin_lock_irqsave(&nic->put_lock, flags);
1864                 mac_control->rings[ring_no].put_pos =
1865                     (block_no * (MAX_RXDS_PER_BLOCK + 1)) + off;
1866                 spin_unlock_irqrestore(&nic->put_lock, flags);
1867 #endif
1868 #else
1869                 if (rxdp->Host_Control == END_OF_BLOCK) {
1870                         mac_control->rings[ring_no].rx_curr_put_info.
1871                             block_index++;
1872                         mac_control->rings[ring_no].rx_curr_put_info.block_index
1873                             %= mac_control->rings[ring_no].block_count;
1874                         block_no = mac_control->rings[ring_no].rx_curr_put_info
1875                             .block_index;
1876                         off = 0;
1877                         DBG_PRINT(INTR_DBG, "%s: block%d at: 0x%llx\n",
1878                                   dev->name, block_no,
1879                                   (unsigned long long) rxdp->Control_1);
1880                         mac_control->rings[ring_no].rx_curr_put_info.offset =
1881                             off;
1882                         rxdp = mac_control->rings[ring_no].rx_blocks[block_no].
1883                             block_virt_addr;
1884                 }
1885 #ifndef CONFIG_S2IO_NAPI
1886                 spin_lock_irqsave(&nic->put_lock, flags);
1887                 mac_control->rings[ring_no].put_pos = (block_no *
1888                                          (MAX_RXDS_PER_BLOCK + 1)) + off;
1889                 spin_unlock_irqrestore(&nic->put_lock, flags);
1890 #endif
1891 #endif
1892
1893 #ifndef CONFIG_2BUFF_MODE
1894                 if (rxdp->Control_1 & RXD_OWN_XENA)
1895 #else
1896                 if (rxdp->Control_2 & BIT(0))
1897 #endif
1898                 {
1899                         mac_control->rings[ring_no].rx_curr_put_info.
1900                             offset = off;
1901                         goto end;
1902                 }
1903 #ifdef  CONFIG_2BUFF_MODE
1904                 /*
1905                  * RxDs spanning cache lines will be replenished only
1906                  * if the succeeding RxD is also owned by the host. For
1907                  * the 48 byte descriptor these will always be the
1908                  * ((8*i)+3)rd and ((8*i)+6)th descriptors; the offending
1909                  * descriptor is of course the 3rd one.
1910                  */
1911                 rxdpphys = mac_control->rings[ring_no].rx_blocks[block_no].
1912                     block_dma_addr + (off * sizeof(RxD_t));
1913                 if (((u64) (rxdpphys)) % 128 > 80) {
1914                         rxdpnext = mac_control->rings[ring_no].rx_blocks[block_no].
1915                             block_virt_addr + (off + 1);
1916                         if (rxdpnext->Host_Control == END_OF_BLOCK) {
1917                                 nextblk = (block_no + 1) %
1918                                     (mac_control->rings[ring_no].block_count);
1919                                 rxdpnext = mac_control->rings[ring_no].rx_blocks
1920                                     [nextblk].block_virt_addr;
1921                         }
1922                         if (rxdpnext->Control_2 & BIT(0))
1923                                 goto end;
1924                 }
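                     /*
                      * Worked example of the check above (assuming the block
                      * base is 128-byte aligned and the descriptor is 48 bytes
                      * as noted above): off = 2 gives a physical offset of 96
                      * and off = 5 gives 240 % 128 = 112, both > 80; since
                      * 8 * 48 = 3 * 128 the pattern repeats every 8
                      * descriptors, i.e. exactly the ((8*i)+3)rd and
                      * ((8*i)+6)th descriptors mentioned earlier.
                      */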
1925 #endif
1926
1927 #ifndef CONFIG_2BUFF_MODE
1928                 skb = dev_alloc_skb(size + NET_IP_ALIGN);
1929 #else
1930                 skb = dev_alloc_skb(dev->mtu + ALIGN_SIZE + BUF0_LEN + 4);
1931 #endif
1932                 if (!skb) {
1933                         DBG_PRINT(ERR_DBG, "%s: Out of ", dev->name);
1934                         DBG_PRINT(ERR_DBG, "memory to allocate SKBs\n");
1935                         return -ENOMEM;
1936                 }
1937 #ifndef CONFIG_2BUFF_MODE
1938                 skb_reserve(skb, NET_IP_ALIGN);
1939                 memset(rxdp, 0, sizeof(RxD_t));
1940                 rxdp->Buffer0_ptr = pci_map_single
1941                     (nic->pdev, skb->data, size, PCI_DMA_FROMDEVICE);
1942                 rxdp->Control_2 &= (~MASK_BUFFER0_SIZE);
1943                 rxdp->Control_2 |= SET_BUFFER0_SIZE(size);
1944                 rxdp->Host_Control = (unsigned long) (skb);
1945                 rxdp->Control_1 |= RXD_OWN_XENA;
1946                 off++;
1947                 off %= (MAX_RXDS_PER_BLOCK + 1);
1948                 mac_control->rings[ring_no].rx_curr_put_info.offset = off;
1949 #else
1950                 ba = &mac_control->rings[ring_no].ba[block_no][off];
1951                 skb_reserve(skb, BUF0_LEN);
1952                 tmp = ((unsigned long) skb->data & ALIGN_SIZE);
1953                 if (tmp)
1954                         skb_reserve(skb, (ALIGN_SIZE + 1) - tmp);
1955
1956                 memset(rxdp, 0, sizeof(RxD_t));
1957                 rxdp->Buffer2_ptr = pci_map_single
1958                     (nic->pdev, skb->data, dev->mtu + BUF0_LEN + 4,
1959                      PCI_DMA_FROMDEVICE);
1960                 rxdp->Buffer0_ptr =
1961                     pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN,
1962                                    PCI_DMA_FROMDEVICE);
1963                 rxdp->Buffer1_ptr =
1964                     pci_map_single(nic->pdev, ba->ba_1, BUF1_LEN,
1965                                    PCI_DMA_FROMDEVICE);
1966
1967                 rxdp->Control_2 = SET_BUFFER2_SIZE(dev->mtu + 4);
1968                 rxdp->Control_2 |= SET_BUFFER0_SIZE(BUF0_LEN);
1969                 rxdp->Control_2 |= SET_BUFFER1_SIZE(1); /* dummy. */
1970                 rxdp->Control_2 |= BIT(0);      /* Set Buffer_Empty bit. */
1971                 rxdp->Host_Control = (u64) ((unsigned long) (skb));
1972                 rxdp->Control_1 |= RXD_OWN_XENA;
1973                 off++;
1974                 mac_control->rings[ring_no].rx_curr_put_info.offset = off;
1975 #endif
1976                 rxdp->Control_2 |= SET_RXD_MARKER;
1977
1978                 atomic_inc(&nic->rx_bufs_left[ring_no]);
1979                 alloc_tab++;
1980         }
1981
1982       end:
1983         return SUCCESS;
1984 }
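
     /*
      * Replenishment note: the NAPI poll routine below calls fill_rx_buffers()
      * for every ring after handing received packets to the stack, and treats
      * an -ENOMEM return as non-fatal, i.e. it only logs the failure and stops
      * replenishing for that pass.
      */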
1985
1986 /**
1987  *  free_rx_buffers - Frees all Rx buffers
1988  *  @sp: device private variable.
1989  *  Description:
1990  *  This function will free all Rx buffers allocated by host.
1991  *  Return Value:
1992  *  NONE.
1993  */
1994
1995 static void free_rx_buffers(struct s2io_nic *sp)
1996 {
1997         struct net_device *dev = sp->dev;
1998         int i, j, blk = 0, off, buf_cnt = 0;
1999         RxD_t *rxdp;
2000         struct sk_buff *skb;
2001         mac_info_t *mac_control;
2002         struct config_param *config;
2003 #ifdef CONFIG_2BUFF_MODE
2004         buffAdd_t *ba;
2005 #endif
2006
2007         mac_control = &sp->mac_control;
2008         config = &sp->config;
2009
2010         for (i = 0; i < config->rx_ring_num; i++) {
2011                 for (j = 0, blk = 0; j < config->rx_cfg[i].num_rxd; j++) {
2012                         off = j % (MAX_RXDS_PER_BLOCK + 1);
2013                         rxdp = mac_control->rings[i].rx_blocks[blk].
2014                                 block_virt_addr + off;
2015
2016 #ifndef CONFIG_2BUFF_MODE
2017                         if (rxdp->Control_1 == END_OF_BLOCK) {
2018                                 rxdp =
2019                                     (RxD_t *) ((unsigned long) rxdp->
2020                                                Control_2);
2021                                 j++;
2022                                 blk++;
2023                         }
2024 #else
2025                         if (rxdp->Host_Control == END_OF_BLOCK) {
2026                                 blk++;
2027                                 continue;
2028                         }
2029 #endif
2030
2031                         if (!(rxdp->Control_1 & RXD_OWN_XENA)) {
2032                                 memset(rxdp, 0, sizeof(RxD_t));
2033                                 continue;
2034                         }
2035
2036                         skb =
2037                             (struct sk_buff *) ((unsigned long) rxdp->
2038                                                 Host_Control);
2039                         if (skb) {
2040 #ifndef CONFIG_2BUFF_MODE
2041                                 pci_unmap_single(sp->pdev, (dma_addr_t)
2042                                                  rxdp->Buffer0_ptr,
2043                                                  dev->mtu +
2044                                                  HEADER_ETHERNET_II_802_3_SIZE
2045                                                  + HEADER_802_2_SIZE +
2046                                                  HEADER_SNAP_SIZE,
2047                                                  PCI_DMA_FROMDEVICE);
2048 #else
2049                                 ba = &mac_control->rings[i].ba[blk][off];
2050                                 pci_unmap_single(sp->pdev, (dma_addr_t)
2051                                                  rxdp->Buffer0_ptr,
2052                                                  BUF0_LEN,
2053                                                  PCI_DMA_FROMDEVICE);
2054                                 pci_unmap_single(sp->pdev, (dma_addr_t)
2055                                                  rxdp->Buffer1_ptr,
2056                                                  BUF1_LEN,
2057                                                  PCI_DMA_FROMDEVICE);
2058                                 pci_unmap_single(sp->pdev, (dma_addr_t)
2059                                                  rxdp->Buffer2_ptr,
2060                                                  dev->mtu + BUF0_LEN + 4,
2061                                                  PCI_DMA_FROMDEVICE);
2062 #endif
2063                                 dev_kfree_skb(skb);
2064                                 atomic_dec(&sp->rx_bufs_left[i]);
2065                                 buf_cnt++;
2066                         }
2067                         memset(rxdp, 0, sizeof(RxD_t));
2068                 }
2069                 mac_control->rings[i].rx_curr_put_info.block_index = 0;
2070                 mac_control->rings[i].rx_curr_get_info.block_index = 0;
2071                 mac_control->rings[i].rx_curr_put_info.offset = 0;
2072                 mac_control->rings[i].rx_curr_get_info.offset = 0;
2073                 atomic_set(&sp->rx_bufs_left[i], 0);
2074                 DBG_PRINT(INIT_DBG, "%s:Freed 0x%x Rx Buffers on ring%d\n",
2075                           dev->name, buf_cnt, i);
2076         }
2077 }
2078
2079 /**
2080  * s2io_poll - Rx interrupt handler for NAPI support
2081  * @dev : pointer to the device structure.
2082  * @budget : The number of packets that were budgeted to be processed
2083  * during one pass through the 'Poll' function.
2084  * Description:
2085  * Comes into picture only if NAPI support has been incorporated. It does
2086  * the same thing that rx_intr_handler does, but not in an interrupt context,
2087  * and it will process only a given number of packets.
2088  * Return value:
2089  * 0 on success and 1 if there are no Rx packets to be processed.
2090  */
2091
2092 #if defined(CONFIG_S2IO_NAPI)
2093 static int s2io_poll(struct net_device *dev, int *budget)
2094 {
2095         nic_t *nic = dev->priv;
2096         int pkt_cnt = 0, org_pkts_to_process;
2097         mac_info_t *mac_control;
2098         struct config_param *config;
2099         XENA_dev_config_t __iomem *bar0 = nic->bar0;
2100         u64 val64;
2101         int i;
2102
2103         atomic_inc(&nic->isr_cnt);
2104         mac_control = &nic->mac_control;
2105         config = &nic->config;
2106
2107         nic->pkts_to_process = *budget;
2108         if (nic->pkts_to_process > dev->quota)
2109                 nic->pkts_to_process = dev->quota;
2110         org_pkts_to_process = nic->pkts_to_process;
2111
2112         val64 = readq(&bar0->rx_traffic_int);
2113         writeq(val64, &bar0->rx_traffic_int);
2114
2115         for (i = 0; i < config->rx_ring_num; i++) {
2116                 rx_intr_handler(&mac_control->rings[i]);
2117                 pkt_cnt = org_pkts_to_process - nic->pkts_to_process;
2118                 if (!nic->pkts_to_process) {
2119                         /* Quota for the current iteration has been met */
2120                         goto no_rx;
2121                 }
2122         }
2123         if (!pkt_cnt)
2124                 pkt_cnt = 1;
2125
2126         dev->quota -= pkt_cnt;
2127         *budget -= pkt_cnt;
2128         netif_rx_complete(dev);
2129
2130         for (i = 0; i < config->rx_ring_num; i++) {
2131                 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2132                         DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
2133                         DBG_PRINT(ERR_DBG, " in Rx Poll!!\n");
2134                         break;
2135                 }
2136         }
2137         /* Re enable the Rx interrupts. */
2138         en_dis_able_nic_intrs(nic, RX_TRAFFIC_INTR, ENABLE_INTRS);
2139         atomic_dec(&nic->isr_cnt);
2140         return 0;
2141
2142 no_rx:
2143         dev->quota -= pkt_cnt;
2144         *budget -= pkt_cnt;
2145
2146         for (i = 0; i < config->rx_ring_num; i++) {
2147                 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2148                         DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
2149                         DBG_PRINT(ERR_DBG, " in Rx Poll!!\n");
2150                         break;
2151                 }
2152         }
2153         atomic_dec(&nic->isr_cnt);
2154         return 1;
2155 }
2156 #endif
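
     /*
      * Budget accounting sketch for s2io_poll() (the numbers are illustrative,
      * not taken from the source): if the core hands in a *budget of 64 and
      * the rings yield 10 packets, pkt_cnt ends up as 10, dev->quota and
      * *budget are each decremented by 10, netif_rx_complete() is called and
      * the Rx traffic interrupt is re-enabled; if the driver's quota is
      * exhausted first, the function returns 1 so that the core polls again.
      */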
2157
2158 /**
2159  *  rx_intr_handler - Rx interrupt handler
2160  *  @ring_data: per-ring data of the ring to be processed.
2161  *  Description:
2162  *  If the interrupt is because of a received frame or if the
2163  *  receive ring contains fresh, as yet un-processed frames, this function is
2164  *  called. It picks out the RxD at which the last Rx processing had
2165  *  stopped and sends the skb to the OSM's Rx handler and then increments
2166  *  the offset.
2167  *  Return Value:
2168  *  NONE.
2169  */
2170 static void rx_intr_handler(ring_info_t *ring_data)
2171 {
2172         nic_t *nic = ring_data->nic;
2173         struct net_device *dev = (struct net_device *) nic->dev;
2174         XENA_dev_config_t __iomem *bar0 = nic->bar0;
2175         int get_block, get_offset, put_block, put_offset, ring_bufs;
2176         rx_curr_get_info_t get_info, put_info;
2177         RxD_t *rxdp;
2178         struct sk_buff *skb;
2179 #ifndef CONFIG_S2IO_NAPI
2180         int pkt_cnt = 0;
2181 #endif
2182         register u64 val64;
2183
2184         spin_lock(&nic->rx_lock);
2185         if (atomic_read(&nic->card_state) == CARD_DOWN) {
2186                 DBG_PRINT(ERR_DBG, "%s: %s going down for reset\n",
2187                           __FUNCTION__, dev->name);
2188                 spin_unlock(&nic->rx_lock);
                     return;
2189         }
2190
2191         /*
2192          * rx_traffic_int reg is an R1 register, hence we read and write
2193          * back the same value in the register to clear it
2194          */
2195         val64 = readq(&bar0->rx_traffic_int);
2196         writeq(val64, &bar0->rx_traffic_int);
2197
2198         get_info = ring_data->rx_curr_get_info;
2199         get_block = get_info.block_index;
2200         put_info = ring_data->rx_curr_put_info;
2201         put_block = put_info.block_index;
2202         ring_bufs = get_info.ring_len+1;
2203         rxdp = ring_data->rx_blocks[get_block].block_virt_addr +
2204                     get_info.offset;
2205         get_offset = (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
2206                 get_info.offset;
2207 #ifndef CONFIG_S2IO_NAPI
2208         spin_lock(&nic->put_lock);
2209         put_offset = ring_data->put_pos;
2210         spin_unlock(&nic->put_lock);
2211 #else
2212         put_offset = (put_block * (MAX_RXDS_PER_BLOCK + 1)) +
2213                 put_info.offset;
2214 #endif
2215         while (RXD_IS_UP2DT(rxdp) &&
2216                (((get_offset + 1) % ring_bufs) != put_offset)) {
2217                 skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control);
2218                 if (skb == NULL) {
2219                         DBG_PRINT(ERR_DBG, "%s: The skb is ",
2220                                   dev->name);
2221                         DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
2222                         spin_unlock(&nic->rx_lock);
2223                         return;
2224                 }
2225 #ifndef CONFIG_2BUFF_MODE
2226                 pci_unmap_single(nic->pdev, (dma_addr_t)
2227                                  rxdp->Buffer0_ptr,
2228                                  dev->mtu +
2229                                  HEADER_ETHERNET_II_802_3_SIZE +
2230                                  HEADER_802_2_SIZE +
2231                                  HEADER_SNAP_SIZE,
2232                                  PCI_DMA_FROMDEVICE);
2233 #else
2234                 pci_unmap_single(nic->pdev, (dma_addr_t)
2235                                  rxdp->Buffer0_ptr,
2236                                  BUF0_LEN, PCI_DMA_FROMDEVICE);
2237                 pci_unmap_single(nic->pdev, (dma_addr_t)
2238                                  rxdp->Buffer1_ptr,
2239                                  BUF1_LEN, PCI_DMA_FROMDEVICE);
2240                 pci_unmap_single(nic->pdev, (dma_addr_t)
2241                                  rxdp->Buffer2_ptr,
2242                                  dev->mtu + BUF0_LEN + 4,
2243                                  PCI_DMA_FROMDEVICE);
2244 #endif
2245                 rx_osm_handler(ring_data, rxdp);
2246                 get_info.offset++;
2247                 ring_data->rx_curr_get_info.offset =
2248                     get_info.offset;
2249                 rxdp = ring_data->rx_blocks[get_block].block_virt_addr +
2250                     get_info.offset;
2251                 if (get_info.offset &&
2252                     (!(get_info.offset % MAX_RXDS_PER_BLOCK))) {
2253                         get_info.offset = 0;
2254                         ring_data->rx_curr_get_info.offset
2255                             = get_info.offset;
2256                         get_block++;
2257                         get_block %= ring_data->block_count;
2258                         ring_data->rx_curr_get_info.block_index
2259                             = get_block;
2260                         rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
2261                 }
2262
2263                 get_offset = (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
2264                             get_info.offset;
2265 #ifdef CONFIG_S2IO_NAPI
2266                 nic->pkts_to_process -= 1;
2267                 if (!nic->pkts_to_process)
2268                         break;
2269 #else
2270                 pkt_cnt++;
2271                 if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
2272                         break;
2273 #endif
2274         }
2275         spin_unlock(&nic->rx_lock);
2276 }
2277
2278 /**
2279  *  tx_intr_handler - Transmit interrupt handler
2280  *  @nic : device private variable
2281  *  Description:
2282  *  If an interrupt was raised to indicate DMA complete of the
2283  *  Tx packet, this function is called. It identifies the last TxD
2284  *  whose buffer was freed and frees all skbs whose data has already been
2285  *  DMA'ed into the NIC's internal memory.
2286  *  Return Value:
2287  *  NONE
2288  */
2289
2290 static void tx_intr_handler(fifo_info_t *fifo_data)
2291 {
2292         nic_t *nic = fifo_data->nic;
2293         XENA_dev_config_t __iomem *bar0 = nic->bar0;
2294         struct net_device *dev = (struct net_device *) nic->dev;
2295         tx_curr_get_info_t get_info, put_info;
2296         struct sk_buff *skb;
2297         TxD_t *txdlp;
2298         u16 j, frg_cnt;
2299         register u64 val64 = 0;
2300
2301         /*
2302          * tx_traffic_int reg is an R1 register, hence we read and write
2303          * back the same value in the register to clear it
2304          */
2305         val64 = readq(&bar0->tx_traffic_int);
2306         writeq(val64, &bar0->tx_traffic_int);
2307
2308         get_info = fifo_data->tx_curr_get_info;
2309         put_info = fifo_data->tx_curr_put_info;
2310         txdlp = (TxD_t *) fifo_data->list_info[get_info.offset].
2311             list_virt_addr;
2312         while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
2313                (get_info.offset != put_info.offset) &&
2314                (txdlp->Host_Control)) {
2315                 /* Check for TxD errors */
2316                 if (txdlp->Control_1 & TXD_T_CODE) {
2317                         unsigned long long err;
2318                         err = txdlp->Control_1 & TXD_T_CODE;
2319                         DBG_PRINT(ERR_DBG, "***TxD error %llx\n",
2320                                   err);
2321                 }
2322
2323                 skb = (struct sk_buff *) ((unsigned long)
2324                                 txdlp->Host_Control);
2325                 if (skb == NULL) {
2326                         DBG_PRINT(ERR_DBG, "%s: Null skb ",
2327                         __FUNCTION__);
2328                         DBG_PRINT(ERR_DBG, "in Tx Free Intr\n");
2329                         return;
2330                 }
2331
2332                 frg_cnt = skb_shinfo(skb)->nr_frags;
2333                 nic->tx_pkt_count++;
2334
2335                 pci_unmap_single(nic->pdev, (dma_addr_t)
2336                                  txdlp->Buffer_Pointer,
2337                                  skb->len - skb->data_len,
2338                                  PCI_DMA_TODEVICE);
2339                 if (frg_cnt) {
2340                         TxD_t *temp;
2341                         temp = txdlp;
2342                         txdlp++;
2343                         for (j = 0; j < frg_cnt; j++, txdlp++) {
2344                                 skb_frag_t *frag =
2345                                     &skb_shinfo(skb)->frags[j];
2346                                 pci_unmap_page(nic->pdev,
2347                                                (dma_addr_t)
2348                                                txdlp->
2349                                                Buffer_Pointer,
2350                                                frag->size,
2351                                                PCI_DMA_TODEVICE);
2352                         }
2353                         txdlp = temp;
2354                 }
2355                 memset(txdlp, 0,
2356                        (sizeof(TxD_t) * fifo_data->max_txds));
2357
2358                 /* Updating the statistics block */
2359                 nic->stats.tx_bytes += skb->len;
2360                 dev_kfree_skb_irq(skb);
2361
2362                 get_info.offset++;
2363                 get_info.offset %= get_info.fifo_len + 1;
2364                 txdlp = (TxD_t *) fifo_data->list_info
2365                     [get_info.offset].list_virt_addr;
2366                 fifo_data->tx_curr_get_info.offset =
2367                     get_info.offset;
2368         }
2369
2370         spin_lock(&nic->tx_lock);
2371         if (netif_queue_stopped(dev))
2372                 netif_wake_queue(dev);
2373         spin_unlock(&nic->tx_lock);
2374 }
2375
2376 /**
2377  *  alarm_intr_handler - Alarm Interrupt handler
2378  *  @nic: device private variable
2379  *  Description: If the interrupt was neither because of an Rx packet nor a Tx
2380  *  complete, this function is called. If the interrupt was to indicate
2381  *  a loss of link, the OSM link status handler is invoked; for any other
2382  *  alarm interrupt, the block that raised the interrupt is displayed
2383  *  and a H/W reset is issued.
2384  *  Return Value:
2385  *  NONE
2386 */
2387
2388 static void alarm_intr_handler(struct s2io_nic *nic)
2389 {
2390         struct net_device *dev = (struct net_device *) nic->dev;
2391         XENA_dev_config_t __iomem *bar0 = nic->bar0;
2392         register u64 val64 = 0, err_reg = 0;
2393
2394         /* Handling link status change error Intr */
2395         err_reg = readq(&bar0->mac_rmac_err_reg);
2396         writeq(err_reg, &bar0->mac_rmac_err_reg);
2397         if (err_reg & RMAC_LINK_STATE_CHANGE_INT) {
2398                 schedule_work(&nic->set_link_task);
2399         }
2400
2401         /* Handling Ecc errors */
2402         val64 = readq(&bar0->mc_err_reg);
2403         writeq(val64, &bar0->mc_err_reg);
2404         if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) {
2405                 if (val64 & MC_ERR_REG_ECC_ALL_DBL) {
2406                         nic->mac_control.stats_info->sw_stat.
2407                                 double_ecc_errs++;
2408                         DBG_PRINT(ERR_DBG, "%s: Device indicates ",
2409                                   dev->name);
2410                         DBG_PRINT(ERR_DBG, "double ECC error!!\n");
2411                         netif_stop_queue(dev);
2412                         schedule_work(&nic->rst_timer_task);
2413                 } else {
2414                         nic->mac_control.stats_info->sw_stat.
2415                                 single_ecc_errs++;
2416                 }
2417         }
2418
2419         /* In case of a serious error, the device will be Reset. */
2420         val64 = readq(&bar0->serr_source);
2421         if (val64 & SERR_SOURCE_ANY) {
2422                 DBG_PRINT(ERR_DBG, "%s: Device indicates ", dev->name);
2423                 DBG_PRINT(ERR_DBG, "serious error!!\n");
2424                 netif_stop_queue(dev);
2425                 schedule_work(&nic->rst_timer_task);
2426         }
2427
2428         /*
2429          * Also, as mentioned in the latest errata sheets, if a PCC_FB_ECC
2430          * error occurs, the adapter will be recycled by disabling the
2431          * adapter enable bit and enabling it again after the device
2432          * becomes quiescent.
2433          */
2434         val64 = readq(&bar0->pcc_err_reg);
2435         writeq(val64, &bar0->pcc_err_reg);
2436         if (val64 & PCC_FB_ECC_DB_ERR) {
2437                 u64 ac = readq(&bar0->adapter_control);
2438                 ac &= ~(ADAPTER_CNTL_EN);
2439                 writeq(ac, &bar0->adapter_control);
2440                 ac = readq(&bar0->adapter_control);
2441                 schedule_work(&nic->set_link_task);
2442         }
2443
2444         /* Other type of interrupts are not being handled now,  TODO */
2445 }
2446
2447 /**
2448  *  wait_for_cmd_complete - waits for a command to complete.
2449  *  @sp : private member of the device structure, which is a pointer to the
2450  *  s2io_nic structure.
2451  *  Description: Function that waits for a command written into the RMAC
2452  *  ADDR DATA registers to complete and returns either success or
2453  *  failure depending on whether the command completed or not.
2454  *  Return value:
2455  *   SUCCESS on success and FAILURE on failure.
2456  */
2457
2458 int wait_for_cmd_complete(nic_t * sp)
2459 {
2460         XENA_dev_config_t __iomem *bar0 = sp->bar0;
2461         int ret = FAILURE, cnt = 0;
2462         u64 val64;
2463
2464         while (TRUE) {
2465                 val64 = readq(&bar0->rmac_addr_cmd_mem);
2466                 if (!(val64 & RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING)) {
2467                         ret = SUCCESS;
2468                         break;
2469                 }
2470                 msleep(50);
2471                 if (cnt++ > 10)
2472                         break;
2473         }
2474
2475         return ret;
2476 }
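
     /*
      * Typical caller pattern for wait_for_cmd_complete() (a sketch; it
      * assumes the RMAC_ADDR_CMD_MEM_WE, RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD and
      * RMAC_ADDR_CMD_MEM_OFFSET() helpers from s2io-regs.h, after the address
      * has been staged in the RMAC address data register):
      *
      *     writeq(RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
      *            RMAC_ADDR_CMD_MEM_OFFSET(0), &bar0->rmac_addr_cmd_mem);
      *     if (wait_for_cmd_complete(sp) == FAILURE)
      *             DBG_PRINT(ERR_DBG, "RMAC address command timed out\n");
      *
      * The loop above polls every 50 ms and gives up after a little over half
      * a second.
      */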
2477
2478 /**
2479  *  s2io_reset - Resets the card.
2480  *  @sp : private member of the device structure.
2481  *  Description: Function to Reset the card. This function then also
2482  *  restores the previously saved PCI configuration space registers as
2483  *  the card reset also resets the configuration space.
2484  *  Return value:
2485  *  void.
2486  */
2487
2488 void s2io_reset(nic_t * sp)
2489 {
2490         XENA_dev_config_t __iomem *bar0 = sp->bar0;
2491         u64 val64;
2492         u16 subid, pci_cmd;
2493
2494         val64 = SW_RESET_ALL;
2495         writeq(val64, &bar0->sw_reset);
2496
2497         /*
2498          * At this stage, if the PCI write is indeed completed, the
2499          * card is reset and so is the PCI Config space of the device.
2500          * So a read cannot be issued at this stage on any of the
2501          * registers to ensure the write into "sw_reset" register
2502          * has gone through.
2503          * Question: Is there any system call that will explicitly force
2504          * all the write commands still pending on the bus to be pushed
2505          * through?
2506          * As of now I'm just giving a 250 ms delay and hoping that the
2507          * PCI write to sw_reset register is done by this time.
2508          */
2509         msleep(250);
2510
2511         /* Restore the PCI state saved during initialization. */
2512         pci_restore_state(sp->pdev);
2513
2514         s2io_init_pci(sp);
2515
2516         msleep(250);
2517
2518         /* Set swapper to enable I/O register access */
2519         s2io_set_swapper(sp);
2520
2521         /* Clear certain PCI/PCI-X fields after reset */
2522         pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
2523         pci_cmd &= 0x7FFF; /* Clear parity err detect bit */
2524         pci_write_config_word(sp->pdev, PCI_COMMAND, pci_cmd);
2525
2526         val64 = readq(&bar0->txpic_int_reg);
2527         val64 &= ~BIT(62); /* Clearing PCI_STATUS error reflected here */
2528         writeq(val64, &bar0->txpic_int_reg);
2529
2530         /* Clearing PCIX Ecc status register */
2531         pci_write_config_dword(sp->pdev, 0x68, 0);
2532
2533         /* Reset device statistics maintained by OS */
2534         memset(&sp->stats, 0, sizeof (struct net_device_stats));
2535
2536         /* SXE-002: Configure link and activity LED to turn it off */
2537         subid = sp->pdev->subsystem_device;
2538         if ((subid & 0xFF) >= 0x07) {
2539                 val64 = readq(&bar0->gpio_control);
2540                 val64 |= 0x0000800000000000ULL;
2541                 writeq(val64, &bar0->gpio_control);
2542                 val64 = 0x0411040400000000ULL;
2543                 writeq(val64, (void __iomem *) ((u8 *) bar0 + 0x2700));
2544         }
2545
2546         sp->device_enabled_once = FALSE;
2547 }
2548
2549 /**
2550  *  s2io_set_swapper - to set the swapper control on the card
2551  *  @sp : private member of the device structure,
2552  *  pointer to the s2io_nic structure.
2553  *  Description: Function to set the swapper control on the card
2554  *  correctly depending on the 'endianness' of the system.
2555  *  Return value:
2556  *  SUCCESS on success and FAILURE on failure.
2557  */
2558
2559 int s2io_set_swapper(nic_t * sp)
2560 {
2561         struct net_device *dev = sp->dev;
2562         XENA_dev_config_t __iomem *bar0 = sp->bar0;
2563         u64 val64, valt, valr;
2564
2565         /*
2566          * Set proper endian settings and verify the same by reading
2567          * the PIF Feed-back register.
2568          */
2569
2570         val64 = readq(&bar0->pif_rd_swapper_fb);
2571         if (val64 != 0x0123456789ABCDEFULL) {
2572                 int i = 0;
2573                 u64 value[] = { 0xC30000C3C30000C3ULL,   /* FE=1, SE=1 */
2574                                 0x8100008181000081ULL,  /* FE=1, SE=0 */
2575                                 0x4200004242000042ULL,  /* FE=0, SE=1 */
2576                                 0};                     /* FE=0, SE=0 */
2577
2578                 while(i<4) {
2579                         writeq(value[i], &bar0->swapper_ctrl);
2580                         val64 = readq(&bar0->pif_rd_swapper_fb);
2581                         if (val64 == 0x0123456789ABCDEFULL)
2582                                 break;
2583                         i++;
2584                 }
2585                 if (i == 4) {
2586                         DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
2587                                 dev->name);
2588                         DBG_PRINT(ERR_DBG, "feedback read %llx\n",
2589                                 (unsigned long long) val64);
2590                         return FAILURE;
2591                 }
2592                 valr = value[i];
2593         } else {
2594                 valr = readq(&bar0->swapper_ctrl);
2595         }
2596
2597         valt = 0x0123456789ABCDEFULL;
2598         writeq(valt, &bar0->xmsi_address);
2599         val64 = readq(&bar0->xmsi_address);
2600
2601         if(val64 != valt) {
2602                 int i = 0;
2603                 u64 value[] = { 0x00C3C30000C3C300ULL,  /* FE=1, SE=1 */
2604                                 0x0081810000818100ULL,  /* FE=1, SE=0 */
2605                                 0x0042420000424200ULL,  /* FE=0, SE=1 */
2606                                 0};                     /* FE=0, SE=0 */
2607
2608                 while(i<4) {
2609                         writeq((value[i] | valr), &bar0->swapper_ctrl);
2610                         writeq(valt, &bar0->xmsi_address);
2611                         val64 = readq(&bar0->xmsi_address);
2612                         if(val64 == valt)
2613                                 break;
2614                         i++;
2615                 }
2616                 if(i == 4) {
2617                         unsigned long long x = val64;
2618                         DBG_PRINT(ERR_DBG, "Write failed, Xmsi_addr ");
2619                         DBG_PRINT(ERR_DBG, "reads:0x%llx\n", x);
2620                         return FAILURE;
2621                 }
2622         }
2623         val64 = readq(&bar0->swapper_ctrl);
2624         val64 &= 0xFFFF000000000000ULL;
2625
2626 #ifdef  __BIG_ENDIAN
2627         /*
 2628          * The device is set to a big endian format by default, so a
 2629          * big endian driver need not change anything.
2630          */
2631         val64 |= (SWAPPER_CTRL_TXP_FE |
2632                  SWAPPER_CTRL_TXP_SE |
2633                  SWAPPER_CTRL_TXD_R_FE |
2634                  SWAPPER_CTRL_TXD_W_FE |
2635                  SWAPPER_CTRL_TXF_R_FE |
2636                  SWAPPER_CTRL_RXD_R_FE |
2637                  SWAPPER_CTRL_RXD_W_FE |
2638                  SWAPPER_CTRL_RXF_W_FE |
2639                  SWAPPER_CTRL_XMSI_FE |
2640                  SWAPPER_CTRL_XMSI_SE |
2641                  SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
2642         writeq(val64, &bar0->swapper_ctrl);
2643 #else
2644         /*
2645          * Initially we enable all bits to make it accessible by the
2646          * driver, then we selectively enable only those bits that
2647          * we want to set.
2648          */
2649         val64 |= (SWAPPER_CTRL_TXP_FE |
2650                  SWAPPER_CTRL_TXP_SE |
2651                  SWAPPER_CTRL_TXD_R_FE |
2652                  SWAPPER_CTRL_TXD_R_SE |
2653                  SWAPPER_CTRL_TXD_W_FE |
2654                  SWAPPER_CTRL_TXD_W_SE |
2655                  SWAPPER_CTRL_TXF_R_FE |
2656                  SWAPPER_CTRL_RXD_R_FE |
2657                  SWAPPER_CTRL_RXD_R_SE |
2658                  SWAPPER_CTRL_RXD_W_FE |
2659                  SWAPPER_CTRL_RXD_W_SE |
2660                  SWAPPER_CTRL_RXF_W_FE |
2661                  SWAPPER_CTRL_XMSI_FE |
2662                  SWAPPER_CTRL_XMSI_SE |
2663                  SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
2664         writeq(val64, &bar0->swapper_ctrl);
2665 #endif
2666         val64 = readq(&bar0->swapper_ctrl);
2667
2668         /*
2669          * Verifying if endian settings are accurate by reading a
2670          * feedback register.
2671          */
2672         val64 = readq(&bar0->pif_rd_swapper_fb);
2673         if (val64 != 0x0123456789ABCDEFULL) {
 2674                 /* Endian settings are incorrect, take another look. */
2675                 DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
2676                           dev->name);
2677                 DBG_PRINT(ERR_DBG, "feedback read %llx\n",
2678                           (unsigned long long) val64);
2679                 return FAILURE;
2680         }
2681
2682         return SUCCESS;
2683 }
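
/*
 * Illustrative sketch only, not part of the driver: a hypothetical caller
 * on an int-returning init path could check s2io_set_swapper() like this,
 * assuming 'sp' is a valid nic_t and 'dev' its net_device. Kept under
 * "#if 0" so it is never compiled.
 */
#if 0
        if (s2io_set_swapper(sp) != SUCCESS) {
                DBG_PRINT(ERR_DBG, "%s: swapper setting failed\n", dev->name);
                return -EIO;
        }
#endif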
2684
2685 /* ********************************************************* *
2686  * Functions defined below concern the OS part of the driver *
2687  * ********************************************************* */
2688
2689 /**
2690  *  s2io_open - open entry point of the driver
2691  *  @dev : pointer to the device structure.
2692  *  Description:
2693  *  This function is the open entry point of the driver. It mainly calls a
2694  *  function to allocate Rx buffers and inserts them into the buffer
2695  *  descriptors and then enables the Rx part of the NIC.
2696  *  Return value:
2697  *  0 on success and an appropriate (-)ve integer as defined in errno.h
2698  *   file on failure.
2699  */
2700
2701 int s2io_open(struct net_device *dev)
2702 {
2703         nic_t *sp = dev->priv;
2704         int err = 0;
2705
2706         /*
 2707          * Make sure the link is off by default every time the
 2708          * NIC is initialized
2709          */
2710         netif_carrier_off(dev);
 2711         sp->last_link_state = 0; /* Unknown link state */
2712
2713         /* Initialize H/W and enable interrupts */
2714         if (s2io_card_up(sp)) {
2715                 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
2716                           dev->name);
2717                 err = -ENODEV;
2718                 goto hw_init_failed;
2719         }
2720
2721         /* After proper initialization of H/W, register ISR */
2722         err = request_irq((int) sp->pdev->irq, s2io_isr, SA_SHIRQ,
2723                           sp->name, dev);
2724         if (err) {
2725                 DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
2726                           dev->name);
2727                 goto isr_registration_failed;
2728         }
2729
2730         if (s2io_set_mac_addr(dev, dev->dev_addr) == FAILURE) {
2731                 DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
2732                 err = -ENODEV;
2733                 goto setting_mac_address_failed;
2734         }
2735
2736         netif_start_queue(dev);
2737         return 0;
2738
2739 setting_mac_address_failed:
2740         free_irq(sp->pdev->irq, dev);
2741 isr_registration_failed:
2742         s2io_reset(sp);
2743 hw_init_failed:
2744         return err;
2745 }
2746
2747 /**
2748  *  s2io_close -close entry point of the driver
2749  *  @dev : device pointer.
2750  *  Description:
2751  *  This is the stop entry point of the driver. It needs to undo exactly
2752  *  whatever was done by the open entry point,thus it's usually referred to
2753  *  as the close function.Among other things this function mainly stops the
2754  *  Rx side of the NIC and frees all the Rx buffers in the Rx rings.
2755  *  Return value:
2756  *  0 on success and an appropriate (-)ve integer as defined in errno.h
2757  *  file on failure.
2758  */
2759
2760 int s2io_close(struct net_device *dev)
2761 {
2762         nic_t *sp = dev->priv;
2763         flush_scheduled_work();
2764         netif_stop_queue(dev);
2765         /* Reset card, kill tasklet and free Tx and Rx buffers. */
2766         s2io_card_down(sp);
2767
2768         free_irq(sp->pdev->irq, dev);
2769         sp->device_close_flag = TRUE;   /* Device is shut down. */
2770         return 0;
2771 }
2772
2773 /**
 2774  *  s2io_xmit - Tx entry point of the driver
2775  *  @skb : the socket buffer containing the Tx data.
2776  *  @dev : device pointer.
2777  *  Description :
2778  *  This function is the Tx entry point of the driver. S2IO NIC supports
2779  *  certain protocol assist features on Tx side, namely  CSO, S/G, LSO.
 2780  *  NOTE: when the device cannot queue the packet, only the trans_start
 2781  *  variable will not be updated.
2782  *  Return value:
2783  *  0 on success & 1 on failure.
2784  */
2785
2786 int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
2787 {
2788         nic_t *sp = dev->priv;
2789         u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
2790         register u64 val64;
2791         TxD_t *txdp;
2792         TxFIFO_element_t __iomem *tx_fifo;
2793         unsigned long flags;
2794 #ifdef NETIF_F_TSO
2795         int mss;
2796 #endif
2797         mac_info_t *mac_control;
2798         struct config_param *config;
2799         XENA_dev_config_t __iomem *bar0 = sp->bar0;
2800
2801         mac_control = &sp->mac_control;
2802         config = &sp->config;
2803
2804         DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name);
2805         spin_lock_irqsave(&sp->tx_lock, flags);
2806         if (atomic_read(&sp->card_state) == CARD_DOWN) {
2807                 DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
2808                           dev->name);
2809                 spin_unlock_irqrestore(&sp->tx_lock, flags);
2810                 dev_kfree_skb(skb);
2811                 return 0;
2812         }
2813
2814         queue = 0;
2815
2816         put_off = (u16) mac_control->fifos[queue].tx_curr_put_info.offset;
2817         get_off = (u16) mac_control->fifos[queue].tx_curr_get_info.offset;
2818         txdp = (TxD_t *) mac_control->fifos[queue].list_info[put_off].
2819                 list_virt_addr;
2820
2821         queue_len = mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1;
2822         /* Avoid "put" pointer going beyond "get" pointer */
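        /*
         * Example: with queue_len == 8, put_off == 6 and get_off == 7 the
         * FIFO is treated as full; one descriptor is always left unused so
         * that a full ring can be told apart from an empty one.
         */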
2823         if (txdp->Host_Control || (((put_off + 1) % queue_len) == get_off)) {
2824                 DBG_PRINT(ERR_DBG, "Error in xmit, No free TXDs.\n");
2825                 netif_stop_queue(dev);
2826                 dev_kfree_skb(skb);
2827                 spin_unlock_irqrestore(&sp->tx_lock, flags);
2828                 return 0;
2829         }
2830 #ifdef NETIF_F_TSO
2831         mss = skb_shinfo(skb)->tso_size;
2832         if (mss) {
2833                 txdp->Control_1 |= TXD_TCP_LSO_EN;
2834                 txdp->Control_1 |= TXD_TCP_LSO_MSS(mss);
2835         }
2836 #endif
2837
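        /*
         * frg_len is the length of the linear (unpaged) part of the skb;
         * the paged fragments are mapped separately in the loop below.
         */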
2838         frg_cnt = skb_shinfo(skb)->nr_frags;
2839         frg_len = skb->len - skb->data_len;
2840
2841         txdp->Buffer_Pointer = pci_map_single
2842             (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE);
2843         txdp->Host_Control = (unsigned long) skb;
2844         if (skb->ip_summed == CHECKSUM_HW) {
2845                 txdp->Control_2 |=
2846                     (TXD_TX_CKO_IPV4_EN | TXD_TX_CKO_TCP_EN |
2847                      TXD_TX_CKO_UDP_EN);
2848         }
2849
2850         txdp->Control_2 |= config->tx_intr_type;
2851
2852         txdp->Control_1 |= (TXD_BUFFER0_SIZE(frg_len) |
2853                             TXD_GATHER_CODE_FIRST);
2854         txdp->Control_1 |= TXD_LIST_OWN_XENA;
2855
2856         /* For fragmented SKB. */
2857         for (i = 0; i < frg_cnt; i++) {
2858                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2859                 txdp++;
2860                 txdp->Buffer_Pointer = (u64) pci_map_page
2861                     (sp->pdev, frag->page, frag->page_offset,
2862                      frag->size, PCI_DMA_TODEVICE);
2863                 txdp->Control_1 |= TXD_BUFFER0_SIZE(frag->size);
2864         }
2865         txdp->Control_1 |= TXD_GATHER_CODE_LAST;
2866
2867         tx_fifo = mac_control->tx_FIFO_start[queue];
2868         val64 = mac_control->fifos[queue].list_info[put_off].list_phy_addr;
2869         writeq(val64, &tx_fifo->TxDL_Pointer);
2870
2871         val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
2872                  TX_FIFO_LAST_LIST);
2873
2874 #ifdef NETIF_F_TSO
2875         if (mss)
2876                 val64 |= TX_FIFO_SPECIAL_FUNC;
2877 #endif
2878         writeq(val64, &tx_fifo->List_Control);
2879
2880         /* Perform a PCI read to flush previous writes */
2881         val64 = readq(&bar0->general_int_status);
2882
2883         put_off++;
2884         put_off %= mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1;
2885         mac_control->fifos[queue].tx_curr_put_info.offset = put_off;
2886
2887         /* Avoid "put" pointer going beyond "get" pointer */
2888         if (((put_off + 1) % queue_len) == get_off) {
2889                 DBG_PRINT(TX_DBG,
2890                           "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
2891                           put_off, get_off);
2892                 netif_stop_queue(dev);
2893         }
2894
2895         dev->trans_start = jiffies;
2896         spin_unlock_irqrestore(&sp->tx_lock, flags);
2897
2898         return 0;
2899 }
2900
2901 /**
2902  *  s2io_isr - ISR handler of the device .
2903  *  @irq: the irq of the device.
2904  *  @dev_id: a void pointer to the dev structure of the NIC.
2905  *  @pt_regs: pointer to the registers pushed on the stack.
2906  *  Description:  This function is the ISR handler of the device. It
2907  *  identifies the reason for the interrupt and calls the relevant
 2908  *  service routines. As a contingency measure, this ISR allocates the
2909  *  recv buffers, if their numbers are below the panic value which is
2910  *  presently set to 25% of the original number of rcv buffers allocated.
2911  *  Return value:
2912  *   IRQ_HANDLED: will be returned if IRQ was handled by this routine
2913  *   IRQ_NONE: will be returned if interrupt is not from our device
2914  */
2915 static irqreturn_t s2io_isr(int irq, void *dev_id, struct pt_regs *regs)
2916 {
2917         struct net_device *dev = (struct net_device *) dev_id;
2918         nic_t *sp = dev->priv;
2919         XENA_dev_config_t __iomem *bar0 = sp->bar0;
2920         int i;
2921         u64 reason = 0;
2922         mac_info_t *mac_control;
2923         struct config_param *config;
2924
2925         atomic_inc(&sp->isr_cnt);
2926         mac_control = &sp->mac_control;
2927         config = &sp->config;
2928
2929         /*
2930          * Identify the cause for interrupt and call the appropriate
2931          * interrupt handler. Causes for the interrupt could be;
2932          * 1. Rx of packet.
2933          * 2. Tx complete.
2934          * 3. Link down.
2935          * 4. Error in any functional blocks of the NIC.
2936          */
2937         reason = readq(&bar0->general_int_status);
2938
2939         if (!reason) {
2940                 /* The interrupt was not raised by Xena. */
2941                 atomic_dec(&sp->isr_cnt);
2942                 return IRQ_NONE;
2943         }
2944
2945         if (reason & (GEN_ERROR_INTR))
2946                 alarm_intr_handler(sp);
2947
2948 #ifdef CONFIG_S2IO_NAPI
2949         if (reason & GEN_INTR_RXTRAFFIC) {
2950                 if (netif_rx_schedule_prep(dev)) {
2951                         en_dis_able_nic_intrs(sp, RX_TRAFFIC_INTR,
2952                                               DISABLE_INTRS);
2953                         __netif_rx_schedule(dev);
2954                 }
2955         }
2956 #else
2957         /* If Intr is because of Rx Traffic */
2958         if (reason & GEN_INTR_RXTRAFFIC) {
2959                 for (i = 0; i < config->rx_ring_num; i++) {
2960                         rx_intr_handler(&mac_control->rings[i]);
2961                 }
2962         }
2963 #endif
2964
2965         /* If Intr is because of Tx Traffic */
2966         if (reason & GEN_INTR_TXTRAFFIC) {
2967                 for (i = 0; i < config->tx_fifo_num; i++)
2968                         tx_intr_handler(&mac_control->fifos[i]);
2969         }
2970
2971         /*
2972          * If the Rx buffer count is below the panic threshold then
2973          * reallocate the buffers from the interrupt handler itself,
2974          * else schedule a tasklet to reallocate the buffers.
2975          */
2976 #ifndef CONFIG_S2IO_NAPI
2977         for (i = 0; i < config->rx_ring_num; i++) {
2978                 int ret;
2979                 int rxb_size = atomic_read(&sp->rx_bufs_left[i]);
2980                 int level = rx_buffer_level(sp, rxb_size, i);
2981
2982                 if ((level == PANIC) && (!TASKLET_IN_USE)) {
2983                         DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", dev->name);
2984                         DBG_PRINT(INTR_DBG, "PANIC levels\n");
2985                         if ((ret = fill_rx_buffers(sp, i)) == -ENOMEM) {
2986                                 DBG_PRINT(ERR_DBG, "%s:Out of memory",
2987                                           dev->name);
2988                                 DBG_PRINT(ERR_DBG, " in ISR!!\n");
2989                                 clear_bit(0, (&sp->tasklet_status));
2990                                 atomic_dec(&sp->isr_cnt);
2991                                 return IRQ_HANDLED;
2992                         }
2993                         clear_bit(0, (&sp->tasklet_status));
2994                 } else if (level == LOW) {
2995                         tasklet_schedule(&sp->task);
2996                 }
2997         }
2998 #endif
2999
3000         atomic_dec(&sp->isr_cnt);
3001         return IRQ_HANDLED;
3002 }
3003
3004 /**
 3005  * s2io_updt_stats - Triggers a hardware update of the NIC statistics block.
3006  */
3007 static void s2io_updt_stats(nic_t *sp)
3008 {
3009         XENA_dev_config_t __iomem *bar0 = sp->bar0;
3010         u64 val64;
3011         int cnt = 0;
3012
3013         if (atomic_read(&sp->card_state) == CARD_UP) {
3014                 /* Apprx 30us on a 133 MHz bus */
3015                 val64 = SET_UPDT_CLICKS(10) |
3016                         STAT_CFG_ONE_SHOT_EN | STAT_CFG_STAT_EN;
3017                 writeq(val64, &bar0->stat_cfg);
3018                 do {
3019                         udelay(100);
3020                         val64 = readq(&bar0->stat_cfg);
3021                         if (!(val64 & BIT(0)))
3022                                 break;
3023                         cnt++;
3024                         if (cnt == 5)
3025                                 break; /* Updt failed */
3026                 } while(1);
3027         }
3028 }
3029
3030 /**
3031  *  s2io_get_stats - Updates the device statistics structure.
3032  *  @dev : pointer to the device structure.
3033  *  Description:
3034  *  This function updates the device statistics structure in the s2io_nic
3035  *  structure and returns a pointer to the same.
3036  *  Return value:
3037  *  pointer to the updated net_device_stats structure.
3038  */
3039
3040 struct net_device_stats *s2io_get_stats(struct net_device *dev)
3041 {
3042         nic_t *sp = dev->priv;
3043         mac_info_t *mac_control;
3044         struct config_param *config;
3045
3046
3047         mac_control = &sp->mac_control;
3048         config = &sp->config;
3049
3050         /* Configure Stats for immediate updt */
3051         s2io_updt_stats(sp);
3052
3053         sp->stats.tx_packets =
3054                 le32_to_cpu(mac_control->stats_info->tmac_frms);
3055         sp->stats.tx_errors =
3056                 le32_to_cpu(mac_control->stats_info->tmac_any_err_frms);
3057         sp->stats.rx_errors =
3058                 le32_to_cpu(mac_control->stats_info->rmac_drop_frms);
3059         sp->stats.multicast =
3060                 le32_to_cpu(mac_control->stats_info->rmac_vld_mcst_frms);
3061         sp->stats.rx_length_errors =
3062                 le32_to_cpu(mac_control->stats_info->rmac_long_frms);
3063
3064         return (&sp->stats);
3065 }
3066
3067 /**
3068  *  s2io_set_multicast - entry point for multicast address enable/disable.
3069  *  @dev : pointer to the device structure
3070  *  Description:
3071  *  This function is a driver entry point which gets called by the kernel
3072  *  whenever multicast addresses must be enabled/disabled. This also gets
 3073  *  called to set/reset promiscuous mode. Depending on the device flags, we
 3074  *  determine whether multicast addresses must be enabled or promiscuous mode
 3075  *  is to be enabled/disabled, etc.
3076  *  Return value:
3077  *  void.
3078  */
3079
3080 static void s2io_set_multicast(struct net_device *dev)
3081 {
3082         int i, j, prev_cnt;
3083         struct dev_mc_list *mclist;
3084         nic_t *sp = dev->priv;
3085         XENA_dev_config_t __iomem *bar0 = sp->bar0;
3086         u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
3087             0xfeffffffffffULL;
3088         u64 dis_addr = 0xffffffffffffULL, mac_addr = 0;
3089         void __iomem *add;
3090
3091         if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) {
3092                 /*  Enable all Multicast addresses */
3093                 writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac),
3094                        &bar0->rmac_addr_data0_mem);
3095                 writeq(RMAC_ADDR_DATA1_MEM_MASK(mask),
3096                        &bar0->rmac_addr_data1_mem);
3097                 val64 = RMAC_ADDR_CMD_MEM_WE |
3098                     RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3099                     RMAC_ADDR_CMD_MEM_OFFSET(MAC_MC_ALL_MC_ADDR_OFFSET);
3100                 writeq(val64, &bar0->rmac_addr_cmd_mem);
3101                 /* Wait till command completes */
3102                 wait_for_cmd_complete(sp);
3103
3104                 sp->m_cast_flg = 1;
3105                 sp->all_multi_pos = MAC_MC_ALL_MC_ADDR_OFFSET;
 3106         } else if (!(dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) {
3107                 /*  Disable all Multicast addresses */
3108                 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
3109                        &bar0->rmac_addr_data0_mem);
3110                 writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0),
3111                        &bar0->rmac_addr_data1_mem);
3112                 val64 = RMAC_ADDR_CMD_MEM_WE |
3113                     RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3114                     RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
3115                 writeq(val64, &bar0->rmac_addr_cmd_mem);
3116                 /* Wait till command completes */
3117                 wait_for_cmd_complete(sp);
3118
3119                 sp->m_cast_flg = 0;
3120                 sp->all_multi_pos = 0;
3121         }
3122
3123         if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) {
3124                 /*  Put the NIC into promiscuous mode */
3125                 add = &bar0->mac_cfg;
3126                 val64 = readq(&bar0->mac_cfg);
3127                 val64 |= MAC_CFG_RMAC_PROM_ENABLE;
3128
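                /*
                 * mac_cfg is key protected; the cfg key has to be rewritten
                 * before each 32-bit half of the register is updated.
                 */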
3129                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
3130                 writel((u32) val64, add);
3131                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
3132                 writel((u32) (val64 >> 32), (add + 4));
3133
3134                 val64 = readq(&bar0->mac_cfg);
3135                 sp->promisc_flg = 1;
3136                 DBG_PRINT(ERR_DBG, "%s: entered promiscuous mode\n",
3137                           dev->name);
3138         } else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
3139                 /*  Remove the NIC from promiscuous mode */
3140                 add = &bar0->mac_cfg;
3141                 val64 = readq(&bar0->mac_cfg);
3142                 val64 &= ~MAC_CFG_RMAC_PROM_ENABLE;
3143
3144                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
3145                 writel((u32) val64, add);
3146                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
3147                 writel((u32) (val64 >> 32), (add + 4));
3148
3149                 val64 = readq(&bar0->mac_cfg);
3150                 sp->promisc_flg = 0;
3151                 DBG_PRINT(ERR_DBG, "%s: left promiscuous mode\n",
3152                           dev->name);
3153         }
3154
3155         /*  Update individual M_CAST address list */
3156         if ((!sp->m_cast_flg) && dev->mc_count) {
3157                 if (dev->mc_count >
3158                     (MAX_ADDRS_SUPPORTED - MAC_MC_ADDR_START_OFFSET - 1)) {
3159                         DBG_PRINT(ERR_DBG, "%s: No more Rx filters ",
3160                                   dev->name);
3161                         DBG_PRINT(ERR_DBG, "can be added, please enable ");
3162                         DBG_PRINT(ERR_DBG, "ALL_MULTI instead\n");
3163                         return;
3164                 }
3165
3166                 prev_cnt = sp->mc_addr_count;
3167                 sp->mc_addr_count = dev->mc_count;
3168
3169                 /* Clear out the previous list of Mc in the H/W. */
3170                 for (i = 0; i < prev_cnt; i++) {
3171                         writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
3172                                &bar0->rmac_addr_data0_mem);
3173                         writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
3174                                 &bar0->rmac_addr_data1_mem);
3175                         val64 = RMAC_ADDR_CMD_MEM_WE |
3176                             RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3177                             RMAC_ADDR_CMD_MEM_OFFSET
3178                             (MAC_MC_ADDR_START_OFFSET + i);
3179                         writeq(val64, &bar0->rmac_addr_cmd_mem);
3180
 3181                         /* Wait till command completes */
3182                         if (wait_for_cmd_complete(sp)) {
3183                                 DBG_PRINT(ERR_DBG, "%s: Adding ",
3184                                           dev->name);
3185                                 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
3186                                 return;
3187                         }
3188                 }
3189
3190                 /* Create the new Rx filter list and update the same in H/W. */
3191                 for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
3192                      i++, mclist = mclist->next) {
3193                         memcpy(sp->usr_addrs[i].addr, mclist->dmi_addr,
3194                                ETH_ALEN);
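                        /*
                         * Pack the six address bytes into the low 48 bits of
                         * mac_addr, first byte most significant; the final
                         * right shift undoes the extra shift of the last
                         * loop pass.
                         */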
                        mac_addr = 0;
 3195                         for (j = 0; j < ETH_ALEN; j++) {
3196                                 mac_addr |= mclist->dmi_addr[j];
3197                                 mac_addr <<= 8;
3198                         }
3199                         mac_addr >>= 8;
3200                         writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
3201                                &bar0->rmac_addr_data0_mem);
3202                         writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
3203                                 &bar0->rmac_addr_data1_mem);
3204                         val64 = RMAC_ADDR_CMD_MEM_WE |
3205                             RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3206                             RMAC_ADDR_CMD_MEM_OFFSET
3207                             (i + MAC_MC_ADDR_START_OFFSET);
3208                         writeq(val64, &bar0->rmac_addr_cmd_mem);
3209
 3210                         /* Wait till command completes */
3211                         if (wait_for_cmd_complete(sp)) {
3212                                 DBG_PRINT(ERR_DBG, "%s: Adding ",
3213                                           dev->name);
3214                                 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
3215                                 return;
3216                         }
3217                 }
3218         }
3219 }
3220
3221 /**
3222  *  s2io_set_mac_addr - Programs the Xframe mac address
3223  *  @dev : pointer to the device structure.
3224  *  @addr: a uchar pointer to the new mac address which is to be set.
3225  *  Description : This procedure will program the Xframe to receive
3226  *  frames with new Mac Address
3227  *  Return value: SUCCESS on success and an appropriate (-)ve integer
3228  *  as defined in errno.h file on failure.
3229  */
3230
3231 int s2io_set_mac_addr(struct net_device *dev, u8 * addr)
3232 {
3233         nic_t *sp = dev->priv;
3234         XENA_dev_config_t __iomem *bar0 = sp->bar0;
3235         register u64 val64, mac_addr = 0;
3236         int i;
3237
3238         /*
3239          * Set the new MAC address as the new unicast filter and reflect this
3240          * change on the device address registered with the OS. It will be
3241          * at offset 0.
3242          */
3243         for (i = 0; i < ETH_ALEN; i++) {
3244                 mac_addr <<= 8;
3245                 mac_addr |= addr[i];
3246         }
3247
3248         writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
3249                &bar0->rmac_addr_data0_mem);
3250
3251         val64 =
3252             RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3253             RMAC_ADDR_CMD_MEM_OFFSET(0);
3254         writeq(val64, &bar0->rmac_addr_cmd_mem);
3255         /* Wait till command completes */
3256         if (wait_for_cmd_complete(sp)) {
3257                 DBG_PRINT(ERR_DBG, "%s: set_mac_addr failed\n", dev->name);
3258                 return FAILURE;
3259         }
3260
3261         return SUCCESS;
3262 }
3263
3264 /**
3265  * s2io_ethtool_sset - Sets different link parameters.
 3266  * @sp : private member of the device structure, which is a pointer to the s2io_nic structure.
3267  * @info: pointer to the structure with parameters given by ethtool to set
3268  * link information.
3269  * Description:
3270  * The function sets different link parameters provided by the user onto
3271  * the NIC.
3272  * Return value:
3273  * 0 on success.
3274 */
3275
3276 static int s2io_ethtool_sset(struct net_device *dev,
3277                              struct ethtool_cmd *info)
3278 {
3279         nic_t *sp = dev->priv;
3280         if ((info->autoneg == AUTONEG_ENABLE) ||
3281             (info->speed != SPEED_10000) || (info->duplex != DUPLEX_FULL))
3282                 return -EINVAL;
3283         else {
3284                 s2io_close(sp->dev);
3285                 s2io_open(sp->dev);
3286         }
3287
3288         return 0;
3289 }
3290
3291 /**
 3292  * s2io_ethtool_gset - Returns link specific information.
3293  * @sp : private member of the device structure, pointer to the
3294  *      s2io_nic structure.
3295  * @info : pointer to the structure with parameters given by ethtool
3296  * to return link information.
3297  * Description:
3298  * Returns link specific information like speed, duplex etc.. to ethtool.
3299  * Return value :
3300  * return 0 on success.
3301  */
3302
3303 static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
3304 {
3305         nic_t *sp = dev->priv;
3306         info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
3307         info->advertising = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
3308         info->port = PORT_FIBRE;
3309         /* info->transceiver?? TODO */
3310
3311         if (netif_carrier_ok(sp->dev)) {
3312                 info->speed = 10000;
3313                 info->duplex = DUPLEX_FULL;
3314         } else {
3315                 info->speed = -1;
3316                 info->duplex = -1;
3317         }
3318
3319         info->autoneg = AUTONEG_DISABLE;
3320         return 0;
3321 }
3322
3323 /**
3324  * s2io_ethtool_gdrvinfo - Returns driver specific information.
3325  * @sp : private member of the device structure, which is a pointer to the
3326  * s2io_nic structure.
3327  * @info : pointer to the structure with parameters given by ethtool to
3328  * return driver information.
3329  * Description:
 3330  * Returns driver specific information like name, version, etc. to ethtool.
3331  * Return value:
3332  *  void
3333  */
3334
3335 static void s2io_ethtool_gdrvinfo(struct net_device *dev,
3336                                   struct ethtool_drvinfo *info)
3337 {
3338         nic_t *sp = dev->priv;
3339
3340         strncpy(info->driver, s2io_driver_name, sizeof(s2io_driver_name));
3341         strncpy(info->version, s2io_driver_version,
3342                 sizeof(s2io_driver_version));
3343         strncpy(info->fw_version, "", 32);
3344         strncpy(info->bus_info, pci_name(sp->pdev), 32);
3345         info->regdump_len = XENA_REG_SPACE;
3346         info->eedump_len = XENA_EEPROM_SPACE;
3347         info->testinfo_len = S2IO_TEST_LEN;
3348         info->n_stats = S2IO_STAT_LEN;
3349 }
3350
3351 /**
 3352  *  s2io_ethtool_gregs - dumps the entire register space of the Xframe into the buffer.
3353  *  @sp: private member of the device structure, which is a pointer to the
3354  *  s2io_nic structure.
3355  *  @regs : pointer to the structure with parameters given by ethtool for
3356  *  dumping the registers.
 3357  *  @reg_space: The input argument into which all the registers are dumped.
3358  *  Description:
3359  *  Dumps the entire register space of xFrame NIC into the user given
3360  *  buffer area.
3361  * Return value :
3362  * void .
3363 */
3364
3365 static void s2io_ethtool_gregs(struct net_device *dev,
3366                                struct ethtool_regs *regs, void *space)
3367 {
3368         int i;
3369         u64 reg;
3370         u8 *reg_space = (u8 *) space;
3371         nic_t *sp = dev->priv;
3372
3373         regs->len = XENA_REG_SPACE;
3374         regs->version = sp->pdev->subsystem_device;
3375
3376         for (i = 0; i < regs->len; i += 8) {
3377                 reg = readq(sp->bar0 + i);
3378                 memcpy((reg_space + i), &reg, 8);
3379         }
3380 }
3381
3382 /**
3383  *  s2io_phy_id  - timer function that alternates adapter LED.
3384  *  @data : address of the private member of the device structure, which
 3385  *  is a pointer to the s2io_nic structure, cast to an unsigned long.
 3386  * Description: This is the timer function that toggles the adapter LED
 3387  * bit in the adapter control register on every invocation. The timer is
 3388  * set for 1/2 a second, hence the NIC blinks
3389  *  once every second.
3390 */
3391 static void s2io_phy_id(unsigned long data)
3392 {
3393         nic_t *sp = (nic_t *) data;
3394         XENA_dev_config_t __iomem *bar0 = sp->bar0;
3395         u64 val64 = 0;
3396         u16 subid;
3397
3398         subid = sp->pdev->subsystem_device;
3399         if ((subid & 0xFF) >= 0x07) {
3400                 val64 = readq(&bar0->gpio_control);
3401                 val64 ^= GPIO_CTRL_GPIO_0;
3402                 writeq(val64, &bar0->gpio_control);
3403         } else {
3404                 val64 = readq(&bar0->adapter_control);
3405                 val64 ^= ADAPTER_LED_ON;
3406                 writeq(val64, &bar0->adapter_control);
3407         }
3408
3409         mod_timer(&sp->id_timer, jiffies + HZ / 2);
3410 }
3411
3412 /**
3413  * s2io_ethtool_idnic - To physically identify the nic on the system.
3414  * @sp : private member of the device structure, which is a pointer to the
3415  * s2io_nic structure.
3416  * @id : pointer to the structure with identification parameters given by
3417  * ethtool.
3418  * Description: Used to physically identify the NIC on the system.
3419  * The Link LED will blink for a time specified by the user for
3420  * identification.
3421  * NOTE: The Link has to be Up to be able to blink the LED. Hence
 3422  * identification is possible only if its link is up.
3423  * Return value:
3424  * int , returns 0 on success
3425  */
3426
3427 static int s2io_ethtool_idnic(struct net_device *dev, u32 data)
3428 {
3429         u64 val64 = 0, last_gpio_ctrl_val;
3430         nic_t *sp = dev->priv;
3431         XENA_dev_config_t __iomem *bar0 = sp->bar0;
3432         u16 subid;
3433
3434         subid = sp->pdev->subsystem_device;
3435         last_gpio_ctrl_val = readq(&bar0->gpio_control);
3436         if ((subid & 0xFF) < 0x07) {
3437                 val64 = readq(&bar0->adapter_control);
3438                 if (!(val64 & ADAPTER_CNTL_EN)) {
3439                         printk(KERN_ERR
3440                                "Adapter Link down, cannot blink LED\n");
3441                         return -EFAULT;
3442                 }
3443         }
3444         if (sp->id_timer.function == NULL) {
3445                 init_timer(&sp->id_timer);
3446                 sp->id_timer.function = s2io_phy_id;
3447                 sp->id_timer.data = (unsigned long) sp;
3448         }
3449         mod_timer(&sp->id_timer, jiffies);
3450         if (data)
3451                 msleep_interruptible(data * HZ);
3452         else
3453                 msleep_interruptible(MAX_FLICKER_TIME);
3454         del_timer_sync(&sp->id_timer);
3455
3456         if (CARDS_WITH_FAULTY_LINK_INDICATORS(subid)) {
3457                 writeq(last_gpio_ctrl_val, &bar0->gpio_control);
3458                 last_gpio_ctrl_val = readq(&bar0->gpio_control);
3459         }
3460
3461         return 0;
3462 }
3463
3464 /**
 3465  * s2io_ethtool_getpause_data - Pause frame generation and reception.
3466  * @sp : private member of the device structure, which is a pointer to the
3467  *      s2io_nic structure.
3468  * @ep : pointer to the structure with pause parameters given by ethtool.
3469  * Description:
3470  * Returns the Pause frame generation and reception capability of the NIC.
3471  * Return value:
3472  *  void
3473  */
3474 static void s2io_ethtool_getpause_data(struct net_device *dev,
3475                                        struct ethtool_pauseparam *ep)
3476 {
3477         u64 val64;
3478         nic_t *sp = dev->priv;
3479         XENA_dev_config_t __iomem *bar0 = sp->bar0;
3480
3481         val64 = readq(&bar0->rmac_pause_cfg);
3482         if (val64 & RMAC_PAUSE_GEN_ENABLE)
3483                 ep->tx_pause = TRUE;
3484         if (val64 & RMAC_PAUSE_RX_ENABLE)
3485                 ep->rx_pause = TRUE;
3486         ep->autoneg = FALSE;
3487 }
3488
3489 /**
3490  * s2io_ethtool_setpause_data -  set/reset pause frame generation.
3491  * @sp : private member of the device structure, which is a pointer to the
3492  *      s2io_nic structure.
3493  * @ep : pointer to the structure with pause parameters given by ethtool.
3494  * Description:
3495  * It can be used to set or reset Pause frame generation or reception
3496  * support of the NIC.
3497  * Return value:
3498  * int, returns 0 on Success
3499  */
3500
3501 static int s2io_ethtool_setpause_data(struct net_device *dev,
3502                                struct ethtool_pauseparam *ep)
3503 {
3504         u64 val64;
3505         nic_t *sp = dev->priv;
3506         XENA_dev_config_t __iomem *bar0 = sp->bar0;
3507
3508         val64 = readq(&bar0->rmac_pause_cfg);
3509         if (ep->tx_pause)
3510                 val64 |= RMAC_PAUSE_GEN_ENABLE;
3511         else
3512                 val64 &= ~RMAC_PAUSE_GEN_ENABLE;
3513         if (ep->rx_pause)
3514                 val64 |= RMAC_PAUSE_RX_ENABLE;
3515         else
3516                 val64 &= ~RMAC_PAUSE_RX_ENABLE;
3517         writeq(val64, &bar0->rmac_pause_cfg);
3518         return 0;
3519 }
3520
3521 /**
3522  * read_eeprom - reads 4 bytes of data from user given offset.
3523  * @sp : private member of the device structure, which is a pointer to the
3524  *      s2io_nic structure.
 3525  * @off : offset at which the data is to be read
3526  * @data : Its an output parameter where the data read at the given
3527  *      offset is stored.
3528  * Description:
3529  * Will read 4 bytes of data from the user given offset and return the
3530  * read data.
 3531  * NOTE: Only the part of the EEPROM visible through the I2C bus
 3532  *   can be read.
3533  * Return value:
3534  *  -1 on failure and 0 on success.
3535  */
3536
3537 #define S2IO_DEV_ID             5
3538 static int read_eeprom(nic_t * sp, int off, u32 * data)
3539 {
3540         int ret = -1;
3541         u32 exit_cnt = 0;
3542         u64 val64;
3543         XENA_dev_config_t __iomem *bar0 = sp->bar0;
3544
3545         val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
3546             I2C_CONTROL_BYTE_CNT(0x3) | I2C_CONTROL_READ |
3547             I2C_CONTROL_CNTL_START;
3548         SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
3549
3550         while (exit_cnt < 5) {
3551                 val64 = readq(&bar0->i2c_control);
3552                 if (I2C_CONTROL_CNTL_END(val64)) {
3553                         *data = I2C_CONTROL_GET_DATA(val64);
3554                         ret = 0;
3555                         break;
3556                 }
3557                 msleep(50);
3558                 exit_cnt++;
3559         }
3560
3561         return ret;
3562 }
3563
3564 /**
3565  *  write_eeprom - actually writes the relevant part of the data value.
3566  *  @sp : private member of the device structure, which is a pointer to the
3567  *       s2io_nic structure.
3568  *  @off : offset at which the data must be written
3569  *  @data : The data that is to be written
3570  *  @cnt : Number of bytes of the data that are actually to be written into
3571  *  the Eeprom. (max of 3)
3572  * Description:
3573  *  Actually writes the relevant part of the data value into the Eeprom
3574  *  through the I2C bus.
3575  * Return value:
3576  *  0 on success, -1 on failure.
3577  */
3578
3579 static int write_eeprom(nic_t * sp, int off, u32 data, int cnt)
3580 {
3581         int exit_cnt = 0, ret = -1;
3582         u64 val64;
3583         XENA_dev_config_t __iomem *bar0 = sp->bar0;
3584
3585         val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
3586             I2C_CONTROL_BYTE_CNT(cnt) | I2C_CONTROL_SET_DATA(data) |
3587             I2C_CONTROL_CNTL_START;
3588         SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
3589
3590         while (exit_cnt < 5) {
3591                 val64 = readq(&bar0->i2c_control);
3592                 if (I2C_CONTROL_CNTL_END(val64)) {
3593                         if (!(val64 & I2C_CONTROL_NACK))
3594                                 ret = 0;
3595                         break;
3596                 }
3597                 msleep(50);
3598                 exit_cnt++;
3599         }
3600
3601         return ret;
3602 }
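
/*
 * Illustrative sketch only, not part of the driver: a hypothetical
 * write-then-verify of one word at EEPROM offset 0x4F0 using the
 * write_eeprom()/read_eeprom() helpers above (s2io_eeprom_test() below
 * expects this offset to be writable). Kept under "#if 0" so it is never
 * compiled.
 */
#if 0
        u32 verify = 0;

        if (write_eeprom(sp, 0x4F0, 0x01234567, 3))
                return -EIO;
        if (read_eeprom(sp, 0x4F0, &verify) || (verify != 0x01234567))
                return -EIO;
#endif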
3603
3604 /**
3605  *  s2io_ethtool_geeprom  - reads the value stored in the Eeprom.
 3606  *  @sp : private member of the device structure, which is a pointer to the s2io_nic structure.
3607  *  @eeprom : pointer to the user level structure provided by ethtool,
3608  *  containing all relevant information.
3609  *  @data_buf : user defined value to be written into Eeprom.
 3610  *  Description: Reads the values stored in the Eeprom at the given offset
 3611  *  for a given length. Stores these values in the input argument data
3612  *  buffer 'data_buf' and returns these to the caller (ethtool.)
3613  *  Return value:
3614  *  int  0 on success
3615  */
3616
3617 static int s2io_ethtool_geeprom(struct net_device *dev,
3618                          struct ethtool_eeprom *eeprom, u8 * data_buf)
3619 {
3620         u32 data, i, valid;
3621         nic_t *sp = dev->priv;
3622
3623         eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);
3624
3625         if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE))
3626                 eeprom->len = XENA_EEPROM_SPACE - eeprom->offset;
3627
3628         for (i = 0; i < eeprom->len; i += 4) {
3629                 if (read_eeprom(sp, (eeprom->offset + i), &data)) {
3630                         DBG_PRINT(ERR_DBG, "Read of EEPROM failed\n");
3631                         return -EFAULT;
3632                 }
3633                 valid = INV(data);
3634                 memcpy((data_buf + i), &valid, 4);
3635         }
3636         return 0;
3637 }
3638
3639 /**
3640  *  s2io_ethtool_seeprom - tries to write the user provided value in Eeprom
3641  *  @sp : private member of the device structure, which is a pointer to the
3642  *  s2io_nic structure.
3643  *  @eeprom : pointer to the user level structure provided by ethtool,
3644  *  containing all relevant information.
 3645  *  @data_buf : user defined value to be written into Eeprom.
3646  *  Description:
3647  *  Tries to write the user provided value in the Eeprom, at the offset
3648  *  given by the user.
3649  *  Return value:
3650  *  0 on success, -EFAULT on failure.
3651  */
3652
3653 static int s2io_ethtool_seeprom(struct net_device *dev,
3654                                 struct ethtool_eeprom *eeprom,
3655                                 u8 * data_buf)
3656 {
3657         int len = eeprom->len, cnt = 0;
3658         u32 valid = 0, data;
3659         nic_t *sp = dev->priv;
3660
3661         if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
3662                 DBG_PRINT(ERR_DBG,
3663                           "ETHTOOL_WRITE_EEPROM Err: Magic value ");
3664                 DBG_PRINT(ERR_DBG, "is wrong, Its not 0x%x\n",
3665                           eeprom->magic);
3666                 return -EFAULT;
3667         }
3668
3669         while (len) {
3670                 data = (u32) data_buf[cnt] & 0x000000FF;
3671                 if (data) {
3672                         valid = (u32) (data << 24);
3673                 } else
3674                         valid = data;
3675
3676                 if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) {
3677                         DBG_PRINT(ERR_DBG,
3678                                   "ETHTOOL_WRITE_EEPROM Err: Cannot ");
3679                         DBG_PRINT(ERR_DBG,
3680                                   "write into the specified offset\n");
3681                         return -EFAULT;
3682                 }
3683                 cnt++;
3684                 len--;
3685         }
3686
3687         return 0;
3688 }
3689
3690 /**
3691  * s2io_register_test - reads and writes into all clock domains.
3692  * @sp : private member of the device structure, which is a pointer to the
3693  * s2io_nic structure.
 3694  * @data : variable that returns the result of each of the tests conducted
 3695  * by the driver.
3696  * Description:
 3697  * Read and write into all clock domains. The NIC has 3 clock domains;
 3698  * verify that registers in all three regions are accessible.
3699  * Return value:
3700  * 0 on success.
3701  */
3702
3703 static int s2io_register_test(nic_t * sp, uint64_t * data)
3704 {
3705         XENA_dev_config_t __iomem *bar0 = sp->bar0;
3706         u64 val64 = 0;
3707         int fail = 0;
3708
3709         val64 = readq(&bar0->pif_rd_swapper_fb);
3710         if (val64 != 0x123456789abcdefULL) {
3711                 fail = 1;
3712                 DBG_PRINT(INFO_DBG, "Read Test level 1 fails\n");
3713         }
3714
3715         val64 = readq(&bar0->rmac_pause_cfg);
3716         if (val64 != 0xc000ffff00000000ULL) {
3717                 fail = 1;
3718                 DBG_PRINT(INFO_DBG, "Read Test level 2 fails\n");
3719         }
3720
3721         val64 = readq(&bar0->rx_queue_cfg);
3722         if (val64 != 0x0808080808080808ULL) {
3723                 fail = 1;
3724                 DBG_PRINT(INFO_DBG, "Read Test level 3 fails\n");
3725         }
3726
3727         val64 = readq(&bar0->xgxs_efifo_cfg);
3728         if (val64 != 0x000000001923141EULL) {
3729                 fail = 1;
3730                 DBG_PRINT(INFO_DBG, "Read Test level 4 fails\n");
3731         }
3732
3733         val64 = 0x5A5A5A5A5A5A5A5AULL;
3734         writeq(val64, &bar0->xmsi_data);
3735         val64 = readq(&bar0->xmsi_data);
3736         if (val64 != 0x5A5A5A5A5A5A5A5AULL) {
3737                 fail = 1;
3738                 DBG_PRINT(ERR_DBG, "Write Test level 1 fails\n");
3739         }
3740
3741         val64 = 0xA5A5A5A5A5A5A5A5ULL;
3742         writeq(val64, &bar0->xmsi_data);
3743         val64 = readq(&bar0->xmsi_data);
3744         if (val64 != 0xA5A5A5A5A5A5A5A5ULL) {
3745                 fail = 1;
3746                 DBG_PRINT(ERR_DBG, "Write Test level 2 fails\n");
3747         }
3748
3749         *data = fail;
3750         return 0;
3751 }
3752
3753 /**
 3754  * s2io_eeprom_test - to verify that the EEPROM in the Xena can be programmed.
3755  * @sp : private member of the device structure, which is a pointer to the
3756  * s2io_nic structure.
3757  * @data:variable that returns the result of each of the test conducted by
3758  * the driver.
3759  * Description:
 3760  * Verify that the EEPROM in the Xena can be programmed using the I2C_CONTROL
3761  * register.
3762  * Return value:
3763  * 0 on success.
3764  */
3765
3766 static int s2io_eeprom_test(nic_t * sp, uint64_t * data)
3767 {
3768         int fail = 0;
3769         u32 ret_data;
3770
3771         /* Test Write Error at offset 0 */
3772         if (!write_eeprom(sp, 0, 0, 3))
3773                 fail = 1;
3774
3775         /* Test Write at offset 4f0 */
3776         if (write_eeprom(sp, 0x4F0, 0x01234567, 3))
3777                 fail = 1;
3778         if (read_eeprom(sp, 0x4F0, &ret_data))
3779                 fail = 1;
3780
3781         if (ret_data != 0x01234567)
3782                 fail = 1;
3783
 3784         /* Reset the EEPROM data back to 0xFFFFFFFF */
3785         write_eeprom(sp, 0x4F0, 0xFFFFFFFF, 3);
3786
3787         /* Test Write Request Error at offset 0x7c */
3788         if (!write_eeprom(sp, 0x07C, 0, 3))
3789                 fail = 1;
3790
3791         /* Test Write Request at offset 0x7fc */
3792         if (write_eeprom(sp, 0x7FC, 0x01234567, 3))
3793                 fail = 1;
3794         if (read_eeprom(sp, 0x7FC, &ret_data))
3795                 fail = 1;
3796
3797         if (ret_data != 0x01234567)
3798                 fail = 1;
3799
 3800         /* Reset the EEPROM data back to 0xFFFFFFFF */
3801         write_eeprom(sp, 0x7FC, 0xFFFFFFFF, 3);
3802
3803         /* Test Write Error at offset 0x80 */
3804         if (!write_eeprom(sp, 0x080, 0, 3))
3805                 fail = 1;
3806
3807         /* Test Write Error at offset 0xfc */
3808         if (!write_eeprom(sp, 0x0FC, 0, 3))
3809                 fail = 1;
3810
3811         /* Test Write Error at offset 0x100 */
3812         if (!write_eeprom(sp, 0x100, 0, 3))
3813                 fail = 1;
3814
3815         /* Test Write Error at offset 4ec */
3816         if (!write_eeprom(sp, 0x4EC, 0, 3))
3817                 fail = 1;
3818
3819         *data = fail;
3820         return 0;
3821 }
3822
3823 /**
 3824  * s2io_bist_test - invokes the MemBist test of the card.
3825  * @sp : private member of the device structure, which is a pointer to the
3826  * s2io_nic structure.
3827  * @data:variable that returns the result of each of the test conducted by
3828  * the driver.
3829  * Description:
3830  * This invokes the MemBist test of the card. We give around
 3831  * 2 secs time for the test to complete. If it's still not complete
 3832  * within this period, we consider that the test failed.
3833  * Return value:
3834  * 0 on success and -1 on failure.
3835  */
3836
3837 static int s2io_bist_test(nic_t * sp, uint64_t * data)
3838 {
3839         u8 bist = 0;
3840         int cnt = 0, ret = -1;
3841
3842         pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
3843         bist |= PCI_BIST_START;
 3844         pci_write_config_byte(sp->pdev, PCI_BIST, bist);
3845
3846         while (cnt < 20) {
3847                 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
3848                 if (!(bist & PCI_BIST_START)) {
3849                         *data = (bist & PCI_BIST_CODE_MASK);
3850                         ret = 0;
3851                         break;
3852                 }
3853                 msleep(100);
3854                 cnt++;
3855         }
3856
3857         return ret;
3858 }
3859
3860 /**
 3861  * s2io_link_test - verifies the link state of the nic
 3862  * @sp : private member of the device structure, which is a pointer to the
3863  * s2io_nic structure.
3864  * @data: variable that returns the result of each of the test conducted by
3865  * the driver.
3866  * Description:
3867  * The function verifies the link state of the NIC and updates the input
3868  * argument 'data' appropriately.
3869  * Return value:
3870  * 0 on success.
3871  */
3872
3873 static int s2io_link_test(nic_t * sp, uint64_t * data)
3874 {
3875         XENA_dev_config_t __iomem *bar0 = sp->bar0;
3876         u64 val64;
3877
3878         val64 = readq(&bar0->adapter_status);
3879         if (val64 & ADAPTER_STATUS_RMAC_LOCAL_FAULT)
3880                 *data = 1;
3881
3882         return 0;
3883 }
3884
3885 /**
3886  * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
 3887  * @sp : private member of the device structure, which is a pointer to the
 3888  * s2io_nic structure.
 3889  * @data : variable that returns the result of each of the tests
 3890  * conducted by the driver.
3891  * Description:
 3892  *  This is one of the offline tests that checks the read and write
 3893  *  access to the RldRAM chip on the NIC.
3894  * Return value:
3895  *  0 on success.
3896  */
3897
3898 static int s2io_rldram_test(nic_t * sp, uint64_t * data)
3899 {
3900         XENA_dev_config_t __iomem *bar0 = sp->bar0;
3901         u64 val64;
3902         int cnt, iteration = 0, test_pass = 0;
3903
3904         val64 = readq(&bar0->adapter_control);
3905         val64 &= ~ADAPTER_ECC_EN;
3906         writeq(val64, &bar0->adapter_control);
3907
3908         val64 = readq(&bar0->mc_rldram_test_ctrl);
3909         val64 |= MC_RLDRAM_TEST_MODE;
3910         writeq(val64, &bar0->mc_rldram_test_ctrl);
3911
3912         val64 = readq(&bar0->mc_rldram_mrs);
3913         val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE;
3914         SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
3915
3916         val64 |= MC_RLDRAM_MRS_ENABLE;
3917         SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
3918
3919         while (iteration < 2) {
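                /*
                 * Two passes: the second pass repeats the write/read test
                 * with the upper 48 bits of each data pattern inverted.
                 */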
3920                 val64 = 0x55555555aaaa0000ULL;
3921                 if (iteration == 1) {
3922                         val64 ^= 0xFFFFFFFFFFFF0000ULL;
3923                 }
3924                 writeq(val64, &bar0->mc_rldram_test_d0);
3925
3926                 val64 = 0xaaaa5a5555550000ULL;
3927                 if (iteration == 1) {
3928                         val64 ^= 0xFFFFFFFFFFFF0000ULL;
3929                 }
3930                 writeq(val64, &bar0->mc_rldram_test_d1);
3931
3932                 val64 = 0x55aaaaaaaa5a0000ULL;
3933                 if (iteration == 1) {
3934                         val64 ^= 0xFFFFFFFFFFFF0000ULL;
3935                 }
3936                 writeq(val64, &bar0->mc_rldram_test_d2);
3937
3938                 val64 = (u64) (0x0000003fffff0000ULL);
3939                 writeq(val64, &bar0->mc_rldram_test_add);
3940
3941
3942                 val64 = MC_RLDRAM_TEST_MODE;
3943                 writeq(val64, &bar0->mc_rldram_test_ctrl);
3944
3945                 val64 |=
3946                     MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_WRITE |
3947                     MC_RLDRAM_TEST_GO;
3948                 writeq(val64, &bar0->mc_rldram_test_ctrl);
3949
3950                 for (cnt = 0; cnt < 5; cnt++) {
3951                         val64 = readq(&bar0->mc_rldram_test_ctrl);
3952                         if (val64 & MC_RLDRAM_TEST_DONE)
3953                                 break;
3954                         msleep(200);
3955                 }
3956
3957                 if (cnt == 5)
3958                         break;
3959
3960                 val64 = MC_RLDRAM_TEST_MODE;
3961                 writeq(val64, &bar0->mc_rldram_test_ctrl);
3962
3963                 val64 |= MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
3964                 writeq(val64, &bar0->mc_rldram_test_ctrl);
3965
3966                 for (cnt = 0; cnt < 5; cnt++) {
3967                         val64 = readq(&bar0->mc_rldram_test_ctrl);
3968                         if (val64 & MC_RLDRAM_TEST_DONE)
3969                                 break;
3970                         msleep(500);
3971                 }
3972
3973                 if (cnt == 5)
3974                         break;
3975
3976                 val64 = readq(&bar0->mc_rldram_test_ctrl);
3977                 if (val64 & MC_RLDRAM_TEST_PASS)
3978                         test_pass = 1;
3979
3980                 iteration++;
3981         }
3982
3983         if (!test_pass)
3984                 *data = 1;
3985         else
3986                 *data = 0;
3987
3988         return 0;
3989 }
3990
3991 /**
 3992  *  s2io_ethtool_test - conducts 6 tests to determine the health of the card.
3993  *  @sp : private member of the device structure, which is a pointer to the
3994  *  s2io_nic structure.
3995  *  @ethtest : pointer to a ethtool command specific structure that will be
3996  *  returned to the user.
3997  *  @data : variable that returns the result of each of the test
3998  * conducted by the driver.
3999  * Description:
4000  *  This function conducts 6 tests ( 4 offline and 2 online) to determine
4001  *  the health of the card.
4002  * Return value:
4003  *  void
4004  */
4005
4006 static void s2io_ethtool_test(struct net_device *dev,
4007                               struct ethtool_test *ethtest,
4008                               uint64_t * data)
4009 {
4010         nic_t *sp = dev->priv;
4011         int orig_state = netif_running(sp->dev);
4012
4013         if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
4014                 /* Offline Tests. */
4015                 if (orig_state)
4016                         s2io_close(sp->dev);
4017
4018                 if (s2io_register_test(sp, &data[0]))
4019                         ethtest->flags |= ETH_TEST_FL_FAILED;
4020
4021                 s2io_reset(sp);
4022
4023                 if (s2io_rldram_test(sp, &data[3]))
4024                         ethtest->flags |= ETH_TEST_FL_FAILED;
4025
4026                 s2io_reset(sp);
4027
4028                 if (s2io_eeprom_test(sp, &data[1]))
4029                         ethtest->flags |= ETH_TEST_FL_FAILED;
4030
4031                 if (s2io_bist_test(sp, &data[4]))
4032                         ethtest->flags |= ETH_TEST_FL_FAILED;
4033
4034                 if (orig_state)
4035                         s2io_open(sp->dev);
4036
4037                 data[2] = 0;
4038         } else {
4039                 /* Online Tests. */
4040                 if (!orig_state) {
4041                         DBG_PRINT(ERR_DBG,
4042                                   "%s: is not up, cannot run test\n",
4043                                   dev->name);
4044                         data[0] = -1;
4045                         data[1] = -1;
4046                         data[2] = -1;
4047                         data[3] = -1;
4048                         data[4] = -1;
4049                 }
4050
4051                 if (s2io_link_test(sp, &data[2]))
4052                         ethtest->flags |= ETH_TEST_FL_FAILED;
4053
4054                 data[0] = 0;
4055                 data[1] = 0;
4056                 data[3] = 0;
4057                 data[4] = 0;
4058         }
4059 }
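/*
 * Usage sketch (interface name illustrative only): these self-tests are
 * normally driven from user space with the ethtool utility, e.g.
 *
 *     ethtool -t eth0 offline   # register, EEPROM, RLDRAM and BIST tests
 *     ethtool -t eth0 online    # link test only
 *
 * with the per-test results reported back through the data[] array above.
 */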
4060
4061 static void s2io_get_ethtool_stats(struct net_device *dev,
4062                                    struct ethtool_stats *estats,
4063                                    u64 * tmp_stats)
4064 {
4065         int i = 0;
4066         nic_t *sp = dev->priv;
4067         StatInfo_t *stat_info = sp->mac_control.stats_info;
4068
4069         s2io_updt_stats(sp);
4070         tmp_stats[i++] = le32_to_cpu(stat_info->tmac_frms);
4071         tmp_stats[i++] = le32_to_cpu(stat_info->tmac_data_octets);
4072         tmp_stats[i++] = le64_to_cpu(stat_info->tmac_drop_frms);
4073         tmp_stats[i++] = le32_to_cpu(stat_info->tmac_mcst_frms);
4074         tmp_stats[i++] = le32_to_cpu(stat_info->tmac_bcst_frms);
4075         tmp_stats[i++] = le64_to_cpu(stat_info->tmac_pause_ctrl_frms);
4076         tmp_stats[i++] = le32_to_cpu(stat_info->tmac_any_err_frms);
4077         tmp_stats[i++] = le64_to_cpu(stat_info->tmac_vld_ip_octets);
4078         tmp_stats[i++] = le32_to_cpu(stat_info->tmac_vld_ip);
4079         tmp_stats[i++] = le32_to_cpu(stat_info->tmac_drop_ip);
4080         tmp_stats[i++] = le32_to_cpu(stat_info->tmac_icmp);
4081         tmp_stats[i++] = le32_to_cpu(stat_info->tmac_rst_tcp);
4082         tmp_stats[i++] = le64_to_cpu(stat_info->tmac_tcp);
4083         tmp_stats[i++] = le32_to_cpu(stat_info->tmac_udp);
4084         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_vld_frms);
4085         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_data_octets);
4086         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_fcs_err_frms);
4087         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_drop_frms);
4088         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_vld_mcst_frms);
4089         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_vld_bcst_frms);
4090         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_in_rng_len_err_frms);
4091         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_long_frms);
4092         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_pause_ctrl_frms);
4093         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_discarded_frms);
4094         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_usized_frms);
4095         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_osized_frms);
4096         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_frag_frms);
4097         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_jabber_frms);
4098         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_ip);
4099         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ip_octets);
4100         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_hdr_err_ip);
4101         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_drop_ip);
4102         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_icmp);
4103         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_tcp);
4104         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_udp);
4105         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_err_drp_udp);
4106         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_pause_cnt);
4107         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_accepted_ip);
4108         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_err_tcp);
4109         tmp_stats[i++] = 0;
4110         tmp_stats[i++] = stat_info->sw_stat.single_ecc_errs;
4111         tmp_stats[i++] = stat_info->sw_stat.double_ecc_errs;
4112 }
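/*
 * These counters are exported through the ethtool statistics interface
 * (e.g. "ethtool -S eth0"); their order must stay in sync with the
 * ethtool_stats_keys strings handed out by s2io_ethtool_get_strings()
 * and with S2IO_STAT_LEN below.
 */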
4113
4114 int s2io_ethtool_get_regs_len(struct net_device *dev)
4115 {
4116         return (XENA_REG_SPACE);
4117 }
4118
4119
4120 u32 s2io_ethtool_get_rx_csum(struct net_device * dev)
4121 {
4122         nic_t *sp = dev->priv;
4123
4124         return (sp->rx_csum);
4125 }
4126 int s2io_ethtool_set_rx_csum(struct net_device *dev, u32 data)
4127 {
4128         nic_t *sp = dev->priv;
4129
4130         if (data)
4131                 sp->rx_csum = 1;
4132         else
4133                 sp->rx_csum = 0;
4134
4135         return 0;
4136 }
4137 int s2io_get_eeprom_len(struct net_device *dev)
4138 {
4139         return (XENA_EEPROM_SPACE);
4140 }
4141
4142 int s2io_ethtool_self_test_count(struct net_device *dev)
4143 {
4144         return (S2IO_TEST_LEN);
4145 }
4146 void s2io_ethtool_get_strings(struct net_device *dev,
4147                               u32 stringset, u8 * data)
4148 {
4149         switch (stringset) {
4150         case ETH_SS_TEST:
4151                 memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
4152                 break;
4153         case ETH_SS_STATS:
4154                 memcpy(data, &ethtool_stats_keys,
4155                        sizeof(ethtool_stats_keys));
4156         }
4157 }
4158 static int s2io_ethtool_get_stats_count(struct net_device *dev)
4159 {
4160         return (S2IO_STAT_LEN);
4161 }
4162
4163 int s2io_ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
4164 {
4165         if (data)
4166                 dev->features |= NETIF_F_IP_CSUM;
4167         else
4168                 dev->features &= ~NETIF_F_IP_CSUM;
4169
4170         return 0;
4171 }
4172
4173
4174 static struct ethtool_ops netdev_ethtool_ops = {
4175         .get_settings = s2io_ethtool_gset,
4176         .set_settings = s2io_ethtool_sset,
4177         .get_drvinfo = s2io_ethtool_gdrvinfo,
4178         .get_regs_len = s2io_ethtool_get_regs_len,
4179         .get_regs = s2io_ethtool_gregs,
4180         .get_link = ethtool_op_get_link,
4181         .get_eeprom_len = s2io_get_eeprom_len,
4182         .get_eeprom = s2io_ethtool_geeprom,
4183         .set_eeprom = s2io_ethtool_seeprom,
4184         .get_pauseparam = s2io_ethtool_getpause_data,
4185         .set_pauseparam = s2io_ethtool_setpause_data,
4186         .get_rx_csum = s2io_ethtool_get_rx_csum,
4187         .set_rx_csum = s2io_ethtool_set_rx_csum,
4188         .get_tx_csum = ethtool_op_get_tx_csum,
4189         .set_tx_csum = s2io_ethtool_op_set_tx_csum,
4190         .get_sg = ethtool_op_get_sg,
4191         .set_sg = ethtool_op_set_sg,
4192 #ifdef NETIF_F_TSO
4193         .get_tso = ethtool_op_get_tso,
4194         .set_tso = ethtool_op_set_tso,
4195 #endif
4196         .self_test_count = s2io_ethtool_self_test_count,
4197         .self_test = s2io_ethtool_test,
4198         .get_strings = s2io_ethtool_get_strings,
4199         .phys_id = s2io_ethtool_idnic,
4200         .get_stats_count = s2io_ethtool_get_stats_count,
4201         .get_ethtool_stats = s2io_get_ethtool_stats
4202 };
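/*
 * This table is attached to the net_device with SET_ETHTOOL_OPS() in
 * s2io_init_nic() below; the ethtool core then dispatches the corresponding
 * ETHTOOL_* ioctls to these handlers.
 */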
4203
4204 /**
4205  *  s2io_ioctl - Entry point for the Ioctl
4206  *  @dev :  Device pointer.
4207  *  @ifr :  An IOCTL specific structure, that can contain a pointer to
4208  *  a proprietary structure used to pass information to the driver.
4209  *  @cmd :  This is used to distinguish between the different commands that
4210  *  can be passed to the IOCTL functions.
4211  *  Description:
4212  *  Currently there is no special functionality supported in IOCTL, hence
4213  *  this function always returns -EOPNOTSUPP.
4214  */
4215
4216 int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
4217 {
4218         return -EOPNOTSUPP;
4219 }
4220
4221 /**
4222  *  s2io_change_mtu - entry point to change MTU size for the device.
4223  *   @dev : device pointer.
4224  *   @new_mtu : the new MTU size for the device.
4225  *   Description: A driver entry point to change MTU size for the device.
4226  *   Before changing the MTU the device must be stopped.
4227  *  Return value:
4228  *   0 on success and an appropriate (-)ve integer as defined in errno.h
4229  *   file on failure.
4230  */
4231
4232 int s2io_change_mtu(struct net_device *dev, int new_mtu)
4233 {
4234         nic_t *sp = dev->priv;
4235         XENA_dev_config_t __iomem *bar0 = sp->bar0;
4236         register u64 val64;
4237
4238         if (netif_running(dev)) {
4239                 DBG_PRINT(ERR_DBG, "%s: Must be stopped to ", dev->name);
4240                 DBG_PRINT(ERR_DBG, "change its MTU\n");
4241                 return -EBUSY;
4242         }
4243
4244         if ((new_mtu < MIN_MTU) || (new_mtu > S2IO_JUMBO_SIZE)) {
4245                 DBG_PRINT(ERR_DBG, "%s: MTU size is invalid.\n",
4246                           dev->name);
4247                 return -EPERM;
4248         }
4249
4250         /* Set the new MTU into the PYLD register of the NIC */
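        /*
         * vBIT(val, loc, sz) is assumed (see s2io.h) to shift the value into
         * a sz-bit wide field starting at bit 'loc', counted from the most
         * significant bit of the 64-bit register; the write below therefore
         * programs the 14-bit maximum payload length field of
         * rmac_max_pyld_len.
         */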
4251         val64 = new_mtu;
4252         writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
4253
4254         dev->mtu = new_mtu;
4255
4256         return 0;
4257 }
4258
4259 /**
4260  *  s2io_tasklet - Bottom half of the ISR.
4261  *  @dev_addr : address of the net_device structure, cast to an unsigned long.
4262  *  Description:
4263  *  This is the tasklet or the bottom half of the ISR. This is
4264  *  an extension of the ISR which is scheduled by the scheduler to be run
4265  *  when the load on the CPU is low. All low priority tasks of the ISR can
4266  *  be pushed into the tasklet. For now the tasklet is used only to
4267  *  replenish the Rx buffers in the Rx buffer descriptors.
4268  *  Return value:
4269  *  void.
4270  */
4271
4272 static void s2io_tasklet(unsigned long dev_addr)
4273 {
4274         struct net_device *dev = (struct net_device *) dev_addr;
4275         nic_t *sp = dev->priv;
4276         int i, ret;
4277         mac_info_t *mac_control;
4278         struct config_param *config;
4279
4280         mac_control = &sp->mac_control;
4281         config = &sp->config;
4282
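        /*
         * TASKLET_IN_USE is assumed (see s2io.h) to test-and-set bit 0 of
         * sp->tasklet_status, so only one invocation at a time refills the
         * Rx rings; the bit is cleared again below once all rings have been
         * handled.
         */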
4283         if (!TASKLET_IN_USE) {
4284                 for (i = 0; i < config->rx_ring_num; i++) {
4285                         ret = fill_rx_buffers(sp, i);
4286                         if (ret == -ENOMEM) {
4287                                 DBG_PRINT(ERR_DBG, "%s: Out of ",
4288                                           dev->name);
4289                                 DBG_PRINT(ERR_DBG, "memory in tasklet\n");
4290                                 break;
4291                         } else if (ret == -EFILL) {
4292                                 DBG_PRINT(ERR_DBG,
4293                                           "%s: Rx Ring %d is full\n",
4294                                           dev->name, i);
4295                                 break;
4296                         }
4297                 }
4298                 clear_bit(0, (&sp->tasklet_status));
4299         }
4300 }
4301
4302 /**
4303  * s2io_set_link - Set the link status
4304  * @data: long pointer to device private structure
4305  * Description: Sets the link status for the adapter
4306  */
4307
4308 static void s2io_set_link(unsigned long data)
4309 {
4310         nic_t *nic = (nic_t *) data;
4311         struct net_device *dev = nic->dev;
4312         XENA_dev_config_t __iomem *bar0 = nic->bar0;
4313         register u64 val64;
4314         u16 subid;
4315
4316         if (test_and_set_bit(0, &(nic->link_state))) {
4317                 /* The card is being reset, no point doing anything */
4318                 return;
4319         }
4320
4321         subid = nic->pdev->subsystem_device;
4322         /*
4323          * Allow a small delay for the NIC's self-initiated
4324          * cleanup to complete.
4325          */
4326         msleep(100);
4327
4328         val64 = readq(&bar0->adapter_status);
4329         if (verify_xena_quiescence(nic, val64, nic->device_enabled_once)) {
4330                 if (LINK_IS_UP(val64)) {
4331                         val64 = readq(&bar0->adapter_control);
4332                         val64 |= ADAPTER_CNTL_EN;
4333                         writeq(val64, &bar0->adapter_control);
4334                         if (CARDS_WITH_FAULTY_LINK_INDICATORS(subid)) {
4335                                 val64 = readq(&bar0->gpio_control);
4336                                 val64 |= GPIO_CTRL_GPIO_0;
4337                                 writeq(val64, &bar0->gpio_control);
4338                                 val64 = readq(&bar0->gpio_control);
4339                         } else {
4340                                 val64 |= ADAPTER_LED_ON;
4341                                 writeq(val64, &bar0->adapter_control);
4342                         }
4343                         val64 = readq(&bar0->adapter_status);
4344                         if (!LINK_IS_UP(val64)) {
4345                                 DBG_PRINT(ERR_DBG, "%s:", dev->name);
4346                                 DBG_PRINT(ERR_DBG, " Link down ");
4347                                 DBG_PRINT(ERR_DBG, "after ");
4348                                 DBG_PRINT(ERR_DBG, "enabling ");
4349                                 DBG_PRINT(ERR_DBG, "device\n");
4350                         }
4351                         if (nic->device_enabled_once == FALSE) {
4352                                 nic->device_enabled_once = TRUE;
4353                         }
4354                         s2io_link(nic, LINK_UP);
4355                 } else {
4356                         if (CARDS_WITH_FAULTY_LINK_INDICATORS(subid)) {
4357                                 val64 = readq(&bar0->gpio_control);
4358                                 val64 &= ~GPIO_CTRL_GPIO_0;
4359                                 writeq(val64, &bar0->gpio_control);
4360                                 val64 = readq(&bar0->gpio_control);
4361                         }
4362                         s2io_link(nic, LINK_DOWN);
4363                 }
4364         } else {                /* NIC is not Quiescent. */
4365                 DBG_PRINT(ERR_DBG, "%s: Error: ", dev->name);
4366                 DBG_PRINT(ERR_DBG, "device is not Quiescent\n");
4367                 netif_stop_queue(dev);
4368         }
4369         clear_bit(0, &(nic->link_state));
4370 }
4371
4372 static void s2io_card_down(nic_t * sp)
4373 {
4374         int cnt = 0;
4375         XENA_dev_config_t __iomem *bar0 = sp->bar0;
4376         unsigned long flags;
4377         register u64 val64 = 0;
4378
4379         /* If s2io_set_link task is executing, wait till it completes. */
4380         while (test_and_set_bit(0, &(sp->link_state))) {
4381                 msleep(50);
4382         }
4383         atomic_set(&sp->card_state, CARD_DOWN);
4384
4385         /* disable Tx and Rx traffic on the NIC */
4386         stop_nic(sp);
4387
4388         /* Kill tasklet. */
4389         tasklet_kill(&sp->task);
4390
4391         /* Check if the device is Quiescent and then Reset the NIC */
4392         do {
4393                 val64 = readq(&bar0->adapter_status);
4394                 if (verify_xena_quiescence(sp, val64, sp->device_enabled_once)) {
4395                         break;
4396                 }
4397
4398                 msleep(50);
4399                 cnt++;
4400                 if (cnt == 10) {
4401                         DBG_PRINT(ERR_DBG,
4402                                   "s2io_close:Device not Quiescent ");
4403                         DBG_PRINT(ERR_DBG, "adaper status reads 0x%llx\n",
4404                                   (unsigned long long) val64);
4405                         break;
4406                 }
4407         } while (1);
4408         s2io_reset(sp);
4409
4410         /* Waiting till all Interrupt handlers are complete */
4411         cnt = 0;
4412         do {
4413                 msleep(10);
4414                 if (!atomic_read(&sp->isr_cnt))
4415                         break;
4416                 cnt++;
4417         } while(cnt < 5);
4418
4419         spin_lock_irqsave(&sp->tx_lock, flags);
4420         /* Free all Tx buffers */
4421         free_tx_buffers(sp);
4422         spin_unlock_irqrestore(&sp->tx_lock, flags);
4423
4424         /* Free all Rx buffers */
4425         spin_lock_irqsave(&sp->rx_lock, flags);
4426         free_rx_buffers(sp);
4427         spin_unlock_irqrestore(&sp->rx_lock, flags);
4428
4429         clear_bit(0, &(sp->link_state));
4430 }
4431
4432 static int s2io_card_up(nic_t * sp)
4433 {
4434         int i, ret;
4435         mac_info_t *mac_control;
4436         struct config_param *config;
4437         struct net_device *dev = (struct net_device *) sp->dev;
4438
4439         /* Initialize the H/W I/O registers */
4440         if (init_nic(sp) != 0) {
4441                 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
4442                           dev->name);
4443                 return -ENODEV;
4444         }
4445
4446         /*
4447          * Initializing the Rx buffers. For now we are considering only 1
4448          * Rx ring and initializing buffers into 30 Rx blocks
4449          */
4450         mac_control = &sp->mac_control;
4451         config = &sp->config;
4452
4453         for (i = 0; i < config->rx_ring_num; i++) {
4454                 if ((ret = fill_rx_buffers(sp, i))) {
4455                         DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
4456                                   dev->name);
4457                         s2io_reset(sp);
4458                         free_rx_buffers(sp);
4459                         return -ENOMEM;
4460                 }
4461                 DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
4462                           atomic_read(&sp->rx_bufs_left[i]));
4463         }
4464
4465         /* Setting its receive mode */
4466         s2io_set_multicast(dev);
4467
4468         /* Enable tasklet for the device */
4469         tasklet_init(&sp->task, s2io_tasklet, (unsigned long) dev);
4470
4471         /* Enable Rx Traffic and interrupts on the NIC */
4472         if (start_nic(sp)) {
4473                 DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
4474                 tasklet_kill(&sp->task);
4475                 s2io_reset(sp);
4476                 free_irq(dev->irq, dev);
4477                 free_rx_buffers(sp);
4478                 return -ENODEV;
4479         }
4480
4481         atomic_set(&sp->card_state, CARD_UP);
4482         return 0;
4483 }
4484
4485 /**
4486  * s2io_restart_nic - Resets the NIC.
4487  * @data : long pointer to the device private structure
4488  * Description:
4489  * This function is scheduled to be run by the s2io_tx_watchdog
4490  * function through a work queue to reset the NIC. The idea is to reduce
4491  * the run time of the watchdog routine which is run holding a
4492  * spin lock.
4493  */
4494
4495 static void s2io_restart_nic(unsigned long data)
4496 {
4497         struct net_device *dev = (struct net_device *) data;
4498         nic_t *sp = dev->priv;
4499
4500         s2io_card_down(sp);
4501         if (s2io_card_up(sp)) {
4502                 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
4503                           dev->name);
4504         }
4505         netif_wake_queue(dev);
4506         DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n",
4507                   dev->name);
4508
4509 }
4510
4511 /**
4512  *  s2io_tx_watchdog - Watchdog for transmit side.
4513  *  @dev : Pointer to net device structure
4514  *  Description:
4515  *  This function is triggered if the Tx Queue is stopped
4516  *  for a pre-defined amount of time when the Interface is still up.
4517  *  If the Interface is jammed in such a situation, the hardware is
4518  *  reset (by s2io_close) and restarted again (by s2io_open) to
4519  *  overcome any problem that might have been caused in the hardware.
4520  *  Return value:
4521  *  void
4522  */
4523
4524 static void s2io_tx_watchdog(struct net_device *dev)
4525 {
4526         nic_t *sp = dev->priv;
4527
4528         if (netif_carrier_ok(dev)) {
4529                 schedule_work(&sp->rst_timer_task);
4530         }
4531 }
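/*
 * Note: sp->rst_timer_task is bound to s2io_restart_nic() via INIT_WORK()
 * in s2io_init_nic(), so scheduling it here defers the actual card reset
 * to process context instead of doing it inside the watchdog itself.
 */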
4532
4533 /**
4534  *   rx_osm_handler - To perform some OS related operations on SKB.
4535  *   @ring_data: per-ring control structure (ring_info_t) of the ring on
4536  *   which this frame was received.
4537  *   @rxdp : pointer to the Rx descriptor that carries the received frame.
4540  *   Description:
4541  *   This function is called by the Rx interrupt service routine to perform
4542  *   some OS related operations on the SKB before passing it to the upper
4543  *   layers. It mainly checks if the checksum is OK; if so, it marks the
4544  *   SKB as checksum-verified, increments the Rx packet count and passes the
4545  *   SKB to the upper layer. If the checksum is wrong, the SKB is still
4546  *   passed up, but with ip_summed set to CHECKSUM_NONE.
4547  *   Return value:
4548  *   SUCCESS on success and -1 on failure.
4549  */
4550 static int rx_osm_handler(ring_info_t *ring_data, RxD_t * rxdp)
4551 {
4552         nic_t *sp = ring_data->nic;
4553         struct net_device *dev = (struct net_device *) sp->dev;
4554         struct sk_buff *skb = (struct sk_buff *)
4555                 ((unsigned long) rxdp->Host_Control);
4556         int ring_no = ring_data->ring_no;
4557         u16 l3_csum, l4_csum;
4558 #ifdef CONFIG_2BUFF_MODE
4559         int buf0_len = RXD_GET_BUFFER0_SIZE(rxdp->Control_2);
4560         int buf2_len = RXD_GET_BUFFER2_SIZE(rxdp->Control_2);
4561         int get_block = ring_data->rx_curr_get_info.block_index;
4562         int get_off = ring_data->rx_curr_get_info.offset;
4563         buffAdd_t *ba = &ring_data->ba[get_block][get_off];
4564         unsigned char *buff;
4565 #else
4566         u16 len = (u16) ((RXD_GET_BUFFER0_SIZE(rxdp->Control_2)) >> 48);
4567 #endif
4568         skb->dev = dev;
4569         if (rxdp->Control_1 & RXD_T_CODE) {
4570                 unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
4571                 DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%llx\n",
4572                           dev->name, err);
4573         }
4574
4575         /* Updating statistics */
4576         rxdp->Host_Control = 0;
4577         sp->rx_pkt_count++;
4578         sp->stats.rx_packets++;
4579 #ifndef CONFIG_2BUFF_MODE
4580         sp->stats.rx_bytes += len;
4581 #else
4582         sp->stats.rx_bytes += buf0_len + buf2_len;
4583 #endif
4584
4585 #ifndef CONFIG_2BUFF_MODE
4586         skb_put(skb, len);
4587 #else
4588         buff = skb_push(skb, buf0_len);
4589         memcpy(buff, ba->ba_0, buf0_len);
4590         skb_put(skb, buf2_len);
4591 #endif
4592
4593         if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) &&
4594             (sp->rx_csum)) {
4595                 l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
4596                 l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
4597                 if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
4598                         /*
4599                          * NIC verifies if the Checksum of the received
4600                          * frame is Ok or not and accordingly returns
4601                          * a flag in the RxD.
4602                          */
4603                         skb->ip_summed = CHECKSUM_UNNECESSARY;
4604                 } else {
4605                         /*
4606                          * Packet with erroneous checksum, let the
4607                          * upper layers deal with it.
4608                          */
4609                         skb->ip_summed = CHECKSUM_NONE;
4610                 }
4611         } else {
4612                 skb->ip_summed = CHECKSUM_NONE;
4613         }
4614
4615         skb->protocol = eth_type_trans(skb, dev);
4616 #ifdef CONFIG_S2IO_NAPI
4617         netif_receive_skb(skb);
4618 #else
4619         netif_rx(skb);
4620 #endif
4621         dev->last_rx = jiffies;
4622         atomic_dec(&sp->rx_bufs_left[ring_no]);
4623         return SUCCESS;
4624 }
4625
4626 /**
4627  *  s2io_link - stops/starts the Tx queue.
4628  *  @sp : private member of the device structure, which is a pointer to the
4629  *  s2io_nic structure.
4630  *  @link : indicates whether link is UP/DOWN.
4631  *  Description:
4632  *  This function stops/starts the Tx queue depending on whether the link
4633  *  status of the NIC is down or up. This is called by the Alarm
4634  *  interrupt handler whenever a link change interrupt comes up.
4635  *  Return value:
4636  *  void.
4637  */
4638
4639 void s2io_link(nic_t * sp, int link)
4640 {
4641         struct net_device *dev = (struct net_device *) sp->dev;
4642
4643         if (link != sp->last_link_state) {
4644                 if (link == LINK_DOWN) {
4645                         DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
4646                         netif_carrier_off(dev);
4647                 } else {
4648                         DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name);
4649                         netif_carrier_on(dev);
4650                 }
4651         }
4652         sp->last_link_state = link;
4653 }
4654
4655 /**
4656  *  get_xena_rev_id - to identify revision ID of xena.
4657  *  @pdev : PCI Dev structure
4658  *  Description:
4659  *  Function to identify the Revision ID of xena.
4660  *  Return value:
4661  *  returns the revision ID of the device.
4662  */
4663
4664 int get_xena_rev_id(struct pci_dev *pdev)
4665 {
4666         u8 id = 0;
4667         int ret;
4668         ret = pci_read_config_byte(pdev, PCI_REVISION_ID, (u8 *) & id);
4669         return id;
4670 }
4671
4672 /**
4673  *  s2io_init_pci - Initialization of PCI and PCI-X configuration registers.
4674  *  @sp : private member of the device structure, which is a pointer to the
4675  *  s2io_nic structure.
4676  *  Description:
4677  *  This function initializes a few of the PCI and PCI-X configuration registers
4678  *  with recommended values.
4679  *  Return value:
4680  *  void
4681  */
4682
4683 static void s2io_init_pci(nic_t * sp)
4684 {
4685         u16 pci_cmd = 0, pcix_cmd = 0;
4686
4687         /* Enable Data Parity Error Recovery in PCI-X command register. */
4688         pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
4689                              &(pcix_cmd));
4690         pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
4691                               (pcix_cmd | 1));
4692         pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
4693                              &(pcix_cmd));
4694
4695         /* Set the PErr Response bit in PCI command register. */
4696         pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
4697         pci_write_config_word(sp->pdev, PCI_COMMAND,
4698                               (pci_cmd | PCI_COMMAND_PARITY));
4699         pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
4700
4701         /* Forcibly disabling relaxed ordering capability of the card. */
4702         pcix_cmd &= 0xfffd;
4703         pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
4704                               pcix_cmd);
4705         pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
4706                              &(pcix_cmd));
4707 }
4708
4709 MODULE_AUTHOR("Raghavendra Koushik <raghavendra.koushik@neterion.com>");
4710 MODULE_LICENSE("GPL");
4711 module_param(tx_fifo_num, int, 0);
4712 module_param(rx_ring_num, int, 0);
4713 module_param_array(tx_fifo_len, uint, NULL, 0);
4714 module_param_array(rx_ring_sz, uint, NULL, 0);
4715 module_param_array(rts_frm_len, uint, NULL, 0);
4716 module_param(use_continuous_tx_intrs, int, 0);
4717 module_param(rmac_pause_time, int, 0);
4718 module_param(mc_pause_threshold_q0q3, int, 0);
4719 module_param(mc_pause_threshold_q4q7, int, 0);
4720 module_param(shared_splits, int, 0);
4721 module_param(tmac_util_period, int, 0);
4722 module_param(rmac_util_period, int, 0);
4723 #ifndef CONFIG_S2IO_NAPI
4724 module_param(indicate_max_pkts, int, 0);
4725 #endif
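/*
 * Example invocation (values purely illustrative): the parameters above can
 * be supplied at module load time, e.g.
 *
 *     modprobe s2io tx_fifo_num=2 rx_ring_num=2 \
 *             tx_fifo_len=512,512 rx_ring_sz=30,30
 *
 * Anything left unspecified falls back to the defaults filled in by
 * s2io_init_nic() below.
 */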
4726
4727 /**
4728  *  s2io_init_nic - Initialization of the adapter.
4729  *  @pdev : structure containing the PCI related information of the device.
4730  *  @pre: List of PCI devices supported by the driver listed in s2io_tbl.
4731  *  Description:
4732  *  The function initializes an adapter identified by the pci_dev structure.
4733  *  All OS related initialization including memory and device structure and
4734  *  initialization of the device private variable is done. Also the swapper
4735  *  control register is initialized to enable read and write into the I/O
4736  *  registers of the device.
4737  *  Return value:
4738  *  returns 0 on success and negative on failure.
4739  */
4740
4741 static int __devinit
4742 s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
4743 {
4744         nic_t *sp;
4745         struct net_device *dev;
4746         int i, j, ret;
4747         int dma_flag = FALSE;
4748         u32 mac_up, mac_down;
4749         u64 val64 = 0, tmp64 = 0;
4750         XENA_dev_config_t __iomem *bar0 = NULL;
4751         u16 subid;
4752         mac_info_t *mac_control;
4753         struct config_param *config;
4754
4755 #ifdef CONFIG_S2IO_NAPI
4756         DBG_PRINT(ERR_DBG, "NAPI support has been enabled\n");
4757 #endif
4758
4759         if ((ret = pci_enable_device(pdev))) {
4760                 DBG_PRINT(ERR_DBG,
4761                           "s2io_init_nic: pci_enable_device failed\n");
4762                 return ret;
4763         }
4764
4765         if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
4766                 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 64bit DMA\n");
4767                 dma_flag = TRUE;
4768                 if (pci_set_consistent_dma_mask
4769                     (pdev, DMA_64BIT_MASK)) {
4770                         DBG_PRINT(ERR_DBG,
4771                                   "Unable to obtain 64bit DMA for \
4772                                         consistent allocations\n");
4773                         pci_disable_device(pdev);
4774                         return -ENOMEM;
4775                 }
4776         } else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
4777                 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 32bit DMA\n");
4778         } else {
4779                 pci_disable_device(pdev);
4780                 return -ENOMEM;
4781         }
4782
4783         if (pci_request_regions(pdev, s2io_driver_name)) {
4784                 DBG_PRINT(ERR_DBG, "Request Regions failed\n"),
4785                     pci_disable_device(pdev);
4786                 return -ENODEV;
4787         }
4788
4789         dev = alloc_etherdev(sizeof(nic_t));
4790         if (dev == NULL) {
4791                 DBG_PRINT(ERR_DBG, "Device allocation failed\n");
4792                 pci_disable_device(pdev);
4793                 pci_release_regions(pdev);
4794                 return -ENODEV;
4795         }
4796
4797         pci_set_master(pdev);
4798         pci_set_drvdata(pdev, dev);
4799         SET_MODULE_OWNER(dev);
4800         SET_NETDEV_DEV(dev, &pdev->dev);
4801
4802         /*  Private member variable initialized to s2io NIC structure */
4803         sp = dev->priv;
4804         memset(sp, 0, sizeof(nic_t));
4805         sp->dev = dev;
4806         sp->pdev = pdev;
4807         sp->high_dma_flag = dma_flag;
4808         sp->device_enabled_once = FALSE;
4809
4810         /* Initialize some PCI/PCI-X fields of the NIC. */
4811         s2io_init_pci(sp);
4812
4813         /*
4814          * Setting the device configuration parameters.
4815          * Most of these parameters can be specified by the user during
4816          * module insertion as they are module loadable parameters. If
4817          * these parameters are not specified during load time, they
4818          * are initialized with default values.
4819          */
4820         mac_control = &sp->mac_control;
4821         config = &sp->config;
4822
4823         /* Tx side parameters. */
4824         tx_fifo_len[0] = DEFAULT_FIFO_LEN;      /* Default value. */
4825         config->tx_fifo_num = tx_fifo_num;
4826         for (i = 0; i < MAX_TX_FIFOS; i++) {
4827                 config->tx_cfg[i].fifo_len = tx_fifo_len[i];
4828                 config->tx_cfg[i].fifo_priority = i;
4829         }
4830
4831         /* mapping the QoS priority to the configured fifos */
4832         for (i = 0; i < MAX_TX_FIFOS; i++)
4833                 config->fifo_mapping[i] = fifo_map[config->tx_fifo_num][i];
4834
4835         config->tx_intr_type = TXD_INT_TYPE_UTILZ;
4836         for (i = 0; i < config->tx_fifo_num; i++) {
4837                 config->tx_cfg[i].f_no_snoop =
4838                     (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
4839                 if (config->tx_cfg[i].fifo_len < 65) {
4840                         config->tx_intr_type = TXD_INT_TYPE_PER_LIST;
4841                         break;
4842                 }
4843         }
4844         config->max_txds = MAX_SKB_FRAGS;
4845
4846         /* Rx side parameters. */
4847         rx_ring_sz[0] = SMALL_BLK_CNT;  /* Default value. */
4848         config->rx_ring_num = rx_ring_num;
4849         for (i = 0; i < MAX_RX_RINGS; i++) {
4850                 config->rx_cfg[i].num_rxd = rx_ring_sz[i] *
4851                     (MAX_RXDS_PER_BLOCK + 1);
4852                 config->rx_cfg[i].ring_priority = i;
4853         }
4854
4855         for (i = 0; i < rx_ring_num; i++) {
4856                 config->rx_cfg[i].ring_org = RING_ORG_BUFF1;
4857                 config->rx_cfg[i].f_no_snoop =
4858                     (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
4859         }
4860
4861         /*  Setting Mac Control parameters */
4862         mac_control->rmac_pause_time = rmac_pause_time;
4863         mac_control->mc_pause_threshold_q0q3 = mc_pause_threshold_q0q3;
4864         mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7;
4865
4866
4867         /* Initialize Ring buffer parameters. */
4868         for (i = 0; i < config->rx_ring_num; i++)
4869                 atomic_set(&sp->rx_bufs_left[i], 0);
4870
4871         /* Initialize the number of ISRs currently running */
4872         atomic_set(&sp->isr_cnt, 0);
4873
4874         /*  initialize the shared memory used by the NIC and the host */
4875         if (init_shared_mem(sp)) {
4876                 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n",
4877                           dev->name);
4878                 ret = -ENOMEM;
4879                 goto mem_alloc_failed;
4880         }
4881
4882         sp->bar0 = ioremap(pci_resource_start(pdev, 0),
4883                                      pci_resource_len(pdev, 0));
4884         if (!sp->bar0) {
4885                 DBG_PRINT(ERR_DBG, "%s: S2IO: cannot remap io mem1\n",
4886                           dev->name);
4887                 ret = -ENOMEM;
4888                 goto bar0_remap_failed;
4889         }
4890
4891         sp->bar1 = ioremap(pci_resource_start(pdev, 2),
4892                                      pci_resource_len(pdev, 2));
4893         if (!sp->bar1) {
4894                 DBG_PRINT(ERR_DBG, "%s: S2IO: cannot remap io mem2\n",
4895                           dev->name);
4896                 ret = -ENOMEM;
4897                 goto bar1_remap_failed;
4898         }
4899
4900         dev->irq = pdev->irq;
4901         dev->base_addr = (unsigned long) sp->bar0;
4902
4903         /* Initializing the BAR1 address as the start of the FIFO pointer. */
4904         for (j = 0; j < MAX_TX_FIFOS; j++) {
4905                 mac_control->tx_FIFO_start[j] = (TxFIFO_element_t __iomem *)
4906                     (sp->bar1 + (j * 0x00020000));
4907         }
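        /*
         * The 0x00020000 stride above assumes that each Tx FIFO's TxDL
         * pointer (doorbell) window occupies a 128 KB region in BAR1.
         */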
4908
4909         /*  Driver entry points */
4910         dev->open = &s2io_open;
4911         dev->stop = &s2io_close;
4912         dev->hard_start_xmit = &s2io_xmit;
4913         dev->get_stats = &s2io_get_stats;
4914         dev->set_multicast_list = &s2io_set_multicast;
4915         dev->do_ioctl = &s2io_ioctl;
4916         dev->change_mtu = &s2io_change_mtu;
4917         SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
4918
4919         /*
4920          * will use eth_mac_addr() for  dev->set_mac_address
4921          * mac address will be set every time dev->open() is called
4922          */
4923 #if defined(CONFIG_S2IO_NAPI)
4924         dev->poll = s2io_poll;
4925         dev->weight = 32;
4926 #endif
4927
4928         dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
4929         if (sp->high_dma_flag == TRUE)
4930                 dev->features |= NETIF_F_HIGHDMA;
4931 #ifdef NETIF_F_TSO
4932         dev->features |= NETIF_F_TSO;
4933 #endif
4934
4935         dev->tx_timeout = &s2io_tx_watchdog;
4936         dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
4937         INIT_WORK(&sp->rst_timer_task,
4938                   (void (*)(void *)) s2io_restart_nic, dev);
4939         INIT_WORK(&sp->set_link_task,
4940                   (void (*)(void *)) s2io_set_link, sp);
4941
4942         pci_save_state(sp->pdev);
4943
4944         /* Setting swapper control on the NIC, for proper reset operation */
4945         if (s2io_set_swapper(sp)) {
4946                 DBG_PRINT(ERR_DBG, "%s:swapper settings are wrong\n",
4947                           dev->name);
4948                 ret = -EAGAIN;
4949                 goto set_swap_failed;
4950         }
4951
4952         /*
4953          * Fix for all "FFs" MAC address problems observed on
4954          * Alpha platforms
4955          */
4956         fix_mac_address(sp);
4957         s2io_reset(sp);
4958
4959         /*
4960          * MAC address initialization.
4961          * For now only one mac address will be read and used.
4962          */
4963         bar0 = sp->bar0;
4964         val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4965             RMAC_ADDR_CMD_MEM_OFFSET(0 + MAC_MAC_ADDR_START_OFFSET);
4966         writeq(val64, &bar0->rmac_addr_cmd_mem);
4967         wait_for_cmd_complete(sp);
4968
4969         tmp64 = readq(&bar0->rmac_addr_data0_mem);
4970         mac_down = (u32) tmp64;
4971         mac_up = (u32) (tmp64 >> 32);
4972
4973         memset(sp->def_mac_addr[0].mac_addr, 0, ETH_ALEN);
4974
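        /*
         * The factory MAC address is assumed to come back in the most
         * significant 48 bits of rmac_addr_data0_mem, most significant byte
         * first; the shifts below pick the individual bytes out of the upper
         * (mac_up) and lower (mac_down) 32-bit halves accordingly.
         */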
4975         sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up);
4976         sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8);
4977         sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16);
4978         sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_up >> 24);
4979         sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
4980         sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);
4981
4982         DBG_PRINT(INIT_DBG,
4983                   "DEFAULT MAC ADDR:0x%02x-%02x-%02x-%02x-%02x-%02x\n",
4984                   sp->def_mac_addr[0].mac_addr[0],
4985                   sp->def_mac_addr[0].mac_addr[1],
4986                   sp->def_mac_addr[0].mac_addr[2],
4987                   sp->def_mac_addr[0].mac_addr[3],
4988                   sp->def_mac_addr[0].mac_addr[4],
4989                   sp->def_mac_addr[0].mac_addr[5]);
4990
4991         /*  Set the factory defined MAC address initially   */
4992         dev->addr_len = ETH_ALEN;
4993         memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);
4994
4995         /*
4996          * Initialize the tasklet status and link state flags
4997          * and the card state parameter
4998          */
4999         atomic_set(&(sp->card_state), 0);
5000         sp->tasklet_status = 0;
5001         sp->link_state = 0;
5002
5003         /* Initialize spinlocks */
5004         spin_lock_init(&sp->tx_lock);
5005 #ifndef CONFIG_S2IO_NAPI
5006         spin_lock_init(&sp->put_lock);
5007 #endif
5008         spin_lock_init(&sp->rx_lock);
5009
5010         /*
5011          * SXE-002: Configure link and activity LED to init state
5012          * on driver load.
5013          */
5014         subid = sp->pdev->subsystem_device;
5015         if ((subid & 0xFF) >= 0x07) {
5016                 val64 = readq(&bar0->gpio_control);
5017                 val64 |= 0x0000800000000000ULL;
5018                 writeq(val64, &bar0->gpio_control);
5019                 val64 = 0x0411040400000000ULL;
5020                 writeq(val64, (void __iomem *) bar0 + 0x2700);
5021                 val64 = readq(&bar0->gpio_control);
5022         }
5023
5024         sp->rx_csum = 1;        /* Rx chksum verify enabled by default */
5025
5026         if (register_netdev(dev)) {
5027                 DBG_PRINT(ERR_DBG, "Device registration failed\n");
5028                 ret = -ENODEV;
5029                 goto register_failed;
5030         }
5031
5032         /* Initialize device name */
5033         strcpy(sp->name, dev->name);
5034         strcat(sp->name, ": Neterion Xframe I 10GbE adapter");
5035
5036         /*
5037          * Make Link state as off at this point, when the Link change
5038          * interrupt comes the state will be automatically changed to
5039          * the right state.
5040          */
5041         netif_carrier_off(dev);
5042
5043         return 0;
5044
5045       register_failed:
5046       set_swap_failed:
5047         iounmap(sp->bar1);
5048       bar1_remap_failed:
5049         iounmap(sp->bar0);
5050       bar0_remap_failed:
5051       mem_alloc_failed:
5052         free_shared_mem(sp);
5053         pci_disable_device(pdev);
5054         pci_release_regions(pdev);
5055         pci_set_drvdata(pdev, NULL);
5056         free_netdev(dev);
5057
5058         return ret;
5059 }
5060
5061 /**
5062  * s2io_rem_nic - Free the PCI device
5063  * @pdev: structure containing the PCI related information of the device.
5064  * Description: This function is called by the PCI subsystem to release a
5065  * PCI device and free up all resources held by the device. This could
5066  * be in response to a Hot plug event or when the driver is to be removed
5067  * from memory.
5068  */
5069
5070 static void __devexit s2io_rem_nic(struct pci_dev *pdev)
5071 {
5072         struct net_device *dev =
5073             (struct net_device *) pci_get_drvdata(pdev);
5074         nic_t *sp;
5075
5076         if (dev == NULL) {
5077                 DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n");
5078                 return;
5079         }
5080
5081         sp = dev->priv;
5082         unregister_netdev(dev);
5083
5084         free_shared_mem(sp);
5085         iounmap(sp->bar0);
5086         iounmap(sp->bar1);
5087         pci_disable_device(pdev);
5088         pci_release_regions(pdev);
5089         pci_set_drvdata(pdev, NULL);
5090         free_netdev(dev);
5091 }
5092
5093 /**
5094  * s2io_starter - Entry point for the driver
5095  * Description: This function is the entry point for the driver. It verifies
5096  * the module loadable parameters and initializes PCI configuration space.
5097  */
5098
5099 int __init s2io_starter(void)
5100 {
5101         return pci_module_init(&s2io_driver);
5102 }
5103
5104 /**
5105  * s2io_closer - Cleanup routine for the driver
5106  * Description: This function is the cleanup routine for the driver. It unregisters the driver.
5107  */
5108
5109 void s2io_closer(void)
5110 {
5111         pci_unregister_driver(&s2io_driver);
5112         DBG_PRINT(INIT_DBG, "cleanup done\n");
5113 }
5114
5115 module_init(s2io_starter);
5116 module_exit(s2io_closer);