/*
 * drivers/net/s2io.c
 * (last change in this view: "S2io: Fix IOMMU overflow checking.")
 */
1 /************************************************************************
2  * s2io.c: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC
3  * Copyright(c) 2002-2007 Neterion Inc.
4
5  * This software may be used and distributed according to the terms of
6  * the GNU General Public License (GPL), incorporated herein by reference.
7  * Drivers based on or derived from this code fall under the GPL and must
8  * retain the authorship, copyright and license notice.  This file is not
9  * a complete program and may only be used when the entire operating
10  * system is licensed under the GPL.
11  * See the file COPYING in this distribution for more information.
12  *
13  * Credits:
14  * Jeff Garzik          : For pointing out the improper error condition
15  *                        check in the s2io_xmit routine and also some
16  *                        issues in the Tx watch dog function. Also for
17  *                        patiently answering all those innumerable
18  *                        questions regarding the 2.6 porting issues.
19  * Stephen Hemminger    : Providing proper 2.6 porting mechanism for some
20  *                        macros available only in 2.6 Kernel.
21  * Francois Romieu      : For pointing out all code part that were
22  *                        deprecated and also styling related comments.
23  * Grant Grundler       : For helping me get rid of some Architecture
24  *                        dependent code.
25  * Christopher Hellwig  : Some more 2.6 specific issues in the driver.
26  *
27  * The module loadable parameters that are supported by the driver and a brief
28  * explanation of all the variables.
29  *
30  * rx_ring_num : This can be used to program the number of receive rings used
31  * in the driver.
32  * rx_ring_sz: This defines the number of receive blocks each ring can have.
33  *     This is also an array of size 8.
34  * rx_ring_mode: This defines the operation mode of all 8 rings. The valid
35  *              values are 1, 2.
36  * tx_fifo_num: This defines the number of Tx FIFOs that are used in the driver.
37  * tx_fifo_len: This too is an array of 8. Each element defines the number of
38  * Tx descriptors that can be associated with each corresponding FIFO.
39  * intr_type: This defines the type of interrupt. The values can be 0(INTA),
40  *     2(MSI_X). Default value is '2(MSI_X)'
41  * lro_enable: Specifies whether to enable Large Receive Offload (LRO) or not.
42  *     Possible values '1' for enable '0' for disable. Default is '0'
43  * lro_max_pkts: This parameter defines maximum number of packets can be
44  *     aggregated as a single large packet
45  * napi: This parameter used to enable/disable NAPI (polling Rx)
46  *     Possible values '1' for enable and '0' for disable. Default is '1'
47  * ufo: This parameter used to enable/disable UDP Fragmentation Offload(UFO)
48  *      Possible values '1' for enable and '0' for disable. Default is '0'
49  * vlan_tag_strip: This can be used to enable or disable vlan stripping.
50  *                 Possible values '1' for enable , '0' for disable.
51  *                 Default is '2' - which means disable in promisc mode
52  *                 and enable in non-promiscuous mode.
53  * multiq: This parameter used to enable/disable MULTIQUEUE support.
54  *      Possible values '1' for enable and '0' for disable. Default is '0'
55  ************************************************************************/
56
57 #include <linux/module.h>
58 #include <linux/types.h>
59 #include <linux/errno.h>
60 #include <linux/ioport.h>
61 #include <linux/pci.h>
62 #include <linux/dma-mapping.h>
63 #include <linux/kernel.h>
64 #include <linux/netdevice.h>
65 #include <linux/etherdevice.h>
66 #include <linux/skbuff.h>
67 #include <linux/init.h>
68 #include <linux/delay.h>
69 #include <linux/stddef.h>
70 #include <linux/ioctl.h>
71 #include <linux/timex.h>
72 #include <linux/ethtool.h>
73 #include <linux/workqueue.h>
74 #include <linux/if_vlan.h>
75 #include <linux/ip.h>
76 #include <linux/tcp.h>
77 #include <net/tcp.h>
78
79 #include <asm/system.h>
80 #include <asm/uaccess.h>
81 #include <asm/io.h>
82 #include <asm/div64.h>
83 #include <asm/irq.h>
84
85 /* local include */
86 #include "s2io.h"
87 #include "s2io-regs.h"
88
89 #define DRV_VERSION "2.0.26.24"
90
91 /* S2io Driver name & version. */
92 static char s2io_driver_name[] = "Neterion";
93 static char s2io_driver_version[] = DRV_VERSION;
94
95 static int rxd_size[2] = {32,48};
96 static int rxd_count[2] = {127,85};
97
98 static inline int RXD_IS_UP2DT(struct RxD_t *rxdp)
99 {
100         int ret;
101
102         ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
103                 (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));
104
105         return ret;
106 }
107
108 /*
109  * Cards with following subsystem_id have a link state indication
110  * problem, 600B, 600C, 600D, 640B, 640C and 640D.
111  * macro below identifies these cards given the subsystem_id.
112  */
/*
 * Evaluates to 1 for Xframe-I cards whose subsystem id falls in the
 * faulty ranges listed above, 0 otherwise.  Arguments and the whole
 * expansion are parenthesized so the macro composes safely inside
 * larger expressions (e.g. under '!' or '==').
 */
#define CARDS_WITH_FAULTY_LINK_INDICATORS(dev_type, subid) \
        (((dev_type) == XFRAME_I_DEVICE) ?                      \
                (((((subid) >= 0x600B) && ((subid) <= 0x600D)) || \
                  (((subid) >= 0x640B) && ((subid) <= 0x640D))) ? 1 : 0) : 0)
117
/*
 * Link is considered up when neither the remote-fault nor the
 * local-fault bit is set in the adapter-status value.  The argument is
 * parenthesized so expression arguments bind correctly under '&'.
 */
#define LINK_IS_UP(val64) (!((val64) & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
                                        ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
120
/*
 * Return non-zero when the adapter has been marked fully initialized
 * (__S2IO_STATE_CARD_UP set in sp->state), zero otherwise.
 */
static inline int is_s2io_card_up(const struct s2io_nic * sp)
{
        return test_bit(__S2IO_STATE_CARD_UP, &sp->state);
}
125
126 /* Ethtool related variables and Macros. */
127 static char s2io_gstrings[][ETH_GSTRING_LEN] = {
128         "Register test\t(offline)",
129         "Eeprom test\t(offline)",
130         "Link test\t(online)",
131         "RLDRAM test\t(offline)",
132         "BIST Test\t(offline)"
133 };
134
135 static char ethtool_xena_stats_keys[][ETH_GSTRING_LEN] = {
136         {"tmac_frms"},
137         {"tmac_data_octets"},
138         {"tmac_drop_frms"},
139         {"tmac_mcst_frms"},
140         {"tmac_bcst_frms"},
141         {"tmac_pause_ctrl_frms"},
142         {"tmac_ttl_octets"},
143         {"tmac_ucst_frms"},
144         {"tmac_nucst_frms"},
145         {"tmac_any_err_frms"},
146         {"tmac_ttl_less_fb_octets"},
147         {"tmac_vld_ip_octets"},
148         {"tmac_vld_ip"},
149         {"tmac_drop_ip"},
150         {"tmac_icmp"},
151         {"tmac_rst_tcp"},
152         {"tmac_tcp"},
153         {"tmac_udp"},
154         {"rmac_vld_frms"},
155         {"rmac_data_octets"},
156         {"rmac_fcs_err_frms"},
157         {"rmac_drop_frms"},
158         {"rmac_vld_mcst_frms"},
159         {"rmac_vld_bcst_frms"},
160         {"rmac_in_rng_len_err_frms"},
161         {"rmac_out_rng_len_err_frms"},
162         {"rmac_long_frms"},
163         {"rmac_pause_ctrl_frms"},
164         {"rmac_unsup_ctrl_frms"},
165         {"rmac_ttl_octets"},
166         {"rmac_accepted_ucst_frms"},
167         {"rmac_accepted_nucst_frms"},
168         {"rmac_discarded_frms"},
169         {"rmac_drop_events"},
170         {"rmac_ttl_less_fb_octets"},
171         {"rmac_ttl_frms"},
172         {"rmac_usized_frms"},
173         {"rmac_osized_frms"},
174         {"rmac_frag_frms"},
175         {"rmac_jabber_frms"},
176         {"rmac_ttl_64_frms"},
177         {"rmac_ttl_65_127_frms"},
178         {"rmac_ttl_128_255_frms"},
179         {"rmac_ttl_256_511_frms"},
180         {"rmac_ttl_512_1023_frms"},
181         {"rmac_ttl_1024_1518_frms"},
182         {"rmac_ip"},
183         {"rmac_ip_octets"},
184         {"rmac_hdr_err_ip"},
185         {"rmac_drop_ip"},
186         {"rmac_icmp"},
187         {"rmac_tcp"},
188         {"rmac_udp"},
189         {"rmac_err_drp_udp"},
190         {"rmac_xgmii_err_sym"},
191         {"rmac_frms_q0"},
192         {"rmac_frms_q1"},
193         {"rmac_frms_q2"},
194         {"rmac_frms_q3"},
195         {"rmac_frms_q4"},
196         {"rmac_frms_q5"},
197         {"rmac_frms_q6"},
198         {"rmac_frms_q7"},
199         {"rmac_full_q0"},
200         {"rmac_full_q1"},
201         {"rmac_full_q2"},
202         {"rmac_full_q3"},
203         {"rmac_full_q4"},
204         {"rmac_full_q5"},
205         {"rmac_full_q6"},
206         {"rmac_full_q7"},
207         {"rmac_pause_cnt"},
208         {"rmac_xgmii_data_err_cnt"},
209         {"rmac_xgmii_ctrl_err_cnt"},
210         {"rmac_accepted_ip"},
211         {"rmac_err_tcp"},
212         {"rd_req_cnt"},
213         {"new_rd_req_cnt"},
214         {"new_rd_req_rtry_cnt"},
215         {"rd_rtry_cnt"},
216         {"wr_rtry_rd_ack_cnt"},
217         {"wr_req_cnt"},
218         {"new_wr_req_cnt"},
219         {"new_wr_req_rtry_cnt"},
220         {"wr_rtry_cnt"},
221         {"wr_disc_cnt"},
222         {"rd_rtry_wr_ack_cnt"},
223         {"txp_wr_cnt"},
224         {"txd_rd_cnt"},
225         {"txd_wr_cnt"},
226         {"rxd_rd_cnt"},
227         {"rxd_wr_cnt"},
228         {"txf_rd_cnt"},
229         {"rxf_wr_cnt"}
230 };
231
232 static char ethtool_enhanced_stats_keys[][ETH_GSTRING_LEN] = {
233         {"rmac_ttl_1519_4095_frms"},
234         {"rmac_ttl_4096_8191_frms"},
235         {"rmac_ttl_8192_max_frms"},
236         {"rmac_ttl_gt_max_frms"},
237         {"rmac_osized_alt_frms"},
238         {"rmac_jabber_alt_frms"},
239         {"rmac_gt_max_alt_frms"},
240         {"rmac_vlan_frms"},
241         {"rmac_len_discard"},
242         {"rmac_fcs_discard"},
243         {"rmac_pf_discard"},
244         {"rmac_da_discard"},
245         {"rmac_red_discard"},
246         {"rmac_rts_discard"},
247         {"rmac_ingm_full_discard"},
248         {"link_fault_cnt"}
249 };
250
251 static char ethtool_driver_stats_keys[][ETH_GSTRING_LEN] = {
252         {"\n DRIVER STATISTICS"},
253         {"single_bit_ecc_errs"},
254         {"double_bit_ecc_errs"},
255         {"parity_err_cnt"},
256         {"serious_err_cnt"},
257         {"soft_reset_cnt"},
258         {"fifo_full_cnt"},
259         {"ring_0_full_cnt"},
260         {"ring_1_full_cnt"},
261         {"ring_2_full_cnt"},
262         {"ring_3_full_cnt"},
263         {"ring_4_full_cnt"},
264         {"ring_5_full_cnt"},
265         {"ring_6_full_cnt"},
266         {"ring_7_full_cnt"},
267         {"alarm_transceiver_temp_high"},
268         {"alarm_transceiver_temp_low"},
269         {"alarm_laser_bias_current_high"},
270         {"alarm_laser_bias_current_low"},
271         {"alarm_laser_output_power_high"},
272         {"alarm_laser_output_power_low"},
273         {"warn_transceiver_temp_high"},
274         {"warn_transceiver_temp_low"},
275         {"warn_laser_bias_current_high"},
276         {"warn_laser_bias_current_low"},
277         {"warn_laser_output_power_high"},
278         {"warn_laser_output_power_low"},
279         {"lro_aggregated_pkts"},
280         {"lro_flush_both_count"},
281         {"lro_out_of_sequence_pkts"},
282         {"lro_flush_due_to_max_pkts"},
283         {"lro_avg_aggr_pkts"},
284         {"mem_alloc_fail_cnt"},
285         {"pci_map_fail_cnt"},
286         {"watchdog_timer_cnt"},
287         {"mem_allocated"},
288         {"mem_freed"},
289         {"link_up_cnt"},
290         {"link_down_cnt"},
291         {"link_up_time"},
292         {"link_down_time"},
293         {"tx_tcode_buf_abort_cnt"},
294         {"tx_tcode_desc_abort_cnt"},
295         {"tx_tcode_parity_err_cnt"},
296         {"tx_tcode_link_loss_cnt"},
297         {"tx_tcode_list_proc_err_cnt"},
298         {"rx_tcode_parity_err_cnt"},
299         {"rx_tcode_abort_cnt"},
300         {"rx_tcode_parity_abort_cnt"},
301         {"rx_tcode_rda_fail_cnt"},
302         {"rx_tcode_unkn_prot_cnt"},
303         {"rx_tcode_fcs_err_cnt"},
304         {"rx_tcode_buf_size_err_cnt"},
305         {"rx_tcode_rxd_corrupt_cnt"},
306         {"rx_tcode_unkn_err_cnt"},
307         {"tda_err_cnt"},
308         {"pfc_err_cnt"},
309         {"pcc_err_cnt"},
310         {"tti_err_cnt"},
311         {"tpa_err_cnt"},
312         {"sm_err_cnt"},
313         {"lso_err_cnt"},
314         {"mac_tmac_err_cnt"},
315         {"mac_rmac_err_cnt"},
316         {"xgxs_txgxs_err_cnt"},
317         {"xgxs_rxgxs_err_cnt"},
318         {"rc_err_cnt"},
319         {"prc_pcix_err_cnt"},
320         {"rpa_err_cnt"},
321         {"rda_err_cnt"},
322         {"rti_err_cnt"},
323         {"mc_err_cnt"}
324 };
325
326 #define S2IO_XENA_STAT_LEN      ARRAY_SIZE(ethtool_xena_stats_keys)
327 #define S2IO_ENHANCED_STAT_LEN  ARRAY_SIZE(ethtool_enhanced_stats_keys)
328 #define S2IO_DRIVER_STAT_LEN    ARRAY_SIZE(ethtool_driver_stats_keys)
329
330 #define XFRAME_I_STAT_LEN (S2IO_XENA_STAT_LEN + S2IO_DRIVER_STAT_LEN )
331 #define XFRAME_II_STAT_LEN (XFRAME_I_STAT_LEN + S2IO_ENHANCED_STAT_LEN )
332
333 #define XFRAME_I_STAT_STRINGS_LEN ( XFRAME_I_STAT_LEN * ETH_GSTRING_LEN )
334 #define XFRAME_II_STAT_STRINGS_LEN ( XFRAME_II_STAT_LEN * ETH_GSTRING_LEN )
335
336 #define S2IO_TEST_LEN   ARRAY_SIZE(s2io_gstrings)
337 #define S2IO_STRINGS_LEN        S2IO_TEST_LEN * ETH_GSTRING_LEN
338
/*
 * Initialize @timer with handler @handle and callback data @arg, then
 * arm it to fire @exp jiffies from now.  Wrapped in do { } while (0)
 * so the macro behaves as a single statement (safe inside an
 * unbraced if/else); arguments are parenthesized for safe expansion.
 */
#define S2IO_TIMER_CONF(timer, handle, arg, exp)                \
        do {                                                    \
                init_timer(&(timer));                           \
                (timer).function = (handle);                    \
                (timer).data = (unsigned long)(arg);            \
                mod_timer(&(timer), (jiffies + (exp)));         \
        } while (0)
344
345 /* copy mac addr to def_mac_addr array */
346 static void do_s2io_copy_mac_addr(struct s2io_nic *sp, int offset, u64 mac_addr)
347 {
348         sp->def_mac_addr[offset].mac_addr[5] = (u8) (mac_addr);
349         sp->def_mac_addr[offset].mac_addr[4] = (u8) (mac_addr >> 8);
350         sp->def_mac_addr[offset].mac_addr[3] = (u8) (mac_addr >> 16);
351         sp->def_mac_addr[offset].mac_addr[2] = (u8) (mac_addr >> 24);
352         sp->def_mac_addr[offset].mac_addr[1] = (u8) (mac_addr >> 32);
353         sp->def_mac_addr[offset].mac_addr[0] = (u8) (mac_addr >> 40);
354 }
355 /* Add the vlan */
356 static void s2io_vlan_rx_register(struct net_device *dev,
357                                         struct vlan_group *grp)
358 {
359         int i;
360         struct s2io_nic *nic = dev->priv;
361         unsigned long flags[MAX_TX_FIFOS];
362         struct mac_info *mac_control = &nic->mac_control;
363         struct config_param *config = &nic->config;
364
365         for (i = 0; i < config->tx_fifo_num; i++)
366                 spin_lock_irqsave(&mac_control->fifos[i].tx_lock, flags[i]);
367
368         nic->vlgrp = grp;
369         for (i = config->tx_fifo_num - 1; i >= 0; i--)
370                 spin_unlock_irqrestore(&mac_control->fifos[i].tx_lock,
371                                 flags[i]);
372 }
373
374 /* A flag indicating whether 'RX_PA_CFG_STRIP_VLAN_TAG' bit is set or not */
375 static int vlan_strip_flag;
376
377 /* Unregister the vlan */
378 static void s2io_vlan_rx_kill_vid(struct net_device *dev, unsigned long vid)
379 {
380         int i;
381         struct s2io_nic *nic = dev->priv;
382         unsigned long flags[MAX_TX_FIFOS];
383         struct mac_info *mac_control = &nic->mac_control;
384         struct config_param *config = &nic->config;
385
386         for (i = 0; i < config->tx_fifo_num; i++)
387                 spin_lock_irqsave(&mac_control->fifos[i].tx_lock, flags[i]);
388
389         if (nic->vlgrp)
390                 vlan_group_set_device(nic->vlgrp, vid, NULL);
391
392         for (i = config->tx_fifo_num - 1; i >= 0; i--)
393                 spin_unlock_irqrestore(&mac_control->fifos[i].tx_lock,
394                         flags[i]);
395 }
396
397 /*
398  * Constants to be programmed into the Xena's registers, to configure
399  * the XAUI.
400  */
401
402 #define END_SIGN        0x0
403 static const u64 herc_act_dtx_cfg[] = {
404         /* Set address */
405         0x8000051536750000ULL, 0x80000515367500E0ULL,
406         /* Write data */
407         0x8000051536750004ULL, 0x80000515367500E4ULL,
408         /* Set address */
409         0x80010515003F0000ULL, 0x80010515003F00E0ULL,
410         /* Write data */
411         0x80010515003F0004ULL, 0x80010515003F00E4ULL,
412         /* Set address */
413         0x801205150D440000ULL, 0x801205150D4400E0ULL,
414         /* Write data */
415         0x801205150D440004ULL, 0x801205150D4400E4ULL,
416         /* Set address */
417         0x80020515F2100000ULL, 0x80020515F21000E0ULL,
418         /* Write data */
419         0x80020515F2100004ULL, 0x80020515F21000E4ULL,
420         /* Done */
421         END_SIGN
422 };
423
424 static const u64 xena_dtx_cfg[] = {
425         /* Set address */
426         0x8000051500000000ULL, 0x80000515000000E0ULL,
427         /* Write data */
428         0x80000515D9350004ULL, 0x80000515D93500E4ULL,
429         /* Set address */
430         0x8001051500000000ULL, 0x80010515000000E0ULL,
431         /* Write data */
432         0x80010515001E0004ULL, 0x80010515001E00E4ULL,
433         /* Set address */
434         0x8002051500000000ULL, 0x80020515000000E0ULL,
435         /* Write data */
436         0x80020515F2100004ULL, 0x80020515F21000E4ULL,
437         END_SIGN
438 };
439
440 /*
441  * Constants for Fixing the MacAddress problem seen mostly on
442  * Alpha machines.
443  */
444 static const u64 fix_mac[] = {
445         0x0060000000000000ULL, 0x0060600000000000ULL,
446         0x0040600000000000ULL, 0x0000600000000000ULL,
447         0x0020600000000000ULL, 0x0060600000000000ULL,
448         0x0020600000000000ULL, 0x0060600000000000ULL,
449         0x0020600000000000ULL, 0x0060600000000000ULL,
450         0x0020600000000000ULL, 0x0060600000000000ULL,
451         0x0020600000000000ULL, 0x0060600000000000ULL,
452         0x0020600000000000ULL, 0x0060600000000000ULL,
453         0x0020600000000000ULL, 0x0060600000000000ULL,
454         0x0020600000000000ULL, 0x0060600000000000ULL,
455         0x0020600000000000ULL, 0x0060600000000000ULL,
456         0x0020600000000000ULL, 0x0060600000000000ULL,
457         0x0020600000000000ULL, 0x0000600000000000ULL,
458         0x0040600000000000ULL, 0x0060600000000000ULL,
459         END_SIGN
460 };
461
462 MODULE_LICENSE("GPL");
463 MODULE_VERSION(DRV_VERSION);
464
465
466 /* Module Loadable parameters. */
467 S2IO_PARM_INT(tx_fifo_num, FIFO_DEFAULT_NUM);
468 S2IO_PARM_INT(rx_ring_num, 1);
469 S2IO_PARM_INT(multiq, 0);
470 S2IO_PARM_INT(rx_ring_mode, 1);
471 S2IO_PARM_INT(use_continuous_tx_intrs, 1);
472 S2IO_PARM_INT(rmac_pause_time, 0x100);
473 S2IO_PARM_INT(mc_pause_threshold_q0q3, 187);
474 S2IO_PARM_INT(mc_pause_threshold_q4q7, 187);
475 S2IO_PARM_INT(shared_splits, 0);
476 S2IO_PARM_INT(tmac_util_period, 5);
477 S2IO_PARM_INT(rmac_util_period, 5);
478 S2IO_PARM_INT(l3l4hdr_size, 128);
479 /* 0 is no steering, 1 is Priority steering, 2 is Default steering */
480 S2IO_PARM_INT(tx_steering_type, TX_DEFAULT_STEERING);
481 /* Frequency of Rx desc syncs expressed as power of 2 */
482 S2IO_PARM_INT(rxsync_frequency, 3);
483 /* Interrupt type. Values can be 0(INTA), 2(MSI_X) */
484 S2IO_PARM_INT(intr_type, 2);
485 /* Large receive offload feature */
486 static unsigned int lro_enable;
487 module_param_named(lro, lro_enable, uint, 0);
488
489 /* Max pkts to be aggregated by LRO at one time. If not specified,
490  * aggregation happens until we hit max IP pkt size(64K)
491  */
492 S2IO_PARM_INT(lro_max_pkts, 0xFFFF);
493 S2IO_PARM_INT(indicate_max_pkts, 0);
494
495 S2IO_PARM_INT(napi, 1);
496 S2IO_PARM_INT(ufo, 0);
497 S2IO_PARM_INT(vlan_tag_strip, NO_STRIP_IN_PROMISC);
498
499 static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
500     {DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN};
501 static unsigned int rx_ring_sz[MAX_RX_RINGS] =
502     {[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT};
503 static unsigned int rts_frm_len[MAX_RX_RINGS] =
504     {[0 ...(MAX_RX_RINGS - 1)] = 0 };
505
506 module_param_array(tx_fifo_len, uint, NULL, 0);
507 module_param_array(rx_ring_sz, uint, NULL, 0);
508 module_param_array(rts_frm_len, uint, NULL, 0);
509
510 /*
511  * S2IO device table.
512  * This table lists all the devices that this driver supports.
513  */
514 static struct pci_device_id s2io_tbl[] __devinitdata = {
515         {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
516          PCI_ANY_ID, PCI_ANY_ID},
517         {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
518          PCI_ANY_ID, PCI_ANY_ID},
519         {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
520          PCI_ANY_ID, PCI_ANY_ID},
521         {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
522          PCI_ANY_ID, PCI_ANY_ID},
523         {0,}
524 };
525
526 MODULE_DEVICE_TABLE(pci, s2io_tbl);
527
528 static struct pci_error_handlers s2io_err_handler = {
529         .error_detected = s2io_io_error_detected,
530         .slot_reset = s2io_io_slot_reset,
531         .resume = s2io_io_resume,
532 };
533
534 static struct pci_driver s2io_driver = {
535       .name = "S2IO",
536       .id_table = s2io_tbl,
537       .probe = s2io_init_nic,
538       .remove = __devexit_p(s2io_rem_nic),
539       .err_handler = &s2io_err_handler,
540 };
541
/* A simplifier macro used both by init and free shared_mem Fns().
 * Number of pages needed to hold @len items at @per_each items per
 * page, rounding up.  Arguments are fully parenthesized so expression
 * arguments (e.g. a + b) expand correctly.
 */
#define TXD_MEM_PAGE_CNT(len, per_each) (((len) + (per_each) - 1) / (per_each))
544
545 /* netqueue manipulation helper functions */
546 static inline void s2io_stop_all_tx_queue(struct s2io_nic *sp)
547 {
548         int i;
549         if (sp->config.multiq) {
550                 for (i = 0; i < sp->config.tx_fifo_num; i++)
551                         netif_stop_subqueue(sp->dev, i);
552         } else {
553                 for (i = 0; i < sp->config.tx_fifo_num; i++)
554                         sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_STOP;
555                 netif_stop_queue(sp->dev);
556         }
557 }
558
559 static inline void s2io_stop_tx_queue(struct s2io_nic *sp, int fifo_no)
560 {
561         if (sp->config.multiq)
562                 netif_stop_subqueue(sp->dev, fifo_no);
563         else {
564                 sp->mac_control.fifos[fifo_no].queue_state =
565                         FIFO_QUEUE_STOP;
566                 netif_stop_queue(sp->dev);
567         }
568 }
569
570 static inline void s2io_start_all_tx_queue(struct s2io_nic *sp)
571 {
572         int i;
573         if (sp->config.multiq) {
574                 for (i = 0; i < sp->config.tx_fifo_num; i++)
575                         netif_start_subqueue(sp->dev, i);
576         } else {
577                 for (i = 0; i < sp->config.tx_fifo_num; i++)
578                         sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START;
579                 netif_start_queue(sp->dev);
580         }
581 }
582
583 static inline void s2io_start_tx_queue(struct s2io_nic *sp, int fifo_no)
584 {
585         if (sp->config.multiq)
586                 netif_start_subqueue(sp->dev, fifo_no);
587         else {
588                 sp->mac_control.fifos[fifo_no].queue_state =
589                         FIFO_QUEUE_START;
590                 netif_start_queue(sp->dev);
591         }
592 }
593
594 static inline void s2io_wake_all_tx_queue(struct s2io_nic *sp)
595 {
596         int i;
597         if (sp->config.multiq) {
598                 for (i = 0; i < sp->config.tx_fifo_num; i++)
599                         netif_wake_subqueue(sp->dev, i);
600         } else {
601                 for (i = 0; i < sp->config.tx_fifo_num; i++)
602                         sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START;
603                 netif_wake_queue(sp->dev);
604         }
605 }
606
607 static inline void s2io_wake_tx_queue(
608         struct fifo_info *fifo, int cnt, u8 multiq)
609 {
610
611         if (multiq) {
612                 if (cnt && __netif_subqueue_stopped(fifo->dev, fifo->fifo_no))
613                         netif_wake_subqueue(fifo->dev, fifo->fifo_no);
614         } else if (cnt && (fifo->queue_state == FIFO_QUEUE_STOP)) {
615                 if (netif_queue_stopped(fifo->dev)) {
616                         fifo->queue_state = FIFO_QUEUE_START;
617                         netif_wake_queue(fifo->dev);
618                 }
619         }
620 }
621
622 /**
623  * init_shared_mem - Allocation and Initialization of Memory
624  * @nic: Device private variable.
625  * Description: The function allocates all the memory areas shared
626  * between the NIC and the driver. This includes Tx descriptors,
627  * Rx descriptors and the statistics block.
628  */
629
630 static int init_shared_mem(struct s2io_nic *nic)
631 {
632         u32 size;
633         void *tmp_v_addr, *tmp_v_addr_next;
634         dma_addr_t tmp_p_addr, tmp_p_addr_next;
635         struct RxD_block *pre_rxd_blk = NULL;
636         int i, j, blk_cnt;
637         int lst_size, lst_per_page;
638         struct net_device *dev = nic->dev;
639         unsigned long tmp;
640         struct buffAdd *ba;
641
642         struct mac_info *mac_control;
643         struct config_param *config;
644         unsigned long long mem_allocated = 0;
645
646         mac_control = &nic->mac_control;
647         config = &nic->config;
648
649
650         /* Allocation and initialization of TXDLs in FIOFs */
651         size = 0;
652         for (i = 0; i < config->tx_fifo_num; i++) {
653                 size += config->tx_cfg[i].fifo_len;
654         }
655         if (size > MAX_AVAILABLE_TXDS) {
656                 DBG_PRINT(ERR_DBG, "s2io: Requested TxDs too high, ");
657                 DBG_PRINT(ERR_DBG, "Requested: %d, max supported: 8192\n", size);
658                 return -EINVAL;
659         }
660
661         size = 0;
662         for (i = 0; i < config->tx_fifo_num; i++) {
663                 size = config->tx_cfg[i].fifo_len;
664                 /*
665                  * Legal values are from 2 to 8192
666                  */
667                 if (size < 2) {
668                         DBG_PRINT(ERR_DBG, "s2io: Invalid fifo len (%d)", size);
669                         DBG_PRINT(ERR_DBG, "for fifo %d\n", i);
670                         DBG_PRINT(ERR_DBG, "s2io: Legal values for fifo len"
671                                 "are 2 to 8192\n");
672                         return -EINVAL;
673                 }
674         }
675
676         lst_size = (sizeof(struct TxD) * config->max_txds);
677         lst_per_page = PAGE_SIZE / lst_size;
678
679         for (i = 0; i < config->tx_fifo_num; i++) {
680                 int fifo_len = config->tx_cfg[i].fifo_len;
681                 int list_holder_size = fifo_len * sizeof(struct list_info_hold);
682                 mac_control->fifos[i].list_info = kzalloc(list_holder_size,
683                                                           GFP_KERNEL);
684                 if (!mac_control->fifos[i].list_info) {
685                         DBG_PRINT(INFO_DBG,
686                                   "Malloc failed for list_info\n");
687                         return -ENOMEM;
688                 }
689                 mem_allocated += list_holder_size;
690         }
691         for (i = 0; i < config->tx_fifo_num; i++) {
692                 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
693                                                 lst_per_page);
694                 mac_control->fifos[i].tx_curr_put_info.offset = 0;
695                 mac_control->fifos[i].tx_curr_put_info.fifo_len =
696                     config->tx_cfg[i].fifo_len - 1;
697                 mac_control->fifos[i].tx_curr_get_info.offset = 0;
698                 mac_control->fifos[i].tx_curr_get_info.fifo_len =
699                     config->tx_cfg[i].fifo_len - 1;
700                 mac_control->fifos[i].fifo_no = i;
701                 mac_control->fifos[i].nic = nic;
702                 mac_control->fifos[i].max_txds = MAX_SKB_FRAGS + 2;
703                 mac_control->fifos[i].dev = dev;
704
705                 for (j = 0; j < page_num; j++) {
706                         int k = 0;
707                         dma_addr_t tmp_p;
708                         void *tmp_v;
709                         tmp_v = pci_alloc_consistent(nic->pdev,
710                                                      PAGE_SIZE, &tmp_p);
711                         if (!tmp_v) {
712                                 DBG_PRINT(INFO_DBG,
713                                           "pci_alloc_consistent ");
714                                 DBG_PRINT(INFO_DBG, "failed for TxDL\n");
715                                 return -ENOMEM;
716                         }
717                         /* If we got a zero DMA address(can happen on
718                          * certain platforms like PPC), reallocate.
719                          * Store virtual address of page we don't want,
720                          * to be freed later.
721                          */
722                         if (!tmp_p) {
723                                 mac_control->zerodma_virt_addr = tmp_v;
724                                 DBG_PRINT(INIT_DBG,
725                                 "%s: Zero DMA address for TxDL. ", dev->name);
726                                 DBG_PRINT(INIT_DBG,
727                                 "Virtual address %p\n", tmp_v);
728                                 tmp_v = pci_alloc_consistent(nic->pdev,
729                                                      PAGE_SIZE, &tmp_p);
730                                 if (!tmp_v) {
731                                         DBG_PRINT(INFO_DBG,
732                                           "pci_alloc_consistent ");
733                                         DBG_PRINT(INFO_DBG, "failed for TxDL\n");
734                                         return -ENOMEM;
735                                 }
736                                 mem_allocated += PAGE_SIZE;
737                         }
738                         while (k < lst_per_page) {
739                                 int l = (j * lst_per_page) + k;
740                                 if (l == config->tx_cfg[i].fifo_len)
741                                         break;
742                                 mac_control->fifos[i].list_info[l].list_virt_addr =
743                                     tmp_v + (k * lst_size);
744                                 mac_control->fifos[i].list_info[l].list_phy_addr =
745                                     tmp_p + (k * lst_size);
746                                 k++;
747                         }
748                 }
749         }
750
751         for (i = 0; i < config->tx_fifo_num; i++) {
752                 size = config->tx_cfg[i].fifo_len;
753                 mac_control->fifos[i].ufo_in_band_v
754                         = kcalloc(size, sizeof(u64), GFP_KERNEL);
755                 if (!mac_control->fifos[i].ufo_in_band_v)
756                         return -ENOMEM;
757                 mem_allocated += (size * sizeof(u64));
758         }
759
760         /* Allocation and initialization of RXDs in Rings */
761         size = 0;
762         for (i = 0; i < config->rx_ring_num; i++) {
763                 if (config->rx_cfg[i].num_rxd %
764                     (rxd_count[nic->rxd_mode] + 1)) {
765                         DBG_PRINT(ERR_DBG, "%s: RxD count of ", dev->name);
766                         DBG_PRINT(ERR_DBG, "Ring%d is not a multiple of ",
767                                   i);
768                         DBG_PRINT(ERR_DBG, "RxDs per Block");
769                         return FAILURE;
770                 }
771                 size += config->rx_cfg[i].num_rxd;
772                 mac_control->rings[i].block_count =
773                         config->rx_cfg[i].num_rxd /
774                         (rxd_count[nic->rxd_mode] + 1 );
775                 mac_control->rings[i].pkt_cnt = config->rx_cfg[i].num_rxd -
776                         mac_control->rings[i].block_count;
777         }
778         if (nic->rxd_mode == RXD_MODE_1)
779                 size = (size * (sizeof(struct RxD1)));
780         else
781                 size = (size * (sizeof(struct RxD3)));
782
783         for (i = 0; i < config->rx_ring_num; i++) {
784                 mac_control->rings[i].rx_curr_get_info.block_index = 0;
785                 mac_control->rings[i].rx_curr_get_info.offset = 0;
786                 mac_control->rings[i].rx_curr_get_info.ring_len =
787                     config->rx_cfg[i].num_rxd - 1;
788                 mac_control->rings[i].rx_curr_put_info.block_index = 0;
789                 mac_control->rings[i].rx_curr_put_info.offset = 0;
790                 mac_control->rings[i].rx_curr_put_info.ring_len =
791                     config->rx_cfg[i].num_rxd - 1;
792                 mac_control->rings[i].nic = nic;
793                 mac_control->rings[i].ring_no = i;
794                 mac_control->rings[i].lro = lro_enable;
795
796                 blk_cnt = config->rx_cfg[i].num_rxd /
797                                 (rxd_count[nic->rxd_mode] + 1);
798                 /*  Allocating all the Rx blocks */
799                 for (j = 0; j < blk_cnt; j++) {
800                         struct rx_block_info *rx_blocks;
801                         int l;
802
803                         rx_blocks = &mac_control->rings[i].rx_blocks[j];
804                         size = SIZE_OF_BLOCK; //size is always page size
805                         tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
806                                                           &tmp_p_addr);
807                         if (tmp_v_addr == NULL) {
808                                 /*
809                                  * In case of failure, free_shared_mem()
810                                  * is called, which should free any
811                                  * memory that was alloced till the
812                                  * failure happened.
813                                  */
814                                 rx_blocks->block_virt_addr = tmp_v_addr;
815                                 return -ENOMEM;
816                         }
817                         mem_allocated += size;
818                         memset(tmp_v_addr, 0, size);
819                         rx_blocks->block_virt_addr = tmp_v_addr;
820                         rx_blocks->block_dma_addr = tmp_p_addr;
821                         rx_blocks->rxds = kmalloc(sizeof(struct rxd_info)*
822                                                   rxd_count[nic->rxd_mode],
823                                                   GFP_KERNEL);
824                         if (!rx_blocks->rxds)
825                                 return -ENOMEM;
826                         mem_allocated +=
827                         (sizeof(struct rxd_info)* rxd_count[nic->rxd_mode]);
828                         for (l=0; l<rxd_count[nic->rxd_mode];l++) {
829                                 rx_blocks->rxds[l].virt_addr =
830                                         rx_blocks->block_virt_addr +
831                                         (rxd_size[nic->rxd_mode] * l);
832                                 rx_blocks->rxds[l].dma_addr =
833                                         rx_blocks->block_dma_addr +
834                                         (rxd_size[nic->rxd_mode] * l);
835                         }
836                 }
837                 /* Interlinking all Rx Blocks */
838                 for (j = 0; j < blk_cnt; j++) {
839                         tmp_v_addr =
840                                 mac_control->rings[i].rx_blocks[j].block_virt_addr;
841                         tmp_v_addr_next =
842                                 mac_control->rings[i].rx_blocks[(j + 1) %
843                                               blk_cnt].block_virt_addr;
844                         tmp_p_addr =
845                                 mac_control->rings[i].rx_blocks[j].block_dma_addr;
846                         tmp_p_addr_next =
847                                 mac_control->rings[i].rx_blocks[(j + 1) %
848                                               blk_cnt].block_dma_addr;
849
850                         pre_rxd_blk = (struct RxD_block *) tmp_v_addr;
851                         pre_rxd_blk->reserved_2_pNext_RxD_block =
852                             (unsigned long) tmp_v_addr_next;
853                         pre_rxd_blk->pNext_RxD_Blk_physical =
854                             (u64) tmp_p_addr_next;
855                 }
856         }
857         if (nic->rxd_mode == RXD_MODE_3B) {
858                 /*
859                  * Allocation of Storages for buffer addresses in 2BUFF mode
860                  * and the buffers as well.
861                  */
862                 for (i = 0; i < config->rx_ring_num; i++) {
863                         blk_cnt = config->rx_cfg[i].num_rxd /
864                            (rxd_count[nic->rxd_mode]+ 1);
865                         mac_control->rings[i].ba =
866                                 kmalloc((sizeof(struct buffAdd *) * blk_cnt),
867                                      GFP_KERNEL);
868                         if (!mac_control->rings[i].ba)
869                                 return -ENOMEM;
870                         mem_allocated +=(sizeof(struct buffAdd *) * blk_cnt);
871                         for (j = 0; j < blk_cnt; j++) {
872                                 int k = 0;
873                                 mac_control->rings[i].ba[j] =
874                                         kmalloc((sizeof(struct buffAdd) *
875                                                 (rxd_count[nic->rxd_mode] + 1)),
876                                                 GFP_KERNEL);
877                                 if (!mac_control->rings[i].ba[j])
878                                         return -ENOMEM;
879                                 mem_allocated += (sizeof(struct buffAdd) *  \
880                                         (rxd_count[nic->rxd_mode] + 1));
881                                 while (k != rxd_count[nic->rxd_mode]) {
882                                         ba = &mac_control->rings[i].ba[j][k];
883
884                                         ba->ba_0_org = (void *) kmalloc
885                                             (BUF0_LEN + ALIGN_SIZE, GFP_KERNEL);
886                                         if (!ba->ba_0_org)
887                                                 return -ENOMEM;
888                                         mem_allocated +=
889                                                 (BUF0_LEN + ALIGN_SIZE);
890                                         tmp = (unsigned long)ba->ba_0_org;
891                                         tmp += ALIGN_SIZE;
892                                         tmp &= ~((unsigned long) ALIGN_SIZE);
893                                         ba->ba_0 = (void *) tmp;
894
895                                         ba->ba_1_org = (void *) kmalloc
896                                             (BUF1_LEN + ALIGN_SIZE, GFP_KERNEL);
897                                         if (!ba->ba_1_org)
898                                                 return -ENOMEM;
899                                         mem_allocated
900                                                 += (BUF1_LEN + ALIGN_SIZE);
901                                         tmp = (unsigned long) ba->ba_1_org;
902                                         tmp += ALIGN_SIZE;
903                                         tmp &= ~((unsigned long) ALIGN_SIZE);
904                                         ba->ba_1 = (void *) tmp;
905                                         k++;
906                                 }
907                         }
908                 }
909         }
910
911         /* Allocation and initialization of Statistics block */
912         size = sizeof(struct stat_block);
913         mac_control->stats_mem = pci_alloc_consistent
914             (nic->pdev, size, &mac_control->stats_mem_phy);
915
916         if (!mac_control->stats_mem) {
917                 /*
918                  * In case of failure, free_shared_mem() is called, which
919                  * should free any memory that was alloced till the
920                  * failure happened.
921                  */
922                 return -ENOMEM;
923         }
924         mem_allocated += size;
925         mac_control->stats_mem_sz = size;
926
927         tmp_v_addr = mac_control->stats_mem;
928         mac_control->stats_info = (struct stat_block *) tmp_v_addr;
929         memset(tmp_v_addr, 0, size);
930         DBG_PRINT(INIT_DBG, "%s:Ring Mem PHY: 0x%llx\n", dev->name,
931                   (unsigned long long) tmp_p_addr);
932         mac_control->stats_info->sw_stat.mem_allocated += mem_allocated;
933         return SUCCESS;
934 }
935
936 /**
937  * free_shared_mem - Free the allocated Memory
938  * @nic:  Device private variable.
939  * Description: This function is to free all memory locations allocated by
940  * the init_shared_mem() function and return it to the kernel.
941  */
942
943 static void free_shared_mem(struct s2io_nic *nic)
944 {
945         int i, j, blk_cnt, size;
946         void *tmp_v_addr;
947         dma_addr_t tmp_p_addr;
948         struct mac_info *mac_control;
949         struct config_param *config;
950         int lst_size, lst_per_page;
951         struct net_device *dev;
952         int page_num = 0;
953
954         if (!nic)
955                 return;
956
957         dev = nic->dev;
958
959         mac_control = &nic->mac_control;
960         config = &nic->config;
961
962         lst_size = (sizeof(struct TxD) * config->max_txds);
963         lst_per_page = PAGE_SIZE / lst_size;
964
965         for (i = 0; i < config->tx_fifo_num; i++) {
966                 page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
967                                                         lst_per_page);
968                 for (j = 0; j < page_num; j++) {
969                         int mem_blks = (j * lst_per_page);
970                         if (!mac_control->fifos[i].list_info)
971                                 return;
972                         if (!mac_control->fifos[i].list_info[mem_blks].
973                                  list_virt_addr)
974                                 break;
975                         pci_free_consistent(nic->pdev, PAGE_SIZE,
976                                             mac_control->fifos[i].
977                                             list_info[mem_blks].
978                                             list_virt_addr,
979                                             mac_control->fifos[i].
980                                             list_info[mem_blks].
981                                             list_phy_addr);
982                         nic->mac_control.stats_info->sw_stat.mem_freed
983                                                 += PAGE_SIZE;
984                 }
985                 /* If we got a zero DMA address during allocation,
986                  * free the page now
987                  */
988                 if (mac_control->zerodma_virt_addr) {
989                         pci_free_consistent(nic->pdev, PAGE_SIZE,
990                                             mac_control->zerodma_virt_addr,
991                                             (dma_addr_t)0);
992                         DBG_PRINT(INIT_DBG,
993                                 "%s: Freeing TxDL with zero DMA addr. ",
994                                 dev->name);
995                         DBG_PRINT(INIT_DBG, "Virtual address %p\n",
996                                 mac_control->zerodma_virt_addr);
997                         nic->mac_control.stats_info->sw_stat.mem_freed
998                                                 += PAGE_SIZE;
999                 }
1000                 kfree(mac_control->fifos[i].list_info);
1001                 nic->mac_control.stats_info->sw_stat.mem_freed +=
1002                 (nic->config.tx_cfg[i].fifo_len *sizeof(struct list_info_hold));
1003         }
1004
1005         size = SIZE_OF_BLOCK;
1006         for (i = 0; i < config->rx_ring_num; i++) {
1007                 blk_cnt = mac_control->rings[i].block_count;
1008                 for (j = 0; j < blk_cnt; j++) {
1009                         tmp_v_addr = mac_control->rings[i].rx_blocks[j].
1010                                 block_virt_addr;
1011                         tmp_p_addr = mac_control->rings[i].rx_blocks[j].
1012                                 block_dma_addr;
1013                         if (tmp_v_addr == NULL)
1014                                 break;
1015                         pci_free_consistent(nic->pdev, size,
1016                                             tmp_v_addr, tmp_p_addr);
1017                         nic->mac_control.stats_info->sw_stat.mem_freed += size;
1018                         kfree(mac_control->rings[i].rx_blocks[j].rxds);
1019                         nic->mac_control.stats_info->sw_stat.mem_freed +=
1020                         ( sizeof(struct rxd_info)* rxd_count[nic->rxd_mode]);
1021                 }
1022         }
1023
1024         if (nic->rxd_mode == RXD_MODE_3B) {
1025                 /* Freeing buffer storage addresses in 2BUFF mode. */
1026                 for (i = 0; i < config->rx_ring_num; i++) {
1027                         blk_cnt = config->rx_cfg[i].num_rxd /
1028                             (rxd_count[nic->rxd_mode] + 1);
1029                         for (j = 0; j < blk_cnt; j++) {
1030                                 int k = 0;
1031                                 if (!mac_control->rings[i].ba[j])
1032                                         continue;
1033                                 while (k != rxd_count[nic->rxd_mode]) {
1034                                         struct buffAdd *ba =
1035                                                 &mac_control->rings[i].ba[j][k];
1036                                         kfree(ba->ba_0_org);
1037                                         nic->mac_control.stats_info->sw_stat.\
1038                                         mem_freed += (BUF0_LEN + ALIGN_SIZE);
1039                                         kfree(ba->ba_1_org);
1040                                         nic->mac_control.stats_info->sw_stat.\
1041                                         mem_freed += (BUF1_LEN + ALIGN_SIZE);
1042                                         k++;
1043                                 }
1044                                 kfree(mac_control->rings[i].ba[j]);
1045                                 nic->mac_control.stats_info->sw_stat.mem_freed +=
1046                                         (sizeof(struct buffAdd) *
1047                                         (rxd_count[nic->rxd_mode] + 1));
1048                         }
1049                         kfree(mac_control->rings[i].ba);
1050                         nic->mac_control.stats_info->sw_stat.mem_freed +=
1051                         (sizeof(struct buffAdd *) * blk_cnt);
1052                 }
1053         }
1054
1055         for (i = 0; i < nic->config.tx_fifo_num; i++) {
1056                 if (mac_control->fifos[i].ufo_in_band_v) {
1057                         nic->mac_control.stats_info->sw_stat.mem_freed
1058                                 += (config->tx_cfg[i].fifo_len * sizeof(u64));
1059                         kfree(mac_control->fifos[i].ufo_in_band_v);
1060                 }
1061         }
1062
1063         if (mac_control->stats_mem) {
1064                 nic->mac_control.stats_info->sw_stat.mem_freed +=
1065                         mac_control->stats_mem_sz;
1066                 pci_free_consistent(nic->pdev,
1067                                     mac_control->stats_mem_sz,
1068                                     mac_control->stats_mem,
1069                                     mac_control->stats_mem_phy);
1070         }
1071 }
1072
1073 /**
1074  * s2io_verify_pci_mode -
1075  */
1076
1077 static int s2io_verify_pci_mode(struct s2io_nic *nic)
1078 {
1079         struct XENA_dev_config __iomem *bar0 = nic->bar0;
1080         register u64 val64 = 0;
1081         int     mode;
1082
1083         val64 = readq(&bar0->pci_mode);
1084         mode = (u8)GET_PCI_MODE(val64);
1085
1086         if ( val64 & PCI_MODE_UNKNOWN_MODE)
1087                 return -1;      /* Unknown PCI mode */
1088         return mode;
1089 }
1090
1091 #define NEC_VENID   0x1033
1092 #define NEC_DEVID   0x0125
1093 static int s2io_on_nec_bridge(struct pci_dev *s2io_pdev)
1094 {
1095         struct pci_dev *tdev = NULL;
1096         while ((tdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, tdev)) != NULL) {
1097                 if (tdev->vendor == NEC_VENID && tdev->device == NEC_DEVID) {
1098                         if (tdev->bus == s2io_pdev->bus->parent) {
1099                                 pci_dev_put(tdev);
1100                                 return 1;
1101                         }
1102                 }
1103         }
1104         return 0;
1105 }
1106
/* Bus clock in MHz, indexed by the mode code read via GET_PCI_MODE()
 * (see s2io_print_pci_mode(), which does bus_speed[mode]).
 */
static int bus_speed[8] = {33, 133, 133, 200, 266, 133, 200, 266};
1108 /**
1109  * s2io_print_pci_mode -
1110  */
1111 static int s2io_print_pci_mode(struct s2io_nic *nic)
1112 {
1113         struct XENA_dev_config __iomem *bar0 = nic->bar0;
1114         register u64 val64 = 0;
1115         int     mode;
1116         struct config_param *config = &nic->config;
1117
1118         val64 = readq(&bar0->pci_mode);
1119         mode = (u8)GET_PCI_MODE(val64);
1120
1121         if ( val64 & PCI_MODE_UNKNOWN_MODE)
1122                 return -1;      /* Unknown PCI mode */
1123
1124         config->bus_speed = bus_speed[mode];
1125
1126         if (s2io_on_nec_bridge(nic->pdev)) {
1127                 DBG_PRINT(ERR_DBG, "%s: Device is on PCI-E bus\n",
1128                                                         nic->dev->name);
1129                 return mode;
1130         }
1131
1132         if (val64 & PCI_MODE_32_BITS) {
1133                 DBG_PRINT(ERR_DBG, "%s: Device is on 32 bit ", nic->dev->name);
1134         } else {
1135                 DBG_PRINT(ERR_DBG, "%s: Device is on 64 bit ", nic->dev->name);
1136         }
1137
1138         switch(mode) {
1139                 case PCI_MODE_PCI_33:
1140                         DBG_PRINT(ERR_DBG, "33MHz PCI bus\n");
1141                         break;
1142                 case PCI_MODE_PCI_66:
1143                         DBG_PRINT(ERR_DBG, "66MHz PCI bus\n");
1144                         break;
1145                 case PCI_MODE_PCIX_M1_66:
1146                         DBG_PRINT(ERR_DBG, "66MHz PCIX(M1) bus\n");
1147                         break;
1148                 case PCI_MODE_PCIX_M1_100:
1149                         DBG_PRINT(ERR_DBG, "100MHz PCIX(M1) bus\n");
1150                         break;
1151                 case PCI_MODE_PCIX_M1_133:
1152                         DBG_PRINT(ERR_DBG, "133MHz PCIX(M1) bus\n");
1153                         break;
1154                 case PCI_MODE_PCIX_M2_66:
1155                         DBG_PRINT(ERR_DBG, "133MHz PCIX(M2) bus\n");
1156                         break;
1157                 case PCI_MODE_PCIX_M2_100:
1158                         DBG_PRINT(ERR_DBG, "200MHz PCIX(M2) bus\n");
1159                         break;
1160                 case PCI_MODE_PCIX_M2_133:
1161                         DBG_PRINT(ERR_DBG, "266MHz PCIX(M2) bus\n");
1162                         break;
1163                 default:
1164                         return -1;      /* Unsupported bus speed */
1165         }
1166
1167         return mode;
1168 }
1169
1170 /**
1171  *  init_tti - Initialization transmit traffic interrupt scheme
1172  *  @nic: device private variable
1173  *  @link: link status (UP/DOWN) used to enable/disable continuous
1174  *  transmit interrupts
1175  *  Description: The function configures transmit traffic interrupts
1176  *  Return Value:  SUCCESS on success and
1177  *  '-1' on failure
1178  */
1179
static int init_tti(struct s2io_nic *nic, int link)
{
        struct XENA_dev_config __iomem *bar0 = nic->bar0;
        register u64 val64 = 0;
        int i;
        struct config_param *config;

        config = &nic->config;

        /* Program one TTI memory entry per tx fifo: tti_data1_mem carries
         * the timer value and utilization ranges, tti_data2_mem the UFC
         * values, then the entry is committed via tti_command_mem.
         */
        for (i = 0; i < config->tx_fifo_num; i++) {
                /*
                 * TTI Initialization. Default Tx timer gets us about
                 * 250 interrupts per sec. Continuous interrupts are enabled
                 * by default.
                 */
                if (nic->device_type == XFRAME_II_DEVICE) {
                        /* Xframe II: scale the timer with the bus speed */
                        int count = (nic->config.bus_speed * 125)/2;
                        val64 = TTI_DATA1_MEM_TX_TIMER_VAL(count);
                } else
                        val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078);

                val64 |= TTI_DATA1_MEM_TX_URNG_A(0xA) |
                                TTI_DATA1_MEM_TX_URNG_B(0x10) |
                                TTI_DATA1_MEM_TX_URNG_C(0x30) |
                                TTI_DATA1_MEM_TX_TIMER_AC_EN;
                /* Continuous interrupts only on fifo 0, only when requested
                 * via module parameter and the link is up.
                 */
                if (i == 0)
                        if (use_continuous_tx_intrs && (link == LINK_UP))
                                val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
                writeq(val64, &bar0->tti_data1_mem);

                if (nic->config.intr_type == MSI_X) {
                        val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
                                TTI_DATA2_MEM_TX_UFC_B(0x100) |
                                TTI_DATA2_MEM_TX_UFC_C(0x200) |
                                TTI_DATA2_MEM_TX_UFC_D(0x300);
                } else {
                        /* With INTA and default tx steering, the fifos in
                         * the UDP range [udp_fifo_idx, udp_fifo_idx +
                         * total_udp_fifos) get their own UFC values.
                         */
                        if ((nic->config.tx_steering_type ==
                                TX_DEFAULT_STEERING) &&
                                (config->tx_fifo_num > 1) &&
                                (i >= nic->udp_fifo_idx) &&
                                (i < (nic->udp_fifo_idx +
                                nic->total_udp_fifos)))
                                val64 = TTI_DATA2_MEM_TX_UFC_A(0x50) |
                                        TTI_DATA2_MEM_TX_UFC_B(0x80) |
                                        TTI_DATA2_MEM_TX_UFC_C(0x100) |
                                        TTI_DATA2_MEM_TX_UFC_D(0x120);
                        else
                                val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
                                        TTI_DATA2_MEM_TX_UFC_B(0x20) |
                                        TTI_DATA2_MEM_TX_UFC_C(0x40) |
                                        TTI_DATA2_MEM_TX_UFC_D(0x80);
                }

                writeq(val64, &bar0->tti_data2_mem);

                /* Commit the entry at offset i and wait for the strobe
                 * bit to clear before programming the next fifo.
                 */
                val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD |
                                TTI_CMD_MEM_OFFSET(i);
                writeq(val64, &bar0->tti_command_mem);

                if (wait_for_cmd_complete(&bar0->tti_command_mem,
                        TTI_CMD_MEM_STROBE_NEW_CMD, S2IO_BIT_RESET) != SUCCESS)
                        return FAILURE;
        }

        return SUCCESS;
}
1246
1247 /**
1248  *  init_nic - Initialization of hardware
1249  *  @nic: device private variable
1250  *  Description: The function sequentially configures every block
1251  *  of the H/W from their reset values.
1252  *  Return Value:  SUCCESS on success and
1253  *  '-1' on failure (endian settings incorrect).
1254  */
1255
1256 static int init_nic(struct s2io_nic *nic)
1257 {
1258         struct XENA_dev_config __iomem *bar0 = nic->bar0;
1259         struct net_device *dev = nic->dev;
1260         register u64 val64 = 0;
1261         void __iomem *add;
1262         u32 time;
1263         int i, j;
1264         struct mac_info *mac_control;
1265         struct config_param *config;
1266         int dtx_cnt = 0;
1267         unsigned long long mem_share;
1268         int mem_size;
1269
1270         mac_control = &nic->mac_control;
1271         config = &nic->config;
1272
1273         /* to set the swapper controle on the card */
1274         if(s2io_set_swapper(nic)) {
1275                 DBG_PRINT(ERR_DBG,"ERROR: Setting Swapper failed\n");
1276                 return -EIO;
1277         }
1278
1279         /*
1280          * Herc requires EOI to be removed from reset before XGXS, so..
1281          */
1282         if (nic->device_type & XFRAME_II_DEVICE) {
1283                 val64 = 0xA500000000ULL;
1284                 writeq(val64, &bar0->sw_reset);
1285                 msleep(500);
1286                 val64 = readq(&bar0->sw_reset);
1287         }
1288
1289         /* Remove XGXS from reset state */
1290         val64 = 0;
1291         writeq(val64, &bar0->sw_reset);
1292         msleep(500);
1293         val64 = readq(&bar0->sw_reset);
1294
1295         /* Ensure that it's safe to access registers by checking
1296          * RIC_RUNNING bit is reset. Check is valid only for XframeII.
1297          */
1298         if (nic->device_type == XFRAME_II_DEVICE) {
1299                 for (i = 0; i < 50; i++) {
1300                         val64 = readq(&bar0->adapter_status);
1301                         if (!(val64 & ADAPTER_STATUS_RIC_RUNNING))
1302                                 break;
1303                         msleep(10);
1304                 }
1305                 if (i == 50)
1306                         return -ENODEV;
1307         }
1308
1309         /*  Enable Receiving broadcasts */
1310         add = &bar0->mac_cfg;
1311         val64 = readq(&bar0->mac_cfg);
1312         val64 |= MAC_RMAC_BCAST_ENABLE;
1313         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1314         writel((u32) val64, add);
1315         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1316         writel((u32) (val64 >> 32), (add + 4));
1317
1318         /* Read registers in all blocks */
1319         val64 = readq(&bar0->mac_int_mask);
1320         val64 = readq(&bar0->mc_int_mask);
1321         val64 = readq(&bar0->xgxs_int_mask);
1322
1323         /*  Set MTU */
1324         val64 = dev->mtu;
1325         writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
1326
1327         if (nic->device_type & XFRAME_II_DEVICE) {
1328                 while (herc_act_dtx_cfg[dtx_cnt] != END_SIGN) {
1329                         SPECIAL_REG_WRITE(herc_act_dtx_cfg[dtx_cnt],
1330                                           &bar0->dtx_control, UF);
1331                         if (dtx_cnt & 0x1)
1332                                 msleep(1); /* Necessary!! */
1333                         dtx_cnt++;
1334                 }
1335         } else {
1336                 while (xena_dtx_cfg[dtx_cnt] != END_SIGN) {
1337                         SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
1338                                           &bar0->dtx_control, UF);
1339                         val64 = readq(&bar0->dtx_control);
1340                         dtx_cnt++;
1341                 }
1342         }
1343
1344         /*  Tx DMA Initialization */
1345         val64 = 0;
1346         writeq(val64, &bar0->tx_fifo_partition_0);
1347         writeq(val64, &bar0->tx_fifo_partition_1);
1348         writeq(val64, &bar0->tx_fifo_partition_2);
1349         writeq(val64, &bar0->tx_fifo_partition_3);
1350
1351
1352         for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
1353                 val64 |=
1354                     vBIT(config->tx_cfg[i].fifo_len - 1, ((j * 32) + 19),
1355                          13) | vBIT(config->tx_cfg[i].fifo_priority,
1356                                     ((j * 32) + 5), 3);
1357
1358                 if (i == (config->tx_fifo_num - 1)) {
1359                         if (i % 2 == 0)
1360                                 i++;
1361                 }
1362
1363                 switch (i) {
1364                 case 1:
1365                         writeq(val64, &bar0->tx_fifo_partition_0);
1366                         val64 = 0;
1367                         j = 0;
1368                         break;
1369                 case 3:
1370                         writeq(val64, &bar0->tx_fifo_partition_1);
1371                         val64 = 0;
1372                         j = 0;
1373                         break;
1374                 case 5:
1375                         writeq(val64, &bar0->tx_fifo_partition_2);
1376                         val64 = 0;
1377                         j = 0;
1378                         break;
1379                 case 7:
1380                         writeq(val64, &bar0->tx_fifo_partition_3);
1381                         val64 = 0;
1382                         j = 0;
1383                         break;
1384                 default:
1385                         j++;
1386                         break;
1387                 }
1388         }
1389
1390         /*
1391          * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
1392          * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
1393          */
1394         if ((nic->device_type == XFRAME_I_DEVICE) &&
1395                 (nic->pdev->revision < 4))
1396                 writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);
1397
1398         val64 = readq(&bar0->tx_fifo_partition_0);
1399         DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
1400                   &bar0->tx_fifo_partition_0, (unsigned long long) val64);
1401
1402         /*
1403          * Initialization of Tx_PA_CONFIG register to ignore packet
1404          * integrity checking.
1405          */
1406         val64 = readq(&bar0->tx_pa_cfg);
1407         val64 |= TX_PA_CFG_IGNORE_FRM_ERR | TX_PA_CFG_IGNORE_SNAP_OUI |
1408             TX_PA_CFG_IGNORE_LLC_CTRL | TX_PA_CFG_IGNORE_L2_ERR;
1409         writeq(val64, &bar0->tx_pa_cfg);
1410
1411         /* Rx DMA intialization. */
1412         val64 = 0;
1413         for (i = 0; i < config->rx_ring_num; i++) {
1414                 val64 |=
1415                     vBIT(config->rx_cfg[i].ring_priority, (5 + (i * 8)),
1416                          3);
1417         }
1418         writeq(val64, &bar0->rx_queue_priority);
1419
1420         /*
1421          * Allocating equal share of memory to all the
1422          * configured Rings.
1423          */
1424         val64 = 0;
1425         if (nic->device_type & XFRAME_II_DEVICE)
1426                 mem_size = 32;
1427         else
1428                 mem_size = 64;
1429
1430         for (i = 0; i < config->rx_ring_num; i++) {
1431                 switch (i) {
1432                 case 0:
1433                         mem_share = (mem_size / config->rx_ring_num +
1434                                      mem_size % config->rx_ring_num);
1435                         val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
1436                         continue;
1437                 case 1:
1438                         mem_share = (mem_size / config->rx_ring_num);
1439                         val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
1440                         continue;
1441                 case 2:
1442                         mem_share = (mem_size / config->rx_ring_num);
1443                         val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
1444                         continue;
1445                 case 3:
1446                         mem_share = (mem_size / config->rx_ring_num);
1447                         val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
1448                         continue;
1449                 case 4:
1450                         mem_share = (mem_size / config->rx_ring_num);
1451                         val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
1452                         continue;
1453                 case 5:
1454                         mem_share = (mem_size / config->rx_ring_num);
1455                         val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
1456                         continue;
1457                 case 6:
1458                         mem_share = (mem_size / config->rx_ring_num);
1459                         val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
1460                         continue;
1461                 case 7:
1462                         mem_share = (mem_size / config->rx_ring_num);
1463                         val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
1464                         continue;
1465                 }
1466         }
1467         writeq(val64, &bar0->rx_queue_cfg);
1468
1469         /*
1470          * Filling Tx round robin registers
1471          * as per the number of FIFOs for equal scheduling priority
1472          */
1473         switch (config->tx_fifo_num) {
1474         case 1:
1475                 val64 = 0x0;
1476                 writeq(val64, &bar0->tx_w_round_robin_0);
1477                 writeq(val64, &bar0->tx_w_round_robin_1);
1478                 writeq(val64, &bar0->tx_w_round_robin_2);
1479                 writeq(val64, &bar0->tx_w_round_robin_3);
1480                 writeq(val64, &bar0->tx_w_round_robin_4);
1481                 break;
1482         case 2:
1483                 val64 = 0x0001000100010001ULL;
1484                 writeq(val64, &bar0->tx_w_round_robin_0);
1485                 writeq(val64, &bar0->tx_w_round_robin_1);
1486                 writeq(val64, &bar0->tx_w_round_robin_2);
1487                 writeq(val64, &bar0->tx_w_round_robin_3);
1488                 val64 = 0x0001000100000000ULL;
1489                 writeq(val64, &bar0->tx_w_round_robin_4);
1490                 break;
1491         case 3:
1492                 val64 = 0x0001020001020001ULL;
1493                 writeq(val64, &bar0->tx_w_round_robin_0);
1494                 val64 = 0x0200010200010200ULL;
1495                 writeq(val64, &bar0->tx_w_round_robin_1);
1496                 val64 = 0x0102000102000102ULL;
1497                 writeq(val64, &bar0->tx_w_round_robin_2);
1498                 val64 = 0x0001020001020001ULL;
1499                 writeq(val64, &bar0->tx_w_round_robin_3);
1500                 val64 = 0x0200010200000000ULL;
1501                 writeq(val64, &bar0->tx_w_round_robin_4);
1502                 break;
1503         case 4:
1504                 val64 = 0x0001020300010203ULL;
1505                 writeq(val64, &bar0->tx_w_round_robin_0);
1506                 writeq(val64, &bar0->tx_w_round_robin_1);
1507                 writeq(val64, &bar0->tx_w_round_robin_2);
1508                 writeq(val64, &bar0->tx_w_round_robin_3);
1509                 val64 = 0x0001020300000000ULL;
1510                 writeq(val64, &bar0->tx_w_round_robin_4);
1511                 break;
1512         case 5:
1513                 val64 = 0x0001020304000102ULL;
1514                 writeq(val64, &bar0->tx_w_round_robin_0);
1515                 val64 = 0x0304000102030400ULL;
1516                 writeq(val64, &bar0->tx_w_round_robin_1);
1517                 val64 = 0x0102030400010203ULL;
1518                 writeq(val64, &bar0->tx_w_round_robin_2);
1519                 val64 = 0x0400010203040001ULL;
1520                 writeq(val64, &bar0->tx_w_round_robin_3);
1521                 val64 = 0x0203040000000000ULL;
1522                 writeq(val64, &bar0->tx_w_round_robin_4);
1523                 break;
1524         case 6:
1525                 val64 = 0x0001020304050001ULL;
1526                 writeq(val64, &bar0->tx_w_round_robin_0);
1527                 val64 = 0x0203040500010203ULL;
1528                 writeq(val64, &bar0->tx_w_round_robin_1);
1529                 val64 = 0x0405000102030405ULL;
1530                 writeq(val64, &bar0->tx_w_round_robin_2);
1531                 val64 = 0x0001020304050001ULL;
1532                 writeq(val64, &bar0->tx_w_round_robin_3);
1533                 val64 = 0x0203040500000000ULL;
1534                 writeq(val64, &bar0->tx_w_round_robin_4);
1535                 break;
1536         case 7:
1537                 val64 = 0x0001020304050600ULL;
1538                 writeq(val64, &bar0->tx_w_round_robin_0);
1539                 val64 = 0x0102030405060001ULL;
1540                 writeq(val64, &bar0->tx_w_round_robin_1);
1541                 val64 = 0x0203040506000102ULL;
1542                 writeq(val64, &bar0->tx_w_round_robin_2);
1543                 val64 = 0x0304050600010203ULL;
1544                 writeq(val64, &bar0->tx_w_round_robin_3);
1545                 val64 = 0x0405060000000000ULL;
1546                 writeq(val64, &bar0->tx_w_round_robin_4);
1547                 break;
1548         case 8:
1549                 val64 = 0x0001020304050607ULL;
1550                 writeq(val64, &bar0->tx_w_round_robin_0);
1551                 writeq(val64, &bar0->tx_w_round_robin_1);
1552                 writeq(val64, &bar0->tx_w_round_robin_2);
1553                 writeq(val64, &bar0->tx_w_round_robin_3);
1554                 val64 = 0x0001020300000000ULL;
1555                 writeq(val64, &bar0->tx_w_round_robin_4);
1556                 break;
1557         }
1558
1559         /* Enable all configured Tx FIFO partitions */
1560         val64 = readq(&bar0->tx_fifo_partition_0);
1561         val64 |= (TX_FIFO_PARTITION_EN);
1562         writeq(val64, &bar0->tx_fifo_partition_0);
1563
1564         /* Filling the Rx round robin registers as per the
1565          * number of Rings and steering based on QoS with
1566          * equal priority.
1567          */
1568         switch (config->rx_ring_num) {
1569         case 1:
1570                 val64 = 0x0;
1571                 writeq(val64, &bar0->rx_w_round_robin_0);
1572                 writeq(val64, &bar0->rx_w_round_robin_1);
1573                 writeq(val64, &bar0->rx_w_round_robin_2);
1574                 writeq(val64, &bar0->rx_w_round_robin_3);
1575                 writeq(val64, &bar0->rx_w_round_robin_4);
1576
1577                 val64 = 0x8080808080808080ULL;
1578                 writeq(val64, &bar0->rts_qos_steering);
1579                 break;
1580         case 2:
1581                 val64 = 0x0001000100010001ULL;
1582                 writeq(val64, &bar0->rx_w_round_robin_0);
1583                 writeq(val64, &bar0->rx_w_round_robin_1);
1584                 writeq(val64, &bar0->rx_w_round_robin_2);
1585                 writeq(val64, &bar0->rx_w_round_robin_3);
1586                 val64 = 0x0001000100000000ULL;
1587                 writeq(val64, &bar0->rx_w_round_robin_4);
1588
1589                 val64 = 0x8080808040404040ULL;
1590                 writeq(val64, &bar0->rts_qos_steering);
1591                 break;
1592         case 3:
1593                 val64 = 0x0001020001020001ULL;
1594                 writeq(val64, &bar0->rx_w_round_robin_0);
1595                 val64 = 0x0200010200010200ULL;
1596                 writeq(val64, &bar0->rx_w_round_robin_1);
1597                 val64 = 0x0102000102000102ULL;
1598                 writeq(val64, &bar0->rx_w_round_robin_2);
1599                 val64 = 0x0001020001020001ULL;
1600                 writeq(val64, &bar0->rx_w_round_robin_3);
1601                 val64 = 0x0200010200000000ULL;
1602                 writeq(val64, &bar0->rx_w_round_robin_4);
1603
1604                 val64 = 0x8080804040402020ULL;
1605                 writeq(val64, &bar0->rts_qos_steering);
1606                 break;
1607         case 4:
1608                 val64 = 0x0001020300010203ULL;
1609                 writeq(val64, &bar0->rx_w_round_robin_0);
1610                 writeq(val64, &bar0->rx_w_round_robin_1);
1611                 writeq(val64, &bar0->rx_w_round_robin_2);
1612                 writeq(val64, &bar0->rx_w_round_robin_3);
1613                 val64 = 0x0001020300000000ULL;
1614                 writeq(val64, &bar0->rx_w_round_robin_4);
1615
1616                 val64 = 0x8080404020201010ULL;
1617                 writeq(val64, &bar0->rts_qos_steering);
1618                 break;
1619         case 5:
1620                 val64 = 0x0001020304000102ULL;
1621                 writeq(val64, &bar0->rx_w_round_robin_0);
1622                 val64 = 0x0304000102030400ULL;
1623                 writeq(val64, &bar0->rx_w_round_robin_1);
1624                 val64 = 0x0102030400010203ULL;
1625                 writeq(val64, &bar0->rx_w_round_robin_2);
1626                 val64 = 0x0400010203040001ULL;
1627                 writeq(val64, &bar0->rx_w_round_robin_3);
1628                 val64 = 0x0203040000000000ULL;
1629                 writeq(val64, &bar0->rx_w_round_robin_4);
1630
1631                 val64 = 0x8080404020201008ULL;
1632                 writeq(val64, &bar0->rts_qos_steering);
1633                 break;
1634         case 6:
1635                 val64 = 0x0001020304050001ULL;
1636                 writeq(val64, &bar0->rx_w_round_robin_0);
1637                 val64 = 0x0203040500010203ULL;
1638                 writeq(val64, &bar0->rx_w_round_robin_1);
1639                 val64 = 0x0405000102030405ULL;
1640                 writeq(val64, &bar0->rx_w_round_robin_2);
1641                 val64 = 0x0001020304050001ULL;
1642                 writeq(val64, &bar0->rx_w_round_robin_3);
1643                 val64 = 0x0203040500000000ULL;
1644                 writeq(val64, &bar0->rx_w_round_robin_4);
1645
1646                 val64 = 0x8080404020100804ULL;
1647                 writeq(val64, &bar0->rts_qos_steering);
1648                 break;
1649         case 7:
1650                 val64 = 0x0001020304050600ULL;
1651                 writeq(val64, &bar0->rx_w_round_robin_0);
1652                 val64 = 0x0102030405060001ULL;
1653                 writeq(val64, &bar0->rx_w_round_robin_1);
1654                 val64 = 0x0203040506000102ULL;
1655                 writeq(val64, &bar0->rx_w_round_robin_2);
1656                 val64 = 0x0304050600010203ULL;
1657                 writeq(val64, &bar0->rx_w_round_robin_3);
1658                 val64 = 0x0405060000000000ULL;
1659                 writeq(val64, &bar0->rx_w_round_robin_4);
1660
1661                 val64 = 0x8080402010080402ULL;
1662                 writeq(val64, &bar0->rts_qos_steering);
1663                 break;
1664         case 8:
1665                 val64 = 0x0001020304050607ULL;
1666                 writeq(val64, &bar0->rx_w_round_robin_0);
1667                 writeq(val64, &bar0->rx_w_round_robin_1);
1668                 writeq(val64, &bar0->rx_w_round_robin_2);
1669                 writeq(val64, &bar0->rx_w_round_robin_3);
1670                 val64 = 0x0001020300000000ULL;
1671                 writeq(val64, &bar0->rx_w_round_robin_4);
1672
1673                 val64 = 0x8040201008040201ULL;
1674                 writeq(val64, &bar0->rts_qos_steering);
1675                 break;
1676         }
1677
1678         /* UDP Fix */
1679         val64 = 0;
1680         for (i = 0; i < 8; i++)
1681                 writeq(val64, &bar0->rts_frm_len_n[i]);
1682
1683         /* Set the default rts frame length for the rings configured */
1684         val64 = MAC_RTS_FRM_LEN_SET(dev->mtu+22);
1685         for (i = 0 ; i < config->rx_ring_num ; i++)
1686                 writeq(val64, &bar0->rts_frm_len_n[i]);
1687
1688         /* Set the frame length for the configured rings
1689          * desired by the user
1690          */
1691         for (i = 0; i < config->rx_ring_num; i++) {
1692                 /* If rts_frm_len[i] == 0 then it is assumed that user not
1693                  * specified frame length steering.
1694                  * If the user provides the frame length then program
1695                  * the rts_frm_len register for those values or else
1696                  * leave it as it is.
1697                  */
1698                 if (rts_frm_len[i] != 0) {
1699                         writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
1700                                 &bar0->rts_frm_len_n[i]);
1701                 }
1702         }
1703
1704         /* Disable differentiated services steering logic */
1705         for (i = 0; i < 64; i++) {
1706                 if (rts_ds_steer(nic, i, 0) == FAILURE) {
1707                         DBG_PRINT(ERR_DBG, "%s: failed rts ds steering",
1708                                 dev->name);
1709                         DBG_PRINT(ERR_DBG, "set on codepoint %d\n", i);
1710                         return -ENODEV;
1711                 }
1712         }
1713
1714         /* Program statistics memory */
1715         writeq(mac_control->stats_mem_phy, &bar0->stat_addr);
1716
1717         if (nic->device_type == XFRAME_II_DEVICE) {
1718                 val64 = STAT_BC(0x320);
1719                 writeq(val64, &bar0->stat_byte_cnt);
1720         }
1721
1722         /*
1723          * Initializing the sampling rate for the device to calculate the
1724          * bandwidth utilization.
1725          */
1726         val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
1727             MAC_RX_LINK_UTIL_VAL(rmac_util_period);
1728         writeq(val64, &bar0->mac_link_util);
1729
1730         /*
1731          * Initializing the Transmit and Receive Traffic Interrupt
1732          * Scheme.
1733          */
1734
1735         /* Initialize TTI */
1736         if (SUCCESS != init_tti(nic, nic->last_link_state))
1737                 return -ENODEV;
1738
1739         /* RTI Initialization */
1740         if (nic->device_type == XFRAME_II_DEVICE) {
1741                 /*
1742                  * Programmed to generate Apprx 500 Intrs per
1743                  * second
1744                  */
1745                 int count = (nic->config.bus_speed * 125)/4;
1746                 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(count);
1747         } else
1748                 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF);
1749         val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) |
1750                  RTI_DATA1_MEM_RX_URNG_B(0x10) |
1751                  RTI_DATA1_MEM_RX_URNG_C(0x30) | RTI_DATA1_MEM_RX_TIMER_AC_EN;
1752
1753         writeq(val64, &bar0->rti_data1_mem);
1754
1755         val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
1756                 RTI_DATA2_MEM_RX_UFC_B(0x2) ;
1757         if (nic->config.intr_type == MSI_X)
1758             val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x20) | \
1759                         RTI_DATA2_MEM_RX_UFC_D(0x40));
1760         else
1761             val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x40) | \
1762                         RTI_DATA2_MEM_RX_UFC_D(0x80));
1763         writeq(val64, &bar0->rti_data2_mem);
1764
1765         for (i = 0; i < config->rx_ring_num; i++) {
1766                 val64 = RTI_CMD_MEM_WE | RTI_CMD_MEM_STROBE_NEW_CMD
1767                                 | RTI_CMD_MEM_OFFSET(i);
1768                 writeq(val64, &bar0->rti_command_mem);
1769
1770                 /*
1771                  * Once the operation completes, the Strobe bit of the
1772                  * command register will be reset. We poll for this
1773                  * particular condition. We wait for a maximum of 500ms
1774                  * for the operation to complete, if it's not complete
1775                  * by then we return error.
1776                  */
1777                 time = 0;
1778                 while (TRUE) {
1779                         val64 = readq(&bar0->rti_command_mem);
1780                         if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD))
1781                                 break;
1782
1783                         if (time > 10) {
1784                                 DBG_PRINT(ERR_DBG, "%s: RTI init Failed\n",
1785                                           dev->name);
1786                                 return -ENODEV;
1787                         }
1788                         time++;
1789                         msleep(50);
1790                 }
1791         }
1792
1793         /*
1794          * Initializing proper values as Pause threshold into all
1795          * the 8 Queues on Rx side.
1796          */
1797         writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
1798         writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);
1799
1800         /* Disable RMAC PAD STRIPPING */
1801         add = &bar0->mac_cfg;
1802         val64 = readq(&bar0->mac_cfg);
1803         val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
1804         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1805         writel((u32) (val64), add);
1806         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1807         writel((u32) (val64 >> 32), (add + 4));
1808         val64 = readq(&bar0->mac_cfg);
1809
1810         /* Enable FCS stripping by adapter */
1811         add = &bar0->mac_cfg;
1812         val64 = readq(&bar0->mac_cfg);
1813         val64 |= MAC_CFG_RMAC_STRIP_FCS;
1814         if (nic->device_type == XFRAME_II_DEVICE)
1815                 writeq(val64, &bar0->mac_cfg);
1816         else {
1817                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1818                 writel((u32) (val64), add);
1819                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1820                 writel((u32) (val64 >> 32), (add + 4));
1821         }
1822
1823         /*
1824          * Set the time value to be inserted in the pause frame
1825          * generated by xena.
1826          */
1827         val64 = readq(&bar0->rmac_pause_cfg);
1828         val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
1829         val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
1830         writeq(val64, &bar0->rmac_pause_cfg);
1831
1832         /*
1833          * Set the Threshold Limit for Generating the pause frame
1834          * If the amount of data in any Queue exceeds ratio of
1835          * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256
1836          * pause frame is generated
1837          */
1838         val64 = 0;
1839         for (i = 0; i < 4; i++) {
1840                 val64 |=
1841                     (((u64) 0xFF00 | nic->mac_control.
1842                       mc_pause_threshold_q0q3)
1843                      << (i * 2 * 8));
1844         }
1845         writeq(val64, &bar0->mc_pause_thresh_q0q3);
1846
1847         val64 = 0;
1848         for (i = 0; i < 4; i++) {
1849                 val64 |=
1850                     (((u64) 0xFF00 | nic->mac_control.
1851                       mc_pause_threshold_q4q7)
1852                      << (i * 2 * 8));
1853         }
1854         writeq(val64, &bar0->mc_pause_thresh_q4q7);
1855
1856         /*
1857          * TxDMA will stop Read request if the number of read split has
1858          * exceeded the limit pointed by shared_splits
1859          */
1860         val64 = readq(&bar0->pic_control);
1861         val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
1862         writeq(val64, &bar0->pic_control);
1863
1864         if (nic->config.bus_speed == 266) {
1865                 writeq(TXREQTO_VAL(0x7f) | TXREQTO_EN, &bar0->txreqtimeout);
1866                 writeq(0x0, &bar0->read_retry_delay);
1867                 writeq(0x0, &bar0->write_retry_delay);
1868         }
1869
1870         /*
1871          * Programming the Herc to split every write transaction
1872          * that does not start on an ADB to reduce disconnects.
1873          */
1874         if (nic->device_type == XFRAME_II_DEVICE) {
1875                 val64 = FAULT_BEHAVIOUR | EXT_REQ_EN |
1876                         MISC_LINK_STABILITY_PRD(3);
1877                 writeq(val64, &bar0->misc_control);
1878                 val64 = readq(&bar0->pic_control2);
1879                 val64 &= ~(s2BIT(13)|s2BIT(14)|s2BIT(15));
1880                 writeq(val64, &bar0->pic_control2);
1881         }
1882         if (strstr(nic->product_name, "CX4")) {
1883                 val64 = TMAC_AVG_IPG(0x17);
1884                 writeq(val64, &bar0->tmac_avg_ipg);
1885         }
1886
1887         return SUCCESS;
1888 }
1889 #define LINK_UP_DOWN_INTERRUPT          1
1890 #define MAC_RMAC_ERR_TIMER              2
1891
1892 static int s2io_link_fault_indication(struct s2io_nic *nic)
1893 {
1894         if (nic->config.intr_type != INTA)
1895                 return MAC_RMAC_ERR_TIMER;
1896         if (nic->device_type == XFRAME_II_DEVICE)
1897                 return LINK_UP_DOWN_INTERRUPT;
1898         else
1899                 return MAC_RMAC_ERR_TIMER;
1900 }
1901
1902 /**
1903  *  do_s2io_write_bits -  update alarm bits in alarm register
1904  *  @value: alarm bits
1905  *  @flag: interrupt status
1906  *  @addr: address value
1907  *  Description: update alarm bits in alarm register
1908  *  Return Value:
1909  *  NONE.
1910  */
1911 static void do_s2io_write_bits(u64 value, int flag, void __iomem *addr)
1912 {
1913         u64 temp64;
1914
1915         temp64 = readq(addr);
1916
1917         if(flag == ENABLE_INTRS)
1918                 temp64 &= ~((u64) value);
1919         else
1920                 temp64 |= ((u64) value);
1921         writeq(temp64, addr);
1922 }
1923
/**
 *  en_dis_err_alarms - enable or disable error alarm interrupts
 *  @nic: device private variable
 *  @mask: bitmask selecting which interrupt blocks to act on
 *         (TX_DMA_INTR, TX_MAC_INTR, TX_XGXS_INTR, RX_DMA_INTR,
 *          RX_MAC_INTR, RX_XGXS_INTR, MC_INTR)
 *  @flag: ENABLE_INTRS or DISABLE_INTRS
 *  Description: for each block selected in @mask, program that block's
 *  error-mask registers through do_s2io_write_bits() and accumulate the
 *  block's top-level bit into the general interrupt mask stored in
 *  nic->general_int_mask. The register write order follows the
 *  hardware's TxDMA -> TxMAC -> TxXGXS -> RxDMA -> RxMAC -> RxXGXS -> MC
 *  layout and should not be reordered.
 *  Return Value: NONE.
 */
1924 static void en_dis_err_alarms(struct s2io_nic *nic, u16 mask, int flag)
1925 {
1926         struct XENA_dev_config __iomem *bar0 = nic->bar0;
1927         register u64 gen_int_mask = 0;
1928
             /* Tx DMA block: TDA/PFC/PCC/TTI/LSO/TPA/SM sub-block alarms */
1929         if (mask & TX_DMA_INTR) {
1930
1931                 gen_int_mask |= TXDMA_INT_M;
1932
1933                 do_s2io_write_bits(TXDMA_TDA_INT | TXDMA_PFC_INT |
1934                                 TXDMA_PCC_INT | TXDMA_TTI_INT |
1935                                 TXDMA_LSO_INT | TXDMA_TPA_INT |
1936                                 TXDMA_SM_INT, flag, &bar0->txdma_int_mask);
1937
1938                 do_s2io_write_bits(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM |
1939                                 PFC_MISC_0_ERR | PFC_MISC_1_ERR |
1940                                 PFC_PCIX_ERR | PFC_ECC_SG_ERR, flag,
1941                                 &bar0->pfc_err_mask);
1942
1943                 do_s2io_write_bits(TDA_Fn_ECC_DB_ERR | TDA_SM0_ERR_ALARM |
1944                                 TDA_SM1_ERR_ALARM | TDA_Fn_ECC_SG_ERR |
1945                                 TDA_PCIX_ERR, flag, &bar0->tda_err_mask);
1946
1947                 do_s2io_write_bits(PCC_FB_ECC_DB_ERR | PCC_TXB_ECC_DB_ERR |
1948                                 PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM |
1949                                 PCC_N_SERR | PCC_6_COF_OV_ERR |
1950                                 PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR |
1951                                 PCC_7_LSO_OV_ERR | PCC_FB_ECC_SG_ERR |
1952                                 PCC_TXB_ECC_SG_ERR, flag, &bar0->pcc_err_mask);
1953
1954                 do_s2io_write_bits(TTI_SM_ERR_ALARM | TTI_ECC_SG_ERR |
1955                                 TTI_ECC_DB_ERR, flag, &bar0->tti_err_mask);
1956
1957                 do_s2io_write_bits(LSO6_ABORT | LSO7_ABORT |
1958                                 LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM |
1959                                 LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
1960                                 flag, &bar0->lso_err_mask);
1961
1962                 do_s2io_write_bits(TPA_SM_ERR_ALARM | TPA_TX_FRM_DROP,
1963                                 flag, &bar0->tpa_err_mask);
1964
1965                 do_s2io_write_bits(SM_SM_ERR_ALARM, flag, &bar0->sm_err_mask);
1966
1967         }
1968
             /* Tx MAC block alarms */
1969         if (mask & TX_MAC_INTR) {
1970                 gen_int_mask |= TXMAC_INT_M;
1971                 do_s2io_write_bits(MAC_INT_STATUS_TMAC_INT, flag,
1972                                 &bar0->mac_int_mask);
1973                 do_s2io_write_bits(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR |
1974                                 TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR |
1975                                 TMAC_DESC_ECC_SG_ERR | TMAC_DESC_ECC_DB_ERR,
1976                                 flag, &bar0->mac_tmac_err_mask);
1977         }
1978
             /* Tx XGXS (10G serdes) block alarms */
1979         if (mask & TX_XGXS_INTR) {
1980                 gen_int_mask |= TXXGXS_INT_M;
1981                 do_s2io_write_bits(XGXS_INT_STATUS_TXGXS, flag,
1982                                 &bar0->xgxs_int_mask);
1983                 do_s2io_write_bits(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR |
1984                                 TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
1985                                 flag, &bar0->xgxs_txgxs_err_mask);
1986         }
1987
             /* Rx DMA block: RC/PRC/RPA/RDA/RTI sub-block alarms */
1988         if (mask & RX_DMA_INTR) {
1989                 gen_int_mask |= RXDMA_INT_M;
1990                 do_s2io_write_bits(RXDMA_INT_RC_INT_M | RXDMA_INT_RPA_INT_M |
1991                                 RXDMA_INT_RDA_INT_M | RXDMA_INT_RTI_INT_M,
1992                                 flag, &bar0->rxdma_int_mask);
1993                 do_s2io_write_bits(RC_PRCn_ECC_DB_ERR | RC_FTC_ECC_DB_ERR |
1994                                 RC_PRCn_SM_ERR_ALARM | RC_FTC_SM_ERR_ALARM |
1995                                 RC_PRCn_ECC_SG_ERR | RC_FTC_ECC_SG_ERR |
1996                                 RC_RDA_FAIL_WR_Rn, flag, &bar0->rc_err_mask);
1997                 do_s2io_write_bits(PRC_PCI_AB_RD_Rn | PRC_PCI_AB_WR_Rn |
1998                                 PRC_PCI_AB_F_WR_Rn | PRC_PCI_DP_RD_Rn |
1999                                 PRC_PCI_DP_WR_Rn | PRC_PCI_DP_F_WR_Rn, flag,
2000                                 &bar0->prc_pcix_err_mask);
2001                 do_s2io_write_bits(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR |
2002                                 RPA_ECC_SG_ERR | RPA_ECC_DB_ERR, flag,
2003                                 &bar0->rpa_err_mask);
2004                 do_s2io_write_bits(RDA_RXDn_ECC_DB_ERR | RDA_FRM_ECC_DB_N_AERR |
2005                                 RDA_SM1_ERR_ALARM | RDA_SM0_ERR_ALARM |
2006                                 RDA_RXD_ECC_DB_SERR | RDA_RXDn_ECC_SG_ERR |
2007                                 RDA_FRM_ECC_SG_ERR | RDA_MISC_ERR|RDA_PCIX_ERR,
2008                                 flag, &bar0->rda_err_mask);
2009                 do_s2io_write_bits(RTI_SM_ERR_ALARM |
2010                                 RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
2011                                 flag, &bar0->rti_err_mask);
2012         }
2013
             /* Rx MAC block alarms (includes the link state change alarm) */
2014         if (mask & RX_MAC_INTR) {
2015                 gen_int_mask |= RXMAC_INT_M;
2016                 do_s2io_write_bits(MAC_INT_STATUS_RMAC_INT, flag,
2017                                 &bar0->mac_int_mask);
2018                 do_s2io_write_bits(RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR |
2019                                 RMAC_UNUSED_INT | RMAC_SINGLE_ECC_ERR |
2020                                 RMAC_DOUBLE_ECC_ERR |
2021                                 RMAC_LINK_STATE_CHANGE_INT,
2022                                 flag, &bar0->mac_rmac_err_mask);
2023         }
2024
             /* Rx XGXS (10G serdes) block alarms */
2025         if (mask & RX_XGXS_INTR)
2026         {
2027                 gen_int_mask |= RXXGXS_INT_M;
2028                 do_s2io_write_bits(XGXS_INT_STATUS_RXGXS, flag,
2029                                 &bar0->xgxs_int_mask);
2030                 do_s2io_write_bits(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR, flag,
2031                                 &bar0->xgxs_rxgxs_err_mask);
2032         }
2033
             /* Memory controller (ECC / PLL) alarms */
2034         if (mask & MC_INTR) {
2035                 gen_int_mask |= MC_INT_M;
2036                 do_s2io_write_bits(MC_INT_MASK_MC_INT, flag, &bar0->mc_int_mask);
2037                 do_s2io_write_bits(MC_ERR_REG_SM_ERR | MC_ERR_REG_ECC_ALL_SNG |
2038                                 MC_ERR_REG_ECC_ALL_DBL | PLL_LOCK_N, flag,
2039                                 &bar0->mc_err_mask);
2040         }
2041         nic->general_int_mask = gen_int_mask;
2042
2043         /* Remove this line when alarm interrupts are enabled */
2044         nic->general_int_mask = 0;
2045 }
2046 /**
2047  *  en_dis_able_nic_intrs - Enable or Disable the interrupts
2048  *  @nic: device private variable,
2049  *  @mask: A mask indicating which Intr block must be modified and,
2050  *  @flag: A flag indicating whether to enable or disable the Intrs.
2051  *  Description: This function will either disable or enable the interrupts
2052  *  depending on the flag argument. The mask argument can be used to
2053  *  enable/disable any Intr block.
2054  *  Return Value: NONE.
2055  */
2056
2057 static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
2058 {
2059         struct XENA_dev_config __iomem *bar0 = nic->bar0;
2060         register u64 temp64 = 0, intr_mask = 0;
2061
2062         intr_mask = nic->general_int_mask;
2063
2064         /*  Top level interrupt classification */
2065         /*  PIC Interrupts */
2066         if (mask & TX_PIC_INTR) {
2067                 /*  Enable PIC Intrs in the general intr mask register */
2068                 intr_mask |= TXPIC_INT_M;
2069                 if (flag == ENABLE_INTRS) {
2070                         /*
2071                          * If Hercules adapter enable GPIO otherwise
2072                          * disable all PCIX, Flash, MDIO, IIC and GPIO
2073                          * interrupts for now.
2074                          * TODO
2075                          */
2076                         if (s2io_link_fault_indication(nic) ==
2077                                         LINK_UP_DOWN_INTERRUPT ) {
2078                                 do_s2io_write_bits(PIC_INT_GPIO, flag,
2079                                                 &bar0->pic_int_mask);
2080                                 do_s2io_write_bits(GPIO_INT_MASK_LINK_UP, flag,
2081                                                 &bar0->gpio_int_mask);
2082                         } else
2083                                 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
2084                 } else if (flag == DISABLE_INTRS) {
2085                         /*
2086                          * Disable PIC Intrs in the general
2087                          * intr mask register
2088                          */
2089                         writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
2090                 }
2091         }
2092
2093         /*  Tx traffic interrupts */
2094         if (mask & TX_TRAFFIC_INTR) {
2095                 intr_mask |= TXTRAFFIC_INT_M;
2096                 if (flag == ENABLE_INTRS) {
2097                         /*
2098                          * Enable all the Tx side interrupts
2099                          * writing 0 Enables all 64 TX interrupt levels
2100                          */
2101                         writeq(0x0, &bar0->tx_traffic_mask);
2102                 } else if (flag == DISABLE_INTRS) {
2103                         /*
2104                          * Disable Tx Traffic Intrs in the general intr mask
2105                          * register.
2106                          */
2107                         writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
2108                 }
2109         }
2110
2111         /*  Rx traffic interrupts */
2112         if (mask & RX_TRAFFIC_INTR) {
2113                 intr_mask |= RXTRAFFIC_INT_M;
2114                 if (flag == ENABLE_INTRS) {
2115                         /* writing 0 Enables all 8 RX interrupt levels */
2116                         writeq(0x0, &bar0->rx_traffic_mask);
2117                 } else if (flag == DISABLE_INTRS) {
2118                         /*
2119                          * Disable Rx Traffic Intrs in the general intr mask
2120                          * register.
2121                          */
2122                         writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
2123                 }
2124         }
2125
2126         temp64 = readq(&bar0->general_int_mask);
2127         if (flag == ENABLE_INTRS)
2128                 temp64 &= ~((u64) intr_mask);
2129         else
2130                 temp64 = DISABLE_ALL_INTRS;
2131         writeq(temp64, &bar0->general_int_mask);
2132
2133         nic->general_int_mask = readq(&bar0->general_int_mask);
2134 }
2135
2136 /**
2137  *  verify_pcc_quiescent- Checks for PCC quiescent state
2138  *  Return: 1 If PCC is quiescence
2139  *          0 If PCC is not quiescence
2140  */
2141 static int verify_pcc_quiescent(struct s2io_nic *sp, int flag)
2142 {
2143         int ret = 0, herc;
2144         struct XENA_dev_config __iomem *bar0 = sp->bar0;
2145         u64 val64 = readq(&bar0->adapter_status);
2146
2147         herc = (sp->device_type == XFRAME_II_DEVICE);
2148
2149         if (flag == FALSE) {
2150                 if ((!herc && (sp->pdev->revision >= 4)) || herc) {
2151                         if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE))
2152                                 ret = 1;
2153                 } else {
2154                         if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
2155                                 ret = 1;
2156                 }
2157         } else {
2158                 if ((!herc && (sp->pdev->revision >= 4)) || herc) {
2159                         if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
2160                              ADAPTER_STATUS_RMAC_PCC_IDLE))
2161                                 ret = 1;
2162                 } else {
2163                         if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
2164                              ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
2165                                 ret = 1;
2166                 }
2167         }
2168
2169         return ret;
2170 }
2171 /**
2172  *  verify_xena_quiescence - Checks whether the H/W is ready
2173  *  Description: Returns whether the H/W is ready to go or not. Depending
2174  *  on whether adapter enable bit was written or not the comparison
2175  *  differs and the calling function passes the input argument flag to
2176  *  indicate this.
2177  *  Return: 1 If xena is quiescence
2178  *          0 If Xena is not quiescence
2179  */
2180
2181 static int verify_xena_quiescence(struct s2io_nic *sp)
2182 {
2183         int  mode;
2184         struct XENA_dev_config __iomem *bar0 = sp->bar0;
2185         u64 val64 = readq(&bar0->adapter_status);
2186         mode = s2io_verify_pci_mode(sp);
2187
2188         if (!(val64 & ADAPTER_STATUS_TDMA_READY)) {
2189                 DBG_PRINT(ERR_DBG, "%s", "TDMA is not ready!");
2190                 return 0;
2191         }
2192         if (!(val64 & ADAPTER_STATUS_RDMA_READY)) {
2193         DBG_PRINT(ERR_DBG, "%s", "RDMA is not ready!");
2194                 return 0;
2195         }
2196         if (!(val64 & ADAPTER_STATUS_PFC_READY)) {
2197                 DBG_PRINT(ERR_DBG, "%s", "PFC is not ready!");
2198                 return 0;
2199         }
2200         if (!(val64 & ADAPTER_STATUS_TMAC_BUF_EMPTY)) {
2201                 DBG_PRINT(ERR_DBG, "%s", "TMAC BUF is not empty!");
2202                 return 0;
2203         }
2204         if (!(val64 & ADAPTER_STATUS_PIC_QUIESCENT)) {
2205                 DBG_PRINT(ERR_DBG, "%s", "PIC is not QUIESCENT!");
2206                 return 0;
2207         }
2208         if (!(val64 & ADAPTER_STATUS_MC_DRAM_READY)) {
2209                 DBG_PRINT(ERR_DBG, "%s", "MC_DRAM is not ready!");
2210                 return 0;
2211         }
2212         if (!(val64 & ADAPTER_STATUS_MC_QUEUES_READY)) {
2213                 DBG_PRINT(ERR_DBG, "%s", "MC_QUEUES is not ready!");
2214                 return 0;
2215         }
2216         if (!(val64 & ADAPTER_STATUS_M_PLL_LOCK)) {
2217                 DBG_PRINT(ERR_DBG, "%s", "M_PLL is not locked!");
2218                 return 0;
2219         }
2220
2221         /*
2222          * In PCI 33 mode, the P_PLL is not used, and therefore,
2223          * the the P_PLL_LOCK bit in the adapter_status register will
2224          * not be asserted.
2225          */
2226         if (!(val64 & ADAPTER_STATUS_P_PLL_LOCK) &&
2227                 sp->device_type == XFRAME_II_DEVICE && mode !=
2228                 PCI_MODE_PCI_33) {
2229                 DBG_PRINT(ERR_DBG, "%s", "P_PLL is not locked!");
2230                 return 0;
2231         }
2232         if (!((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
2233                         ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
2234                 DBG_PRINT(ERR_DBG, "%s", "RC_PRC is not QUIESCENT!");
2235                 return 0;
2236         }
2237         return 1;
2238 }
2239
2240 /**
2241  * fix_mac_address -  Fix for Mac addr problem on Alpha platforms
2242  * @sp: Pointer to device specifc structure
2243  * Description :
2244  * New procedure to clear mac address reading  problems on Alpha platforms
2245  *
2246  */
2247
2248 static void fix_mac_address(struct s2io_nic * sp)
2249 {
2250         struct XENA_dev_config __iomem *bar0 = sp->bar0;
2251         u64 val64;
2252         int i = 0;
2253
2254         while (fix_mac[i] != END_SIGN) {
2255                 writeq(fix_mac[i++], &bar0->gpio_control);
2256                 udelay(10);
2257                 val64 = readq(&bar0->gpio_control);
2258         }
2259 }
2260
/**
 *  start_nic - Turns the device on
 *  @nic : device private variable.
 *  Description:
 *  This function actually turns the device on. Before this function is
 *  called, all Registers are configured from their reset states
 *  and shared memory is allocated but the NIC is still quiescent. On
 *  calling this function, the device interrupts are cleared and the NIC is
 *  literally switched on by writing into the adapter control register.
 *  Return Value:
 *  SUCCESS on success and FAILURE (-1) if the adapter never reaches the
 *  quiescent state.
 */

static int start_nic(struct s2io_nic *nic)
{
        struct XENA_dev_config __iomem *bar0 = nic->bar0;
        struct net_device *dev = nic->dev;
        register u64 val64 = 0;
        u16 subid, i;
        struct mac_info *mac_control;
        struct config_param *config;

        mac_control = &nic->mac_control;
        config = &nic->config;

        /*  PRC Initialization and configuration */
        for (i = 0; i < config->rx_ring_num; i++) {
                /* Point the PRC at the first Rx block of this ring. */
                writeq((u64) mac_control->rings[i].rx_blocks[0].block_dma_addr,
                       &bar0->prc_rxd0_n[i]);

                val64 = readq(&bar0->prc_ctrl_n[i]);
                if (nic->rxd_mode == RXD_MODE_1)
                        val64 |= PRC_CTRL_RC_ENABLED;
                else
                        val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
                if (nic->device_type == XFRAME_II_DEVICE)
                        val64 |= PRC_CTRL_GROUP_READS;
                /* Reprogram the RxD fetch backoff interval to 0x1000. */
                val64 &= ~PRC_CTRL_RXD_BACKOFF_INTERVAL(0xFFFFFF);
                val64 |= PRC_CTRL_RXD_BACKOFF_INTERVAL(0x1000);
                writeq(val64, &bar0->prc_ctrl_n[i]);
        }

        if (nic->rxd_mode == RXD_MODE_3B) {
                /* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
                val64 = readq(&bar0->rx_pa_cfg);
                val64 |= RX_PA_CFG_IGNORE_L2_ERR;
                writeq(val64, &bar0->rx_pa_cfg);
        }

        /* Honour the vlan_tag_strip module parameter (0 = keep tags). */
        if (vlan_tag_strip == 0) {
                val64 = readq(&bar0->rx_pa_cfg);
                val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
                writeq(val64, &bar0->rx_pa_cfg);
                vlan_strip_flag = 0;
        }

        /*
         * Enabling MC-RLDRAM. After enabling the device, we timeout
         * for around 100ms, which is approximately the time required
         * for the device to be ready for operation.
         */
        val64 = readq(&bar0->mc_rldram_mrs);
        val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
        SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
        val64 = readq(&bar0->mc_rldram_mrs);

        msleep(100);    /* Delay by around 100 ms. */

        /* Enabling ECC Protection. */
        /* NOTE(review): this clears ADAPTER_ECC_EN, which reads as if it
         * disables ECC — confirm the bit's polarity against s2io.h before
         * relying on this comment. */
        val64 = readq(&bar0->adapter_control);
        val64 &= ~ADAPTER_ECC_EN;
        writeq(val64, &bar0->adapter_control);

        /*
         * Verify if the device is ready to be enabled, if so enable
         * it.
         */
        val64 = readq(&bar0->adapter_status);
        if (!verify_xena_quiescence(nic)) {
                DBG_PRINT(ERR_DBG, "%s: device is not ready, ", dev->name);
                DBG_PRINT(ERR_DBG, "Adapter status reads: 0x%llx\n",
                          (unsigned long long) val64);
                return FAILURE;
        }

        /*
         * With some switches, link might be already up at this point.
         * Because of this weird behavior, when we enable laser,
         * we may not get link. We need to handle this. We cannot
         * figure out which switch is misbehaving. So we are forced to
         * make a global change.
         */

        /* Enabling Laser. */
        val64 = readq(&bar0->adapter_control);
        val64 |= ADAPTER_EOI_TX_ON;
        writeq(val64, &bar0->adapter_control);

        if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
                /*
                 * Dont see link state interrupts initally on some switches,
                 * so directly scheduling the link state task here.
                 */
                schedule_work(&nic->set_link_task);
        }
        /* SXE-002: Initialize link and activity LED */
        subid = nic->pdev->subsystem_device;
        if (((subid & 0xFF) >= 0x07) &&
            (nic->device_type == XFRAME_I_DEVICE)) {
                val64 = readq(&bar0->gpio_control);
                val64 |= 0x0000800000000000ULL;
                writeq(val64, &bar0->gpio_control);
                /* Magic LED setup value written at raw BAR offset 0x2700
                 * (register not present in struct XENA_dev_config). */
                val64 = 0x0411040400000000ULL;
                writeq(val64, (void __iomem *)bar0 + 0x2700);
        }

        return SUCCESS;
}
2379 /**
2380  * s2io_txdl_getskb - Get the skb from txdl, unmap and return skb
2381  */
2382 static struct sk_buff *s2io_txdl_getskb(struct fifo_info *fifo_data, struct \
2383                                         TxD *txdlp, int get_off)
2384 {
2385         struct s2io_nic *nic = fifo_data->nic;
2386         struct sk_buff *skb;
2387         struct TxD *txds;
2388         u16 j, frg_cnt;
2389
2390         txds = txdlp;
2391         if (txds->Host_Control == (u64)(long)fifo_data->ufo_in_band_v) {
2392                 pci_unmap_single(nic->pdev, (dma_addr_t)
2393                         txds->Buffer_Pointer, sizeof(u64),
2394                         PCI_DMA_TODEVICE);
2395                 txds++;
2396         }
2397
2398         skb = (struct sk_buff *) ((unsigned long)
2399                         txds->Host_Control);
2400         if (!skb) {
2401                 memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
2402                 return NULL;
2403         }
2404         pci_unmap_single(nic->pdev, (dma_addr_t)
2405                          txds->Buffer_Pointer,
2406                          skb->len - skb->data_len,
2407                          PCI_DMA_TODEVICE);
2408         frg_cnt = skb_shinfo(skb)->nr_frags;
2409         if (frg_cnt) {
2410                 txds++;
2411                 for (j = 0; j < frg_cnt; j++, txds++) {
2412                         skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
2413                         if (!txds->Buffer_Pointer)
2414                                 break;
2415                         pci_unmap_page(nic->pdev, (dma_addr_t)
2416                                         txds->Buffer_Pointer,
2417                                        frag->size, PCI_DMA_TODEVICE);
2418                 }
2419         }
2420         memset(txdlp,0, (sizeof(struct TxD) * fifo_data->max_txds));
2421         return(skb);
2422 }
2423
2424 /**
2425  *  free_tx_buffers - Free all queued Tx buffers
2426  *  @nic : device private variable.
2427  *  Description:
2428  *  Free all queued Tx buffers.
2429  *  Return Value: void
2430 */
2431
2432 static void free_tx_buffers(struct s2io_nic *nic)
2433 {
2434         struct net_device *dev = nic->dev;
2435         struct sk_buff *skb;
2436         struct TxD *txdp;
2437         int i, j;
2438         struct mac_info *mac_control;
2439         struct config_param *config;
2440         int cnt = 0;
2441
2442         mac_control = &nic->mac_control;
2443         config = &nic->config;
2444
2445         for (i = 0; i < config->tx_fifo_num; i++) {
2446                 unsigned long flags;
2447                 spin_lock_irqsave(&mac_control->fifos[i].tx_lock, flags);
2448                 for (j = 0; j < config->tx_cfg[i].fifo_len; j++) {
2449                         txdp = (struct TxD *) \
2450                         mac_control->fifos[i].list_info[j].list_virt_addr;
2451                         skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j);
2452                         if (skb) {
2453                                 nic->mac_control.stats_info->sw_stat.mem_freed
2454                                         += skb->truesize;
2455                                 dev_kfree_skb(skb);
2456                                 cnt++;
2457                         }
2458                 }
2459                 DBG_PRINT(INTR_DBG,
2460                           "%s:forcibly freeing %d skbs on FIFO%d\n",
2461                           dev->name, cnt, i);
2462                 mac_control->fifos[i].tx_curr_get_info.offset = 0;
2463                 mac_control->fifos[i].tx_curr_put_info.offset = 0;
2464                 spin_unlock_irqrestore(&mac_control->fifos[i].tx_lock, flags);
2465         }
2466 }
2467
2468 /**
2469  *   stop_nic -  To stop the nic
2470  *   @nic ; device private variable.
2471  *   Description:
2472  *   This function does exactly the opposite of what the start_nic()
2473  *   function does. This function is called to stop the device.
2474  *   Return Value:
2475  *   void.
2476  */
2477
2478 static void stop_nic(struct s2io_nic *nic)
2479 {
2480         struct XENA_dev_config __iomem *bar0 = nic->bar0;
2481         register u64 val64 = 0;
2482         u16 interruptible;
2483         struct mac_info *mac_control;
2484         struct config_param *config;
2485
2486         mac_control = &nic->mac_control;
2487         config = &nic->config;
2488
2489         /*  Disable all interrupts */
2490         en_dis_err_alarms(nic, ENA_ALL_INTRS, DISABLE_INTRS);
2491         interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
2492         interruptible |= TX_PIC_INTR;
2493         en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
2494
2495         /* Clearing Adapter_En bit of ADAPTER_CONTROL Register */
2496         val64 = readq(&bar0->adapter_control);
2497         val64 &= ~(ADAPTER_CNTL_EN);
2498         writeq(val64, &bar0->adapter_control);
2499 }
2500
2501 /**
2502  *  fill_rx_buffers - Allocates the Rx side skbs
2503  *  @ring_info: per ring structure
2504  *  @from_card_up: If this is true, we will map the buffer to get
2505  *     the dma address for buf0 and buf1 to give it to the card.
2506  *     Else we will sync the already mapped buffer to give it to the card.
2507  *  Description:
2508  *  The function allocates Rx side skbs and puts the physical
2509  *  address of these buffers into the RxD buffer pointers, so that the NIC
2510  *  can DMA the received frame into these locations.
2511  *  The NIC supports 3 receive modes, viz
2512  *  1. single buffer,
2513  *  2. three buffer and
2514  *  3. Five buffer modes.
2515  *  Each mode defines how many fragments the received frame will be split
2516  *  up into by the NIC. The frame is split into L3 header, L4 Header,
2517  *  L4 payload in three buffer mode and in 5 buffer mode, L4 payload itself
2518  *  is split into 3 fragments. As of now only single buffer mode is
2519  *  supported.
2520  *   Return Value:
2521  *  SUCCESS on success or an appropriate -ve value on failure.
2522  */
2523
2524 static int fill_rx_buffers(struct ring_info *ring, int from_card_up)
2525 {
2526         struct sk_buff *skb;
2527         struct RxD_t *rxdp;
2528         int off, size, block_no, block_no1;
2529         u32 alloc_tab = 0;
2530         u32 alloc_cnt;
2531         u64 tmp;
2532         struct buffAdd *ba;
2533         struct RxD_t *first_rxdp = NULL;
2534         u64 Buffer0_ptr = 0, Buffer1_ptr = 0;
2535         int rxd_index = 0;
2536         struct RxD1 *rxdp1;
2537         struct RxD3 *rxdp3;
2538         struct swStat *stats = &ring->nic->mac_control.stats_info->sw_stat;
2539
2540         alloc_cnt = ring->pkt_cnt - ring->rx_bufs_left;
2541
2542         block_no1 = ring->rx_curr_get_info.block_index;
2543         while (alloc_tab < alloc_cnt) {
2544                 block_no = ring->rx_curr_put_info.block_index;
2545
2546                 off = ring->rx_curr_put_info.offset;
2547
2548                 rxdp = ring->rx_blocks[block_no].rxds[off].virt_addr;
2549
2550                 rxd_index = off + 1;
2551                 if (block_no)
2552                         rxd_index += (block_no * ring->rxd_count);
2553
2554                 if ((block_no == block_no1) &&
2555                         (off == ring->rx_curr_get_info.offset) &&
2556                         (rxdp->Host_Control)) {
2557                         DBG_PRINT(INTR_DBG, "%s: Get and Put",
2558                                 ring->dev->name);
2559                         DBG_PRINT(INTR_DBG, " info equated\n");
2560                         goto end;
2561                 }
2562                 if (off && (off == ring->rxd_count)) {
2563                         ring->rx_curr_put_info.block_index++;
2564                         if (ring->rx_curr_put_info.block_index ==
2565                                                         ring->block_count)
2566                                 ring->rx_curr_put_info.block_index = 0;
2567                         block_no = ring->rx_curr_put_info.block_index;
2568                         off = 0;
2569                         ring->rx_curr_put_info.offset = off;
2570                         rxdp = ring->rx_blocks[block_no].block_virt_addr;
2571                         DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
2572                                   ring->dev->name, rxdp);
2573
2574                 }
2575
2576                 if ((rxdp->Control_1 & RXD_OWN_XENA) &&
2577                         ((ring->rxd_mode == RXD_MODE_3B) &&
2578                                 (rxdp->Control_2 & s2BIT(0)))) {
2579                         ring->rx_curr_put_info.offset = off;
2580                         goto end;
2581                 }
2582                 /* calculate size of skb based on ring mode */
2583                 size = ring->mtu + HEADER_ETHERNET_II_802_3_SIZE +
2584                                 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
2585                 if (ring->rxd_mode == RXD_MODE_1)
2586                         size += NET_IP_ALIGN;
2587                 else
2588                         size = ring->mtu + ALIGN_SIZE + BUF0_LEN + 4;
2589
2590                 /* allocate skb */
2591                 skb = dev_alloc_skb(size);
2592                 if(!skb) {
2593                         DBG_PRINT(INFO_DBG, "%s: Out of ", ring->dev->name);
2594                         DBG_PRINT(INFO_DBG, "memory to allocate SKBs\n");
2595                         if (first_rxdp) {
2596                                 wmb();
2597                                 first_rxdp->Control_1 |= RXD_OWN_XENA;
2598                         }
2599                         stats->mem_alloc_fail_cnt++;
2600
2601                         return -ENOMEM ;
2602                 }
2603                 stats->mem_allocated += skb->truesize;
2604
2605                 if (ring->rxd_mode == RXD_MODE_1) {
2606                         /* 1 buffer mode - normal operation mode */
2607                         rxdp1 = (struct RxD1*)rxdp;
2608                         memset(rxdp, 0, sizeof(struct RxD1));
2609                         skb_reserve(skb, NET_IP_ALIGN);
2610                         rxdp1->Buffer0_ptr = pci_map_single
2611                             (ring->pdev, skb->data, size - NET_IP_ALIGN,
2612                                 PCI_DMA_FROMDEVICE);
2613                         if(pci_dma_mapping_error(rxdp1->Buffer0_ptr))
2614                                 goto pci_map_failed;
2615
2616                         rxdp->Control_2 =
2617                                 SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
2618                         rxdp->Host_Control = (unsigned long) (skb);
2619                 } else if (ring->rxd_mode == RXD_MODE_3B) {
2620                         /*
2621                          * 2 buffer mode -
2622                          * 2 buffer mode provides 128
2623                          * byte aligned receive buffers.
2624                          */
2625
2626                         rxdp3 = (struct RxD3*)rxdp;
2627                         /* save buffer pointers to avoid frequent dma mapping */
2628                         Buffer0_ptr = rxdp3->Buffer0_ptr;
2629                         Buffer1_ptr = rxdp3->Buffer1_ptr;
2630                         memset(rxdp, 0, sizeof(struct RxD3));
2631                         /* restore the buffer pointers for dma sync*/
2632                         rxdp3->Buffer0_ptr = Buffer0_ptr;
2633                         rxdp3->Buffer1_ptr = Buffer1_ptr;
2634
2635                         ba = &ring->ba[block_no][off];
2636                         skb_reserve(skb, BUF0_LEN);
2637                         tmp = (u64)(unsigned long) skb->data;
2638                         tmp += ALIGN_SIZE;
2639                         tmp &= ~ALIGN_SIZE;
2640                         skb->data = (void *) (unsigned long)tmp;
2641                         skb_reset_tail_pointer(skb);
2642
2643                         if (from_card_up) {
2644                                 rxdp3->Buffer0_ptr =
2645                                    pci_map_single(ring->pdev, ba->ba_0,
2646                                         BUF0_LEN, PCI_DMA_FROMDEVICE);
2647                                 if (pci_dma_mapping_error(rxdp3->Buffer0_ptr))
2648                                         goto pci_map_failed;
2649                         } else
2650                                 pci_dma_sync_single_for_device(ring->pdev,
2651                                 (dma_addr_t) rxdp3->Buffer0_ptr,
2652                                     BUF0_LEN, PCI_DMA_FROMDEVICE);
2653
2654                         rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
2655                         if (ring->rxd_mode == RXD_MODE_3B) {
2656                                 /* Two buffer mode */
2657
2658                                 /*
2659                                  * Buffer2 will have L3/L4 header plus
2660                                  * L4 payload
2661                                  */
2662                                 rxdp3->Buffer2_ptr = pci_map_single
2663                                 (ring->pdev, skb->data, ring->mtu + 4,
2664                                                 PCI_DMA_FROMDEVICE);
2665
2666                                 if (pci_dma_mapping_error(rxdp3->Buffer2_ptr))
2667                                         goto pci_map_failed;
2668
2669                                 if (from_card_up) {
2670                                         rxdp3->Buffer1_ptr =
2671                                                 pci_map_single(ring->pdev,
2672                                                 ba->ba_1, BUF1_LEN,
2673                                                 PCI_DMA_FROMDEVICE);
2674
2675                                         if (pci_dma_mapping_error
2676                                                 (rxdp3->Buffer1_ptr)) {
2677                                                 pci_unmap_single
2678                                                         (ring->pdev,
2679                                                     (dma_addr_t)(unsigned long)
2680                                                         skb->data,
2681                                                         ring->mtu + 4,
2682                                                         PCI_DMA_FROMDEVICE);
2683                                                 goto pci_map_failed;
2684                                         }
2685                                 }
2686                                 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
2687                                 rxdp->Control_2 |= SET_BUFFER2_SIZE_3
2688                                                                 (ring->mtu + 4);
2689                         }
2690                         rxdp->Control_2 |= s2BIT(0);
2691                         rxdp->Host_Control = (unsigned long) (skb);
2692                 }
2693                 if (alloc_tab & ((1 << rxsync_frequency) - 1))
2694                         rxdp->Control_1 |= RXD_OWN_XENA;
2695                 off++;
2696                 if (off == (ring->rxd_count + 1))
2697                         off = 0;
2698                 ring->rx_curr_put_info.offset = off;
2699
2700                 rxdp->Control_2 |= SET_RXD_MARKER;
2701                 if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) {
2702                         if (first_rxdp) {
2703                                 wmb();
2704                                 first_rxdp->Control_1 |= RXD_OWN_XENA;
2705                         }
2706                         first_rxdp = rxdp;
2707                 }
2708                 ring->rx_bufs_left += 1;
2709                 alloc_tab++;
2710         }
2711
2712       end:
2713         /* Transfer ownership of first descriptor to adapter just before
2714          * exiting. Before that, use memory barrier so that ownership
2715          * and other fields are seen by adapter correctly.
2716          */
2717         if (first_rxdp) {
2718                 wmb();
2719                 first_rxdp->Control_1 |= RXD_OWN_XENA;
2720         }
2721
2722         return SUCCESS;
2723 pci_map_failed:
2724         stats->pci_map_fail_cnt++;
2725         stats->mem_freed += skb->truesize;
2726         dev_kfree_skb_irq(skb);
2727         return -ENOMEM;
2728 }
2729
/*
 * free_rxd_blk - Free every skb queued on one Rx block of a ring.
 * @sp: device private structure
 * @ring_no: index of the ring the block belongs to
 * @blk: index of the block within the ring
 *
 * For each RxD in the block that holds an skb, unmaps the DMA buffers
 * according to the ring's RxD mode, clears the descriptor, frees the skb
 * and decrements the ring's rx_bufs_left count.
 */
static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk)
{
        struct net_device *dev = sp->dev;
        int j;
        struct sk_buff *skb;
        struct RxD_t *rxdp;
        struct mac_info *mac_control;
        struct buffAdd *ba;
        struct RxD1 *rxdp1;
        struct RxD3 *rxdp3;

        mac_control = &sp->mac_control;
        for (j = 0 ; j < rxd_count[sp->rxd_mode]; j++) {
                rxdp = mac_control->rings[ring_no].
                                rx_blocks[blk].rxds[j].virt_addr;
                skb = (struct sk_buff *)
                        ((unsigned long) rxdp->Host_Control);
                /* Descriptor with no skb attached: nothing to free. */
                if (!skb) {
                        continue;
                }
                if (sp->rxd_mode == RXD_MODE_1) {
                        rxdp1 = (struct RxD1*)rxdp;
                        /* Single-buffer mode: one mapping covering the
                         * whole frame (size mirrors fill_rx_buffers). */
                        pci_unmap_single(sp->pdev, (dma_addr_t)
                                rxdp1->Buffer0_ptr,
                                dev->mtu +
                                HEADER_ETHERNET_II_802_3_SIZE
                                + HEADER_802_2_SIZE +
                                HEADER_SNAP_SIZE,
                                PCI_DMA_FROMDEVICE);
                        memset(rxdp, 0, sizeof(struct RxD1));
                } else if(sp->rxd_mode == RXD_MODE_3B) {
                        rxdp3 = (struct RxD3*)rxdp;
                        /* NOTE(review): ba is assigned but never used in
                         * this function — candidate for removal. */
                        ba = &mac_control->rings[ring_no].
                                ba[blk][j];
                        /* Two-buffer mode: three separate mappings. */
                        pci_unmap_single(sp->pdev, (dma_addr_t)
                                rxdp3->Buffer0_ptr,
                                BUF0_LEN,
                                PCI_DMA_FROMDEVICE);
                        pci_unmap_single(sp->pdev, (dma_addr_t)
                                rxdp3->Buffer1_ptr,
                                BUF1_LEN,
                                PCI_DMA_FROMDEVICE);
                        pci_unmap_single(sp->pdev, (dma_addr_t)
                                rxdp3->Buffer2_ptr,
                                dev->mtu + 4,
                                PCI_DMA_FROMDEVICE);
                        memset(rxdp, 0, sizeof(struct RxD3));
                }
                sp->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
                dev_kfree_skb(skb);
                mac_control->rings[ring_no].rx_bufs_left -= 1;
        }
}
2783
2784 /**
2785  *  free_rx_buffers - Frees all Rx buffers
2786  *  @sp: device private variable.
2787  *  Description:
2788  *  This function will free all Rx buffers allocated by host.
2789  *  Return Value:
2790  *  NONE.
2791  */
2792
2793 static void free_rx_buffers(struct s2io_nic *sp)
2794 {
2795         struct net_device *dev = sp->dev;
2796         int i, blk = 0, buf_cnt = 0;
2797         struct mac_info *mac_control;
2798         struct config_param *config;
2799
2800         mac_control = &sp->mac_control;
2801         config = &sp->config;
2802
2803         for (i = 0; i < config->rx_ring_num; i++) {
2804                 for (blk = 0; blk < rx_ring_sz[i]; blk++)
2805                         free_rxd_blk(sp,i,blk);
2806
2807                 mac_control->rings[i].rx_curr_put_info.block_index = 0;
2808                 mac_control->rings[i].rx_curr_get_info.block_index = 0;
2809                 mac_control->rings[i].rx_curr_put_info.offset = 0;
2810                 mac_control->rings[i].rx_curr_get_info.offset = 0;
2811                 mac_control->rings[i].rx_bufs_left = 0;
2812                 DBG_PRINT(INIT_DBG, "%s:Freed 0x%x Rx Buffers on ring%d\n",
2813                           dev->name, buf_cnt, i);
2814         }
2815 }
2816
2817 static int s2io_chk_rx_buffers(struct ring_info *ring)
2818 {
2819         if (fill_rx_buffers(ring, 0) == -ENOMEM) {
2820                 DBG_PRINT(INFO_DBG, "%s:Out of memory", ring->dev->name);
2821                 DBG_PRINT(INFO_DBG, " in Rx Intr!!\n");
2822         }
2823         return 0;
2824 }
2825
2826 /**
2827  * s2io_poll - Rx interrupt handler for NAPI support
2828  * @napi : pointer to the napi structure.
2829  * @budget : The number of packets that were budgeted to be processed
2830  * during  one pass through the 'Poll" function.
2831  * Description:
2832  * Comes into picture only if NAPI support has been incorporated. It does
2833  * the same thing that rx_intr_handler does, but not in a interrupt context
2834  * also It will process only a given number of packets.
2835  * Return value:
2836  * 0 on success and 1 if there are No Rx packets to be processed.
2837  */
2838
2839 static int s2io_poll_msix(struct napi_struct *napi, int budget)
2840 {
2841         struct ring_info *ring = container_of(napi, struct ring_info, napi);
2842         struct net_device *dev = ring->dev;
2843         struct config_param *config;
2844         struct mac_info *mac_control;
2845         int pkts_processed = 0;
2846         u8 __iomem *addr = NULL;
2847         u8 val8 = 0;
2848         struct s2io_nic *nic = dev->priv;
2849         struct XENA_dev_config __iomem *bar0 = nic->bar0;
2850         int budget_org = budget;
2851
2852         config = &nic->config;
2853         mac_control = &nic->mac_control;
2854
2855         if (unlikely(!is_s2io_card_up(nic)))
2856                 return 0;
2857
2858         pkts_processed = rx_intr_handler(ring, budget);
2859         s2io_chk_rx_buffers(ring);
2860
2861         if (pkts_processed < budget_org) {
2862                 netif_rx_complete(dev, napi);
2863                 /*Re Enable MSI-Rx Vector*/
2864                 addr = (u8 __iomem *)&bar0->xmsi_mask_reg;
2865                 addr += 7 - ring->ring_no;
2866                 val8 = (ring->ring_no == 0) ? 0x3f : 0xbf;
2867                 writeb(val8, addr);
2868                 val8 = readb(addr);
2869         }
2870         return pkts_processed;
2871 }
2872 static int s2io_poll_inta(struct napi_struct *napi, int budget)
2873 {
2874         struct s2io_nic *nic = container_of(napi, struct s2io_nic, napi);
2875         struct ring_info *ring;
2876         struct net_device *dev = nic->dev;
2877         struct config_param *config;
2878         struct mac_info *mac_control;
2879         int pkts_processed = 0;
2880         int ring_pkts_processed, i;
2881         struct XENA_dev_config __iomem *bar0 = nic->bar0;
2882         int budget_org = budget;
2883
2884         config = &nic->config;
2885         mac_control = &nic->mac_control;
2886
2887         if (unlikely(!is_s2io_card_up(nic)))
2888                 return 0;
2889
2890         for (i = 0; i < config->rx_ring_num; i++) {
2891                 ring = &mac_control->rings[i];
2892                 ring_pkts_processed = rx_intr_handler(ring, budget);
2893                 s2io_chk_rx_buffers(ring);
2894                 pkts_processed += ring_pkts_processed;
2895                 budget -= ring_pkts_processed;
2896                 if (budget <= 0)
2897                         break;
2898         }
2899         if (pkts_processed < budget_org) {
2900                 netif_rx_complete(dev, napi);
2901                 /* Re enable the Rx interrupts for the ring */
2902                 writeq(0, &bar0->rx_traffic_mask);
2903                 readl(&bar0->rx_traffic_mask);
2904         }
2905         return pkts_processed;
2906 }
2907
2908 #ifdef CONFIG_NET_POLL_CONTROLLER
2909 /**
2910  * s2io_netpoll - netpoll event handler entry point
2911  * @dev : pointer to the device structure.
2912  * Description:
2913  *      This function will be called by upper layer to check for events on the
2914  * interface in situations where interrupts are disabled. It is used for
2915  * specific in-kernel networking tasks, such as remote consoles and kernel
2916  * debugging over the network (example netdump in RedHat).
2917  */
static void s2io_netpoll(struct net_device *dev)
{
	struct s2io_nic *nic = dev->priv;
	struct mac_info *mac_control;
	struct config_param *config;
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	u64 val64 = 0xFFFFFFFFFFFFFFFFULL;
	int i;

	/* PCI slot may be unreachable (e.g. after an EEH/AER error);
	 * don't touch the hardware in that case. */
	if (pci_channel_offline(nic->pdev))
		return;

	/* Netpoll runs without normal interrupt delivery, so mask the
	 * device IRQ while the rings are serviced by hand. */
	disable_irq(dev->irq);

	mac_control = &nic->mac_control;
	config = &nic->config;

	/* Write all-ones to the Rx/Tx traffic interrupt registers --
	 * presumably acknowledges any pending events; confirm against
	 * the Xframe register specification. */
	writeq(val64, &bar0->rx_traffic_int);
	writeq(val64, &bar0->tx_traffic_int);

	/* we need to free up the transmitted skbufs or else netpoll will
	 * run out of skbs and will fail and eventually netpoll application such
	 * as netdump will fail.
	 */
	for (i = 0; i < config->tx_fifo_num; i++)
		tx_intr_handler(&mac_control->fifos[i]);

	/* check for received packet and indicate up to network */
	for (i = 0; i < config->rx_ring_num; i++)
		rx_intr_handler(&mac_control->rings[i], 0);

	/* Replenish the Rx descriptors consumed above; on ENOMEM just
	 * log and stop -- a later refill pass will retry. */
	for (i = 0; i < config->rx_ring_num; i++) {
		if (fill_rx_buffers(&mac_control->rings[i], 0) == -ENOMEM) {
			DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name);
			DBG_PRINT(INFO_DBG, " in Rx Netpoll!!\n");
			break;
		}
	}
	enable_irq(dev->irq);
	return;
}
2959 #endif
2960
2961 /**
2962  *  rx_intr_handler - Rx interrupt handler
2963  *  @ring_info: per ring structure.
2964  *  @budget: budget for napi processing.
2965  *  Description:
2966  *  If the interrupt is because of a received frame or if the
2967  *  receive ring contains fresh as yet un-processed frames,this function is
2968  *  called. It picks out the RxD at which place the last Rx processing had
2969  *  stopped and sends the skb to the OSM's Rx handler and then increments
2970  *  the offset.
2971  *  Return Value:
2972  *  No. of napi packets processed.
2973  */
static int rx_intr_handler(struct ring_info *ring_data, int budget)
{
	int get_block, put_block;
	struct rx_curr_get_info get_info, put_info;
	struct RxD_t *rxdp;
	struct sk_buff *skb;
	int pkt_cnt = 0, napi_pkts = 0;
	int i;
	struct RxD1* rxdp1;
	struct RxD3* rxdp3;

	/* Snapshot the consumer (get) and producer (put) positions */
	get_info = ring_data->rx_curr_get_info;
	get_block = get_info.block_index;
	memcpy(&put_info, &ring_data->rx_curr_put_info, sizeof(put_info));
	put_block = put_info.block_index;
	rxdp = ring_data->rx_blocks[get_block].rxds[get_info.offset].virt_addr;

	/* Walk the descriptors the NIC has handed back to the host */
	while (RXD_IS_UP2DT(rxdp)) {
		/*
		 * If we are next to the put index then it's
		 * a FIFO-full condition: stop processing.
		 */
		if ((get_block == put_block) &&
		    (get_info.offset + 1) == put_info.offset) {
			DBG_PRINT(INTR_DBG, "%s: Ring Full\n",
				ring_data->dev->name);
			break;
		}
		/* Host_Control holds the skb pointer stashed at fill time */
		skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control);
		if (skb == NULL) {
			DBG_PRINT(ERR_DBG, "%s: The skb is ",
				  ring_data->dev->name);
			DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
			return 0;
		}
		/* Undo the DMA mapping set up when the buffer was posted */
		if (ring_data->rxd_mode == RXD_MODE_1) {
			/* 1-buffer mode: entire frame in Buffer0 */
			rxdp1 = (struct RxD1*)rxdp;
			pci_unmap_single(ring_data->pdev, (dma_addr_t)
				rxdp1->Buffer0_ptr,
				ring_data->mtu +
				HEADER_ETHERNET_II_802_3_SIZE +
				HEADER_802_2_SIZE +
				HEADER_SNAP_SIZE,
				PCI_DMA_FROMDEVICE);
		} else if (ring_data->rxd_mode == RXD_MODE_3B) {
			/* 3-buffer mode: sync the BUF0_LEN-sized Buffer0
			 * for CPU access, unmap the Buffer2 payload */
			rxdp3 = (struct RxD3*)rxdp;
			pci_dma_sync_single_for_cpu(ring_data->pdev, (dma_addr_t)
				rxdp3->Buffer0_ptr,
				BUF0_LEN, PCI_DMA_FROMDEVICE);
			pci_unmap_single(ring_data->pdev, (dma_addr_t)
				rxdp3->Buffer2_ptr,
				ring_data->mtu + 4,
				PCI_DMA_FROMDEVICE);
		}
		prefetch(skb->data);
		/* Hand the frame up the stack */
		rx_osm_handler(ring_data, rxdp);
		/* Advance the consumer index, wrapping block and ring */
		get_info.offset++;
		ring_data->rx_curr_get_info.offset = get_info.offset;
		rxdp = ring_data->rx_blocks[get_block].
				rxds[get_info.offset].virt_addr;
		if (get_info.offset == rxd_count[ring_data->rxd_mode]) {
			get_info.offset = 0;
			ring_data->rx_curr_get_info.offset = get_info.offset;
			get_block++;
			if (get_block == ring_data->block_count)
				get_block = 0;
			ring_data->rx_curr_get_info.block_index = get_block;
			rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
		}

		/* Under NAPI, stop once the budget is exhausted */
		if (ring_data->nic->config.napi) {
			budget--;
			napi_pkts++;
			if (!budget)
				break;
		}
		pkt_cnt++;
		/* Module-parameter cap on packets per invocation */
		if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
			break;
	}
	if (ring_data->lro) {
		/* Clear all LRO sessions before exiting */
		for (i=0; i<MAX_LRO_SESSIONS; i++) {
			struct lro *lro = &ring_data->lro0_n[i];
			if (lro->in_use) {
				update_L3L4_header(ring_data->nic, lro);
				queue_rx_frame(lro->parent, lro->vlan_tag);
				clear_lro_session(lro);
			}
		}
	}
	return(napi_pkts);
}
3067
3068 /**
3069  *  tx_intr_handler - Transmit interrupt handler
3070  *  @nic : device private variable
3071  *  Description:
3072  *  If an interrupt was raised to indicate DMA complete of the
3073  *  Tx packet, this function is called. It identifies the last TxD
3074  *  whose buffer was freed and frees all skbs whose data have already
3075  *  DMA'ed into the NICs internal memory.
3076  *  Return Value:
3077  *  NONE
3078  */
3079
static void tx_intr_handler(struct fifo_info *fifo_data)
{
	struct s2io_nic *nic = fifo_data->nic;
	struct tx_curr_get_info get_info, put_info;
	struct sk_buff *skb = NULL;
	struct TxD *txdlp;
	int pkt_cnt = 0;
	unsigned long flags = 0;
	u8 err_mask;

	/* Trylock: if the transmit path holds the fifo lock, bail out;
	 * completions will be reaped on a later interrupt. */
	if (!spin_trylock_irqsave(&fifo_data->tx_lock, flags))
			return;

	get_info = fifo_data->tx_curr_get_info;
	memcpy(&put_info, &fifo_data->tx_curr_put_info, sizeof(put_info));
	txdlp = (struct TxD *) fifo_data->list_info[get_info.offset].
	    list_virt_addr;
	/* Reap descriptors the NIC has released (ownership bit clear),
	 * stopping at the producer index or an unused descriptor. */
	while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
	       (get_info.offset != put_info.offset) &&
	       (txdlp->Host_Control)) {
		/* Check for TxD errors */
		if (txdlp->Control_1 & TXD_T_CODE) {
			unsigned long long err;
			err = txdlp->Control_1 & TXD_T_CODE;
			if (err & 0x1) {
				nic->mac_control.stats_info->sw_stat.
						parity_err_cnt++;
			}

			/* update t_code statistics; the transfer code
			 * lives in the high bits of Control_1 */
			err_mask = err >> 48;
			switch(err_mask) {
				case 2:
					nic->mac_control.stats_info->sw_stat.
							tx_buf_abort_cnt++;
				break;

				case 3:
					nic->mac_control.stats_info->sw_stat.
							tx_desc_abort_cnt++;
				break;

				case 7:
					nic->mac_control.stats_info->sw_stat.
							tx_parity_err_cnt++;
				break;

				case 10:
					nic->mac_control.stats_info->sw_stat.
							tx_link_loss_cnt++;
				break;

				case 15:
					nic->mac_control.stats_info->sw_stat.
							tx_list_proc_err_cnt++;
				break;
			}
		}

		/* Recover the skb for this descriptor list and release
		 * its DMA mappings */
		skb = s2io_txdl_getskb(fifo_data, txdlp, get_info.offset);
		if (skb == NULL) {
			spin_unlock_irqrestore(&fifo_data->tx_lock, flags);
			DBG_PRINT(ERR_DBG, "%s: Null skb ",
			__FUNCTION__);
			DBG_PRINT(ERR_DBG, "in Tx Free Intr\n");
			return;
		}
		pkt_cnt++;

		/* Updating the statistics block */
		nic->stats.tx_bytes += skb->len;
		nic->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
		dev_kfree_skb_irq(skb);

		/* Advance the consumer index with wrap-around */
		get_info.offset++;
		if (get_info.offset == get_info.fifo_len + 1)
			get_info.offset = 0;
		txdlp = (struct TxD *) fifo_data->list_info
		    [get_info.offset].list_virt_addr;
		fifo_data->tx_curr_get_info.offset =
		    get_info.offset;
	}

	/* Restart the queue if enough descriptors were freed */
	s2io_wake_tx_queue(fifo_data, pkt_cnt, nic->config.multiq);

	spin_unlock_irqrestore(&fifo_data->tx_lock, flags);
}
3167
3168 /**
 *  s2io_mdio_write - Function to write to the MDIO registers
3170  *  @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
3171  *  @addr     : address value
3172  *  @value    : data value
3173  *  @dev      : pointer to net_device structure
3174  *  Description:
3175  *  This function is used to write values to the MDIO registers
3176  *  NONE
3177  */
3178 static void s2io_mdio_write(u32 mmd_type, u64 addr, u16 value, struct net_device *dev)
3179 {
3180         u64 val64 = 0x0;
3181         struct s2io_nic *sp = dev->priv;
3182         struct XENA_dev_config __iomem *bar0 = sp->bar0;
3183
3184         //address transaction
3185         val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3186                         | MDIO_MMD_DEV_ADDR(mmd_type)
3187                         | MDIO_MMS_PRT_ADDR(0x0);
3188         writeq(val64, &bar0->mdio_control);
3189         val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3190         writeq(val64, &bar0->mdio_control);
3191         udelay(100);
3192
3193         //Data transaction
3194         val64 = 0x0;
3195         val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3196                         | MDIO_MMD_DEV_ADDR(mmd_type)
3197                         | MDIO_MMS_PRT_ADDR(0x0)
3198                         | MDIO_MDIO_DATA(value)
3199                         | MDIO_OP(MDIO_OP_WRITE_TRANS);
3200         writeq(val64, &bar0->mdio_control);
3201         val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3202         writeq(val64, &bar0->mdio_control);
3203         udelay(100);
3204
3205         val64 = 0x0;
3206         val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3207         | MDIO_MMD_DEV_ADDR(mmd_type)
3208         | MDIO_MMS_PRT_ADDR(0x0)
3209         | MDIO_OP(MDIO_OP_READ_TRANS);
3210         writeq(val64, &bar0->mdio_control);
3211         val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3212         writeq(val64, &bar0->mdio_control);
3213         udelay(100);
3214
3215 }
3216
3217 /**
 *  s2io_mdio_read - Function to read from the MDIO registers
 *  @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
 *  @addr     : address value
 *  @dev      : pointer to net_device structure
 *  Description:
 *  This function is used to read values from the MDIO registers
 *  Return value: the register contents in the low 16 bits
3225  */
3226 static u64 s2io_mdio_read(u32 mmd_type, u64 addr, struct net_device *dev)
3227 {
3228         u64 val64 = 0x0;
3229         u64 rval64 = 0x0;
3230         struct s2io_nic *sp = dev->priv;
3231         struct XENA_dev_config __iomem *bar0 = sp->bar0;
3232
3233         /* address transaction */
3234         val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3235                         | MDIO_MMD_DEV_ADDR(mmd_type)
3236                         | MDIO_MMS_PRT_ADDR(0x0);
3237         writeq(val64, &bar0->mdio_control);
3238         val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3239         writeq(val64, &bar0->mdio_control);
3240         udelay(100);
3241
3242         /* Data transaction */
3243         val64 = 0x0;
3244         val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3245                         | MDIO_MMD_DEV_ADDR(mmd_type)
3246                         | MDIO_MMS_PRT_ADDR(0x0)
3247                         | MDIO_OP(MDIO_OP_READ_TRANS);
3248         writeq(val64, &bar0->mdio_control);
3249         val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3250         writeq(val64, &bar0->mdio_control);
3251         udelay(100);
3252
3253         /* Read the value from regs */
3254         rval64 = readq(&bar0->mdio_control);
3255         rval64 = rval64 & 0xFFFF0000;
3256         rval64 = rval64 >> 16;
3257         return rval64;
3258 }
3259 /**
3260  *  s2io_chk_xpak_counter - Function to check the status of the xpak counters
 *  @counter      : counter value to be updated
3262  *  @flag         : flag to indicate the status
3263  *  @type         : counter type
3264  *  Description:
3265  *  This function is to check the status of the xpak counters value
3266  *  NONE
3267  */
3268
3269 static void s2io_chk_xpak_counter(u64 *counter, u64 * regs_stat, u32 index, u16 flag, u16 type)
3270 {
3271         u64 mask = 0x3;
3272         u64 val64;
3273         int i;
3274         for(i = 0; i <index; i++)
3275                 mask = mask << 0x2;
3276
3277         if(flag > 0)
3278         {
3279                 *counter = *counter + 1;
3280                 val64 = *regs_stat & mask;
3281                 val64 = val64 >> (index * 0x2);
3282                 val64 = val64 + 1;
3283                 if(val64 == 3)
3284                 {
3285                         switch(type)
3286                         {
3287                         case 1:
3288                                 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3289                                           "service. Excessive temperatures may "
3290                                           "result in premature transceiver "
3291                                           "failure \n");
3292                         break;
3293                         case 2:
3294                                 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3295                                           "service Excessive bias currents may "
3296                                           "indicate imminent laser diode "
3297                                           "failure \n");
3298                         break;
3299                         case 3:
3300                                 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3301                                           "service Excessive laser output "
3302                                           "power may saturate far-end "
3303                                           "receiver\n");
3304                         break;
3305                         default:
3306                                 DBG_PRINT(ERR_DBG, "Incorrect XPAK Alarm "
3307                                           "type \n");
3308                         }
3309                         val64 = 0x0;
3310                 }
3311                 val64 = val64 << (index * 0x2);
3312                 *regs_stat = (*regs_stat & (~mask)) | (val64);
3313
3314         } else {
3315                 *regs_stat = *regs_stat & (~mask);
3316         }
3317 }
3318
3319 /**
3320  *  s2io_updt_xpak_counter - Function to update the xpak counters
3321  *  @dev         : pointer to net_device struct
3322  *  Description:
 *  This function is to update the status of the xpak counters value
3324  *  NONE
3325  */
static void s2io_updt_xpak_counter(struct net_device *dev)
{
	u16 flag  = 0x0;
	u16 type  = 0x0;
	u16 val16 = 0x0;
	u64 val64 = 0x0;
	u64 addr  = 0x0;

	struct s2io_nic *sp = dev->priv;
	struct stat_block *stat_info = sp->mac_control.stats_info;

	/* Check the communication with the MDIO slave */
	addr = 0x0000;
	val64 = 0x0;
	val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
	/* All-ones or all-zeros means the slave did not respond */
	if((val64 == 0xFFFF) || (val64 == 0x0000))
	{
		DBG_PRINT(ERR_DBG, "ERR: MDIO slave access failed - "
			  "Returned %llx\n", (unsigned long long)val64);
		return;
	}

	/* Check for the expected value of 0x2040 at PMA address 0x0000 */
	if(val64 != 0x2040)
	{
		DBG_PRINT(ERR_DBG, "Incorrect value at PMA address 0x0000 - ");
		DBG_PRINT(ERR_DBG, "Returned: %llx- Expected: 0x2040\n",
			  (unsigned long long)val64);
		return;
	}

	/* Loading the DOM register to MDIO register */
	addr = 0xA100;
	s2io_mdio_write(MDIO_MMD_PMA_DEV_ADDR, addr, val16, dev);
	val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);

	/* Reading the Alarm flags */
	addr = 0xA070;
	val64 = 0x0;
	val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);

	/* Bit 7: transceiver temperature high alarm (type 1, field 0) */
	flag = CHECKBIT(val64, 0x7);
	type = 1;
	s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_transceiver_temp_high,
				&stat_info->xpak_stat.xpak_regs_stat,
				0x0, flag, type);

	if(CHECKBIT(val64, 0x6))
		stat_info->xpak_stat.alarm_transceiver_temp_low++;

	/* Bit 3: laser bias current high alarm (type 2, field 2) */
	flag = CHECKBIT(val64, 0x3);
	type = 2;
	s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_bias_current_high,
				&stat_info->xpak_stat.xpak_regs_stat,
				0x2, flag, type);

	if(CHECKBIT(val64, 0x2))
		stat_info->xpak_stat.alarm_laser_bias_current_low++;

	/* Bit 1: laser output power high alarm (type 3, field 4) */
	flag = CHECKBIT(val64, 0x1);
	type = 3;
	s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_output_power_high,
				&stat_info->xpak_stat.xpak_regs_stat,
				0x4, flag, type);

	if(CHECKBIT(val64, 0x0))
		stat_info->xpak_stat.alarm_laser_output_power_low++;

	/* Reading the Warning flags */
	addr = 0xA074;
	val64 = 0x0;
	val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);

	if(CHECKBIT(val64, 0x7))
		stat_info->xpak_stat.warn_transceiver_temp_high++;

	if(CHECKBIT(val64, 0x6))
		stat_info->xpak_stat.warn_transceiver_temp_low++;

	if(CHECKBIT(val64, 0x3))
		stat_info->xpak_stat.warn_laser_bias_current_high++;

	if(CHECKBIT(val64, 0x2))
		stat_info->xpak_stat.warn_laser_bias_current_low++;

	if(CHECKBIT(val64, 0x1))
		stat_info->xpak_stat.warn_laser_output_power_high++;

	if(CHECKBIT(val64, 0x0))
		stat_info->xpak_stat.warn_laser_output_power_low++;
}
3417
3418 /**
3419  *  wait_for_cmd_complete - waits for a command to complete.
 *  @addr : register address to poll.
 *  @busy_bit : the busy bit within that register.
 *  @bit_state : S2IO_BIT_RESET to wait for the bit to clear,
 *  S2IO_BIT_SET to wait for it to become set.
3422  *  Description: Function that waits for a command to Write into RMAC
3423  *  ADDR DATA registers to be completed and returns either success or
3424  *  error depending on whether the command was complete or not.
3425  *  Return value:
3426  *   SUCCESS on success and FAILURE on failure.
3427  */
3428
3429 static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit,
3430                                 int bit_state)
3431 {
3432         int ret = FAILURE, cnt = 0, delay = 1;
3433         u64 val64;
3434
3435         if ((bit_state != S2IO_BIT_RESET) && (bit_state != S2IO_BIT_SET))
3436                 return FAILURE;
3437
3438         do {
3439                 val64 = readq(addr);
3440                 if (bit_state == S2IO_BIT_RESET) {
3441                         if (!(val64 & busy_bit)) {
3442                                 ret = SUCCESS;
3443                                 break;
3444                         }
3445                 } else {
3446                         if (!(val64 & busy_bit)) {
3447                                 ret = SUCCESS;
3448                                 break;
3449                         }
3450                 }
3451
3452                 if(in_interrupt())
3453                         mdelay(delay);
3454                 else
3455                         msleep(delay);
3456
3457                 if (++cnt >= 10)
3458                         delay = 50;
3459         } while (cnt < 20);
3460         return ret;
3461 }
3462 /*
3463  * check_pci_device_id - Checks if the device id is supported
3464  * @id : device id
3465  * Description: Function to check if the pci device id is supported by driver.
3466  * Return value: Actual device id if supported else PCI_ANY_ID
3467  */
3468 static u16 check_pci_device_id(u16 id)
3469 {
3470         switch (id) {
3471         case PCI_DEVICE_ID_HERC_WIN:
3472         case PCI_DEVICE_ID_HERC_UNI:
3473                 return XFRAME_II_DEVICE;
3474         case PCI_DEVICE_ID_S2IO_UNI:
3475         case PCI_DEVICE_ID_S2IO_WIN:
3476                 return XFRAME_I_DEVICE;
3477         default:
3478                 return PCI_ANY_ID;
3479         }
3480 }
3481
3482 /**
3483  *  s2io_reset - Resets the card.
3484  *  @sp : private member of the device structure.
3485  *  Description: Function to Reset the card. This function then also
3486  *  restores the previously saved PCI configuration space registers as
3487  *  the card reset also resets the configuration space.
3488  *  Return value:
3489  *  void.
3490  */
3491
static void s2io_reset(struct s2io_nic * sp)
{
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64;
	u16 subid, pci_cmd;
	int i;
	u16 val16;
	unsigned long long up_cnt, down_cnt, up_time, down_time, reset_cnt;
	unsigned long long mem_alloc_cnt, mem_free_cnt, watchdog_cnt;

	DBG_PRINT(INIT_DBG,"%s - Resetting XFrame card %s\n",
			__FUNCTION__, sp->dev->name);

	/* Back up the PCI-X CMD reg, don't want to lose MMRBC, OST settings */
	pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd));

	/* Issue a global software reset of the adapter */
	val64 = SW_RESET_ALL;
	writeq(val64, &bar0->sw_reset);
	/* CX4 variants need extra settle time after reset */
	if (strstr(sp->product_name, "CX4")) {
		msleep(750);
	}
	msleep(250);
	for (i = 0; i < S2IO_MAX_PCI_CONFIG_SPACE_REINIT; i++) {

		/* Restore the PCI state saved during initialization. */
		pci_restore_state(sp->pdev);
		/* Re-read the device id (config offset 0x2) to check that
		 * the config space is reachable again after reset */
		pci_read_config_word(sp->pdev, 0x2, &val16);
		if (check_pci_device_id(val16) != (u16)PCI_ANY_ID)
			break;
		msleep(200);
	}

	if (check_pci_device_id(val16) == (u16)PCI_ANY_ID) {
		DBG_PRINT(ERR_DBG,"%s SW_Reset failed!\n", __FUNCTION__);
	}

	pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, pci_cmd);

	s2io_init_pci(sp);

	/* Set swapper to enable I/O register access */
	s2io_set_swapper(sp);

	/* restore mac_addr entries */
	do_s2io_restore_unicast_mc(sp);

	/* Restore the MSIX table entries from local variables */
	restore_xmsi_data(sp);

	/* Clear certain PCI/PCI-X fields after reset */
	if (sp->device_type == XFRAME_II_DEVICE) {
		/* Clear "detected parity error" bit */
		pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000);

		/* Clearing PCIX Ecc status register */
		pci_write_config_dword(sp->pdev, 0x68, 0x7C);

		/* Clearing PCI_STATUS error reflected here */
		writeq(s2BIT(62), &bar0->txpic_int_reg);
	}

	/* Reset device statistics maintained by OS */
	memset(&sp->stats, 0, sizeof (struct net_device_stats));

	/* Snapshot the few software stats that must survive the reset */
	up_cnt = sp->mac_control.stats_info->sw_stat.link_up_cnt;
	down_cnt = sp->mac_control.stats_info->sw_stat.link_down_cnt;
	up_time = sp->mac_control.stats_info->sw_stat.link_up_time;
	down_time = sp->mac_control.stats_info->sw_stat.link_down_time;
	reset_cnt = sp->mac_control.stats_info->sw_stat.soft_reset_cnt;
	mem_alloc_cnt = sp->mac_control.stats_info->sw_stat.mem_allocated;
	mem_free_cnt = sp->mac_control.stats_info->sw_stat.mem_freed;
	watchdog_cnt = sp->mac_control.stats_info->sw_stat.watchdog_timer_cnt;
	/* save link up/down time/cnt, reset/memory/watchdog cnt */
	memset(sp->mac_control.stats_info, 0, sizeof(struct stat_block));
	/* restore link up/down time/cnt, reset/memory/watchdog cnt */
	sp->mac_control.stats_info->sw_stat.link_up_cnt = up_cnt;
	sp->mac_control.stats_info->sw_stat.link_down_cnt = down_cnt;
	sp->mac_control.stats_info->sw_stat.link_up_time = up_time;
	sp->mac_control.stats_info->sw_stat.link_down_time = down_time;
	sp->mac_control.stats_info->sw_stat.soft_reset_cnt = reset_cnt;
	sp->mac_control.stats_info->sw_stat.mem_allocated = mem_alloc_cnt;
	sp->mac_control.stats_info->sw_stat.mem_freed = mem_free_cnt;
	sp->mac_control.stats_info->sw_stat.watchdog_timer_cnt = watchdog_cnt;

	/* SXE-002: Configure link and activity LED to turn it off */
	subid = sp->pdev->subsystem_device;
	if (((subid & 0xFF) >= 0x07) &&
	    (sp->device_type == XFRAME_I_DEVICE)) {
		val64 = readq(&bar0->gpio_control);
		val64 |= 0x0000800000000000ULL;
		writeq(val64, &bar0->gpio_control);
		val64 = 0x0411040400000000ULL;
		writeq(val64, (void __iomem *)bar0 + 0x2700);
	}

	/*
	 * Clear spurious ECC interrupts that would have occurred on
	 * XFRAME II cards after reset.
	 */
	if (sp->device_type == XFRAME_II_DEVICE) {
		/* Write-back of the read value clears the asserted bits */
		val64 = readq(&bar0->pcc_err_reg);
		writeq(val64, &bar0->pcc_err_reg);
	}

	sp->device_enabled_once = FALSE;
}
3598
3599 /**
3600  *  s2io_set_swapper - to set the swapper controle on the card
3601  *  @sp : private member of the device structure,
3602  *  pointer to the s2io_nic structure.
3603  *  Description: Function to set the swapper control on the card
3604  *  correctly depending on the 'endianness' of the system.
3605  *  Return value:
3606  *  SUCCESS on success and FAILURE on failure.
3607  */
3608
3609 static int s2io_set_swapper(struct s2io_nic * sp)
3610 {
3611         struct net_device *dev = sp->dev;
3612         struct XENA_dev_config __iomem *bar0 = sp->bar0;
3613         u64 val64, valt, valr;
3614
3615         /*
3616          * Set proper endian settings and verify the same by reading
3617          * the PIF Feed-back register.
3618          */
3619
3620         val64 = readq(&bar0->pif_rd_swapper_fb);
3621         if (val64 != 0x0123456789ABCDEFULL) {
3622                 int i = 0;
3623                 u64 value[] = { 0xC30000C3C30000C3ULL,   /* FE=1, SE=1 */
3624                                 0x8100008181000081ULL,  /* FE=1, SE=0 */
3625                                 0x4200004242000042ULL,  /* FE=0, SE=1 */
3626                                 0};                     /* FE=0, SE=0 */
3627
3628                 while(i<4) {
3629                         writeq(value[i], &bar0->swapper_ctrl);
3630                         val64 = readq(&bar0->pif_rd_swapper_fb);
3631                         if (val64 == 0x0123456789ABCDEFULL)
3632                                 break;
3633                         i++;
3634                 }
3635                 if (i == 4) {
3636                         DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
3637                                 dev->name);
3638                         DBG_PRINT(ERR_DBG, "feedback read %llx\n",
3639                                 (unsigned long long) val64);
3640                         return FAILURE;
3641                 }
3642                 valr = value[i];
3643         } else {
3644                 valr = readq(&bar0->swapper_ctrl);
3645         }
3646
3647         valt = 0x0123456789ABCDEFULL;
3648         writeq(valt, &bar0->xmsi_address);
3649         val64 = readq(&bar0->xmsi_address);
3650
3651         if(val64 != valt) {
3652                 int i = 0;
3653                 u64 value[] = { 0x00C3C30000C3C300ULL,  /* FE=1, SE=1 */
3654                                 0x0081810000818100ULL,  /* FE=1, SE=0 */
3655                                 0x0042420000424200ULL,  /* FE=0, SE=1 */
3656                                 0};                     /* FE=0, SE=0 */
3657
3658                 while(i<4) {
3659                         writeq((value[i] | valr), &bar0->swapper_ctrl);
3660                         writeq(valt, &bar0->xmsi_address);
3661                         val64 = readq(&bar0->xmsi_address);
3662                         if(val64 == valt)
3663                                 break;
3664                         i++;
3665                 }
3666                 if(i == 4) {
3667                         unsigned long long x = val64;
3668                         DBG_PRINT(ERR_DBG, "Write failed, Xmsi_addr ");
3669                         DBG_PRINT(ERR_DBG, "reads:0x%llx\n", x);
3670                         return FAILURE;
3671                 }
3672         }
3673         val64 = readq(&bar0->swapper_ctrl);
3674         val64 &= 0xFFFF000000000000ULL;
3675
3676 #ifdef  __BIG_ENDIAN
3677         /*
3678          * The device by default set to a big endian format, so a
3679          * big endian driver need not set anything.
3680          */
3681         val64 |= (SWAPPER_CTRL_TXP_FE |
3682                  SWAPPER_CTRL_TXP_SE |
3683                  SWAPPER_CTRL_TXD_R_FE |
3684                  SWAPPER_CTRL_TXD_W_FE |
3685                  SWAPPER_CTRL_TXF_R_FE |
3686                  SWAPPER_CTRL_RXD_R_FE |
3687                  SWAPPER_CTRL_RXD_W_FE |
3688                  SWAPPER_CTRL_RXF_W_FE |
3689                  SWAPPER_CTRL_XMSI_FE |
3690                  SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
3691         if (sp->config.intr_type == INTA)
3692                 val64 |= SWAPPER_CTRL_XMSI_SE;
3693         writeq(val64, &bar0->swapper_ctrl);
3694 #else
3695         /*
3696          * Initially we enable all bits to make it accessible by the
3697          * driver, then we selectively enable only those bits that
3698          * we want to set.
3699          */
3700         val64 |= (SWAPPER_CTRL_TXP_FE |
3701                  SWAPPER_CTRL_TXP_SE |
3702                  SWAPPER_CTRL_TXD_R_FE |
3703                  SWAPPER_CTRL_TXD_R_SE |
3704                  SWAPPER_CTRL_TXD_W_FE |
3705                  SWAPPER_CTRL_TXD_W_SE |
3706                  SWAPPER_CTRL_TXF_R_FE |
3707                  SWAPPER_CTRL_RXD_R_FE |
3708                  SWAPPER_CTRL_RXD_R_SE |
3709                  SWAPPER_CTRL_RXD_W_FE |
3710                  SWAPPER_CTRL_RXD_W_SE |
3711                  SWAPPER_CTRL_RXF_W_FE |
3712                  SWAPPER_CTRL_XMSI_FE |
3713                  SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
3714         if (sp->config.intr_type == INTA)
3715                 val64 |= SWAPPER_CTRL_XMSI_SE;
3716         writeq(val64, &bar0->swapper_ctrl);
3717 #endif
3718         val64 = readq(&bar0->swapper_ctrl);
3719
3720         /*
3721          * Verifying if endian settings are accurate by reading a
3722          * feedback register.
3723          */
3724         val64 = readq(&bar0->pif_rd_swapper_fb);
3725         if (val64 != 0x0123456789ABCDEFULL) {
3726                 /* Endian settings are incorrect, calls for another dekko. */
3727                 DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
3728                           dev->name);
3729                 DBG_PRINT(ERR_DBG, "feedback read %llx\n",
3730                           (unsigned long long) val64);
3731                 return FAILURE;
3732         }
3733
3734         return SUCCESS;
3735 }
3736
3737 static int wait_for_msix_trans(struct s2io_nic *nic, int i)
3738 {
3739         struct XENA_dev_config __iomem *bar0 = nic->bar0;
3740         u64 val64;
3741         int ret = 0, cnt = 0;
3742
3743         do {
3744                 val64 = readq(&bar0->xmsi_access);
3745                 if (!(val64 & s2BIT(15)))
3746                         break;
3747                 mdelay(1);
3748                 cnt++;
3749         } while(cnt < 5);
3750         if (cnt == 5) {
3751                 DBG_PRINT(ERR_DBG, "XMSI # %d Access failed\n", i);
3752                 ret = 1;
3753         }
3754
3755         return ret;
3756 }
3757
/*
 * restore_xmsi_data - rewrite cached MSI-X address/data pairs to the NIC.
 * @nic: device private structure.
 *
 * Xframe II only; Xframe I does not use this path.  For each vector the
 * previously saved msix_info[] address/data values are written through
 * the indirect xmsi_address/xmsi_data registers and committed via
 * xmsi_access.  Failures are logged and the remaining vectors are still
 * attempted.
 */
static void restore_xmsi_data(struct s2io_nic *nic)
{
        struct XENA_dev_config __iomem *bar0 = nic->bar0;
        u64 val64;
        int i, msix_index;


        if (nic->device_type == XFRAME_I_DEVICE)
                return;

        for (i=0; i < MAX_REQUESTED_MSI_X; i++) {
                /* Vector 0 is the alarm vector; ring vectors are spaced 8
                 * apart starting at 1 (matches s2io_enable_msi_x). */
                msix_index = (i) ? ((i-1) * 8 + 1): 0;
                writeq(nic->msix_info[i].addr, &bar0->xmsi_address);
                writeq(nic->msix_info[i].data, &bar0->xmsi_data);
                /* s2BIT(7)|s2BIT(15) + vector select commits the write.
                 * NOTE(review): exact bit semantics inferred from usage;
                 * confirm against the Xena register spec. */
                val64 = (s2BIT(7) | s2BIT(15) | vBIT(msix_index, 26, 6));
                writeq(val64, &bar0->xmsi_access);
                if (wait_for_msix_trans(nic, msix_index)) {
                        DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
                        continue;
                }
        }
}
3780
/*
 * store_xmsi_data - cache the NIC's programmed MSI-X address/data pairs.
 * @nic: device private structure.
 *
 * Xframe II only.  The inverse of restore_xmsi_data(): each vector's
 * address/data is read back through the indirect xmsi_access mechanism
 * and saved into msix_info[] so it can be restored after a reset.
 * Entries that read back as zero address or data are left untouched.
 */
static void store_xmsi_data(struct s2io_nic *nic)
{
        struct XENA_dev_config __iomem *bar0 = nic->bar0;
        u64 val64, addr, data;
        int i, msix_index;

        if (nic->device_type == XFRAME_I_DEVICE)
                return;

        /* Store and display */
        for (i=0; i < MAX_REQUESTED_MSI_X; i++) {
                /* Same vector numbering as restore_xmsi_data(). */
                msix_index = (i) ? ((i-1) * 8 + 1): 0;
                /* Read request: strobe without the write bit s2BIT(7). */
                val64 = (s2BIT(15) | vBIT(msix_index, 26, 6));
                writeq(val64, &bar0->xmsi_access);
                if (wait_for_msix_trans(nic, msix_index)) {
                        DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
                        continue;
                }
                addr = readq(&bar0->xmsi_address);
                data = readq(&bar0->xmsi_data);
                if (addr && data) {
                        nic->msix_info[i].addr = addr;
                        nic->msix_info[i].data = data;
                }
        }
}
3807
3808 static int s2io_enable_msi_x(struct s2io_nic *nic)
3809 {
3810         struct XENA_dev_config __iomem *bar0 = nic->bar0;
3811         u64 rx_mat;
3812         u16 msi_control; /* Temp variable */
3813         int ret, i, j, msix_indx = 1;
3814
3815         nic->entries = kmalloc(nic->num_entries * sizeof(struct msix_entry),
3816                                GFP_KERNEL);
3817         if (!nic->entries) {
3818                 DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n", \
3819                         __FUNCTION__);
3820                 nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
3821                 return -ENOMEM;
3822         }
3823         nic->mac_control.stats_info->sw_stat.mem_allocated
3824                 += (nic->num_entries * sizeof(struct msix_entry));
3825
3826         memset(nic->entries, 0, nic->num_entries * sizeof(struct msix_entry));
3827
3828         nic->s2io_entries =
3829                 kmalloc(nic->num_entries * sizeof(struct s2io_msix_entry),
3830                                    GFP_KERNEL);
3831         if (!nic->s2io_entries) {
3832                 DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n",
3833                         __FUNCTION__);
3834                 nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
3835                 kfree(nic->entries);
3836                 nic->mac_control.stats_info->sw_stat.mem_freed
3837                         += (nic->num_entries * sizeof(struct msix_entry));
3838                 return -ENOMEM;
3839         }
3840          nic->mac_control.stats_info->sw_stat.mem_allocated
3841                 += (nic->num_entries * sizeof(struct s2io_msix_entry));
3842         memset(nic->s2io_entries, 0,
3843                 nic->num_entries * sizeof(struct s2io_msix_entry));
3844
3845         nic->entries[0].entry = 0;
3846         nic->s2io_entries[0].entry = 0;
3847         nic->s2io_entries[0].in_use = MSIX_FLG;
3848         nic->s2io_entries[0].type = MSIX_ALARM_TYPE;
3849         nic->s2io_entries[0].arg = &nic->mac_control.fifos;
3850
3851         for (i = 1; i < nic->num_entries; i++) {
3852                 nic->entries[i].entry = ((i - 1) * 8) + 1;
3853                 nic->s2io_entries[i].entry = ((i - 1) * 8) + 1;
3854                 nic->s2io_entries[i].arg = NULL;
3855                 nic->s2io_entries[i].in_use = 0;
3856         }
3857
3858         rx_mat = readq(&bar0->rx_mat);
3859         for (j = 0; j < nic->config.rx_ring_num; j++) {
3860                 rx_mat |= RX_MAT_SET(j, msix_indx);
3861                 nic->s2io_entries[j+1].arg = &nic->mac_control.rings[j];
3862                 nic->s2io_entries[j+1].type = MSIX_RING_TYPE;
3863                 nic->s2io_entries[j+1].in_use = MSIX_FLG;
3864                 msix_indx += 8;
3865         }
3866         writeq(rx_mat, &bar0->rx_mat);
3867         readq(&bar0->rx_mat);
3868
3869         ret = pci_enable_msix(nic->pdev, nic->entries, nic->num_entries);
3870         /* We fail init if error or we get less vectors than min required */
3871         if (ret) {
3872                 DBG_PRINT(ERR_DBG, "%s: Enabling MSIX failed\n", nic->dev->name);
3873                 kfree(nic->entries);
3874                 nic->mac_control.stats_info->sw_stat.mem_freed
3875                         += (nic->num_entries * sizeof(struct msix_entry));
3876                 kfree(nic->s2io_entries);
3877                 nic->mac_control.stats_info->sw_stat.mem_freed
3878                         += (nic->num_entries * sizeof(struct s2io_msix_entry));
3879                 nic->entries = NULL;
3880                 nic->s2io_entries = NULL;
3881                 return -ENOMEM;
3882         }
3883
3884         /*
3885          * To enable MSI-X, MSI also needs to be enabled, due to a bug
3886          * in the herc NIC. (Temp change, needs to be removed later)
3887          */
3888         pci_read_config_word(nic->pdev, 0x42, &msi_control);
3889         msi_control |= 0x1; /* Enable MSI */
3890         pci_write_config_word(nic->pdev, 0x42, msi_control);
3891
3892         return 0;
3893 }
3894
3895 /* Handle software interrupt used during MSI(X) test */
3896 static irqreturn_t s2io_test_intr(int irq, void *dev_id)
3897 {
3898         struct s2io_nic *sp = dev_id;
3899
3900         sp->msi_detected = 1;
3901         wake_up(&sp->msi_wait);
3902
3903         return IRQ_HANDLED;
3904 }
3905
/* Test interrupt path by forcing a software IRQ */
static int s2io_test_msi(struct s2io_nic *sp)
{
        struct pci_dev *pdev = sp->pdev;
        struct XENA_dev_config __iomem *bar0 = sp->bar0;
        int err;
        u64 val64, saved64;

        /* Hook the test handler onto vector 1 (first ring vector). */
        err = request_irq(sp->entries[1].vector, s2io_test_intr, 0,
                        sp->name, sp);
        if (err) {
                DBG_PRINT(ERR_DBG, "%s: PCI %s: cannot assign irq %d\n",
                       sp->dev->name, pci_name(pdev), pdev->irq);
                return err;
        }

        init_waitqueue_head (&sp->msi_wait);
        sp->msi_detected = 0;

        /* Program the scheduled-interrupt timer to fire a one-shot
         * interrupt routed to MSI vector 1; the original register value
         * is saved and restored afterwards. */
        saved64 = val64 = readq(&bar0->scheduled_int_ctrl);
        val64 |= SCHED_INT_CTRL_ONE_SHOT;
        val64 |= SCHED_INT_CTRL_TIMER_EN;
        val64 |= SCHED_INT_CTRL_INT2MSI(1);
        writeq(val64, &bar0->scheduled_int_ctrl);

        /* Give the interrupt up to HZ/10 jiffies to arrive. */
        wait_event_timeout(sp->msi_wait, sp->msi_detected, HZ/10);

        if (!sp->msi_detected) {
                /* MSI(X) test failed, go back to INTx mode */
                DBG_PRINT(ERR_DBG, "%s: PCI %s: No interrupt was generated "
                        "using MSI(X) during test\n", sp->dev->name,
                        pci_name(pdev));

                err = -EOPNOTSUPP;
        }

        free_irq(sp->entries[1].vector, sp);

        writeq(saved64, &bar0->scheduled_int_ctrl);

        return err;
}
3948
3949 static void remove_msix_isr(struct s2io_nic *sp)
3950 {
3951         int i;
3952         u16 msi_control;
3953
3954         for (i = 0; i < sp->num_entries; i++) {
3955                 if (sp->s2io_entries[i].in_use ==
3956                         MSIX_REGISTERED_SUCCESS) {
3957                         int vector = sp->entries[i].vector;
3958                         void *arg = sp->s2io_entries[i].arg;
3959                         free_irq(vector, arg);
3960                 }
3961         }
3962
3963         kfree(sp->entries);
3964         kfree(sp->s2io_entries);
3965         sp->entries = NULL;
3966         sp->s2io_entries = NULL;
3967
3968         pci_read_config_word(sp->pdev, 0x42, &msi_control);
3969         msi_control &= 0xFFFE; /* Disable MSI */
3970         pci_write_config_word(sp->pdev, 0x42, msi_control);
3971
3972         pci_disable_msix(sp->pdev);
3973 }
3974
3975 static void remove_inta_isr(struct s2io_nic *sp)
3976 {
3977         struct net_device *dev = sp->dev;
3978
3979         free_irq(sp->pdev->irq, dev);
3980 }
3981
3982 /* ********************************************************* *
3983  * Functions defined below concern the OS part of the driver *
3984  * ********************************************************* */
3985
3986 /**
3987  *  s2io_open - open entry point of the driver
3988  *  @dev : pointer to the device structure.
3989  *  Description:
3990  *  This function is the open entry point of the driver. It mainly calls a
3991  *  function to allocate Rx buffers and inserts them into the buffer
3992  *  descriptors and then enables the Rx part of the NIC.
3993  *  Return value:
3994  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3995  *   file on failure.
3996  */
3997
3998 static int s2io_open(struct net_device *dev)
3999 {
4000         struct s2io_nic *sp = dev->priv;
4001         int err = 0;
4002
4003         /*
4004          * Make sure you have link off by default every time
4005          * Nic is initialized
4006          */
4007         netif_carrier_off(dev);
4008         sp->last_link_state = 0;
4009
4010         /* Initialize H/W and enable interrupts */
4011         err = s2io_card_up(sp);
4012         if (err) {
4013                 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
4014                           dev->name);
4015                 goto hw_init_failed;
4016         }
4017
4018         if (do_s2io_prog_unicast(dev, dev->dev_addr) == FAILURE) {
4019                 DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
4020                 s2io_card_down(sp);
4021                 err = -ENODEV;
4022                 goto hw_init_failed;
4023         }
4024         s2io_start_all_tx_queue(sp);
4025         return 0;
4026
4027 hw_init_failed:
4028         if (sp->config.intr_type == MSI_X) {
4029                 if (sp->entries) {
4030                         kfree(sp->entries);
4031                         sp->mac_control.stats_info->sw_stat.mem_freed
4032                         += (sp->num_entries * sizeof(struct msix_entry));
4033                 }
4034                 if (sp->s2io_entries) {
4035                         kfree(sp->s2io_entries);
4036                         sp->mac_control.stats_info->sw_stat.mem_freed
4037                         += (sp->num_entries * sizeof(struct s2io_msix_entry));
4038                 }
4039         }
4040         return err;
4041 }
4042
4043 /**
4044  *  s2io_close -close entry point of the driver
4045  *  @dev : device pointer.
4046  *  Description:
4047  *  This is the stop entry point of the driver. It needs to undo exactly
4048  *  whatever was done by the open entry point,thus it's usually referred to
4049  *  as the close function.Among other things this function mainly stops the
4050  *  Rx side of the NIC and frees all the Rx buffers in the Rx rings.
4051  *  Return value:
4052  *  0 on success and an appropriate (-)ve integer as defined in errno.h
4053  *  file on failure.
4054  */
4055
4056 static int s2io_close(struct net_device *dev)
4057 {
4058         struct s2io_nic *sp = dev->priv;
4059         struct config_param *config = &sp->config;
4060         u64 tmp64;
4061         int offset;
4062
4063         /* Return if the device is already closed               *
4064         *  Can happen when s2io_card_up failed in change_mtu    *
4065         */
4066         if (!is_s2io_card_up(sp))
4067                 return 0;
4068
4069         s2io_stop_all_tx_queue(sp);
4070         /* delete all populated mac entries */
4071         for (offset = 1; offset < config->max_mc_addr; offset++) {
4072                 tmp64 = do_s2io_read_unicast_mc(sp, offset);
4073                 if (tmp64 != S2IO_DISABLE_MAC_ENTRY)
4074                         do_s2io_delete_unicast_mc(sp, tmp64);
4075         }
4076
4077         s2io_card_down(sp);
4078
4079         return 0;
4080 }
4081
4082 /**
4083  *  s2io_xmit - Tx entry point of te driver
4084  *  @skb : the socket buffer containing the Tx data.
4085  *  @dev : device pointer.
4086  *  Description :
4087  *  This function is the Tx entry point of the driver. S2IO NIC supports
4088  *  certain protocol assist features on Tx side, namely  CSO, S/G, LSO.
4089  *  NOTE: when device cant queue the pkt,just the trans_start variable will
4090  *  not be upadted.
4091  *  Return value:
4092  *  0 on success & 1 on failure.
4093  */
4094
4095 static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
4096 {
4097         struct s2io_nic *sp = dev->priv;
4098         u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
4099         register u64 val64;
4100         struct TxD *txdp;
4101         struct TxFIFO_element __iomem *tx_fifo;
4102         unsigned long flags = 0;
4103         u16 vlan_tag = 0;
4104         struct fifo_info *fifo = NULL;
4105         struct mac_info *mac_control;
4106         struct config_param *config;
4107         int do_spin_lock = 1;
4108         int offload_type;
4109         int enable_per_list_interrupt = 0;
4110         struct swStat *stats = &sp->mac_control.stats_info->sw_stat;
4111
4112         mac_control = &sp->mac_control;
4113         config = &sp->config;
4114
4115         DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name);
4116
4117         if (unlikely(skb->len <= 0)) {
4118                 DBG_PRINT(TX_DBG, "%s:Buffer has no data..\n", dev->name);
4119                 dev_kfree_skb_any(skb);
4120                 return 0;
4121         }
4122
4123         if (!is_s2io_card_up(sp)) {
4124                 DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
4125                           dev->name);
4126                 dev_kfree_skb(skb);
4127                 return 0;
4128         }
4129
4130         queue = 0;
4131         if (sp->vlgrp && vlan_tx_tag_present(skb))
4132                 vlan_tag = vlan_tx_tag_get(skb);
4133         if (sp->config.tx_steering_type == TX_DEFAULT_STEERING) {
4134                 if (skb->protocol == htons(ETH_P_IP)) {
4135                         struct iphdr *ip;
4136                         struct tcphdr *th;
4137                         ip = ip_hdr(skb);
4138
4139                         if ((ip->frag_off & htons(IP_OFFSET|IP_MF)) == 0) {
4140                                 th = (struct tcphdr *)(((unsigned char *)ip) +
4141                                                 ip->ihl*4);
4142
4143                                 if (ip->protocol == IPPROTO_TCP) {
4144                                         queue_len = sp->total_tcp_fifos;
4145                                         queue = (ntohs(th->source) +
4146                                                         ntohs(th->dest)) &
4147                                             sp->fifo_selector[queue_len - 1];
4148                                         if (queue >= queue_len)
4149                                                 queue = queue_len - 1;
4150                                 } else if (ip->protocol == IPPROTO_UDP) {
4151                                         queue_len = sp->total_udp_fifos;
4152                                         queue = (ntohs(th->source) +
4153                                                         ntohs(th->dest)) &
4154                                             sp->fifo_selector[queue_len - 1];
4155                                         if (queue >= queue_len)
4156                                                 queue = queue_len - 1;
4157                                         queue += sp->udp_fifo_idx;
4158                                         if (skb->len > 1024)
4159                                                 enable_per_list_interrupt = 1;
4160                                         do_spin_lock = 0;
4161                                 }
4162                         }
4163                 }
4164         } else if (sp->config.tx_steering_type == TX_PRIORITY_STEERING)
4165                 /* get fifo number based on skb->priority value */
4166                 queue = config->fifo_mapping
4167                                         [skb->priority & (MAX_TX_FIFOS - 1)];
4168         fifo = &mac_control->fifos[queue];
4169
4170         if (do_spin_lock)
4171                 spin_lock_irqsave(&fifo->tx_lock, flags);
4172         else {
4173                 if (unlikely(!spin_trylock_irqsave(&fifo->tx_lock, flags)))
4174                         return NETDEV_TX_LOCKED;
4175         }
4176
4177         if (sp->config.multiq) {
4178                 if (__netif_subqueue_stopped(dev, fifo->fifo_no)) {
4179                         spin_unlock_irqrestore(&fifo->tx_lock, flags);
4180                         return NETDEV_TX_BUSY;
4181                 }
4182         } else if (unlikely(fifo->queue_state == FIFO_QUEUE_STOP)) {
4183                 if (netif_queue_stopped(dev)) {
4184                         spin_unlock_irqrestore(&fifo->tx_lock, flags);
4185                         return NETDEV_TX_BUSY;
4186                 }
4187         }
4188
4189         put_off = (u16) fifo->tx_curr_put_info.offset;
4190         get_off = (u16) fifo->tx_curr_get_info.offset;
4191         txdp = (struct TxD *) fifo->list_info[put_off].list_virt_addr;
4192
4193         queue_len = fifo->tx_curr_put_info.fifo_len + 1;
4194         /* Avoid "put" pointer going beyond "get" pointer */
4195         if (txdp->Host_Control ||
4196                    ((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
4197                 DBG_PRINT(TX_DBG, "Error in xmit, No free TXDs.\n");
4198                 s2io_stop_tx_queue(sp, fifo->fifo_no);
4199                 dev_kfree_skb(skb);
4200                 spin_unlock_irqrestore(&fifo->tx_lock, flags);
4201                 return 0;
4202         }
4203
4204         offload_type = s2io_offload_type(skb);
4205         if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
4206                 txdp->Control_1 |= TXD_TCP_LSO_EN;
4207                 txdp->Control_1 |= TXD_TCP_LSO_MSS(s2io_tcp_mss(skb));
4208         }
4209         if (skb->ip_summed == CHECKSUM_PARTIAL) {
4210                 txdp->Control_2 |=
4211                     (TXD_TX_CKO_IPV4_EN | TXD_TX_CKO_TCP_EN |
4212                      TXD_TX_CKO_UDP_EN);
4213         }
4214         txdp->Control_1 |= TXD_GATHER_CODE_FIRST;
4215         txdp->Control_1 |= TXD_LIST_OWN_XENA;
4216         txdp->Control_2 |= TXD_INT_NUMBER(fifo->fifo_no);
4217         if (enable_per_list_interrupt)
4218                 if (put_off & (queue_len >> 5))
4219                         txdp->Control_2 |= TXD_INT_TYPE_PER_LIST;
4220         if (vlan_tag) {
4221                 txdp->Control_2 |= TXD_VLAN_ENABLE;
4222                 txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag);
4223         }
4224
4225         frg_len = skb->len - skb->data_len;
4226         if (offload_type == SKB_GSO_UDP) {
4227                 int ufo_size;
4228
4229                 ufo_size = s2io_udp_mss(skb);
4230                 ufo_size &= ~7;
4231                 txdp->Control_1 |= TXD_UFO_EN;
4232                 txdp->Control_1 |= TXD_UFO_MSS(ufo_size);
4233                 txdp->Control_1 |= TXD_BUFFER0_SIZE(8);
4234 #ifdef __BIG_ENDIAN
4235                 /* both variants do cpu_to_be64(be32_to_cpu(...)) */
4236                 fifo->ufo_in_band_v[put_off] =
4237                                 (__force u64)skb_shinfo(skb)->ip6_frag_id;
4238 #else
4239                 fifo->ufo_in_band_v[put_off] =
4240                                 (__force u64)skb_shinfo(skb)->ip6_frag_id << 32;
4241 #endif
4242                 txdp->Host_Control = (unsigned long)fifo->ufo_in_band_v;
4243                 txdp->Buffer_Pointer = pci_map_single(sp->pdev,
4244                                         fifo->ufo_in_band_v,
4245                                         sizeof(u64), PCI_DMA_TODEVICE);
4246                 if (pci_dma_mapping_error(txdp->Buffer_Pointer))
4247                         goto pci_map_failed;
4248                 txdp++;
4249         }
4250
4251         txdp->Buffer_Pointer = pci_map_single
4252             (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE);
4253         if (pci_dma_mapping_error(txdp->Buffer_Pointer))
4254                 goto pci_map_failed;
4255
4256         txdp->Host_Control = (unsigned long) skb;
4257         txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len);
4258         if (offload_type == SKB_GSO_UDP)
4259                 txdp->Control_1 |= TXD_UFO_EN;
4260
4261         frg_cnt = skb_shinfo(skb)->nr_frags;
4262         /* For fragmented SKB. */
4263         for (i = 0; i < frg_cnt; i++) {
4264                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4265                 /* A '0' length fragment will be ignored */
4266                 if (!frag->size)
4267                         continue;
4268                 txdp++;
4269                 txdp->Buffer_Pointer = (u64) pci_map_page
4270                     (sp->pdev, frag->page, frag->page_offset,
4271                      frag->size, PCI_DMA_TODEVICE);
4272                 txdp->Control_1 = TXD_BUFFER0_SIZE(frag->size);
4273                 if (offload_type == SKB_GSO_UDP)
4274                         txdp->Control_1 |= TXD_UFO_EN;
4275         }
4276         txdp->Control_1 |= TXD_GATHER_CODE_LAST;
4277
4278         if (offload_type == SKB_GSO_UDP)
4279                 frg_cnt++; /* as Txd0 was used for inband header */
4280
4281         tx_fifo = mac_control->tx_FIFO_start[queue];
4282         val64 = fifo->list_info[put_off].list_phy_addr;
4283         writeq(val64, &tx_fifo->TxDL_Pointer);
4284
4285         val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
4286                  TX_FIFO_LAST_LIST);
4287         if (offload_type)
4288                 val64 |= TX_FIFO_SPECIAL_FUNC;
4289
4290         writeq(val64, &tx_fifo->List_Control);
4291
4292         mmiowb();
4293
4294         put_off++;
4295         if (put_off == fifo->tx_curr_put_info.fifo_len + 1)
4296                 put_off = 0;
4297         fifo->tx_curr_put_info.offset = put_off;
4298
4299         /* Avoid "put" pointer going beyond "get" pointer */
4300         if (((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
4301                 sp->mac_control.stats_info->sw_stat.fifo_full_cnt++;
4302                 DBG_PRINT(TX_DBG,
4303                           "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
4304                           put_off, get_off);
4305                 s2io_stop_tx_queue(sp, fifo->fifo_no);
4306         }
4307         mac_control->stats_info->sw_stat.mem_allocated += skb->truesize;
4308         dev->trans_start = jiffies;
4309         spin_unlock_irqrestore(&fifo->tx_lock, flags);
4310
4311         if (sp->config.intr_type == MSI_X)
4312                 tx_intr_handler(fifo);
4313
4314         return 0;
4315 pci_map_failed:
4316         stats->pci_map_fail_cnt++;
4317         s2io_stop_tx_queue(sp, fifo->fifo_no);
4318         stats->mem_freed += skb->truesize;
4319         dev_kfree_skb(skb);
4320         spin_unlock_irqrestore(&fifo->tx_lock, flags);
4321         return 0;
4322 }
4323
4324 static void
4325 s2io_alarm_handle(unsigned long data)
4326 {
4327         struct s2io_nic *sp = (struct s2io_nic *)data;
4328         struct net_device *dev = sp->dev;
4329
4330         s2io_handle_errors(dev);
4331         mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
4332 }
4333
/*
 * s2io_msix_ring_handle - MSI-X interrupt handler for one rx ring.
 * @irq:    vector number (unused).
 * @dev_id: the ring_info this vector was registered with.
 *
 * In NAPI mode the ring's interrupt is masked via the per-vector byte
 * of xmsi_mask_reg and polling is scheduled; otherwise the ring is
 * drained and its buffers replenished inline.
 */
static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id)
{
        struct ring_info *ring = (struct ring_info *)dev_id;
        struct s2io_nic *sp = ring->nic;
        struct XENA_dev_config __iomem *bar0 = sp->bar0;
        struct net_device *dev = sp->dev;

        if (unlikely(!is_s2io_card_up(sp)))
                return IRQ_HANDLED;

        if (sp->config.napi) {
                u8 __iomem *addr = NULL;
                u8 val8 = 0;

                /* Mask this ring's vector; byte layout within
                 * xmsi_mask_reg is reversed relative to ring number.
                 * Ring 0 shares its byte with the alarm vector, hence
                 * the 0x7f mask that keeps the alarm bit enabled. */
                addr = (u8 __iomem *)&bar0->xmsi_mask_reg;
                addr += (7 - ring->ring_no);
                val8 = (ring->ring_no == 0) ? 0x7f : 0xff;
                writeb(val8, addr);
                /* Read back to flush the posted write before scheduling. */
                val8 = readb(addr);
                netif_rx_schedule(dev, &ring->napi);
        } else {
                rx_intr_handler(ring, 0);
                s2io_chk_rx_buffers(ring);
        }

        return IRQ_HANDLED;
}
4361
/*
 * s2io_msix_fifo_handle - MSI-X interrupt handler for the tx fifos.
 * @irq:    vector number (unused).
 * @dev_id: pointer to the first element of the fifo_info array.
 *
 * Masks all interrupts, acknowledges any pending tx traffic interrupt,
 * reaps completions on every fifo, then restores the saved interrupt
 * mask.  Returns IRQ_NONE only if the card is down.
 */
static irqreturn_t s2io_msix_fifo_handle(int irq, void *dev_id)
{
        int i;
        struct fifo_info *fifos = (struct fifo_info *)dev_id;
        struct s2io_nic *sp = fifos->nic;
        struct XENA_dev_config __iomem *bar0 = sp->bar0;
        struct config_param *config  = &sp->config;
        u64 reason;

        if (unlikely(!is_s2io_card_up(sp)))
                return IRQ_NONE;

        reason = readq(&bar0->general_int_status);
        if (unlikely(reason == S2IO_MINUS_ONE))
                /* Nothing much can be done. Get out */
                return IRQ_HANDLED;

        /* Mask everything while servicing. */
        writeq(S2IO_MINUS_ONE, &bar0->general_int_mask);

        if (reason & GEN_INTR_TXTRAFFIC)
                writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);

        for (i = 0; i < config->tx_fifo_num; i++)
                tx_intr_handler(&fifos[i]);

        /* Restore the driver's interrupt mask; the readl flushes the
         * posted write before returning from the handler. */
        writeq(sp->general_int_mask, &bar0->general_int_mask);
        readl(&bar0->general_int_status);

        return IRQ_HANDLED;
}
4392
/*
 * s2io_txpic_intr_handle - service PIC/GPIO interrupts (link-state changes).
 * @sp: device private structure.
 *
 * Reads the GPIO interrupt register and, depending on which of the
 * link-up/link-down bits are set, re-evaluates the link, enables the
 * adapter and LED on link-up, or tears the link down and turns the LED
 * off.  The interrupt masks are flipped each time so that only the
 * "opposite" transition remains unmasked.
 */
static void s2io_txpic_intr_handle(struct s2io_nic *sp)
{
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64;

	val64 = readq(&bar0->pic_int_status);
	if (val64 & PIC_INT_GPIO) {
		val64 = readq(&bar0->gpio_int_reg);
		if ((val64 & GPIO_INT_REG_LINK_DOWN) &&
		    (val64 & GPIO_INT_REG_LINK_UP)) {
			/*
			 * Both transitions pending: this is an unstable
			 * state, so write both bits back (write-1-to-clear)
			 * and unmask both interrupts so the adapter can
			 * re-evaluate the link state.
			 */
			val64 |=  GPIO_INT_REG_LINK_DOWN;
			val64 |= GPIO_INT_REG_LINK_UP;
			writeq(val64, &bar0->gpio_int_reg);
			val64 = readq(&bar0->gpio_int_mask);
			val64 &= ~(GPIO_INT_MASK_LINK_UP |
				   GPIO_INT_MASK_LINK_DOWN);
			writeq(val64, &bar0->gpio_int_mask);
		}
		else if (val64 & GPIO_INT_REG_LINK_UP) {
			/*
			 * NOTE(review): adapter_status is read but the value
			 * is discarded — presumably the read itself latches
			 * or clears state; confirm against the Xframe spec.
			 */
			val64 = readq(&bar0->adapter_status);
				/* Enable Adapter */
			val64 = readq(&bar0->adapter_control);
			val64 |= ADAPTER_CNTL_EN;
			writeq(val64, &bar0->adapter_control);
			val64 |= ADAPTER_LED_ON;
			writeq(val64, &bar0->adapter_control);
			if (!sp->device_enabled_once)
				sp->device_enabled_once = 1;

			s2io_link(sp, LINK_UP);
			/*
			 * unmask link down interrupt and mask link-up
			 * intr
			 */
			val64 = readq(&bar0->gpio_int_mask);
			val64 &= ~GPIO_INT_MASK_LINK_DOWN;
			val64 |= GPIO_INT_MASK_LINK_UP;
			writeq(val64, &bar0->gpio_int_mask);

		}else if (val64 & GPIO_INT_REG_LINK_DOWN) {
			/* Status read discarded here too — see note above. */
			val64 = readq(&bar0->adapter_status);
			s2io_link(sp, LINK_DOWN);
			/* Link is down so unmask link up interrupt */
			val64 = readq(&bar0->gpio_int_mask);
			val64 &= ~GPIO_INT_MASK_LINK_UP;
			val64 |= GPIO_INT_MASK_LINK_DOWN;
			writeq(val64, &bar0->gpio_int_mask);

			/* turn off LED */
			val64 = readq(&bar0->adapter_control);
			val64 = val64 &(~ADAPTER_LED_ON);
			writeq(val64, &bar0->adapter_control);
		}
	}
	/* Final read — presumably flushes the posted mask writes. */
	val64 = readq(&bar0->gpio_int_mask);
}
4453
4454 /**
4455  *  do_s2io_chk_alarm_bit - Check for alarm and incrment the counter
4456  *  @value: alarm bits
4457  *  @addr: address value
4458  *  @cnt: counter variable
4459  *  Description: Check for alarm and increment the counter
4460  *  Return Value:
4461  *  1 - if alarm bit set
4462  *  0 - if alarm bit is not set
4463  */
4464 static int do_s2io_chk_alarm_bit(u64 value, void __iomem * addr,
4465                           unsigned long long *cnt)
4466 {
4467         u64 val64;
4468         val64 = readq(addr);
4469         if ( val64 & value ) {
4470                 writeq(val64, addr);
4471                 (*cnt)++;
4472                 return 1;
4473         }
4474         return 0;
4475
4476 }
4477
/**
 *  s2io_handle_errors - Xframe error indication handler
 *  @dev_id: pointer to the net_device (passed as void * by callers)
 *  Description: Handle alarms such as loss of link, single or
 *  double ECC errors, critical and serious errors.  Walks every
 *  error/alarm register block in a fixed order; any alarm classed as
 *  fatal jumps to the reset path, which stops tx and schedules a
 *  soft reset of the adapter.  Non-fatal alarms are only counted.
 *  Return Value:
 *  NONE
 */
static void s2io_handle_errors(void * dev_id)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct s2io_nic *sp = dev->priv;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 temp64 = 0,val64=0;
	int i = 0;

	struct swStat *sw_stat = &sp->mac_control.stats_info->sw_stat;
	struct xpakStat *stats = &sp->mac_control.stats_info->xpak_stat;

	/* Nothing to do if the card is down or has fallen off the bus. */
	if (!is_s2io_card_up(sp))
		return;

	if (pci_channel_offline(sp->pdev))
		return;

	/* Ring-full counters are re-accumulated from hardware below. */
	memset(&sw_stat->ring_full_cnt, 0,
		sizeof(sw_stat->ring_full_cnt));

	/* Handling the XPAK counters update */
	if(stats->xpak_timer_count < 72000) {
		/* waiting for an hour (72000 ticks at 2 ticks/second) */
		stats->xpak_timer_count++;
	} else {
		s2io_updt_xpak_counter(dev);
		/* reset the count to zero */
		stats->xpak_timer_count = 0;
	}

	/* Handling link status change error Intr */
	if (s2io_link_fault_indication(sp) == MAC_RMAC_ERR_TIMER) {
		val64 = readq(&bar0->mac_rmac_err_reg);
		/* Ack by writing the value back. */
		writeq(val64, &bar0->mac_rmac_err_reg);
		if (val64 & RMAC_LINK_STATE_CHANGE_INT)
			schedule_work(&sp->set_link_task);
	}

	/* In case of a serious error, the device will be Reset. */
	if (do_s2io_chk_alarm_bit(SERR_SOURCE_ANY, &bar0->serr_source,
				&sw_stat->serious_err_cnt))
		goto reset;

	/* Check for data parity error */
	if (do_s2io_chk_alarm_bit(GPIO_INT_REG_DP_ERR_INT, &bar0->gpio_int_reg,
				&sw_stat->parity_err_cnt))
		goto reset;

	/* Check for ring full counter (XframeII keeps 8 16-bit counters
	 * packed into two 64-bit registers). */
	if (sp->device_type == XFRAME_II_DEVICE) {
		val64 = readq(&bar0->ring_bump_counter1);
		for (i=0; i<4; i++) {
			temp64 = ( val64 & vBIT(0xFFFF,(i*16),16));
			temp64 >>= 64 - ((i+1)*16);
			sw_stat->ring_full_cnt[i] += temp64;
		}

		val64 = readq(&bar0->ring_bump_counter2);
		for (i=0; i<4; i++) {
			temp64 = ( val64 & vBIT(0xFFFF,(i*16),16));
			temp64 >>= 64 - ((i+1)*16);
			 sw_stat->ring_full_cnt[i+4] += temp64;
		}
	}

	val64 = readq(&bar0->txdma_int_status);
	/*check for pfc_err*/
	if (val64 & TXDMA_PFC_INT) {
		if (do_s2io_chk_alarm_bit(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM|
				PFC_MISC_0_ERR | PFC_MISC_1_ERR|
				PFC_PCIX_ERR, &bar0->pfc_err_reg,
				&sw_stat->pfc_err_cnt))
			goto reset;
		/* Single-bit ECC errors are counted but non-fatal. */
		do_s2io_chk_alarm_bit(PFC_ECC_SG_ERR, &bar0->pfc_err_reg,
				&sw_stat->pfc_err_cnt);
	}

	/*check for tda_err*/
	if (val64 & TXDMA_TDA_INT) {
		if(do_s2io_chk_alarm_bit(TDA_Fn_ECC_DB_ERR | TDA_SM0_ERR_ALARM |
				TDA_SM1_ERR_ALARM, &bar0->tda_err_reg,
				&sw_stat->tda_err_cnt))
			goto reset;
		do_s2io_chk_alarm_bit(TDA_Fn_ECC_SG_ERR | TDA_PCIX_ERR,
				&bar0->tda_err_reg, &sw_stat->tda_err_cnt);
	}
	/*check for pcc_err*/
	if (val64 & TXDMA_PCC_INT) {
		if (do_s2io_chk_alarm_bit(PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM
				| PCC_N_SERR | PCC_6_COF_OV_ERR
				| PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR
				| PCC_7_LSO_OV_ERR | PCC_FB_ECC_DB_ERR
				| PCC_TXB_ECC_DB_ERR, &bar0->pcc_err_reg,
				&sw_stat->pcc_err_cnt))
			goto reset;
		do_s2io_chk_alarm_bit(PCC_FB_ECC_SG_ERR | PCC_TXB_ECC_SG_ERR,
				&bar0->pcc_err_reg, &sw_stat->pcc_err_cnt);
	}

	/*check for tti_err*/
	if (val64 & TXDMA_TTI_INT) {
		if (do_s2io_chk_alarm_bit(TTI_SM_ERR_ALARM, &bar0->tti_err_reg,
				&sw_stat->tti_err_cnt))
			goto reset;
		do_s2io_chk_alarm_bit(TTI_ECC_SG_ERR | TTI_ECC_DB_ERR,
				&bar0->tti_err_reg, &sw_stat->tti_err_cnt);
	}

	/*check for lso_err*/
	if (val64 & TXDMA_LSO_INT) {
		if (do_s2io_chk_alarm_bit(LSO6_ABORT | LSO7_ABORT
				| LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM,
				&bar0->lso_err_reg, &sw_stat->lso_err_cnt))
			goto reset;
		do_s2io_chk_alarm_bit(LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
				&bar0->lso_err_reg, &sw_stat->lso_err_cnt);
	}

	/*check for tpa_err*/
	if (val64 & TXDMA_TPA_INT) {
		if (do_s2io_chk_alarm_bit(TPA_SM_ERR_ALARM, &bar0->tpa_err_reg,
			&sw_stat->tpa_err_cnt))
			goto reset;
		do_s2io_chk_alarm_bit(TPA_TX_FRM_DROP, &bar0->tpa_err_reg,
			&sw_stat->tpa_err_cnt);
	}

	/*check for sm_err*/
	if (val64 & TXDMA_SM_INT) {
		if (do_s2io_chk_alarm_bit(SM_SM_ERR_ALARM, &bar0->sm_err_reg,
			&sw_stat->sm_err_cnt))
			goto reset;
	}

	val64 = readq(&bar0->mac_int_status);
	if (val64 & MAC_INT_STATUS_TMAC_INT) {
		if (do_s2io_chk_alarm_bit(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR,
				&bar0->mac_tmac_err_reg,
				&sw_stat->mac_tmac_err_cnt))
			goto reset;
		do_s2io_chk_alarm_bit(TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR
				| TMAC_DESC_ECC_SG_ERR | TMAC_DESC_ECC_DB_ERR,
				&bar0->mac_tmac_err_reg,
				&sw_stat->mac_tmac_err_cnt);
	}

	val64 = readq(&bar0->xgxs_int_status);
	if (val64 & XGXS_INT_STATUS_TXGXS) {
		if (do_s2io_chk_alarm_bit(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR,
				&bar0->xgxs_txgxs_err_reg,
				&sw_stat->xgxs_txgxs_err_cnt))
			goto reset;
		do_s2io_chk_alarm_bit(TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
				&bar0->xgxs_txgxs_err_reg,
				&sw_stat->xgxs_txgxs_err_cnt);
	}

	val64 = readq(&bar0->rxdma_int_status);
	if (val64 & RXDMA_INT_RC_INT_M) {
		if (do_s2io_chk_alarm_bit(RC_PRCn_ECC_DB_ERR | RC_FTC_ECC_DB_ERR
				| RC_PRCn_SM_ERR_ALARM |RC_FTC_SM_ERR_ALARM,
				&bar0->rc_err_reg, &sw_stat->rc_err_cnt))
			goto reset;
		do_s2io_chk_alarm_bit(RC_PRCn_ECC_SG_ERR | RC_FTC_ECC_SG_ERR
				| RC_RDA_FAIL_WR_Rn, &bar0->rc_err_reg,
				&sw_stat->rc_err_cnt);
		if (do_s2io_chk_alarm_bit(PRC_PCI_AB_RD_Rn | PRC_PCI_AB_WR_Rn
				| PRC_PCI_AB_F_WR_Rn, &bar0->prc_pcix_err_reg,
				&sw_stat->prc_pcix_err_cnt))
			goto reset;
		do_s2io_chk_alarm_bit(PRC_PCI_DP_RD_Rn | PRC_PCI_DP_WR_Rn
				| PRC_PCI_DP_F_WR_Rn, &bar0->prc_pcix_err_reg,
				&sw_stat->prc_pcix_err_cnt);
	}

	if (val64 & RXDMA_INT_RPA_INT_M) {
		if (do_s2io_chk_alarm_bit(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR,
				&bar0->rpa_err_reg, &sw_stat->rpa_err_cnt))
			goto reset;
		do_s2io_chk_alarm_bit(RPA_ECC_SG_ERR | RPA_ECC_DB_ERR,
				&bar0->rpa_err_reg, &sw_stat->rpa_err_cnt);
	}

	if (val64 & RXDMA_INT_RDA_INT_M) {
		if (do_s2io_chk_alarm_bit(RDA_RXDn_ECC_DB_ERR
				| RDA_FRM_ECC_DB_N_AERR | RDA_SM1_ERR_ALARM
				| RDA_SM0_ERR_ALARM | RDA_RXD_ECC_DB_SERR,
				&bar0->rda_err_reg, &sw_stat->rda_err_cnt))
			goto reset;
		do_s2io_chk_alarm_bit(RDA_RXDn_ECC_SG_ERR | RDA_FRM_ECC_SG_ERR
				| RDA_MISC_ERR | RDA_PCIX_ERR,
				&bar0->rda_err_reg, &sw_stat->rda_err_cnt);
	}

	if (val64 & RXDMA_INT_RTI_INT_M) {
		if (do_s2io_chk_alarm_bit(RTI_SM_ERR_ALARM, &bar0->rti_err_reg,
				&sw_stat->rti_err_cnt))
			goto reset;
		do_s2io_chk_alarm_bit(RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
				&bar0->rti_err_reg, &sw_stat->rti_err_cnt);
	}

	val64 = readq(&bar0->mac_int_status);
	if (val64 & MAC_INT_STATUS_RMAC_INT) {
		if (do_s2io_chk_alarm_bit(RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR,
				&bar0->mac_rmac_err_reg,
				&sw_stat->mac_rmac_err_cnt))
			goto reset;
		do_s2io_chk_alarm_bit(RMAC_UNUSED_INT|RMAC_SINGLE_ECC_ERR|
				RMAC_DOUBLE_ECC_ERR, &bar0->mac_rmac_err_reg,
				&sw_stat->mac_rmac_err_cnt);
	}

	val64 = readq(&bar0->xgxs_int_status);
	if (val64 & XGXS_INT_STATUS_RXGXS) {
		if (do_s2io_chk_alarm_bit(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR,
				&bar0->xgxs_rxgxs_err_reg,
				&sw_stat->xgxs_rxgxs_err_cnt))
			goto reset;
	}

	val64 = readq(&bar0->mc_int_status);
	if(val64 & MC_INT_STATUS_MC_INT) {
		if (do_s2io_chk_alarm_bit(MC_ERR_REG_SM_ERR, &bar0->mc_err_reg,
				&sw_stat->mc_err_cnt))
			goto reset;

		/* Handling Ecc errors */
		if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) {
			writeq(val64, &bar0->mc_err_reg);
			if (val64 & MC_ERR_REG_ECC_ALL_DBL) {
				sw_stat->double_ecc_errs++;
				if (sp->device_type != XFRAME_II_DEVICE) {
					/*
					 * Reset XframeI only if critical error
					 */
					if (val64 &
						(MC_ERR_REG_MIRI_ECC_DB_ERR_0 |
						MC_ERR_REG_MIRI_ECC_DB_ERR_1))
								goto reset;
					}
			} else
				sw_stat->single_ecc_errs++;
		}
	}
	return;

reset:
	/* Fatal alarm: halt tx and hand the device to the reset task. */
	s2io_stop_all_tx_queue(sp);
	schedule_work(&sp->rst_timer_task);
	sw_stat->soft_reset_cnt++;
	return;
}
4739
/**
 *  s2io_isr - ISR handler of the device .
 *  @irq: the irq of the device.
 *  @dev_id: a void pointer to the dev structure of the NIC.
 *  Description:  This function is the ISR handler of the device. It
 *  identifies the reason for the interrupt and calls the relevant
 *  service routines. As a contingency measure, this ISR allocates the
 *  recv buffers, if their numbers are below the panic value which is
 *  presently set to 25% of the original number of rcv buffers allocated.
 *  Return value:
 *   IRQ_HANDLED: will be returned if IRQ was handled by this routine
 *   IRQ_NONE: will be returned if interrupt is not from our device
 */
static irqreturn_t s2io_isr(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct s2io_nic *sp = dev->priv;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	int i;
	u64 reason = 0;
	struct mac_info *mac_control;
	struct config_param *config;

	/* Pretend we handled any irq's from a disconnected card */
	if (pci_channel_offline(sp->pdev))
		return IRQ_NONE;

	if (!is_s2io_card_up(sp))
		return IRQ_NONE;

	mac_control = &sp->mac_control;
	config = &sp->config;

	/*
	 * Identify the cause for interrupt and call the appropriate
	 * interrupt handler. Causes for the interrupt could be;
	 * 1. Rx of packet.
	 * 2. Tx complete.
	 * 3. Link down.
	 */
	reason = readq(&bar0->general_int_status);

	/* All-ones usually means the device has dropped off the bus. */
	if (unlikely(reason == S2IO_MINUS_ONE) ) {
		/* Nothing much can be done. Get out */
		return IRQ_HANDLED;
	}

	if (reason & (GEN_INTR_RXTRAFFIC |
		GEN_INTR_TXTRAFFIC | GEN_INTR_TXPIC))
	{
		/* Mask everything while this interrupt is serviced. */
		writeq(S2IO_MINUS_ONE, &bar0->general_int_mask);

		if (config->napi) {
			if (reason & GEN_INTR_RXTRAFFIC) {
				/* Hand rx to NAPI; mask and ack rx traffic,
				 * then flush the posted writes. */
				netif_rx_schedule(dev, &sp->napi);
				writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_mask);
				writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
				readl(&bar0->rx_traffic_int);
			}
		} else {
			/*
			 * rx_traffic_int reg is an R1 register, writing all 1's
			 * will ensure that the actual interrupt causing bit
			 * get's cleared and hence a read can be avoided.
			 */
			if (reason & GEN_INTR_RXTRAFFIC)
				writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);

			for (i = 0; i < config->rx_ring_num; i++)
				rx_intr_handler(&mac_control->rings[i], 0);
		}

		/*
		 * tx_traffic_int reg is an R1 register, writing all 1's
		 * will ensure that the actual interrupt causing bit get's
		 * cleared and hence a read can be avoided.
		 */
		if (reason & GEN_INTR_TXTRAFFIC)
			writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);

		for (i = 0; i < config->tx_fifo_num; i++)
			tx_intr_handler(&mac_control->fifos[i]);

		if (reason & GEN_INTR_TXPIC)
			s2io_txpic_intr_handle(sp);

		/*
		 * Reallocate the buffers from the interrupt handler itself.
		 */
		if (!config->napi) {
			for (i = 0; i < config->rx_ring_num; i++)
				s2io_chk_rx_buffers(&mac_control->rings[i]);
		}
		/* Restore the saved mask and flush the posted write. */
		writeq(sp->general_int_mask, &bar0->general_int_mask);
		readl(&bar0->general_int_status);

		return IRQ_HANDLED;

	}
	else if (!reason) {
		/* The interrupt was not raised by us */
		return IRQ_NONE;
	}

	return IRQ_HANDLED;
}
4846
4847 /**
4848  * s2io_updt_stats -
4849  */
4850 static void s2io_updt_stats(struct s2io_nic *sp)
4851 {
4852         struct XENA_dev_config __iomem *bar0 = sp->bar0;
4853         u64 val64;
4854         int cnt = 0;
4855
4856         if (is_s2io_card_up(sp)) {
4857                 /* Apprx 30us on a 133 MHz bus */
4858                 val64 = SET_UPDT_CLICKS(10) |
4859                         STAT_CFG_ONE_SHOT_EN | STAT_CFG_STAT_EN;
4860                 writeq(val64, &bar0->stat_cfg);
4861                 do {
4862                         udelay(100);
4863                         val64 = readq(&bar0->stat_cfg);
4864                         if (!(val64 & s2BIT(0)))
4865                                 break;
4866                         cnt++;
4867                         if (cnt == 5)
4868                                 break; /* Updt failed */
4869                 } while(1);
4870         }
4871 }
4872
4873 /**
4874  *  s2io_get_stats - Updates the device statistics structure.
4875  *  @dev : pointer to the device structure.
4876  *  Description:
4877  *  This function updates the device statistics structure in the s2io_nic
4878  *  structure and returns a pointer to the same.
4879  *  Return value:
4880  *  pointer to the updated net_device_stats structure.
4881  */
4882
4883 static struct net_device_stats *s2io_get_stats(struct net_device *dev)
4884 {
4885         struct s2io_nic *sp = dev->priv;
4886         struct mac_info *mac_control;
4887         struct config_param *config;
4888         int i;
4889
4890
4891         mac_control = &sp->mac_control;
4892         config = &sp->config;
4893
4894         /* Configure Stats for immediate updt */
4895         s2io_updt_stats(sp);
4896
4897         sp->stats.tx_packets =
4898                 le32_to_cpu(mac_control->stats_info->tmac_frms);
4899         sp->stats.tx_errors =
4900                 le32_to_cpu(mac_control->stats_info->tmac_any_err_frms);
4901         sp->stats.rx_errors =
4902                 le64_to_cpu(mac_control->stats_info->rmac_drop_frms);
4903         sp->stats.multicast =
4904                 le32_to_cpu(mac_control->stats_info->rmac_vld_mcst_frms);
4905         sp->stats.rx_length_errors =
4906                 le64_to_cpu(mac_control->stats_info->rmac_long_frms);
4907
4908         /* collect per-ring rx_packets and rx_bytes */
4909         sp->stats.rx_packets = sp->stats.rx_bytes = 0;
4910         for (i = 0; i < config->rx_ring_num; i++) {
4911                 sp->stats.rx_packets += mac_control->rings[i].rx_packets;
4912                 sp->stats.rx_bytes += mac_control->rings[i].rx_bytes;
4913         }
4914
4915         return (&sp->stats);
4916 }
4917
/**
 *  s2io_set_multicast - entry point for multicast address enable/disable.
 *  @dev : pointer to the device structure
 *  Description:
 *  This function is a driver entry point which gets called by the kernel
 *  whenever multicast addresses must be enabled/disabled. This also gets
 *  called to set/reset promiscuous mode. Depending on the device flag, we
 *  determine, if multicast address must be enabled or if promiscuous mode
 *  is to be disabled etc.  Each multicast entry is programmed into the
 *  RMAC address CAM via the rmac_addr_cmd_mem register, waiting for each
 *  command to complete before issuing the next.
 *  Return value:
 *  void.
 */

static void s2io_set_multicast(struct net_device *dev)
{
	int i, j, prev_cnt;
	struct dev_mc_list *mclist;
	struct s2io_nic *sp = dev->priv;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
	    0xfeffffffffffULL;
	u64 dis_addr = S2IO_DISABLE_MAC_ENTRY, mac_addr = 0;
	void __iomem *add;
	struct config_param *config = &sp->config;

	if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) {
		/*  Enable all Multicast addresses: program the catch-all
		 *  multicast address/mask into the last CAM slot. */
		writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac),
		       &bar0->rmac_addr_data0_mem);
		writeq(RMAC_ADDR_DATA1_MEM_MASK(mask),
		       &bar0->rmac_addr_data1_mem);
		val64 = RMAC_ADDR_CMD_MEM_WE |
		    RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
		    RMAC_ADDR_CMD_MEM_OFFSET(config->max_mc_addr - 1);
		writeq(val64, &bar0->rmac_addr_cmd_mem);
		/* Wait till command completes */
		wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
					RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
					S2IO_BIT_RESET);

		sp->m_cast_flg = 1;
		sp->all_multi_pos = config->max_mc_addr - 1;
	} else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) {
		/*  Disable all Multicast addresses: overwrite the
		 *  previously-used slot with the disable pattern. */
		writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
		       &bar0->rmac_addr_data0_mem);
		writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0),
		       &bar0->rmac_addr_data1_mem);
		val64 = RMAC_ADDR_CMD_MEM_WE |
		    RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
		    RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
		writeq(val64, &bar0->rmac_addr_cmd_mem);
		/* Wait till command completes */
		wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
					RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
					S2IO_BIT_RESET);

		sp->m_cast_flg = 0;
		sp->all_multi_pos = 0;
	}

	if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) {
		/*  Put the NIC into promiscuous mode.  mac_cfg is written
		 *  32 bits at a time, each half unlocked by the cfg key. */
		add = &bar0->mac_cfg;
		val64 = readq(&bar0->mac_cfg);
		val64 |= MAC_CFG_RMAC_PROM_ENABLE;

		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
		writel((u32) val64, add);
		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
		writel((u32) (val64 >> 32), (add + 4));

		if (vlan_tag_strip != 1) {
			val64 = readq(&bar0->rx_pa_cfg);
			val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
			writeq(val64, &bar0->rx_pa_cfg);
			vlan_strip_flag = 0;
		}

		val64 = readq(&bar0->mac_cfg);
		sp->promisc_flg = 1;
		DBG_PRINT(INFO_DBG, "%s: entered promiscuous mode\n",
			  dev->name);
	} else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
		/*  Remove the NIC from promiscuous mode */
		add = &bar0->mac_cfg;
		val64 = readq(&bar0->mac_cfg);
		val64 &= ~MAC_CFG_RMAC_PROM_ENABLE;

		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
		writel((u32) val64, add);
		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
		writel((u32) (val64 >> 32), (add + 4));

		if (vlan_tag_strip != 0) {
			val64 = readq(&bar0->rx_pa_cfg);
			val64 |= RX_PA_CFG_STRIP_VLAN_TAG;
			writeq(val64, &bar0->rx_pa_cfg);
			vlan_strip_flag = 1;
		}

		val64 = readq(&bar0->mac_cfg);
		sp->promisc_flg = 0;
		DBG_PRINT(INFO_DBG, "%s: left promiscuous mode\n",
			  dev->name);
	}

	/*  Update individual M_CAST address list */
	if ((!sp->m_cast_flg) && dev->mc_count) {
		/* Not enough CAM slots for the requested list. */
		if (dev->mc_count >
		    (config->max_mc_addr - config->max_mac_addr)) {
			DBG_PRINT(ERR_DBG, "%s: No more Rx filters ",
				  dev->name);
			DBG_PRINT(ERR_DBG, "can be added, please enable ");
			DBG_PRINT(ERR_DBG, "ALL_MULTI instead\n");
			return;
		}

		prev_cnt = sp->mc_addr_count;
		sp->mc_addr_count = dev->mc_count;

		/* Clear out the previous list of Mc in the H/W. */
		for (i = 0; i < prev_cnt; i++) {
			writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
			       &bar0->rmac_addr_data0_mem);
			writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
				&bar0->rmac_addr_data1_mem);
			val64 = RMAC_ADDR_CMD_MEM_WE |
			    RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
			    RMAC_ADDR_CMD_MEM_OFFSET
			    (config->mc_start_offset + i);
			writeq(val64, &bar0->rmac_addr_cmd_mem);

			/* Wait for command completes */
			if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
					RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
					S2IO_BIT_RESET)) {
				DBG_PRINT(ERR_DBG, "%s: Adding ",
					  dev->name);
				DBG_PRINT(ERR_DBG, "Multicasts failed\n");
				return;
			}
		}

		/* Create the new Rx filter list and update the same in H/W. */
		for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
		     i++, mclist = mclist->next) {
			memcpy(sp->usr_addrs[i].addr, mclist->dmi_addr,
			       ETH_ALEN);
			/* Pack the 6 address bytes, first byte in the
			 * most-significant position of the 48-bit value. */
			mac_addr = 0;
			for (j = 0; j < ETH_ALEN; j++) {
				mac_addr |= mclist->dmi_addr[j];
				mac_addr <<= 8;
			}
			mac_addr >>= 8;
			writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
			       &bar0->rmac_addr_data0_mem);
			writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
				&bar0->rmac_addr_data1_mem);
			val64 = RMAC_ADDR_CMD_MEM_WE |
			    RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
			    RMAC_ADDR_CMD_MEM_OFFSET
			    (i + config->mc_start_offset);
			writeq(val64, &bar0->rmac_addr_cmd_mem);

			/* Wait for command completes */
			if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
					RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
					S2IO_BIT_RESET)) {
				DBG_PRINT(ERR_DBG, "%s: Adding ",
					  dev->name);
				DBG_PRINT(ERR_DBG, "Multicasts failed\n");
				return;
			}
		}
	}
}
5095
5096 /* read from CAM unicast & multicast addresses and store it in
5097  * def_mac_addr structure
5098  */
5099 void do_s2io_store_unicast_mc(struct s2io_nic *sp)
5100 {
5101         int offset;
5102         u64 mac_addr = 0x0;
5103         struct config_param *config = &sp->config;
5104
5105         /* store unicast & multicast mac addresses */
5106         for (offset = 0; offset < config->max_mc_addr; offset++) {
5107                 mac_addr = do_s2io_read_unicast_mc(sp, offset);
5108                 /* if read fails disable the entry */
5109                 if (mac_addr == FAILURE)
5110                         mac_addr = S2IO_DISABLE_MAC_ENTRY;
5111                 do_s2io_copy_mac_addr(sp, offset, mac_addr);
5112         }
5113 }
5114
5115 /* restore unicast & multicast MAC to CAM from def_mac_addr structure */
5116 static void do_s2io_restore_unicast_mc(struct s2io_nic *sp)
5117 {
5118         int offset;
5119         struct config_param *config = &sp->config;
5120         /* restore unicast mac address */
5121         for (offset = 0; offset < config->max_mac_addr; offset++)
5122                 do_s2io_prog_unicast(sp->dev,
5123                         sp->def_mac_addr[offset].mac_addr);
5124
5125         /* restore multicast mac address */
5126         for (offset = config->mc_start_offset;
5127                 offset < config->max_mc_addr; offset++)
5128                 do_s2io_add_mc(sp, sp->def_mac_addr[offset].mac_addr);
5129 }
5130
5131 /* add a multicast MAC address to CAM */
5132 static int do_s2io_add_mc(struct s2io_nic *sp, u8 *addr)
5133 {
5134         int i;
5135         u64 mac_addr = 0;
5136         struct config_param *config = &sp->config;
5137
5138         for (i = 0; i < ETH_ALEN; i++) {
5139                 mac_addr <<= 8;
5140                 mac_addr |= addr[i];
5141         }
5142         if ((0ULL == mac_addr) || (mac_addr == S2IO_DISABLE_MAC_ENTRY))
5143                 return SUCCESS;
5144
5145         /* check if the multicast mac already preset in CAM */
5146         for (i = config->mc_start_offset; i < config->max_mc_addr; i++) {
5147                 u64 tmp64;
5148                 tmp64 = do_s2io_read_unicast_mc(sp, i);
5149                 if (tmp64 == S2IO_DISABLE_MAC_ENTRY) /* CAM entry is empty */
5150                         break;
5151
5152                 if (tmp64 == mac_addr)
5153                         return SUCCESS;
5154         }
5155         if (i == config->max_mc_addr) {
5156                 DBG_PRINT(ERR_DBG,
5157                         "CAM full no space left for multicast MAC\n");
5158                 return FAILURE;
5159         }
5160         /* Update the internal structure with this new mac address */
5161         do_s2io_copy_mac_addr(sp, i, mac_addr);
5162
5163         return (do_s2io_add_mac(sp, mac_addr, i));
5164 }
5165
5166 /* add MAC address to CAM */
5167 static int do_s2io_add_mac(struct s2io_nic *sp, u64 addr, int off)
5168 {
5169         u64 val64;
5170         struct XENA_dev_config __iomem *bar0 = sp->bar0;
5171
5172         writeq(RMAC_ADDR_DATA0_MEM_ADDR(addr),
5173                 &bar0->rmac_addr_data0_mem);
5174
5175         val64 =
5176                 RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5177                 RMAC_ADDR_CMD_MEM_OFFSET(off);
5178         writeq(val64, &bar0->rmac_addr_cmd_mem);
5179
5180         /* Wait till command completes */
5181         if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5182                 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5183                 S2IO_BIT_RESET)) {
5184                 DBG_PRINT(INFO_DBG, "do_s2io_add_mac failed\n");
5185                 return FAILURE;
5186         }
5187         return SUCCESS;
5188 }
5189 /* deletes a specified unicast/multicast mac entry from CAM */
5190 static int do_s2io_delete_unicast_mc(struct s2io_nic *sp, u64 addr)
5191 {
5192         int offset;
5193         u64 dis_addr = S2IO_DISABLE_MAC_ENTRY, tmp64;
5194         struct config_param *config = &sp->config;
5195
5196         for (offset = 1;
5197                 offset < config->max_mc_addr; offset++) {
5198                 tmp64 = do_s2io_read_unicast_mc(sp, offset);
5199                 if (tmp64 == addr) {
5200                         /* disable the entry by writing  0xffffffffffffULL */
5201                         if (do_s2io_add_mac(sp, dis_addr, offset) ==  FAILURE)
5202                                 return FAILURE;
5203                         /* store the new mac list from CAM */
5204                         do_s2io_store_unicast_mc(sp);
5205                         return SUCCESS;
5206                 }
5207         }
5208         DBG_PRINT(ERR_DBG, "MAC address 0x%llx not found in CAM\n",
5209                         (unsigned long long)addr);
5210         return FAILURE;
5211 }
5212
5213 /* read mac entries from CAM */
5214 static u64 do_s2io_read_unicast_mc(struct s2io_nic *sp, int offset)
5215 {
5216         u64 tmp64 = 0xffffffffffff0000ULL, val64;
5217         struct XENA_dev_config __iomem *bar0 = sp->bar0;
5218
5219         /* read mac addr */
5220         val64 =
5221                 RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5222                 RMAC_ADDR_CMD_MEM_OFFSET(offset);
5223         writeq(val64, &bar0->rmac_addr_cmd_mem);
5224
5225         /* Wait till command completes */
5226         if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5227                 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5228                 S2IO_BIT_RESET)) {
5229                 DBG_PRINT(INFO_DBG, "do_s2io_read_unicast_mc failed\n");
5230                 return FAILURE;
5231         }
5232         tmp64 = readq(&bar0->rmac_addr_data0_mem);
5233         return (tmp64 >> 16);
5234 }
5235
5236 /**
5237  * s2io_set_mac_addr driver entry point
5238  */
5239
5240 static int s2io_set_mac_addr(struct net_device *dev, void *p)
5241 {
5242         struct sockaddr *addr = p;
5243
5244         if (!is_valid_ether_addr(addr->sa_data))
5245                 return -EINVAL;
5246
5247         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5248
5249         /* store the MAC address in CAM */
5250         return (do_s2io_prog_unicast(dev, dev->dev_addr));
5251 }
5252 /**
5253  *  do_s2io_prog_unicast - Programs the Xframe mac address
5254  *  @dev : pointer to the device structure.
5255  *  @addr: a uchar pointer to the new mac address which is to be set.
5256  *  Description : This procedure will program the Xframe to receive
5257  *  frames with new Mac Address
5258  *  Return value: SUCCESS on success and an appropriate (-)ve integer
5259  *  as defined in errno.h file on failure.
5260  */
5261
5262 static int do_s2io_prog_unicast(struct net_device *dev, u8 *addr)
5263 {
5264         struct s2io_nic *sp = dev->priv;
5265         register u64 mac_addr = 0, perm_addr = 0;
5266         int i;
5267         u64 tmp64;
5268         struct config_param *config = &sp->config;
5269
5270         /*
5271         * Set the new MAC address as the new unicast filter and reflect this
5272         * change on the device address registered with the OS. It will be
5273         * at offset 0.
5274         */
5275         for (i = 0; i < ETH_ALEN; i++) {
5276                 mac_addr <<= 8;
5277                 mac_addr |= addr[i];
5278                 perm_addr <<= 8;
5279                 perm_addr |= sp->def_mac_addr[0].mac_addr[i];
5280         }
5281
5282         /* check if the dev_addr is different than perm_addr */
5283         if (mac_addr == perm_addr)
5284                 return SUCCESS;
5285
5286         /* check if the mac already preset in CAM */
5287         for (i = 1; i < config->max_mac_addr; i++) {
5288                 tmp64 = do_s2io_read_unicast_mc(sp, i);
5289                 if (tmp64 == S2IO_DISABLE_MAC_ENTRY) /* CAM entry is empty */
5290                         break;
5291
5292                 if (tmp64 == mac_addr) {
5293                         DBG_PRINT(INFO_DBG,
5294                         "MAC addr:0x%llx already present in CAM\n",
5295                         (unsigned long long)mac_addr);
5296                         return SUCCESS;
5297                 }
5298         }
5299         if (i == config->max_mac_addr) {
5300                 DBG_PRINT(ERR_DBG, "CAM full no space left for Unicast MAC\n");
5301                 return FAILURE;
5302         }
5303         /* Update the internal structure with this new mac address */
5304         do_s2io_copy_mac_addr(sp, i, mac_addr);
5305         return (do_s2io_add_mac(sp, mac_addr, i));
5306 }
5307
5308 /**
5309  * s2io_ethtool_sset - Sets different link parameters.
5310  * @sp : private member of the device structure, which is a pointer to the  * s2io_nic structure.
5311  * @info: pointer to the structure with parameters given by ethtool to set
5312  * link information.
5313  * Description:
5314  * The function sets different link parameters provided by the user onto
5315  * the NIC.
5316  * Return value:
5317  * 0 on success.
5318 */
5319
5320 static int s2io_ethtool_sset(struct net_device *dev,
5321                              struct ethtool_cmd *info)
5322 {
5323         struct s2io_nic *sp = dev->priv;
5324         if ((info->autoneg == AUTONEG_ENABLE) ||
5325             (info->speed != SPEED_10000) || (info->duplex != DUPLEX_FULL))
5326                 return -EINVAL;
5327         else {
5328                 s2io_close(sp->dev);
5329                 s2io_open(sp->dev);
5330         }
5331
5332         return 0;
5333 }
5334
5335 /**
5336  * s2io_ethtol_gset - Return link specific information.
5337  * @sp : private member of the device structure, pointer to the
5338  *      s2io_nic structure.
5339  * @info : pointer to the structure with parameters given by ethtool
5340  * to return link information.
5341  * Description:
5342  * Returns link specific information like speed, duplex etc.. to ethtool.
5343  * Return value :
5344  * return 0 on success.
5345  */
5346
5347 static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
5348 {
5349         struct s2io_nic *sp = dev->priv;
5350         info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
5351         info->advertising = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
5352         info->port = PORT_FIBRE;
5353
5354         /* info->transceiver */
5355         info->transceiver = XCVR_EXTERNAL;
5356
5357         if (netif_carrier_ok(sp->dev)) {
5358                 info->speed = 10000;
5359                 info->duplex = DUPLEX_FULL;
5360         } else {
5361                 info->speed = -1;
5362                 info->duplex = -1;
5363         }
5364
5365         info->autoneg = AUTONEG_DISABLE;
5366         return 0;
5367 }
5368
5369 /**
5370  * s2io_ethtool_gdrvinfo - Returns driver specific information.
5371  * @sp : private member of the device structure, which is a pointer to the
5372  * s2io_nic structure.
5373  * @info : pointer to the structure with parameters given by ethtool to
5374  * return driver information.
5375  * Description:
5376  * Returns driver specefic information like name, version etc.. to ethtool.
5377  * Return value:
5378  *  void
5379  */
5380
5381 static void s2io_ethtool_gdrvinfo(struct net_device *dev,
5382                                   struct ethtool_drvinfo *info)
5383 {
5384         struct s2io_nic *sp = dev->priv;
5385
5386         strncpy(info->driver, s2io_driver_name, sizeof(info->driver));
5387         strncpy(info->version, s2io_driver_version, sizeof(info->version));
5388         strncpy(info->fw_version, "", sizeof(info->fw_version));
5389         strncpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info));
5390         info->regdump_len = XENA_REG_SPACE;
5391         info->eedump_len = XENA_EEPROM_SPACE;
5392 }
5393
5394 /**
5395  *  s2io_ethtool_gregs - dumps the entire space of Xfame into the buffer.
5396  *  @sp: private member of the device structure, which is a pointer to the
5397  *  s2io_nic structure.
5398  *  @regs : pointer to the structure with parameters given by ethtool for
5399  *  dumping the registers.
5400  *  @reg_space: The input argumnet into which all the registers are dumped.
5401  *  Description:
5402  *  Dumps the entire register space of xFrame NIC into the user given
5403  *  buffer area.
5404  * Return value :
5405  * void .
5406 */
5407
5408 static void s2io_ethtool_gregs(struct net_device *dev,
5409                                struct ethtool_regs *regs, void *space)
5410 {
5411         int i;
5412         u64 reg;
5413         u8 *reg_space = (u8 *) space;
5414         struct s2io_nic *sp = dev->priv;
5415
5416         regs->len = XENA_REG_SPACE;
5417         regs->version = sp->pdev->subsystem_device;
5418
5419         for (i = 0; i < regs->len; i += 8) {
5420                 reg = readq(sp->bar0 + i);
5421                 memcpy((reg_space + i), &reg, 8);
5422         }
5423 }
5424
5425 /**
5426  *  s2io_phy_id  - timer function that alternates adapter LED.
5427  *  @data : address of the private member of the device structure, which
5428  *  is a pointer to the s2io_nic structure, provided as an u32.
5429  * Description: This is actually the timer function that alternates the
5430  * adapter LED bit of the adapter control bit to set/reset every time on
5431  * invocation. The timer is set for 1/2 a second, hence tha NIC blinks
5432  *  once every second.
5433 */
5434 static void s2io_phy_id(unsigned long data)
5435 {
5436         struct s2io_nic *sp = (struct s2io_nic *) data;
5437         struct XENA_dev_config __iomem *bar0 = sp->bar0;
5438         u64 val64 = 0;
5439         u16 subid;
5440
5441         subid = sp->pdev->subsystem_device;
5442         if ((sp->device_type == XFRAME_II_DEVICE) ||
5443                    ((subid & 0xFF) >= 0x07)) {
5444                 val64 = readq(&bar0->gpio_control);
5445                 val64 ^= GPIO_CTRL_GPIO_0;
5446                 writeq(val64, &bar0->gpio_control);
5447         } else {
5448                 val64 = readq(&bar0->adapter_control);
5449                 val64 ^= ADAPTER_LED_ON;
5450                 writeq(val64, &bar0->adapter_control);
5451         }
5452
5453         mod_timer(&sp->id_timer, jiffies + HZ / 2);
5454 }
5455
/**
 * s2io_ethtool_idnic - To physically identify the nic on the system.
 * @dev : net device to be identified.
 * @data : number of seconds to blink (0 selects the default
 * MAX_FLICKER_TIME).
 * Description: Used to physically identify the NIC on the system.
 * The Link LED will blink for a time specified by the user for
 * identification.
 * NOTE: The Link has to be Up to be able to blink the LED. Hence
 * identification is possible only if it's link is up.
 * Return value:
 * int , returns 0 on success
 */

static int s2io_ethtool_idnic(struct net_device *dev, u32 data)
{
	u64 val64 = 0, last_gpio_ctrl_val;
	struct s2io_nic *sp = dev->priv;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u16 subid;

	subid = sp->pdev->subsystem_device;
	/* remember the current GPIO state so it can be restored below */
	last_gpio_ctrl_val = readq(&bar0->gpio_control);
	/* early Xframe I revisions blink via adapter_control, which only
	 * works while the adapter (and hence the link) is enabled */
	if ((sp->device_type == XFRAME_I_DEVICE) &&
		((subid & 0xFF) < 0x07)) {
		val64 = readq(&bar0->adapter_control);
		if (!(val64 & ADAPTER_CNTL_EN)) {
			printk(KERN_ERR
			       "Adapter Link down, cannot blink LED\n");
			return -EFAULT;
		}
	}
	/* lazily initialise the blink timer on first use */
	if (sp->id_timer.function == NULL) {
		init_timer(&sp->id_timer);
		sp->id_timer.function = s2io_phy_id;
		sp->id_timer.data = (unsigned long) sp;
	}
	mod_timer(&sp->id_timer, jiffies);
	/* sleep while the timer callback blinks the LED */
	if (data)
		msleep_interruptible(data * HZ);
	else
		msleep_interruptible(MAX_FLICKER_TIME);
	del_timer_sync(&sp->id_timer);

	/* restore the saved GPIO state on boards with faulty indicators */
	if (CARDS_WITH_FAULTY_LINK_INDICATORS(sp->device_type, subid)) {
		writeq(last_gpio_ctrl_val, &bar0->gpio_control);
		last_gpio_ctrl_val = readq(&bar0->gpio_control);
	}

	return 0;
}
5508
5509 static void s2io_ethtool_gringparam(struct net_device *dev,
5510                                     struct ethtool_ringparam *ering)
5511 {
5512         struct s2io_nic *sp = dev->priv;
5513         int i,tx_desc_count=0,rx_desc_count=0;
5514
5515         if (sp->rxd_mode == RXD_MODE_1)
5516                 ering->rx_max_pending = MAX_RX_DESC_1;
5517         else if (sp->rxd_mode == RXD_MODE_3B)
5518                 ering->rx_max_pending = MAX_RX_DESC_2;
5519
5520         ering->tx_max_pending = MAX_TX_DESC;
5521         for (i = 0 ; i < sp->config.tx_fifo_num ; i++)
5522                 tx_desc_count += sp->config.tx_cfg[i].fifo_len;
5523
5524         DBG_PRINT(INFO_DBG,"\nmax txds : %d\n",sp->config.max_txds);
5525         ering->tx_pending = tx_desc_count;
5526         rx_desc_count = 0;
5527         for (i = 0 ; i < sp->config.rx_ring_num ; i++)
5528                 rx_desc_count += sp->config.rx_cfg[i].num_rxd;
5529
5530         ering->rx_pending = rx_desc_count;
5531
5532         ering->rx_mini_max_pending = 0;
5533         ering->rx_mini_pending = 0;
5534         if(sp->rxd_mode == RXD_MODE_1)
5535                 ering->rx_jumbo_max_pending = MAX_RX_DESC_1;
5536         else if (sp->rxd_mode == RXD_MODE_3B)
5537                 ering->rx_jumbo_max_pending = MAX_RX_DESC_2;
5538         ering->rx_jumbo_pending = rx_desc_count;
5539 }
5540
5541 /**
5542  * s2io_ethtool_getpause_data -Pause frame frame generation and reception.
5543  * @sp : private member of the device structure, which is a pointer to the
5544  *      s2io_nic structure.
5545  * @ep : pointer to the structure with pause parameters given by ethtool.
5546  * Description:
5547  * Returns the Pause frame generation and reception capability of the NIC.
5548  * Return value:
5549  *  void
5550  */
5551 static void s2io_ethtool_getpause_data(struct net_device *dev,
5552                                        struct ethtool_pauseparam *ep)
5553 {
5554         u64 val64;
5555         struct s2io_nic *sp = dev->priv;
5556         struct XENA_dev_config __iomem *bar0 = sp->bar0;
5557
5558         val64 = readq(&bar0->rmac_pause_cfg);
5559         if (val64 & RMAC_PAUSE_GEN_ENABLE)
5560                 ep->tx_pause = TRUE;
5561         if (val64 & RMAC_PAUSE_RX_ENABLE)
5562                 ep->rx_pause = TRUE;
5563         ep->autoneg = FALSE;
5564 }
5565
5566 /**
5567  * s2io_ethtool_setpause_data -  set/reset pause frame generation.
5568  * @sp : private member of the device structure, which is a pointer to the
5569  *      s2io_nic structure.
5570  * @ep : pointer to the structure with pause parameters given by ethtool.
5571  * Description:
5572  * It can be used to set or reset Pause frame generation or reception
5573  * support of the NIC.
5574  * Return value:
5575  * int, returns 0 on Success
5576  */
5577
5578 static int s2io_ethtool_setpause_data(struct net_device *dev,
5579                                struct ethtool_pauseparam *ep)
5580 {
5581         u64 val64;
5582         struct s2io_nic *sp = dev->priv;
5583         struct XENA_dev_config __iomem *bar0 = sp->bar0;
5584
5585         val64 = readq(&bar0->rmac_pause_cfg);
5586         if (ep->tx_pause)
5587                 val64 |= RMAC_PAUSE_GEN_ENABLE;
5588         else
5589                 val64 &= ~RMAC_PAUSE_GEN_ENABLE;
5590         if (ep->rx_pause)
5591                 val64 |= RMAC_PAUSE_RX_ENABLE;
5592         else
5593                 val64 &= ~RMAC_PAUSE_RX_ENABLE;
5594         writeq(val64, &bar0->rmac_pause_cfg);
5595         return 0;
5596 }
5597
/**
 * read_eeprom - reads 4 bytes of data from user given offset.
 * @sp : private member of the device structure, which is a pointer to the
 *      s2io_nic structure.
 * @off : offset at which the data must be read
 * @data : Its an output parameter where the data read at the given
 *      offset is stored.
 * Description:
 * Will read 4 bytes of data from the user given offset and return the
 * read data.
 * NOTE: Will allow to read only part of the EEPROM visible through the
 *   I2C bus.
 * Return value:
 *  -1 on failure and 0 on success.
 */

/* I2C device id used to address the serial EEPROM on Xframe I boards */
#define S2IO_DEV_ID             5
static int read_eeprom(struct s2io_nic * sp, int off, u64 * data)
{
	int ret = -1;
	u32 exit_cnt = 0;
	u64 val64;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;

	/* Xframe I reaches the EEPROM through the I2C controller */
	if (sp->device_type == XFRAME_I_DEVICE) {
		val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
		    I2C_CONTROL_BYTE_CNT(0x3) | I2C_CONTROL_READ |
		    I2C_CONTROL_CNTL_START;
		SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);

		/* poll up to 5 times, 50 ms apart, for completion */
		while (exit_cnt < 5) {
			val64 = readq(&bar0->i2c_control);
			if (I2C_CONTROL_CNTL_END(val64)) {
				*data = I2C_CONTROL_GET_DATA(val64);
				ret = 0;
				break;
			}
			msleep(50);
			exit_cnt++;
		}
	}

	/* Xframe II reaches the EEPROM through the SPI controller */
	if (sp->device_type == XFRAME_II_DEVICE) {
		val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
			SPI_CONTROL_BYTECNT(0x3) |
			SPI_CONTROL_CMD(0x3) | SPI_CONTROL_ADDR(off);
		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
		/* the request bit is raised in a second write, after the
		 * command word itself has been programmed */
		val64 |= SPI_CONTROL_REQ;
		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
		while (exit_cnt < 5) {
			val64 = readq(&bar0->spi_control);
			if (val64 & SPI_CONTROL_NACK) {
				/* device rejected the request */
				ret = 1;
				break;
			} else if (val64 & SPI_CONTROL_DONE) {
				/* only the low 24 bits carry EEPROM data */
				*data = readq(&bar0->spi_data);
				*data &= 0xffffff;
				ret = 0;
				break;
			}
			msleep(50);
			exit_cnt++;
		}
	}
	return ret;
}
5664
/**
 *  write_eeprom - actually writes the relevant part of the data value.
 *  @sp : private member of the device structure, which is a pointer to the
 *       s2io_nic structure.
 *  @off : offset at which the data must be written
 *  @data : The data that is to be written
 *  @cnt : Number of bytes of the data that are actually to be written into
 *  the Eeprom. (max of 3)
 * Description:
 *  Actually writes the relevant part of the data value into the Eeprom
 *  through the I2C bus.
 * Return value:
 *  0 on success, -1 on failure.
 */

static int write_eeprom(struct s2io_nic * sp, int off, u64 data, int cnt)
{
	int exit_cnt = 0, ret = -1;
	u64 val64;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;

	/* Xframe I programs the EEPROM through the I2C controller */
	if (sp->device_type == XFRAME_I_DEVICE) {
		val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
		    I2C_CONTROL_BYTE_CNT(cnt) | I2C_CONTROL_SET_DATA((u32)data) |
		    I2C_CONTROL_CNTL_START;
		SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);

		/* poll up to 5 times, 50 ms apart, for completion; a NACK
		 * leaves ret at -1 */
		while (exit_cnt < 5) {
			val64 = readq(&bar0->i2c_control);
			if (I2C_CONTROL_CNTL_END(val64)) {
				if (!(val64 & I2C_CONTROL_NACK))
					ret = 0;
				break;
			}
			msleep(50);
			exit_cnt++;
		}
	}

	/* Xframe II programs the EEPROM through the SPI controller */
	if (sp->device_type == XFRAME_II_DEVICE) {
		/* NOTE(review): a count of 8 appears to be encoded as 0 in
		 * the SPI byte-count field — confirm against the Xframe II
		 * register specification */
		int write_cnt = (cnt == 8) ? 0 : cnt;
		writeq(SPI_DATA_WRITE(data,(cnt<<3)), &bar0->spi_data);

		val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
			SPI_CONTROL_BYTECNT(write_cnt) |
			SPI_CONTROL_CMD(0x2) | SPI_CONTROL_ADDR(off);
		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
		/* raise the request bit in a second write, after the
		 * command word has been programmed */
		val64 |= SPI_CONTROL_REQ;
		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
		while (exit_cnt < 5) {
			val64 = readq(&bar0->spi_control);
			if (val64 & SPI_CONTROL_NACK) {
				/* device rejected the write */
				ret = 1;
				break;
			} else if (val64 & SPI_CONTROL_DONE) {
				ret = 0;
				break;
			}
			msleep(50);
			exit_cnt++;
		}
	}
	return ret;
}
5729 static void s2io_vpd_read(struct s2io_nic *nic)
5730 {
5731         u8 *vpd_data;
5732         u8 data;
5733         int i=0, cnt, fail = 0;
5734         int vpd_addr = 0x80;
5735
5736         if (nic->device_type == XFRAME_II_DEVICE) {
5737                 strcpy(nic->product_name, "Xframe II 10GbE network adapter");
5738                 vpd_addr = 0x80;
5739         }
5740         else {
5741                 strcpy(nic->product_name, "Xframe I 10GbE network adapter");
5742                 vpd_addr = 0x50;
5743         }
5744         strcpy(nic->serial_num, "NOT AVAILABLE");
5745
5746         vpd_data = kmalloc(256, GFP_KERNEL);
5747         if (!vpd_data) {
5748                 nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
5749                 return;
5750         }
5751         nic->mac_control.stats_info->sw_stat.mem_allocated += 256;
5752
5753         for (i = 0; i < 256; i +=4 ) {
5754                 pci_write_config_byte(nic->pdev, (vpd_addr + 2), i);
5755                 pci_read_config_byte(nic->pdev,  (vpd_addr + 2), &data);
5756                 pci_write_config_byte(nic->pdev, (vpd_addr + 3), 0);
5757                 for (cnt = 0; cnt <5; cnt++) {
5758                         msleep(2);
5759                         pci_read_config_byte(nic->pdev, (vpd_addr + 3), &data);
5760                         if (data == 0x80)
5761                                 break;
5762                 }
5763                 if (cnt >= 5) {
5764                         DBG_PRINT(ERR_DBG, "Read of VPD data failed\n");
5765                         fail = 1;
5766                         break;
5767                 }
5768                 pci_read_config_dword(nic->pdev,  (vpd_addr + 4),
5769                                       (u32 *)&vpd_data[i]);
5770         }
5771
5772         if(!fail) {
5773                 /* read serial number of adapter */
5774                 for (cnt = 0; cnt < 256; cnt++) {
5775                 if ((vpd_data[cnt] == 'S') &&
5776                         (vpd_data[cnt+1] == 'N') &&
5777                         (vpd_data[cnt+2] < VPD_STRING_LEN)) {
5778                                 memset(nic->serial_num, 0, VPD_STRING_LEN);
5779                                 memcpy(nic->serial_num, &vpd_data[cnt + 3],
5780                                         vpd_data[cnt+2]);
5781                                 break;
5782                         }
5783                 }
5784         }
5785
5786         if ((!fail) && (vpd_data[1] < VPD_STRING_LEN)) {
5787                 memset(nic->product_name, 0, vpd_data[1]);
5788                 memcpy(nic->product_name, &vpd_data[3], vpd_data[1]);
5789         }
5790         kfree(vpd_data);
5791         nic->mac_control.stats_info->sw_stat.mem_freed += 256;
5792 }
5793
5794 /**
5795  *  s2io_ethtool_geeprom  - reads the value stored in the Eeprom.
5796  *  @sp : private member of the device structure, which is a pointer to the *       s2io_nic structure.
5797  *  @eeprom : pointer to the user level structure provided by ethtool,
5798  *  containing all relevant information.
5799  *  @data_buf : user defined value to be written into Eeprom.
5800  *  Description: Reads the values stored in the Eeprom at given offset
5801  *  for a given length. Stores these values int the input argument data
5802  *  buffer 'data_buf' and returns these to the caller (ethtool.)
5803  *  Return value:
5804  *  int  0 on success
5805  */
5806
5807 static int s2io_ethtool_geeprom(struct net_device *dev,
5808                          struct ethtool_eeprom *eeprom, u8 * data_buf)
5809 {
5810         u32 i, valid;
5811         u64 data;
5812         struct s2io_nic *sp = dev->priv;
5813
5814         eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);
5815
5816         if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE))
5817                 eeprom->len = XENA_EEPROM_SPACE - eeprom->offset;
5818
5819         for (i = 0; i < eeprom->len; i += 4) {
5820                 if (read_eeprom(sp, (eeprom->offset + i), &data)) {
5821                         DBG_PRINT(ERR_DBG, "Read of EEPROM failed\n");
5822                         return -EFAULT;
5823                 }
5824                 valid = INV(data);
5825                 memcpy((data_buf + i), &valid, 4);
5826         }
5827         return 0;
5828 }
5829
5830 /**
5831  *  s2io_ethtool_seeprom - tries to write the user provided value in Eeprom
5832  *  @sp : private member of the device structure, which is a pointer to the
5833  *  s2io_nic structure.
5834  *  @eeprom : pointer to the user level structure provided by ethtool,
5835  *  containing all relevant information.
5836  *  @data_buf ; user defined value to be written into Eeprom.
5837  *  Description:
5838  *  Tries to write the user provided value in the Eeprom, at the offset
5839  *  given by the user.
5840  *  Return value:
5841  *  0 on success, -EFAULT on failure.
5842  */
5843
5844 static int s2io_ethtool_seeprom(struct net_device *dev,
5845                                 struct ethtool_eeprom *eeprom,
5846                                 u8 * data_buf)
5847 {
5848         int len = eeprom->len, cnt = 0;
5849         u64 valid = 0, data;
5850         struct s2io_nic *sp = dev->priv;
5851
5852         if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
5853                 DBG_PRINT(ERR_DBG,
5854                           "ETHTOOL_WRITE_EEPROM Err: Magic value ");
5855                 DBG_PRINT(ERR_DBG, "is wrong, Its not 0x%x\n",
5856                           eeprom->magic);
5857                 return -EFAULT;
5858         }
5859
5860         while (len) {
5861                 data = (u32) data_buf[cnt] & 0x000000FF;
5862                 if (data) {
5863                         valid = (u32) (data << 24);
5864                 } else
5865                         valid = data;
5866
5867                 if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) {
5868                         DBG_PRINT(ERR_DBG,
5869                                   "ETHTOOL_WRITE_EEPROM Err: Cannot ");
5870                         DBG_PRINT(ERR_DBG,
5871                                   "write into the specified offset\n");
5872                         return -EFAULT;
5873                 }
5874                 cnt++;
5875                 len--;
5876         }
5877
5878         return 0;
5879 }
5880
5881 /**
5882  * s2io_register_test - reads and writes into all clock domains.
5883  * @sp : private member of the device structure, which is a pointer to the
5884  * s2io_nic structure.
5885  * @data : variable that returns the result of each of the test conducted b
5886  * by the driver.
5887  * Description:
5888  * Read and write into all clock domains. The NIC has 3 clock domains,
5889  * see that registers in all the three regions are accessible.
5890  * Return value:
5891  * 0 on success.
5892  */
5893
5894 static int s2io_register_test(struct s2io_nic * sp, uint64_t * data)
5895 {
5896         struct XENA_dev_config __iomem *bar0 = sp->bar0;
5897         u64 val64 = 0, exp_val;
5898         int fail = 0;
5899
5900         val64 = readq(&bar0->pif_rd_swapper_fb);
5901         if (val64 != 0x123456789abcdefULL) {
5902                 fail = 1;
5903                 DBG_PRINT(INFO_DBG, "Read Test level 1 fails\n");
5904         }
5905
5906         val64 = readq(&bar0->rmac_pause_cfg);
5907         if (val64 != 0xc000ffff00000000ULL) {
5908                 fail = 1;
5909                 DBG_PRINT(INFO_DBG, "Read Test level 2 fails\n");
5910         }
5911
5912         val64 = readq(&bar0->rx_queue_cfg);
5913         if (sp->device_type == XFRAME_II_DEVICE)
5914                 exp_val = 0x0404040404040404ULL;
5915         else
5916                 exp_val = 0x0808080808080808ULL;
5917         if (val64 != exp_val) {
5918                 fail = 1;
5919                 DBG_PRINT(INFO_DBG, "Read Test level 3 fails\n");
5920         }
5921
5922         val64 = readq(&bar0->xgxs_efifo_cfg);
5923         if (val64 != 0x000000001923141EULL) {
5924                 fail = 1;
5925                 DBG_PRINT(INFO_DBG, "Read Test level 4 fails\n");
5926         }
5927
5928         val64 = 0x5A5A5A5A5A5A5A5AULL;
5929         writeq(val64, &bar0->xmsi_data);
5930         val64 = readq(&bar0->xmsi_data);
5931         if (val64 != 0x5A5A5A5A5A5A5A5AULL) {
5932                 fail = 1;
5933                 DBG_PRINT(ERR_DBG, "Write Test level 1 fails\n");
5934         }
5935
5936         val64 = 0xA5A5A5A5A5A5A5A5ULL;
5937         writeq(val64, &bar0->xmsi_data);
5938         val64 = readq(&bar0->xmsi_data);
5939         if (val64 != 0xA5A5A5A5A5A5A5A5ULL) {
5940                 fail = 1;
5941                 DBG_PRINT(ERR_DBG, "Write Test level 2 fails\n");
5942         }
5943
5944         *data = fail;
5945         return fail;
5946 }
5947
5948 /**
5949  * s2io_eeprom_test - to verify that EEprom in the xena can be programmed.
5950  * @sp : private member of the device structure, which is a pointer to the
5951  * s2io_nic structure.
5952  * @data:variable that returns the result of each of the test conducted by
5953  * the driver.
5954  * Description:
5955  * Verify that EEPROM in the xena can be programmed using I2C_CONTROL
5956  * register.
5957  * Return value:
5958  * 0 on success.
5959  */
5960
5961 static int s2io_eeprom_test(struct s2io_nic * sp, uint64_t * data)
5962 {
5963         int fail = 0;
5964         u64 ret_data, org_4F0, org_7F0;
5965         u8 saved_4F0 = 0, saved_7F0 = 0;
5966         struct net_device *dev = sp->dev;
5967
5968         /* Test Write Error at offset 0 */
5969         /* Note that SPI interface allows write access to all areas
5970          * of EEPROM. Hence doing all negative testing only for Xframe I.
5971          */
5972         if (sp->device_type == XFRAME_I_DEVICE)
5973                 if (!write_eeprom(sp, 0, 0, 3))
5974                         fail = 1;
5975
5976         /* Save current values at offsets 0x4F0 and 0x7F0 */
5977         if (!read_eeprom(sp, 0x4F0, &org_4F0))
5978                 saved_4F0 = 1;
5979         if (!read_eeprom(sp, 0x7F0, &org_7F0))
5980                 saved_7F0 = 1;
5981
5982         /* Test Write at offset 4f0 */
5983         if (write_eeprom(sp, 0x4F0, 0x012345, 3))
5984                 fail = 1;
5985         if (read_eeprom(sp, 0x4F0, &ret_data))
5986                 fail = 1;
5987
5988         if (ret_data != 0x012345) {
5989                 DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x4F0. "
5990                         "Data written %llx Data read %llx\n",
5991                         dev->name, (unsigned long long)0x12345,
5992                         (unsigned long long)ret_data);
5993                 fail = 1;
5994         }
5995
5996         /* Reset the EEPROM data go FFFF */
5997         write_eeprom(sp, 0x4F0, 0xFFFFFF, 3);
5998
5999         /* Test Write Request Error at offset 0x7c */
6000         if (sp->device_type == XFRAME_I_DEVICE)
6001                 if (!write_eeprom(sp, 0x07C, 0, 3))
6002                         fail = 1;
6003
6004         /* Test Write Request at offset 0x7f0 */
6005         if (write_eeprom(sp, 0x7F0, 0x012345, 3))
6006                 fail = 1;
6007         if (read_eeprom(sp, 0x7F0, &ret_data))
6008                 fail = 1;
6009
6010         if (ret_data != 0x012345) {
6011                 DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x7F0. "
6012                         "Data written %llx Data read %llx\n",
6013                         dev->name, (unsigned long long)0x12345,
6014                         (unsigned long long)ret_data);
6015                 fail = 1;
6016         }
6017
6018         /* Reset the EEPROM data go FFFF */
6019         write_eeprom(sp, 0x7F0, 0xFFFFFF, 3);
6020
6021         if (sp->device_type == XFRAME_I_DEVICE) {
6022                 /* Test Write Error at offset 0x80 */
6023                 if (!write_eeprom(sp, 0x080, 0, 3))
6024                         fail = 1;
6025
6026                 /* Test Write Error at offset 0xfc */
6027                 if (!write_eeprom(sp, 0x0FC, 0, 3))
6028                         fail = 1;
6029
6030                 /* Test Write Error at offset 0x100 */
6031                 if (!write_eeprom(sp, 0x100, 0, 3))
6032                         fail = 1;
6033
6034                 /* Test Write Error at offset 4ec */
6035                 if (!write_eeprom(sp, 0x4EC, 0, 3))
6036                         fail = 1;
6037         }
6038
6039         /* Restore values at offsets 0x4F0 and 0x7F0 */
6040         if (saved_4F0)
6041                 write_eeprom(sp, 0x4F0, org_4F0, 3);
6042         if (saved_7F0)
6043                 write_eeprom(sp, 0x7F0, org_7F0, 3);
6044
6045         *data = fail;
6046         return fail;
6047 }
6048
6049 /**
6050  * s2io_bist_test - invokes the MemBist test of the card .
6051  * @sp : private member of the device structure, which is a pointer to the
6052  * s2io_nic structure.
6053  * @data:variable that returns the result of each of the test conducted by
6054  * the driver.
6055  * Description:
6056  * This invokes the MemBist test of the card. We give around
6057  * 2 secs time for the Test to complete. If it's still not complete
6058  * within this peiod, we consider that the test failed.
6059  * Return value:
6060  * 0 on success and -1 on failure.
6061  */
6062
6063 static int s2io_bist_test(struct s2io_nic * sp, uint64_t * data)
6064 {
6065         u8 bist = 0;
6066         int cnt = 0, ret = -1;
6067
6068         pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
6069         bist |= PCI_BIST_START;
6070         pci_write_config_word(sp->pdev, PCI_BIST, bist);
6071
6072         while (cnt < 20) {
6073                 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
6074                 if (!(bist & PCI_BIST_START)) {
6075                         *data = (bist & PCI_BIST_CODE_MASK);
6076                         ret = 0;
6077                         break;
6078                 }
6079                 msleep(100);
6080                 cnt++;
6081         }
6082
6083         return ret;
6084 }
6085
6086 /**
6087  * s2io-link_test - verifies the link state of the nic
6088  * @sp ; private member of the device structure, which is a pointer to the
6089  * s2io_nic structure.
6090  * @data: variable that returns the result of each of the test conducted by
6091  * the driver.
6092  * Description:
6093  * The function verifies the link state of the NIC and updates the input
6094  * argument 'data' appropriately.
6095  * Return value:
6096  * 0 on success.
6097  */
6098
6099 static int s2io_link_test(struct s2io_nic * sp, uint64_t * data)
6100 {
6101         struct XENA_dev_config __iomem *bar0 = sp->bar0;
6102         u64 val64;
6103
6104         val64 = readq(&bar0->adapter_status);
6105         if(!(LINK_IS_UP(val64)))
6106                 *data = 1;
6107         else
6108                 *data = 0;
6109
6110         return *data;
6111 }
6112
6113 /**
6114  * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
6115  * @sp - private member of the device structure, which is a pointer to the
6116  * s2io_nic structure.
6117  * @data - variable that returns the result of each of the test
6118  * conducted by the driver.
6119  * Description:
6120  *  This is one of the offline test that tests the read and write
6121  *  access to the RldRam chip on the NIC.
6122  * Return value:
6123  *  0 on success.
6124  */
6125
6126 static int s2io_rldram_test(struct s2io_nic * sp, uint64_t * data)
6127 {
6128         struct XENA_dev_config __iomem *bar0 = sp->bar0;
6129         u64 val64;
6130         int cnt, iteration = 0, test_fail = 0;
6131
6132         val64 = readq(&bar0->adapter_control);
6133         val64 &= ~ADAPTER_ECC_EN;
6134         writeq(val64, &bar0->adapter_control);
6135
6136         val64 = readq(&bar0->mc_rldram_test_ctrl);
6137         val64 |= MC_RLDRAM_TEST_MODE;
6138         SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
6139
6140         val64 = readq(&bar0->mc_rldram_mrs);
6141         val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE;
6142         SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
6143
6144         val64 |= MC_RLDRAM_MRS_ENABLE;
6145         SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
6146
6147         while (iteration < 2) {
6148                 val64 = 0x55555555aaaa0000ULL;
6149                 if (iteration == 1) {
6150                         val64 ^= 0xFFFFFFFFFFFF0000ULL;
6151                 }
6152                 writeq(val64, &bar0->mc_rldram_test_d0);
6153
6154                 val64 = 0xaaaa5a5555550000ULL;
6155                 if (iteration == 1) {
6156                         val64 ^= 0xFFFFFFFFFFFF0000ULL;
6157                 }
6158                 writeq(val64, &bar0->mc_rldram_test_d1);
6159
6160                 val64 = 0x55aaaaaaaa5a0000ULL;
6161                 if (iteration == 1) {
6162                         val64 ^= 0xFFFFFFFFFFFF0000ULL;
6163                 }
6164                 writeq(val64, &bar0->mc_rldram_test_d2);
6165
6166                 val64 = (u64) (0x0000003ffffe0100ULL);
6167                 writeq(val64, &bar0->mc_rldram_test_add);
6168
6169                 val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_WRITE |
6170                         MC_RLDRAM_TEST_GO;
6171                 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
6172
6173                 for (cnt = 0; cnt < 5; cnt++) {
6174                         val64 = readq(&bar0->mc_rldram_test_ctrl);
6175                         if (val64 & MC_RLDRAM_TEST_DONE)
6176                                 break;
6177                         msleep(200);
6178                 }
6179
6180                 if (cnt == 5)
6181                         break;
6182
6183                 val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
6184                 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
6185
6186                 for (cnt = 0; cnt < 5; cnt++) {
6187                         val64 = readq(&bar0->mc_rldram_test_ctrl);
6188                         if (val64 & MC_RLDRAM_TEST_DONE)
6189                                 break;
6190                         msleep(500);
6191                 }
6192
6193                 if (cnt == 5)
6194                         break;
6195
6196                 val64 = readq(&bar0->mc_rldram_test_ctrl);
6197                 if (!(val64 & MC_RLDRAM_TEST_PASS))
6198                         test_fail = 1;
6199
6200                 iteration++;
6201         }
6202
6203         *data = test_fail;
6204
6205         /* Bring the adapter out of test mode */
6206         SPECIAL_REG_WRITE(0, &bar0->mc_rldram_test_ctrl, LF);
6207
6208         return test_fail;
6209 }
6210
6211 /**
6212  *  s2io_ethtool_test - conducts 6 tsets to determine the health of card.
6213  *  @sp : private member of the device structure, which is a pointer to the
6214  *  s2io_nic structure.
6215  *  @ethtest : pointer to a ethtool command specific structure that will be
6216  *  returned to the user.
6217  *  @data : variable that returns the result of each of the test
6218  * conducted by the driver.
6219  * Description:
6220  *  This function conducts 6 tests ( 4 offline and 2 online) to determine
6221  *  the health of the card.
6222  * Return value:
6223  *  void
6224  */
6225
6226 static void s2io_ethtool_test(struct net_device *dev,
6227                               struct ethtool_test *ethtest,
6228                               uint64_t * data)
6229 {
6230         struct s2io_nic *sp = dev->priv;
6231         int orig_state = netif_running(sp->dev);
6232
6233         if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
6234                 /* Offline Tests. */
6235                 if (orig_state)
6236                         s2io_close(sp->dev);
6237
6238                 if (s2io_register_test(sp, &data[0]))
6239                         ethtest->flags |= ETH_TEST_FL_FAILED;
6240
6241                 s2io_reset(sp);
6242
6243                 if (s2io_rldram_test(sp, &data[3]))
6244                         ethtest->flags |= ETH_TEST_FL_FAILED;
6245
6246                 s2io_reset(sp);
6247
6248                 if (s2io_eeprom_test(sp, &data[1]))
6249                         ethtest->flags |= ETH_TEST_FL_FAILED;
6250
6251                 if (s2io_bist_test(sp, &data[4]))
6252                         ethtest->flags |= ETH_TEST_FL_FAILED;
6253
6254                 if (orig_state)
6255                         s2io_open(sp->dev);
6256
6257                 data[2] = 0;
6258         } else {
6259                 /* Online Tests. */
6260                 if (!orig_state) {
6261                         DBG_PRINT(ERR_DBG,
6262                                   "%s: is not up, cannot run test\n",
6263                                   dev->name);
6264                         data[0] = -1;
6265                         data[1] = -1;
6266                         data[2] = -1;
6267                         data[3] = -1;
6268                         data[4] = -1;
6269                 }
6270
6271                 if (s2io_link_test(sp, &data[2]))
6272                         ethtest->flags |= ETH_TEST_FL_FAILED;
6273
6274                 data[0] = 0;
6275                 data[1] = 0;
6276                 data[3] = 0;
6277                 data[4] = 0;
6278         }
6279 }
6280
/* Fill tmp_stats[] for ETHTOOL_GSTATS. The order of the entries below is
 * positional and must stay in lockstep with the corresponding stat-name
 * strings table used by this driver's get_strings handler — do not reorder.
 * 32-bit hardware counters with a separate overflow word are merged into a
 * single 64-bit value as (oflow << 32) | low.
 */
static void s2io_get_ethtool_stats(struct net_device *dev,
				   struct ethtool_stats *estats,
				   u64 * tmp_stats)
{
	int i = 0, k;
	struct s2io_nic *sp = dev->priv;
	struct stat_block *stat_info = sp->mac_control.stats_info;

	/* Refresh the DMA'd hardware stat block before copying from it */
	s2io_updt_stats(sp);

	/* TX MAC statistics */
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->tmac_frms_oflow) << 32  |
		le32_to_cpu(stat_info->tmac_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->tmac_data_octets_oflow) << 32 |
		le32_to_cpu(stat_info->tmac_data_octets);
	tmp_stats[i++] = le64_to_cpu(stat_info->tmac_drop_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->tmac_mcst_frms_oflow) << 32 |
		le32_to_cpu(stat_info->tmac_mcst_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->tmac_bcst_frms_oflow) << 32 |
		le32_to_cpu(stat_info->tmac_bcst_frms);
	tmp_stats[i++] = le64_to_cpu(stat_info->tmac_pause_ctrl_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->tmac_ttl_octets_oflow) << 32 |
		le32_to_cpu(stat_info->tmac_ttl_octets);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->tmac_ucst_frms_oflow) << 32 |
		le32_to_cpu(stat_info->tmac_ucst_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->tmac_nucst_frms_oflow) << 32 |
		le32_to_cpu(stat_info->tmac_nucst_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->tmac_any_err_frms_oflow) << 32 |
		le32_to_cpu(stat_info->tmac_any_err_frms);
	tmp_stats[i++] = le64_to_cpu(stat_info->tmac_ttl_less_fb_octets);
	tmp_stats[i++] = le64_to_cpu(stat_info->tmac_vld_ip_octets);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->tmac_vld_ip_oflow) << 32 |
		le32_to_cpu(stat_info->tmac_vld_ip);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->tmac_drop_ip_oflow) << 32 |
		le32_to_cpu(stat_info->tmac_drop_ip);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->tmac_icmp_oflow) << 32 |
		le32_to_cpu(stat_info->tmac_icmp);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->tmac_rst_tcp_oflow) << 32 |
		le32_to_cpu(stat_info->tmac_rst_tcp);
	tmp_stats[i++] = le64_to_cpu(stat_info->tmac_tcp);
	tmp_stats[i++] = (u64)le32_to_cpu(stat_info->tmac_udp_oflow) << 32 |
		le32_to_cpu(stat_info->tmac_udp);

	/* RX MAC statistics */
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_vld_frms_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_vld_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_data_octets_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_data_octets);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_fcs_err_frms);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_drop_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_vld_mcst_frms_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_vld_mcst_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_vld_bcst_frms_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_vld_bcst_frms);
	tmp_stats[i++] = le32_to_cpu(stat_info->rmac_in_rng_len_err_frms);
	tmp_stats[i++] = le32_to_cpu(stat_info->rmac_out_rng_len_err_frms);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_long_frms);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_pause_ctrl_frms);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_unsup_ctrl_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_ttl_octets_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_ttl_octets);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_accepted_ucst_frms_oflow)
		<< 32 | le32_to_cpu(stat_info->rmac_accepted_ucst_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_accepted_nucst_frms_oflow)
		 << 32 | le32_to_cpu(stat_info->rmac_accepted_nucst_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_discarded_frms_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_discarded_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_drop_events_oflow)
		 << 32 | le32_to_cpu(stat_info->rmac_drop_events);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_less_fb_octets);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_usized_frms_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_usized_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_osized_frms_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_osized_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_frag_frms_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_frag_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_jabber_frms_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_jabber_frms);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_64_frms);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_65_127_frms);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_128_255_frms);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_256_511_frms);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_512_1023_frms);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_1024_1518_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_ip_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_ip);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ip_octets);
	tmp_stats[i++] = le32_to_cpu(stat_info->rmac_hdr_err_ip);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_drop_ip_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_drop_ip);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_icmp_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_icmp);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_tcp);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_udp_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_udp);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_err_drp_udp_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_err_drp_udp);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_err_sym);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q0);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q1);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q2);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q3);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q4);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q5);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q6);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q7);
	tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q0);
	tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q1);
	tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q2);
	tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q3);
	tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q4);
	tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q5);
	tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q6);
	tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q7);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_pause_cnt_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_pause_cnt);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_data_err_cnt);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_ctrl_err_cnt);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_accepted_ip_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_accepted_ip);
	tmp_stats[i++] = le32_to_cpu(stat_info->rmac_err_tcp);

	/* PCI transaction statistics */
	tmp_stats[i++] = le32_to_cpu(stat_info->rd_req_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->new_rd_req_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->new_rd_req_rtry_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->rd_rtry_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->wr_rtry_rd_ack_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->wr_req_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->new_wr_req_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->new_wr_req_rtry_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->wr_rtry_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->wr_disc_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->rd_rtry_wr_ack_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->txp_wr_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->txd_rd_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->txd_wr_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->rxd_rd_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->rxd_wr_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->txf_rd_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->rxf_wr_cnt);

	/* Enhanced statistics exist only for Hercules */
	if(sp->device_type == XFRAME_II_DEVICE) {
		tmp_stats[i++] =
				le64_to_cpu(stat_info->rmac_ttl_1519_4095_frms);
		tmp_stats[i++] =
				le64_to_cpu(stat_info->rmac_ttl_4096_8191_frms);
		tmp_stats[i++] =
				le64_to_cpu(stat_info->rmac_ttl_8192_max_frms);
		tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_gt_max_frms);
		tmp_stats[i++] = le64_to_cpu(stat_info->rmac_osized_alt_frms);
		tmp_stats[i++] = le64_to_cpu(stat_info->rmac_jabber_alt_frms);
		tmp_stats[i++] = le64_to_cpu(stat_info->rmac_gt_max_alt_frms);
		tmp_stats[i++] = le64_to_cpu(stat_info->rmac_vlan_frms);
		tmp_stats[i++] = le32_to_cpu(stat_info->rmac_len_discard);
		tmp_stats[i++] = le32_to_cpu(stat_info->rmac_fcs_discard);
		tmp_stats[i++] = le32_to_cpu(stat_info->rmac_pf_discard);
		tmp_stats[i++] = le32_to_cpu(stat_info->rmac_da_discard);
		tmp_stats[i++] = le32_to_cpu(stat_info->rmac_red_discard);
		tmp_stats[i++] = le32_to_cpu(stat_info->rmac_rts_discard);
		tmp_stats[i++] = le32_to_cpu(stat_info->rmac_ingm_full_discard);
		tmp_stats[i++] = le32_to_cpu(stat_info->link_fault_cnt);
	}

	/* Driver-maintained software statistics (host byte order) */
	tmp_stats[i++] = 0;
	tmp_stats[i++] = stat_info->sw_stat.single_ecc_errs;
	tmp_stats[i++] = stat_info->sw_stat.double_ecc_errs;
	tmp_stats[i++] = stat_info->sw_stat.parity_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.serious_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.soft_reset_cnt;
	tmp_stats[i++] = stat_info->sw_stat.fifo_full_cnt;
	tmp_stats[i++] = stat_info->sw_stat.ring_full_cnt[k];
	/* NOTE(review): the loop below always emits MAX_RX_RINGS entries,
	 * regardless of how many rings are configured.
	 */
	for (k = 0; k < MAX_RX_RINGS; k++)
		tmp_stats[i++] = stat_info->sw_stat.ring_full_cnt[k];

	/* XPAK transceiver alarm/warning counters */
	tmp_stats[i++] = stat_info->xpak_stat.alarm_transceiver_temp_high;
	tmp_stats[i++] = stat_info->xpak_stat.alarm_transceiver_temp_low;
	tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_bias_current_high;
	tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_bias_current_low;
	tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_output_power_high;
	tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_output_power_low;
	tmp_stats[i++] = stat_info->xpak_stat.warn_transceiver_temp_high;
	tmp_stats[i++] = stat_info->xpak_stat.warn_transceiver_temp_low;
	tmp_stats[i++] = stat_info->xpak_stat.warn_laser_bias_current_high;
	tmp_stats[i++] = stat_info->xpak_stat.warn_laser_bias_current_low;
	tmp_stats[i++] = stat_info->xpak_stat.warn_laser_output_power_high;
	tmp_stats[i++] = stat_info->xpak_stat.warn_laser_output_power_low;

	/* LRO (large receive offload) software statistics */
	tmp_stats[i++] = stat_info->sw_stat.clubbed_frms_cnt;
	tmp_stats[i++] = stat_info->sw_stat.sending_both;
	tmp_stats[i++] = stat_info->sw_stat.outof_sequence_pkts;
	tmp_stats[i++] = stat_info->sw_stat.flush_max_pkts;
	if (stat_info->sw_stat.num_aggregations) {
		u64 tmp = stat_info->sw_stat.sum_avg_pkts_aggregated;
		int count = 0;
		/*
		 * Since 64-bit divide does not work on all platforms,
		 * do repeated subtraction.
		 */
		while (tmp >= stat_info->sw_stat.num_aggregations) {
			tmp -= stat_info->sw_stat.num_aggregations;
			count++;
		}
		tmp_stats[i++] = count;
	}
	else
		tmp_stats[i++] = 0;
	tmp_stats[i++] = stat_info->sw_stat.mem_alloc_fail_cnt;
	tmp_stats[i++] = stat_info->sw_stat.pci_map_fail_cnt;
	tmp_stats[i++] = stat_info->sw_stat.watchdog_timer_cnt;
	tmp_stats[i++] = stat_info->sw_stat.mem_allocated;
	tmp_stats[i++] = stat_info->sw_stat.mem_freed;
	tmp_stats[i++] = stat_info->sw_stat.link_up_cnt;
	tmp_stats[i++] = stat_info->sw_stat.link_down_cnt;
	tmp_stats[i++] = stat_info->sw_stat.link_up_time;
	tmp_stats[i++] = stat_info->sw_stat.link_down_time;

	/* TX-side error counters */
	tmp_stats[i++] = stat_info->sw_stat.tx_buf_abort_cnt;
	tmp_stats[i++] = stat_info->sw_stat.tx_desc_abort_cnt;
	tmp_stats[i++] = stat_info->sw_stat.tx_parity_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.tx_link_loss_cnt;
	tmp_stats[i++] = stat_info->sw_stat.tx_list_proc_err_cnt;

	/* RX-side and per-block error counters */
	tmp_stats[i++] = stat_info->sw_stat.rx_parity_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.rx_abort_cnt;
	tmp_stats[i++] = stat_info->sw_stat.rx_parity_abort_cnt;
	tmp_stats[i++] = stat_info->sw_stat.rx_rda_fail_cnt;
	tmp_stats[i++] = stat_info->sw_stat.rx_unkn_prot_cnt;
	tmp_stats[i++] = stat_info->sw_stat.rx_fcs_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.rx_buf_size_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.rx_rxd_corrupt_cnt;
	tmp_stats[i++] = stat_info->sw_stat.rx_unkn_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.tda_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.pfc_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.pcc_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.tti_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.tpa_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.sm_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.lso_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.mac_tmac_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.mac_rmac_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.xgxs_txgxs_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.xgxs_rxgxs_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.rc_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.prc_pcix_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.rpa_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.rda_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.rti_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.mc_err_cnt;
}
6556
6557 static int s2io_ethtool_get_regs_len(struct net_device *dev)
6558 {
6559         return (XENA_REG_SPACE);
6560 }
6561
6562
6563 static u32 s2io_ethtool_get_rx_csum(struct net_device * dev)
6564 {
6565         struct s2io_nic *sp = dev->priv;
6566
6567         return (sp->rx_csum);
6568 }
6569
6570 static int s2io_ethtool_set_rx_csum(struct net_device *dev, u32 data)
6571 {
6572         struct s2io_nic *sp = dev->priv;
6573
6574         if (data)
6575                 sp->rx_csum = 1;
6576         else
6577                 sp->rx_csum = 0;
6578
6579         return 0;
6580 }
6581
6582 static int s2io_get_eeprom_len(struct net_device *dev)
6583 {
6584         return (XENA_EEPROM_SPACE);
6585 }
6586
6587 static int s2io_get_sset_count(struct net_device *dev, int sset)
6588 {
6589         struct s2io_nic *sp = dev->priv;
6590
6591         switch (sset) {
6592         case ETH_SS_TEST:
6593                 return S2IO_TEST_LEN;
6594         case ETH_SS_STATS:
6595                 switch(sp->device_type) {
6596                 case XFRAME_I_DEVICE:
6597                         return XFRAME_I_STAT_LEN;
6598                 case XFRAME_II_DEVICE:
6599                         return XFRAME_II_STAT_LEN;
6600                 default:
6601                         return 0;
6602                 }
6603         default:
6604                 return -EOPNOTSUPP;
6605         }
6606 }
6607
6608 static void s2io_ethtool_get_strings(struct net_device *dev,
6609                                      u32 stringset, u8 * data)
6610 {
6611         int stat_size = 0;
6612         struct s2io_nic *sp = dev->priv;
6613
6614         switch (stringset) {
6615         case ETH_SS_TEST:
6616                 memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
6617                 break;
6618         case ETH_SS_STATS:
6619                 stat_size = sizeof(ethtool_xena_stats_keys);
6620                 memcpy(data, &ethtool_xena_stats_keys,stat_size);
6621                 if(sp->device_type == XFRAME_II_DEVICE) {
6622                         memcpy(data + stat_size,
6623                                 &ethtool_enhanced_stats_keys,
6624                                 sizeof(ethtool_enhanced_stats_keys));
6625                         stat_size += sizeof(ethtool_enhanced_stats_keys);
6626                 }
6627
6628                 memcpy(data + stat_size, &ethtool_driver_stats_keys,
6629                         sizeof(ethtool_driver_stats_keys));
6630         }
6631 }
6632
6633 static int s2io_ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
6634 {
6635         if (data)
6636                 dev->features |= NETIF_F_IP_CSUM;
6637         else
6638                 dev->features &= ~NETIF_F_IP_CSUM;
6639
6640         return 0;
6641 }
6642
6643 static u32 s2io_ethtool_op_get_tso(struct net_device *dev)
6644 {
6645         return (dev->features & NETIF_F_TSO) != 0;
6646 }
6647 static int s2io_ethtool_op_set_tso(struct net_device *dev, u32 data)
6648 {
6649         if (data)
6650                 dev->features |= (NETIF_F_TSO | NETIF_F_TSO6);
6651         else
6652                 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
6653
6654         return 0;
6655 }
6656
/* ethtool entry points supported by this driver.  Callbacks not listed
 * here (e.g. get_tx_csum) fall back to the ethtool core defaults.
 */
static const struct ethtool_ops netdev_ethtool_ops = {
	.get_settings = s2io_ethtool_gset,
	.set_settings = s2io_ethtool_sset,
	.get_drvinfo = s2io_ethtool_gdrvinfo,
	.get_regs_len = s2io_ethtool_get_regs_len,
	.get_regs = s2io_ethtool_gregs,
	.get_link = ethtool_op_get_link,
	.get_eeprom_len = s2io_get_eeprom_len,
	.get_eeprom = s2io_ethtool_geeprom,
	.set_eeprom = s2io_ethtool_seeprom,
	.get_ringparam = s2io_ethtool_gringparam,
	.get_pauseparam = s2io_ethtool_getpause_data,
	.set_pauseparam = s2io_ethtool_setpause_data,
	.get_rx_csum = s2io_ethtool_get_rx_csum,
	.set_rx_csum = s2io_ethtool_set_rx_csum,
	.set_tx_csum = s2io_ethtool_op_set_tx_csum,
	.set_sg = ethtool_op_set_sg,
	.get_tso = s2io_ethtool_op_get_tso,
	.set_tso = s2io_ethtool_op_set_tso,
	.set_ufo = ethtool_op_set_ufo,
	.self_test = s2io_ethtool_test,
	.get_strings = s2io_ethtool_get_strings,
	.phys_id = s2io_ethtool_idnic,
	.get_ethtool_stats = s2io_get_ethtool_stats,
	.get_sset_count = s2io_get_sset_count,
};
6683
/**
 *  s2io_ioctl - Entry point for the Ioctl
 *  @dev :  Device pointer.
 *  @rq :   An IOCTL specific structure, that can contain a pointer to
 *  a proprietary structure used to pass information to the driver.
 *  @cmd :  This is used to distinguish between the different commands that
 *  can be passed to the IOCTL functions.
 *  Description:
 *  Currently no special functionality is supported in IOCTL, hence
 *  this function always returns -EOPNOTSUPP.
 */

static int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	return -EOPNOTSUPP;
}
6700
6701 /**
6702  *  s2io_change_mtu - entry point to change MTU size for the device.
6703  *   @dev : device pointer.
6704  *   @new_mtu : the new MTU size for the device.
6705  *   Description: A driver entry point to change MTU size for the device.
6706  *   Before changing the MTU the device must be stopped.
6707  *  Return value:
6708  *   0 on success and an appropriate (-)ve integer as defined in errno.h
6709  *   file on failure.
6710  */
6711
6712 static int s2io_change_mtu(struct net_device *dev, int new_mtu)
6713 {
6714         struct s2io_nic *sp = dev->priv;
6715         int ret = 0;
6716
6717         if ((new_mtu < MIN_MTU) || (new_mtu > S2IO_JUMBO_SIZE)) {
6718                 DBG_PRINT(ERR_DBG, "%s: MTU size is invalid.\n",
6719                           dev->name);
6720                 return -EPERM;
6721         }
6722
6723         dev->mtu = new_mtu;
6724         if (netif_running(dev)) {
6725                 s2io_stop_all_tx_queue(sp);
6726                 s2io_card_down(sp);
6727                 ret = s2io_card_up(sp);
6728                 if (ret) {
6729                         DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
6730                                   __FUNCTION__);
6731                         return ret;
6732                 }
6733                 s2io_wake_all_tx_queue(sp);
6734         } else { /* Device is down */
6735                 struct XENA_dev_config __iomem *bar0 = sp->bar0;
6736                 u64 val64 = new_mtu;
6737
6738                 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
6739         }
6740
6741         return ret;
6742 }
6743
/**
 * s2io_set_link - Set the link status
 * @work: work_struct embedded in the adapter private structure
 * Description: Brings the LED, the adapter_control register and the
 * software link state in line with what the hardware reports.  Runs
 * from the driver workqueue under rtnl_lock, and is mutually excluded
 * against card reset via the __S2IO_STATE_LINK_TASK bit.
 */

static void s2io_set_link(struct work_struct *work)
{
	struct s2io_nic *nic = container_of(work, struct s2io_nic, set_link_task);
	struct net_device *dev = nic->dev;
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 val64;
	u16 subid;

	rtnl_lock();

	/* The device may have been closed while this work was queued. */
	if (!netif_running(dev))
		goto out_unlock;

	if (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(nic->state))) {
		/* The card is being reset, no point doing anything */
		goto out_unlock;
	}

	subid = nic->pdev->subsystem_device;
	if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
		/*
		 * Allow a small delay for the NICs self initiated
		 * cleanup to complete.
		 */
		msleep(100);
	}

	val64 = readq(&bar0->adapter_status);
	if (LINK_IS_UP(val64)) {
		/* On the first link-up, the adapter itself must be enabled
		 * once the device is verified quiescent.
		 */
		if (!(readq(&bar0->adapter_control) & ADAPTER_CNTL_EN)) {
			if (verify_xena_quiescence(nic)) {
				val64 = readq(&bar0->adapter_control);
				val64 |= ADAPTER_CNTL_EN;
				writeq(val64, &bar0->adapter_control);
				if (CARDS_WITH_FAULTY_LINK_INDICATORS(
					nic->device_type, subid)) {
					/* Drive the link LED via GPIO on cards
					 * with broken LED wiring; the readback
					 * flushes the posted write.
					 */
					val64 = readq(&bar0->gpio_control);
					val64 |= GPIO_CTRL_GPIO_0;
					writeq(val64, &bar0->gpio_control);
					val64 = readq(&bar0->gpio_control);
				} else {
					val64 |= ADAPTER_LED_ON;
					writeq(val64, &bar0->adapter_control);
				}
				nic->device_enabled_once = TRUE;
			} else {
				DBG_PRINT(ERR_DBG, "%s: Error: ", dev->name);
				DBG_PRINT(ERR_DBG, "device is not Quiescent\n");
				s2io_stop_all_tx_queue(nic);
			}
		}
		val64 = readq(&bar0->adapter_control);
		val64 |= ADAPTER_LED_ON;
		writeq(val64, &bar0->adapter_control);
		s2io_link(nic, LINK_UP);
	} else {
		if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
						      subid)) {
			val64 = readq(&bar0->gpio_control);
			val64 &= ~GPIO_CTRL_GPIO_0;
			writeq(val64, &bar0->gpio_control);
			val64 = readq(&bar0->gpio_control);
		}
		/* turn off LED */
		val64 = readq(&bar0->adapter_control);
		val64 = val64 &(~ADAPTER_LED_ON);
		writeq(val64, &bar0->adapter_control);
		s2io_link(nic, LINK_DOWN);
	}
	clear_bit(__S2IO_STATE_LINK_TASK, &(nic->state));

out_unlock:
	rtnl_unlock();
}
6824
6825 static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
6826                                 struct buffAdd *ba,
6827                                 struct sk_buff **skb, u64 *temp0, u64 *temp1,
6828                                 u64 *temp2, int size)
6829 {
6830         struct net_device *dev = sp->dev;
6831         struct swStat *stats = &sp->mac_control.stats_info->sw_stat;
6832
6833         if ((sp->rxd_mode == RXD_MODE_1) && (rxdp->Host_Control == 0)) {
6834                 struct RxD1 *rxdp1 = (struct RxD1 *)rxdp;
6835                 /* allocate skb */
6836                 if (*skb) {
6837                         DBG_PRINT(INFO_DBG, "SKB is not NULL\n");
6838                         /*
6839                          * As Rx frame are not going to be processed,
6840                          * using same mapped address for the Rxd
6841                          * buffer pointer
6842                          */
6843                         rxdp1->Buffer0_ptr = *temp0;
6844                 } else {
6845                         *skb = dev_alloc_skb(size);
6846                         if (!(*skb)) {
6847                                 DBG_PRINT(INFO_DBG, "%s: Out of ", dev->name);
6848                                 DBG_PRINT(INFO_DBG, "memory to allocate ");
6849                                 DBG_PRINT(INFO_DBG, "1 buf mode SKBs\n");
6850                                 sp->mac_control.stats_info->sw_stat. \
6851                                         mem_alloc_fail_cnt++;
6852                                 return -ENOMEM ;
6853                         }
6854                         sp->mac_control.stats_info->sw_stat.mem_allocated
6855                                 += (*skb)->truesize;
6856                         /* storing the mapped addr in a temp variable
6857                          * such it will be used for next rxd whose
6858                          * Host Control is NULL
6859                          */
6860                         rxdp1->Buffer0_ptr = *temp0 =
6861                                 pci_map_single( sp->pdev, (*skb)->data,
6862                                         size - NET_IP_ALIGN,
6863                                         PCI_DMA_FROMDEVICE);
6864                         if (pci_dma_mapping_error(rxdp1->Buffer0_ptr))
6865                                 goto memalloc_failed;
6866                         rxdp->Host_Control = (unsigned long) (*skb);
6867                 }
6868         } else if ((sp->rxd_mode == RXD_MODE_3B) && (rxdp->Host_Control == 0)) {
6869                 struct RxD3 *rxdp3 = (struct RxD3 *)rxdp;
6870                 /* Two buffer Mode */
6871                 if (*skb) {
6872                         rxdp3->Buffer2_ptr = *temp2;
6873                         rxdp3->Buffer0_ptr = *temp0;
6874                         rxdp3->Buffer1_ptr = *temp1;
6875                 } else {
6876                         *skb = dev_alloc_skb(size);
6877                         if (!(*skb)) {
6878                                 DBG_PRINT(INFO_DBG, "%s: Out of ", dev->name);
6879                                 DBG_PRINT(INFO_DBG, "memory to allocate ");
6880                                 DBG_PRINT(INFO_DBG, "2 buf mode SKBs\n");
6881                                 sp->mac_control.stats_info->sw_stat. \
6882                                         mem_alloc_fail_cnt++;
6883                                 return -ENOMEM;
6884                         }
6885                         sp->mac_control.stats_info->sw_stat.mem_allocated
6886                                 += (*skb)->truesize;
6887                         rxdp3->Buffer2_ptr = *temp2 =
6888                                 pci_map_single(sp->pdev, (*skb)->data,
6889                                                dev->mtu + 4,
6890                                                PCI_DMA_FROMDEVICE);
6891                         if (pci_dma_mapping_error(rxdp3->Buffer2_ptr))
6892                                 goto memalloc_failed;
6893                         rxdp3->Buffer0_ptr = *temp0 =
6894                                 pci_map_single( sp->pdev, ba->ba_0, BUF0_LEN,
6895                                                 PCI_DMA_FROMDEVICE);
6896                         if (pci_dma_mapping_error(rxdp3->Buffer0_ptr)) {
6897                                 pci_unmap_single (sp->pdev,
6898                                         (dma_addr_t)rxdp3->Buffer2_ptr,
6899                                         dev->mtu + 4, PCI_DMA_FROMDEVICE);
6900                                 goto memalloc_failed;
6901                         }
6902                         rxdp->Host_Control = (unsigned long) (*skb);
6903
6904                         /* Buffer-1 will be dummy buffer not used */
6905                         rxdp3->Buffer1_ptr = *temp1 =
6906                                 pci_map_single(sp->pdev, ba->ba_1, BUF1_LEN,
6907                                                 PCI_DMA_FROMDEVICE);
6908                         if (pci_dma_mapping_error(rxdp3->Buffer1_ptr)) {
6909                                 pci_unmap_single (sp->pdev,
6910                                         (dma_addr_t)rxdp3->Buffer0_ptr,
6911                                         BUF0_LEN, PCI_DMA_FROMDEVICE);
6912                                 pci_unmap_single (sp->pdev,
6913                                         (dma_addr_t)rxdp3->Buffer2_ptr,
6914                                         dev->mtu + 4, PCI_DMA_FROMDEVICE);
6915                                 goto memalloc_failed;
6916                         }
6917                 }
6918         }
6919         return 0;
6920         memalloc_failed:
6921                 stats->pci_map_fail_cnt++;
6922                 stats->mem_freed += (*skb)->truesize;
6923                 dev_kfree_skb(*skb);
6924                 return -ENOMEM;
6925 }
6926
6927 static void set_rxd_buffer_size(struct s2io_nic *sp, struct RxD_t *rxdp,
6928                                 int size)
6929 {
6930         struct net_device *dev = sp->dev;
6931         if (sp->rxd_mode == RXD_MODE_1) {
6932                 rxdp->Control_2 = SET_BUFFER0_SIZE_1( size - NET_IP_ALIGN);
6933         } else if (sp->rxd_mode == RXD_MODE_3B) {
6934                 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
6935                 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
6936                 rxdp->Control_2 |= SET_BUFFER2_SIZE_3( dev->mtu + 4);
6937         }
6938 }
6939
6940 static  int rxd_owner_bit_reset(struct s2io_nic *sp)
6941 {
6942         int i, j, k, blk_cnt = 0, size;
6943         struct mac_info * mac_control = &sp->mac_control;
6944         struct config_param *config = &sp->config;
6945         struct net_device *dev = sp->dev;
6946         struct RxD_t *rxdp = NULL;
6947         struct sk_buff *skb = NULL;
6948         struct buffAdd *ba = NULL;
6949         u64 temp0_64 = 0, temp1_64 = 0, temp2_64 = 0;
6950
6951         /* Calculate the size based on ring mode */
6952         size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
6953                 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
6954         if (sp->rxd_mode == RXD_MODE_1)
6955                 size += NET_IP_ALIGN;
6956         else if (sp->rxd_mode == RXD_MODE_3B)
6957                 size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
6958
6959         for (i = 0; i < config->rx_ring_num; i++) {
6960                 blk_cnt = config->rx_cfg[i].num_rxd /
6961                         (rxd_count[sp->rxd_mode] +1);
6962
6963                 for (j = 0; j < blk_cnt; j++) {
6964                         for (k = 0; k < rxd_count[sp->rxd_mode]; k++) {
6965                                 rxdp = mac_control->rings[i].
6966                                         rx_blocks[j].rxds[k].virt_addr;
6967                                 if(sp->rxd_mode == RXD_MODE_3B)
6968                                         ba = &mac_control->rings[i].ba[j][k];
6969                                 if (set_rxd_buffer_pointer(sp, rxdp, ba,
6970                                                        &skb,(u64 *)&temp0_64,
6971                                                        (u64 *)&temp1_64,
6972                                                        (u64 *)&temp2_64,
6973                                                         size) == -ENOMEM) {
6974                                         return 0;
6975                                 }
6976
6977                                 set_rxd_buffer_size(sp, rxdp, size);
6978                                 wmb();
6979                                 /* flip the Ownership bit to Hardware */
6980                                 rxdp->Control_1 |= RXD_OWN_XENA;
6981                         }
6982                 }
6983         }
6984         return 0;
6985
6986 }
6987
/*
 * s2io_add_isr - register the driver's interrupt handler(s).
 * @sp: device private structure.
 *
 * Tries MSI-X first when configured; if enabling MSI-X or registering
 * any vector fails, falls back to a shared legacy (INTA) interrupt.
 * Returns 0 on success, -1 if even the INTA handler could not be
 * registered.
 */
static int s2io_add_isr(struct s2io_nic * sp)
{
	int ret = 0;
	struct net_device *dev = sp->dev;
	int err = 0;

	if (sp->config.intr_type == MSI_X)
		ret = s2io_enable_msi_x(sp);
	if (ret) {
		DBG_PRINT(ERR_DBG, "%s: Defaulting to INTA\n", dev->name);
		sp->config.intr_type = INTA;
	}

	/* Store the values of the MSIX table in the struct s2io_nic structure */
	store_xmsi_data(sp);

	/* After proper initialization of H/W, register ISR */
	if (sp->config.intr_type == MSI_X) {
		int i, msix_rx_cnt = 0;

		for (i = 0; i < sp->num_entries; i++) {
			if (sp->s2io_entries[i].in_use == MSIX_FLG) {
				/* Ring vectors get the Rx handler, the alarm
				 * vector gets the fifo/alarm handler.
				 */
				if (sp->s2io_entries[i].type ==
					MSIX_RING_TYPE) {
					sprintf(sp->desc[i], "%s:MSI-X-%d-RX",
						dev->name, i);
					err = request_irq(sp->entries[i].vector,
						s2io_msix_ring_handle, 0,
						sp->desc[i],
						sp->s2io_entries[i].arg);
				} else if (sp->s2io_entries[i].type ==
					MSIX_ALARM_TYPE) {
					sprintf(sp->desc[i], "%s:MSI-X-%d-TX",
					dev->name, i);
					err = request_irq(sp->entries[i].vector,
						s2io_msix_fifo_handle, 0,
						sp->desc[i],
						sp->s2io_entries[i].arg);

				}
				/* if either data or addr is zero print it. */
				if (!(sp->msix_info[i].addr &&
					sp->msix_info[i].data)) {
					DBG_PRINT(ERR_DBG,
						"%s @Addr:0x%llx Data:0x%llx\n",
						sp->desc[i],
						(unsigned long long)
						sp->msix_info[i].addr,
						(unsigned long long)
						ntohl(sp->msix_info[i].data));
				} else
					msix_rx_cnt++;
				if (err) {
					/* Undo any vectors registered so far
					 * and fall back to legacy interrupts.
					 */
					remove_msix_isr(sp);

					DBG_PRINT(ERR_DBG,
						"%s:MSI-X-%d registration "
						"failed\n", dev->name, i);

					DBG_PRINT(ERR_DBG,
						"%s: Defaulting to INTA\n",
						dev->name);
					sp->config.intr_type = INTA;
					break;
				}
				sp->s2io_entries[i].in_use =
					MSIX_REGISTERED_SUCCESS;
			}
		}
		if (!err) {
			/* NOTE(review): the pre-decrement presumably excludes
			 * the alarm vector from the Rx count — confirm against
			 * s2io_enable_msi_x()'s vector layout.
			 */
			printk(KERN_INFO "MSI-X-RX %d entries enabled\n",
				--msix_rx_cnt);
			DBG_PRINT(INFO_DBG, "MSI-X-TX entries enabled"
						" through alarm vector\n");
		}
	}
	if (sp->config.intr_type == INTA) {
		err = request_irq((int) sp->pdev->irq, s2io_isr, IRQF_SHARED,
				sp->name, dev);
		if (err) {
			DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
				  dev->name);
			return -1;
		}
	}
	return 0;
}
7075 static void s2io_rem_isr(struct s2io_nic * sp)
7076 {
7077         if (sp->config.intr_type == MSI_X)
7078                 remove_msix_isr(sp);
7079         else
7080                 remove_inta_isr(sp);
7081 }
7082
/*
 * do_s2io_card_down - bring the adapter down.
 * @sp: device private structure.
 * @do_io: non-zero to also quiesce and reset the hardware; zero skips
 *         register I/O (for cases where the device is not accessible).
 *
 * Stops the alarm timer, napi and traffic, unregisters the ISR, waits
 * for the device to become quiescent, resets it, and frees all Tx/Rx
 * buffers.  Serialized against s2io_set_link via __S2IO_STATE_LINK_TASK.
 */
static void do_s2io_card_down(struct s2io_nic * sp, int do_io)
{
	int cnt = 0;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	register u64 val64 = 0;
	struct config_param *config;
	config = &sp->config;

	if (!is_s2io_card_up(sp))
		return;

	del_timer_sync(&sp->alarm_timer);
	/* If s2io_set_link task is executing, wait till it completes. */
	while (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(sp->state))) {
		msleep(50);
	}
	clear_bit(__S2IO_STATE_CARD_UP, &sp->state);

	/* Disable napi */
	if (sp->config.napi) {
		int off = 0;
		if (config->intr_type ==  MSI_X) {
			/* per-ring napi contexts in MSI-X mode */
			for (; off < sp->config.rx_ring_num; off++)
				napi_disable(&sp->mac_control.rings[off].napi);
			}
		else
			napi_disable(&sp->napi);
	}

	/* disable Tx and Rx traffic on the NIC */
	if (do_io)
		stop_nic(sp);

	s2io_rem_isr(sp);

	/* Check if the device is Quiescent and then Reset the NIC */
	while(do_io) {
		/* As per the HW requirement we need to replenish the
		 * receive buffers to avoid a ring bump. Since there is
		 * no intention of processing the Rx frames at this point,
		 * we just set the ownership bit of the rxds in each Rx
		 * ring to HW and set the appropriate buffer size
		 * based on the ring mode.
		 */
		rxd_owner_bit_reset(sp);

		val64 = readq(&bar0->adapter_status);
		if (verify_xena_quiescence(sp)) {
			if(verify_pcc_quiescent(sp, sp->device_enabled_once))
			break;
		}

		/* Poll roughly every 50ms, giving up after ~500ms. */
		msleep(50);
		cnt++;
		if (cnt == 10) {
			DBG_PRINT(ERR_DBG,
				  "s2io_close:Device not Quiescent ");
			DBG_PRINT(ERR_DBG, "adaper status reads 0x%llx\n",
				  (unsigned long long) val64);
			break;
		}
	}
	if (do_io)
		s2io_reset(sp);

	/* Free all Tx buffers */
	free_tx_buffers(sp);

	/* Free all Rx buffers */
	free_rx_buffers(sp);

	clear_bit(__S2IO_STATE_LINK_TASK, &(sp->state));
}
7156
/* Bring the card down with full hardware I/O (quiesce + reset). */
static void s2io_card_down(struct s2io_nic * sp)
{
	do_s2io_card_down(sp, 1);
}
7161
/*
 * s2io_card_up - bring the adapter up.
 * @sp: device private structure.
 *
 * Initializes the hardware, replenishes all Rx rings, enables napi,
 * restores the receive mode, starts the NIC, registers the ISR and
 * enables interrupts.  Returns 0 on success or a negative errno; on
 * failure the card is reset and Rx buffers are freed.
 *
 * NOTE(review): napi is enabled before the start_nic()/s2io_add_isr()
 * failure exits but not disabled on those paths — confirm whether the
 * callers (open/restart) compensate.
 */
static int s2io_card_up(struct s2io_nic * sp)
{
	int i, ret = 0;
	struct mac_info *mac_control;
	struct config_param *config;
	struct net_device *dev = (struct net_device *) sp->dev;
	u16 interruptible;

	/* Initialize the H/W I/O registers */
	ret = init_nic(sp);
	if (ret != 0) {
		DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
			  dev->name);
		/* -EIO means the hardware is already hosed; skip the reset. */
		if (ret != -EIO)
			s2io_reset(sp);
		return ret;
	}

	/*
	 * Initializing the Rx buffers. For now we are considering only 1
	 * Rx ring and initializing buffers into 30 Rx blocks
	 */
	mac_control = &sp->mac_control;
	config = &sp->config;

	for (i = 0; i < config->rx_ring_num; i++) {
		mac_control->rings[i].mtu = dev->mtu;
		ret = fill_rx_buffers(&mac_control->rings[i], 1);
		if (ret) {
			DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
				  dev->name);
			s2io_reset(sp);
			free_rx_buffers(sp);
			return -ENOMEM;
		}
		DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
			  mac_control->rings[i].rx_bufs_left);
	}

	/* Initialise napi */
	if (config->napi) {
		int i;
		if (config->intr_type ==  MSI_X) {
			/* one napi context per Rx ring in MSI-X mode */
			for (i = 0; i < sp->config.rx_ring_num; i++)
				napi_enable(&sp->mac_control.rings[i].napi);
		} else {
			napi_enable(&sp->napi);
		}
	}

	/* Maintain the state prior to the open */
	if (sp->promisc_flg)
		sp->promisc_flg = 0;
	if (sp->m_cast_flg) {
		sp->m_cast_flg = 0;
		sp->all_multi_pos= 0;
	}

	/* Setting its receive mode */
	s2io_set_multicast(dev);

	if (sp->lro) {
		/* Initialize max aggregatable pkts per session based on MTU */
		sp->lro_max_aggr_per_sess = ((1<<16) - 1) / dev->mtu;
		/* Check if we can use(if specified) user provided value */
		if (lro_max_pkts < sp->lro_max_aggr_per_sess)
			sp->lro_max_aggr_per_sess = lro_max_pkts;
	}

	/* Enable Rx Traffic and interrupts on the NIC */
	if (start_nic(sp)) {
		DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
		s2io_reset(sp);
		free_rx_buffers(sp);
		return -ENODEV;
	}

	/* Add interrupt service routine */
	if (s2io_add_isr(sp) != 0) {
		/* s2io_add_isr may have registered some MSI-X vectors
		 * before failing; tear them down.
		 */
		if (sp->config.intr_type == MSI_X)
			s2io_rem_isr(sp);
		s2io_reset(sp);
		free_rx_buffers(sp);
		return -ENODEV;
	}

	/* Kick the alarm timer at half-second intervals. */
	S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ/2));

	/*  Enable select interrupts */
	en_dis_err_alarms(sp, ENA_ALL_INTRS, ENABLE_INTRS);
	if (sp->config.intr_type != INTA)
		en_dis_able_nic_intrs(sp, TX_TRAFFIC_INTR, ENABLE_INTRS);
	else {
		interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
		interruptible |= TX_PIC_INTR;
		en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
	}

	set_bit(__S2IO_STATE_CARD_UP, &sp->state);
	return 0;
}
7263
7264 /**
7265  * s2io_restart_nic - Resets the NIC.
7266  * @data : long pointer to the device private structure
7267  * Description:
7268  * This function is scheduled to be run by the s2io_tx_watchdog
7269  * function after 0.5 secs to reset the NIC. The idea is to reduce
7270  * the run time of the watch dog routine which is run holding a
7271  * spin lock.
7272  */
7273
7274 static void s2io_restart_nic(struct work_struct *work)
7275 {
7276         struct s2io_nic *sp = container_of(work, struct s2io_nic, rst_timer_task);
7277         struct net_device *dev = sp->dev;
7278
7279         rtnl_lock();
7280
7281         if (!netif_running(dev))
7282                 goto out_unlock;
7283
7284         s2io_card_down(sp);
7285         if (s2io_card_up(sp)) {
7286                 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
7287                           dev->name);
7288         }
7289         s2io_wake_all_tx_queue(sp);
7290         DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n",
7291                   dev->name);
7292 out_unlock:
7293         rtnl_unlock();
7294 }
7295
7296 /**
7297  *  s2io_tx_watchdog - Watchdog for transmit side.
7298  *  @dev : Pointer to net device structure
7299  *  Description:
7300  *  This function is triggered if the Tx Queue is stopped
7301  *  for a pre-defined amount of time when the Interface is still up.
7302  *  If the Interface is jammed in such a situation, the hardware is
7303  *  reset (by s2io_close) and restarted again (by s2io_open) to
7304  *  overcome any problem that might have been caused in the hardware.
7305  *  Return value:
7306  *  void
7307  */
7308
7309 static void s2io_tx_watchdog(struct net_device *dev)
7310 {
7311         struct s2io_nic *sp = dev->priv;
7312
7313         if (netif_carrier_ok(dev)) {
7314                 sp->mac_control.stats_info->sw_stat.watchdog_timer_cnt++;
7315                 schedule_work(&sp->rst_timer_task);
7316                 sp->mac_control.stats_info->sw_stat.soft_reset_cnt++;
7317         }
7318 }
7319
7320 /**
7321  *   rx_osm_handler - To perform some OS related operations on SKB.
7322  *   @sp: private member of the device structure,pointer to s2io_nic structure.
7323  *   @skb : the socket buffer pointer.
7324  *   @len : length of the packet
7325  *   @cksum : FCS checksum of the frame.
7326  *   @ring_no : the ring from which this RxD was extracted.
7327  *   Description:
7328  *   This function is called by the Rx interrupt service routine to perform
7329  *   some OS related operations on the SKB before passing it to the upper
7330  *   layers. It mainly checks if the checksum is OK, if so adds it to the
7331  *   SKBs cksum variable, increments the Rx packet count and passes the SKB
7332  *   to the upper layer. If the checksum is wrong, it increments the Rx
7333  *   packet error count, frees the SKB and returns error.
7334  *   Return value:
7335  *   SUCCESS on success and -1 on failure.
7336  */
static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
{
	struct s2io_nic *sp = ring_data->nic;
	struct net_device *dev = (struct net_device *) ring_data->dev;
	/* The skb for this descriptor was stashed in Host_Control when the
	 * buffer was posted; recover it here. */
	struct sk_buff *skb = (struct sk_buff *)
		((unsigned long) rxdp->Host_Control);
	int ring_no = ring_data->ring_no;
	u16 l3_csum, l4_csum;
	/* RXD_T_CODE masks the hardware transfer-code bits out of Control_1;
	 * a non-zero value means the NIC flagged a receive problem. */
	unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
	struct lro *lro;
	u8 err_mask;

	skb->dev = dev;

	if (err) {
		/* Check for parity error */
		if (err & 0x1) {
			sp->mac_control.stats_info->sw_stat.parity_err_cnt++;
		}
		/* The transfer code proper sits in the top bits (>> 48). */
		err_mask = err >> 48;
		/* Bump the per-transfer-code software error counter. */
		switch(err_mask) {
			case 1:
				sp->mac_control.stats_info->sw_stat.
				rx_parity_err_cnt++;
			break;

			case 2:
				sp->mac_control.stats_info->sw_stat.
				rx_abort_cnt++;
			break;

			case 3:
				sp->mac_control.stats_info->sw_stat.
				rx_parity_abort_cnt++;
			break;

			case 4:
				sp->mac_control.stats_info->sw_stat.
				rx_rda_fail_cnt++;
			break;

			case 5:
				sp->mac_control.stats_info->sw_stat.
				rx_unkn_prot_cnt++;
			break;

			case 6:
				sp->mac_control.stats_info->sw_stat.
				rx_fcs_err_cnt++;
			break;

			case 7:
				sp->mac_control.stats_info->sw_stat.
				rx_buf_size_err_cnt++;
			break;

			case 8:
				sp->mac_control.stats_info->sw_stat.
				rx_rxd_corrupt_cnt++;
			break;

			case 15:
				sp->mac_control.stats_info->sw_stat.
				rx_unkn_err_cnt++;
			break;
		}
		/*
		 * Drop the packet if bad transfer code. Exception being
		 * 0x5, which could be due to unsupported IPv6 extension header.
		 * In this case, we let stack handle the packet.
		 * Note that in this case, since checksum will be incorrect,
		 * stack will validate the same.
		 */
		if (err_mask != 0x5) {
			DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%x\n",
				dev->name, err_mask);
			sp->stats.rx_crc_errors++;
			sp->mac_control.stats_info->sw_stat.mem_freed
				+= skb->truesize;
			dev_kfree_skb(skb);
			ring_data->rx_bufs_left -= 1;
			/* Detach the skb from the descriptor before reuse. */
			rxdp->Host_Control = 0;
			return 0;
		}
	}

	/* Updating statistics */
	ring_data->rx_packets++;
	rxdp->Host_Control = 0;
	if (sp->rxd_mode == RXD_MODE_1) {
		/* 1-buffer mode: the whole frame is in buffer 0. */
		int len = RXD_GET_BUFFER0_SIZE_1(rxdp->Control_2);

		ring_data->rx_bytes += len;
		skb_put(skb, len);

	} else if (sp->rxd_mode == RXD_MODE_3B) {
		/* 3B mode: copy buffer 0 (from ba_0) in front of the data
		 * already in the skb, then account for buffer 2's length. */
		int get_block = ring_data->rx_curr_get_info.block_index;
		int get_off = ring_data->rx_curr_get_info.offset;
		int buf0_len = RXD_GET_BUFFER0_SIZE_3(rxdp->Control_2);
		int buf2_len = RXD_GET_BUFFER2_SIZE_3(rxdp->Control_2);
		unsigned char *buff = skb_push(skb, buf0_len);

		struct buffAdd *ba = &ring_data->ba[get_block][get_off];
		ring_data->rx_bytes += buf0_len + buf2_len;
		memcpy(buff, ba->ba_0, buf0_len);
		skb_put(skb, buf2_len);
	}

	/* Hardware checksum is only trusted for non-fragmented TCP/UDP
	 * frames (when LRO is on) and when Rx checksumming is enabled. */
	if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) && ((!ring_data->lro) ||
	    (ring_data->lro && (!(rxdp->Control_1 & RXD_FRAME_IP_FRAG)))) &&
	    (sp->rx_csum)) {
		l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
		l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
		if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
			/*
			 * NIC verifies if the Checksum of the received
			 * frame is Ok or not and accordingly returns
			 * a flag in the RxD.
			 */
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			if (ring_data->lro) {
				u32 tcp_len;
				u8 *tcp;
				int ret = 0;

				/* Ask the LRO engine what to do with this
				 * segment; ret selects the action below. */
				ret = s2io_club_tcp_session(ring_data,
					skb->data, &tcp, &tcp_len, &lro,
					rxdp, sp);
				switch (ret) {
					case 3: /* Begin anew */
						lro->parent = skb;
						goto aggregate;
					case 1: /* Aggregate */
					{
						lro_append_pkt(sp, lro,
							skb, tcp_len);
						goto aggregate;
					}
					case 4: /* Flush session */
					{
						lro_append_pkt(sp, lro,
							skb, tcp_len);
						queue_rx_frame(lro->parent,
							lro->vlan_tag);
						clear_lro_session(lro);
						sp->mac_control.stats_info->
						    sw_stat.flush_max_pkts++;
						goto aggregate;
					}
					case 2: /* Flush both */
						lro->parent->data_len =
							lro->frags_len;
						sp->mac_control.stats_info->
						     sw_stat.sending_both++;
						queue_rx_frame(lro->parent,
							lro->vlan_tag);
						clear_lro_session(lro);
						goto send_up;
					case 0: /* sessions exceeded */
					case -1: /* non-TCP or not
						  * L2 aggregatable
						  */
					case 5: /*
						 * First pkt in session not
						 * L3/L4 aggregatable
						 */
						break;
					default:
						DBG_PRINT(ERR_DBG,
							"%s: Samadhana!!\n",
							 __FUNCTION__);
						BUG();
				}
			}
		} else {
			/*
			 * Packet with erroneous checksum, let the
			 * upper layers deal with it.
			 */
			skb->ip_summed = CHECKSUM_NONE;
		}
	} else
		skb->ip_summed = CHECKSUM_NONE;

	sp->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
send_up:
	/* Hand the frame (or completed LRO super-frame) to the stack. */
	queue_rx_frame(skb, RXD_GET_VLAN_TAG(rxdp->Control_2));
	dev->last_rx = jiffies;
aggregate:
	/* Aggregated or queued either way, this ring buffer is consumed. */
	sp->mac_control.rings[ring_no].rx_bufs_left -= 1;
	return SUCCESS;
}
7529
7530 /**
7531  *  s2io_link - stops/starts the Tx queue.
7532  *  @sp : private member of the device structure, which is a pointer to the
7533  *  s2io_nic structure.
7534  *  @link : indicates whether link is UP/DOWN.
7535  *  Description:
7536  *  This function stops/starts the Tx queue depending on whether the link
7537  *  status of the NIC is down or up. This is called by the Alarm
7538  *  interrupt handler whenever a link change interrupt comes up.
7539  *  Return value:
7540  *  void.
7541  */
7542
7543 static void s2io_link(struct s2io_nic * sp, int link)
7544 {
7545         struct net_device *dev = (struct net_device *) sp->dev;
7546
7547         if (link != sp->last_link_state) {
7548                 init_tti(sp, link);
7549                 if (link == LINK_DOWN) {
7550                         DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
7551                         s2io_stop_all_tx_queue(sp);
7552                         netif_carrier_off(dev);
7553                         if(sp->mac_control.stats_info->sw_stat.link_up_cnt)
7554                         sp->mac_control.stats_info->sw_stat.link_up_time =
7555                                 jiffies - sp->start_time;
7556                         sp->mac_control.stats_info->sw_stat.link_down_cnt++;
7557                 } else {
7558                         DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name);
7559                         if (sp->mac_control.stats_info->sw_stat.link_down_cnt)
7560                         sp->mac_control.stats_info->sw_stat.link_down_time =
7561                                 jiffies - sp->start_time;
7562                         sp->mac_control.stats_info->sw_stat.link_up_cnt++;
7563                         netif_carrier_on(dev);
7564                         s2io_wake_all_tx_queue(sp);
7565                 }
7566         }
7567         sp->last_link_state = link;
7568         sp->start_time = jiffies;
7569 }
7570
7571 /**
7572  *  s2io_init_pci -Initialization of PCI and PCI-X configuration registers .
7573  *  @sp : private member of the device structure, which is a pointer to the
7574  *  s2io_nic structure.
7575  *  Description:
7576  *  This function initializes a few of the PCI and PCI-X configuration registers
7577  *  with recommended values.
7578  *  Return value:
7579  *  void
7580  */
7581
7582 static void s2io_init_pci(struct s2io_nic * sp)
7583 {
7584         u16 pci_cmd = 0, pcix_cmd = 0;
7585
7586         /* Enable Data Parity Error Recovery in PCI-X command register. */
7587         pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7588                              &(pcix_cmd));
7589         pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7590                               (pcix_cmd | 1));
7591         pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7592                              &(pcix_cmd));
7593
7594         /* Set the PErr Response bit in PCI command register. */
7595         pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
7596         pci_write_config_word(sp->pdev, PCI_COMMAND,
7597                               (pci_cmd | PCI_COMMAND_PARITY));
7598         pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
7599 }
7600
7601 static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type,
7602         u8 *dev_multiq)
7603 {
7604         if ((tx_fifo_num > MAX_TX_FIFOS) ||
7605                 (tx_fifo_num < 1)) {
7606                 DBG_PRINT(ERR_DBG, "s2io: Requested number of tx fifos "
7607                         "(%d) not supported\n", tx_fifo_num);
7608
7609                 if (tx_fifo_num < 1)
7610                         tx_fifo_num = 1;
7611                 else
7612                         tx_fifo_num = MAX_TX_FIFOS;
7613
7614                 DBG_PRINT(ERR_DBG, "s2io: Default to %d ", tx_fifo_num);
7615                 DBG_PRINT(ERR_DBG, "tx fifos\n");
7616         }
7617
7618         if (multiq)
7619                 *dev_multiq = multiq;
7620
7621         if (tx_steering_type && (1 == tx_fifo_num)) {
7622                 if (tx_steering_type != TX_DEFAULT_STEERING)
7623                         DBG_PRINT(ERR_DBG,
7624                                 "s2io: Tx steering is not supported with "
7625                                 "one fifo. Disabling Tx steering.\n");
7626                 tx_steering_type = NO_STEERING;
7627         }
7628
7629         if ((tx_steering_type < NO_STEERING) ||
7630                 (tx_steering_type > TX_DEFAULT_STEERING)) {
7631                 DBG_PRINT(ERR_DBG, "s2io: Requested transmit steering not "
7632                          "supported\n");
7633                 DBG_PRINT(ERR_DBG, "s2io: Disabling transmit steering\n");
7634                 tx_steering_type = NO_STEERING;
7635         }
7636
7637         if (rx_ring_num > MAX_RX_RINGS) {
7638                 DBG_PRINT(ERR_DBG, "s2io: Requested number of rx rings not "
7639                          "supported\n");
7640                 DBG_PRINT(ERR_DBG, "s2io: Default to %d rx rings\n",
7641                         MAX_RX_RINGS);
7642                 rx_ring_num = MAX_RX_RINGS;
7643         }
7644
7645         if ((*dev_intr_type != INTA) && (*dev_intr_type != MSI_X)) {
7646                 DBG_PRINT(ERR_DBG, "s2io: Wrong intr_type requested. "
7647                           "Defaulting to INTA\n");
7648                 *dev_intr_type = INTA;
7649         }
7650
7651         if ((*dev_intr_type == MSI_X) &&
7652                         ((pdev->device != PCI_DEVICE_ID_HERC_WIN) &&
7653                         (pdev->device != PCI_DEVICE_ID_HERC_UNI))) {
7654                 DBG_PRINT(ERR_DBG, "s2io: Xframe I does not support MSI_X. "
7655                                         "Defaulting to INTA\n");
7656                 *dev_intr_type = INTA;
7657         }
7658
7659         if ((rx_ring_mode != 1) && (rx_ring_mode != 2)) {
7660                 DBG_PRINT(ERR_DBG, "s2io: Requested ring mode not supported\n");
7661                 DBG_PRINT(ERR_DBG, "s2io: Defaulting to 1-buffer mode\n");
7662                 rx_ring_mode = 1;
7663         }
7664         return SUCCESS;
7665 }
7666
7667 /**
7668  * rts_ds_steer - Receive traffic steering based on IPv4 or IPv6 TOS
7669  * or Traffic class respectively.
7670  * @nic: device private variable
7671  * Description: The function configures the receive steering to
7672  * desired receive ring.
7673  * Return Value:  SUCCESS on success and
7674  * '-1' on failure (endian settings incorrect).
7675  */
7676 static int rts_ds_steer(struct s2io_nic *nic, u8 ds_codepoint, u8 ring)
7677 {
7678         struct XENA_dev_config __iomem *bar0 = nic->bar0;
7679         register u64 val64 = 0;
7680
7681         if (ds_codepoint > 63)
7682                 return FAILURE;
7683
7684         val64 = RTS_DS_MEM_DATA(ring);
7685         writeq(val64, &bar0->rts_ds_mem_data);
7686
7687         val64 = RTS_DS_MEM_CTRL_WE |
7688                 RTS_DS_MEM_CTRL_STROBE_NEW_CMD |
7689                 RTS_DS_MEM_CTRL_OFFSET(ds_codepoint);
7690
7691         writeq(val64, &bar0->rts_ds_mem_ctrl);
7692
7693         return wait_for_cmd_complete(&bar0->rts_ds_mem_ctrl,
7694                                 RTS_DS_MEM_CTRL_STROBE_CMD_BEING_EXECUTED,
7695                                 S2IO_BIT_RESET);
7696 }
7697
7698 /**
7699  *  s2io_init_nic - Initialization of the adapter .
7700  *  @pdev : structure containing the PCI related information of the device.
7701  *  @pre: List of PCI devices supported by the driver listed in s2io_tbl.
7702  *  Description:
7703  *  The function initializes an adapter identified by the pci_dev structure.
7704  *  All OS related initialization including memory and device structure and
7705  *  initialization of the device private variable is done. Also the swapper
7706  *  control register is initialized to enable read and write into the I/O
7707  *  registers of the device.
7708  *  Return value:
7709  *  returns 0 on success and negative on failure.
7710  */
7711
7712 static int __devinit
7713 s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
7714 {
7715         struct s2io_nic *sp;
7716         struct net_device *dev;
7717         int i, j, ret;
7718         int dma_flag = FALSE;
7719         u32 mac_up, mac_down;
7720         u64 val64 = 0, tmp64 = 0;
7721         struct XENA_dev_config __iomem *bar0 = NULL;
7722         u16 subid;
7723         struct mac_info *mac_control;
7724         struct config_param *config;
7725         int mode;
7726         u8 dev_intr_type = intr_type;
7727         u8 dev_multiq = 0;
7728         DECLARE_MAC_BUF(mac);
7729
7730         ret = s2io_verify_parm(pdev, &dev_intr_type, &dev_multiq);
7731         if (ret)
7732                 return ret;
7733
7734         if ((ret = pci_enable_device(pdev))) {
7735                 DBG_PRINT(ERR_DBG,
7736                           "s2io_init_nic: pci_enable_device failed\n");
7737                 return ret;
7738         }
7739
7740         if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
7741                 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 64bit DMA\n");
7742                 dma_flag = TRUE;
7743                 if (pci_set_consistent_dma_mask
7744                     (pdev, DMA_64BIT_MASK)) {
7745                         DBG_PRINT(ERR_DBG,
7746                                   "Unable to obtain 64bit DMA for \
7747                                         consistent allocations\n");
7748                         pci_disable_device(pdev);
7749                         return -ENOMEM;
7750                 }
7751         } else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
7752                 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 32bit DMA\n");
7753         } else {
7754                 pci_disable_device(pdev);
7755                 return -ENOMEM;
7756         }
7757         if ((ret = pci_request_regions(pdev, s2io_driver_name))) {
7758                 DBG_PRINT(ERR_DBG, "%s: Request Regions failed - %x \n", __FUNCTION__, ret);
7759                 pci_disable_device(pdev);
7760                 return -ENODEV;
7761         }
7762         if (dev_multiq)
7763                 dev = alloc_etherdev_mq(sizeof(struct s2io_nic), tx_fifo_num);
7764         else
7765                 dev = alloc_etherdev(sizeof(struct s2io_nic));
7766         if (dev == NULL) {
7767                 DBG_PRINT(ERR_DBG, "Device allocation failed\n");
7768                 pci_disable_device(pdev);
7769                 pci_release_regions(pdev);
7770                 return -ENODEV;
7771         }
7772
7773         pci_set_master(pdev);
7774         pci_set_drvdata(pdev, dev);
7775         SET_NETDEV_DEV(dev, &pdev->dev);
7776
7777         /*  Private member variable initialized to s2io NIC structure */
7778         sp = dev->priv;
7779         memset(sp, 0, sizeof(struct s2io_nic));
7780         sp->dev = dev;
7781         sp->pdev = pdev;
7782         sp->high_dma_flag = dma_flag;
7783         sp->device_enabled_once = FALSE;
7784         if (rx_ring_mode == 1)
7785                 sp->rxd_mode = RXD_MODE_1;
7786         if (rx_ring_mode == 2)
7787                 sp->rxd_mode = RXD_MODE_3B;
7788
7789         sp->config.intr_type = dev_intr_type;
7790
7791         if ((pdev->device == PCI_DEVICE_ID_HERC_WIN) ||
7792                 (pdev->device == PCI_DEVICE_ID_HERC_UNI))
7793                 sp->device_type = XFRAME_II_DEVICE;
7794         else
7795                 sp->device_type = XFRAME_I_DEVICE;
7796
7797         sp->lro = lro_enable;
7798
7799         /* Initialize some PCI/PCI-X fields of the NIC. */
7800         s2io_init_pci(sp);
7801
7802         /*
7803          * Setting the device configuration parameters.
7804          * Most of these parameters can be specified by the user during
7805          * module insertion as they are module loadable parameters. If
7806          * these parameters are not not specified during load time, they
7807          * are initialized with default values.
7808          */
7809         mac_control = &sp->mac_control;
7810         config = &sp->config;
7811
7812         config->napi = napi;
7813         config->tx_steering_type = tx_steering_type;
7814
7815         /* Tx side parameters. */
7816         if (config->tx_steering_type == TX_PRIORITY_STEERING)
7817                 config->tx_fifo_num = MAX_TX_FIFOS;
7818         else
7819                 config->tx_fifo_num = tx_fifo_num;
7820
7821         /* Initialize the fifos used for tx steering */
7822         if (config->tx_fifo_num < 5) {
7823                         if (config->tx_fifo_num  == 1)
7824                                 sp->total_tcp_fifos = 1;
7825                         else
7826                                 sp->total_tcp_fifos = config->tx_fifo_num - 1;
7827                         sp->udp_fifo_idx = config->tx_fifo_num - 1;
7828                         sp->total_udp_fifos = 1;
7829                         sp->other_fifo_idx = sp->total_tcp_fifos - 1;
7830         } else {
7831                 sp->total_tcp_fifos = (tx_fifo_num - FIFO_UDP_MAX_NUM -
7832                                                 FIFO_OTHER_MAX_NUM);
7833                 sp->udp_fifo_idx = sp->total_tcp_fifos;
7834                 sp->total_udp_fifos = FIFO_UDP_MAX_NUM;
7835                 sp->other_fifo_idx = sp->udp_fifo_idx + FIFO_UDP_MAX_NUM;
7836         }
7837
7838         config->multiq = dev_multiq;
7839         for (i = 0; i < config->tx_fifo_num; i++) {
7840                 config->tx_cfg[i].fifo_len = tx_fifo_len[i];
7841                 config->tx_cfg[i].fifo_priority = i;
7842         }
7843
7844         /* mapping the QoS priority to the configured fifos */
7845         for (i = 0; i < MAX_TX_FIFOS; i++)
7846                 config->fifo_mapping[i] = fifo_map[config->tx_fifo_num - 1][i];
7847
7848         /* map the hashing selector table to the configured fifos */
7849         for (i = 0; i < config->tx_fifo_num; i++)
7850                 sp->fifo_selector[i] = fifo_selector[i];
7851
7852
7853         config->tx_intr_type = TXD_INT_TYPE_UTILZ;
7854         for (i = 0; i < config->tx_fifo_num; i++) {
7855                 config->tx_cfg[i].f_no_snoop =
7856                     (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
7857                 if (config->tx_cfg[i].fifo_len < 65) {
7858                         config->tx_intr_type = TXD_INT_TYPE_PER_LIST;
7859                         break;
7860                 }
7861         }
7862         /* + 2 because one Txd for skb->data and one Txd for UFO */
7863         config->max_txds = MAX_SKB_FRAGS + 2;
7864
7865         /* Rx side parameters. */
7866         config->rx_ring_num = rx_ring_num;
7867         for (i = 0; i < config->rx_ring_num; i++) {
7868                 config->rx_cfg[i].num_rxd = rx_ring_sz[i] *
7869                     (rxd_count[sp->rxd_mode] + 1);
7870                 config->rx_cfg[i].ring_priority = i;
7871                 mac_control->rings[i].rx_bufs_left = 0;
7872                 mac_control->rings[i].rxd_mode = sp->rxd_mode;
7873                 mac_control->rings[i].rxd_count = rxd_count[sp->rxd_mode];
7874                 mac_control->rings[i].pdev = sp->pdev;
7875                 mac_control->rings[i].dev = sp->dev;
7876         }
7877
7878         for (i = 0; i < rx_ring_num; i++) {
7879                 config->rx_cfg[i].ring_org = RING_ORG_BUFF1;
7880                 config->rx_cfg[i].f_no_snoop =
7881                     (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
7882         }
7883
7884         /*  Setting Mac Control parameters */
7885         mac_control->rmac_pause_time = rmac_pause_time;
7886         mac_control->mc_pause_threshold_q0q3 = mc_pause_threshold_q0q3;
7887         mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7;
7888
7889
7890         /*  initialize the shared memory used by the NIC and the host */
7891         if (init_shared_mem(sp)) {
7892                 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n",
7893                           dev->name);
7894                 ret = -ENOMEM;
7895                 goto mem_alloc_failed;
7896         }
7897
7898         sp->bar0 = ioremap(pci_resource_start(pdev, 0),
7899                                      pci_resource_len(pdev, 0));
7900         if (!sp->bar0) {
7901                 DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem1\n",
7902                           dev->name);
7903                 ret = -ENOMEM;
7904                 goto bar0_remap_failed;
7905         }
7906
7907         sp->bar1 = ioremap(pci_resource_start(pdev, 2),
7908                                      pci_resource_len(pdev, 2));
7909         if (!sp->bar1) {
7910                 DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem2\n",
7911                           dev->name);
7912                 ret = -ENOMEM;
7913                 goto bar1_remap_failed;
7914         }
7915
7916         dev->irq = pdev->irq;
7917         dev->base_addr = (unsigned long) sp->bar0;
7918
7919         /* Initializing the BAR1 address as the start of the FIFO pointer. */
7920         for (j = 0; j < MAX_TX_FIFOS; j++) {
7921                 mac_control->tx_FIFO_start[j] = (struct TxFIFO_element __iomem *)
7922                     (sp->bar1 + (j * 0x00020000));
7923         }
7924
7925         /*  Driver entry points */
7926         dev->open = &s2io_open;
7927         dev->stop = &s2io_close;
7928         dev->hard_start_xmit = &s2io_xmit;
7929         dev->get_stats = &s2io_get_stats;
7930         dev->set_multicast_list = &s2io_set_multicast;
7931         dev->do_ioctl = &s2io_ioctl;
7932         dev->set_mac_address = &s2io_set_mac_addr;
7933         dev->change_mtu = &s2io_change_mtu;
7934         SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
7935         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
7936         dev->vlan_rx_register = s2io_vlan_rx_register;
7937         dev->vlan_rx_kill_vid = (void *)s2io_vlan_rx_kill_vid;
7938
7939         /*
7940          * will use eth_mac_addr() for  dev->set_mac_address
7941          * mac address will be set every time dev->open() is called
7942          */
7943 #ifdef CONFIG_NET_POLL_CONTROLLER
7944         dev->poll_controller = s2io_netpoll;
7945 #endif
7946
7947         dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
7948         if (sp->high_dma_flag == TRUE)
7949                 dev->features |= NETIF_F_HIGHDMA;
7950         dev->features |= NETIF_F_TSO;
7951         dev->features |= NETIF_F_TSO6;
7952         if ((sp->device_type & XFRAME_II_DEVICE) && (ufo))  {
7953                 dev->features |= NETIF_F_UFO;
7954                 dev->features |= NETIF_F_HW_CSUM;
7955         }
7956         if (config->multiq)
7957                 dev->features |= NETIF_F_MULTI_QUEUE;
7958         dev->tx_timeout = &s2io_tx_watchdog;
7959         dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
7960         INIT_WORK(&sp->rst_timer_task, s2io_restart_nic);
7961         INIT_WORK(&sp->set_link_task, s2io_set_link);
7962
7963         pci_save_state(sp->pdev);
7964
7965         /* Setting swapper control on the NIC, for proper reset operation */
7966         if (s2io_set_swapper(sp)) {
7967                 DBG_PRINT(ERR_DBG, "%s:swapper settings are wrong\n",
7968                           dev->name);
7969                 ret = -EAGAIN;
7970                 goto set_swap_failed;
7971         }
7972
7973         /* Verify if the Herc works on the slot its placed into */
7974         if (sp->device_type & XFRAME_II_DEVICE) {
7975                 mode = s2io_verify_pci_mode(sp);
7976                 if (mode < 0) {
7977                         DBG_PRINT(ERR_DBG, "%s: ", __FUNCTION__);
7978                         DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
7979                         ret = -EBADSLT;
7980                         goto set_swap_failed;
7981                 }
7982         }
7983
7984         if (sp->config.intr_type == MSI_X) {
7985                 sp->num_entries = config->rx_ring_num + 1;
7986                 ret = s2io_enable_msi_x(sp);
7987
7988                 if (!ret) {
7989                         ret = s2io_test_msi(sp);
7990                         /* rollback MSI-X, will re-enable during add_isr() */
7991                         remove_msix_isr(sp);
7992                 }
7993                 if (ret) {
7994
7995                         DBG_PRINT(ERR_DBG,
7996                           "%s: MSI-X requested but failed to enable\n",
7997                           dev->name);
7998                         sp->config.intr_type = INTA;
7999                 }
8000         }
8001
8002         if (config->intr_type ==  MSI_X) {
8003                 for (i = 0; i < config->rx_ring_num ; i++)
8004                         netif_napi_add(dev, &mac_control->rings[i].napi,
8005                                 s2io_poll_msix, 64);
8006         } else {
8007                 netif_napi_add(dev, &sp->napi, s2io_poll_inta, 64);
8008         }
8009
8010         /* Not needed for Herc */
8011         if (sp->device_type & XFRAME_I_DEVICE) {
8012                 /*
8013                  * Fix for all "FFs" MAC address problems observed on
8014                  * Alpha platforms
8015                  */
8016                 fix_mac_address(sp);
8017                 s2io_reset(sp);
8018         }
8019
8020         /*
8021          * MAC address initialization.
8022          * For now only one mac address will be read and used.
8023          */
8024         bar0 = sp->bar0;
8025         val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
8026             RMAC_ADDR_CMD_MEM_OFFSET(0 + S2IO_MAC_ADDR_START_OFFSET);
8027         writeq(val64, &bar0->rmac_addr_cmd_mem);
8028         wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
8029                       RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING, S2IO_BIT_RESET);
8030         tmp64 = readq(&bar0->rmac_addr_data0_mem);
8031         mac_down = (u32) tmp64;
8032         mac_up = (u32) (tmp64 >> 32);
8033
8034         sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up);
8035         sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8);
8036         sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16);
8037         sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_up >> 24);
8038         sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
8039         sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);
8040
8041         /*  Set the factory defined MAC address initially   */
8042         dev->addr_len = ETH_ALEN;
8043         memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);
8044         memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
8045
8046         /* initialize number of multicast & unicast MAC entries variables */
8047         if (sp->device_type == XFRAME_I_DEVICE) {
8048                 config->max_mc_addr = S2IO_XENA_MAX_MC_ADDRESSES;
8049                 config->max_mac_addr = S2IO_XENA_MAX_MAC_ADDRESSES;
8050                 config->mc_start_offset = S2IO_XENA_MC_ADDR_START_OFFSET;
8051         } else if (sp->device_type == XFRAME_II_DEVICE) {
8052                 config->max_mc_addr = S2IO_HERC_MAX_MC_ADDRESSES;
8053                 config->max_mac_addr = S2IO_HERC_MAX_MAC_ADDRESSES;
8054                 config->mc_start_offset = S2IO_HERC_MC_ADDR_START_OFFSET;
8055         }
8056
8057         /* store mac addresses from CAM to s2io_nic structure */
8058         do_s2io_store_unicast_mc(sp);
8059
8060         /* Configure MSIX vector for number of rings configured plus one */
8061         if ((sp->device_type == XFRAME_II_DEVICE) &&
8062                 (config->intr_type == MSI_X))
8063                 sp->num_entries = config->rx_ring_num + 1;
8064
8065          /* Store the values of the MSIX table in the s2io_nic structure */
8066         store_xmsi_data(sp);
8067         /* reset Nic and bring it to known state */
8068         s2io_reset(sp);
8069
8070         /*
8071          * Initialize link state flags
8072          * and the card state parameter
8073          */
8074         sp->state = 0;
8075
8076         /* Initialize spinlocks */
8077         for (i = 0; i < sp->config.tx_fifo_num; i++)
8078                 spin_lock_init(&mac_control->fifos[i].tx_lock);
8079
8080         /*
8081          * SXE-002: Configure link and activity LED to init state
8082          * on driver load.
8083          */
8084         subid = sp->pdev->subsystem_device;
8085         if ((subid & 0xFF) >= 0x07) {
8086                 val64 = readq(&bar0->gpio_control);
8087                 val64 |= 0x0000800000000000ULL;
8088                 writeq(val64, &bar0->gpio_control);
8089                 val64 = 0x0411040400000000ULL;
8090                 writeq(val64, (void __iomem *) bar0 + 0x2700);
8091                 val64 = readq(&bar0->gpio_control);
8092         }
8093
8094         sp->rx_csum = 1;        /* Rx chksum verify enabled by default */
8095
8096         if (register_netdev(dev)) {
8097                 DBG_PRINT(ERR_DBG, "Device registration failed\n");
8098                 ret = -ENODEV;
8099                 goto register_failed;
8100         }
8101         s2io_vpd_read(sp);
8102         DBG_PRINT(ERR_DBG, "Copyright(c) 2002-2007 Neterion Inc.\n");
8103         DBG_PRINT(ERR_DBG, "%s: Neterion %s (rev %d)\n",dev->name,
8104                   sp->product_name, pdev->revision);
8105         DBG_PRINT(ERR_DBG, "%s: Driver version %s\n", dev->name,
8106                   s2io_driver_version);
8107         DBG_PRINT(ERR_DBG, "%s: MAC ADDR: %s\n",
8108                   dev->name, print_mac(mac, dev->dev_addr));
8109         DBG_PRINT(ERR_DBG, "SERIAL NUMBER: %s\n", sp->serial_num);
8110         if (sp->device_type & XFRAME_II_DEVICE) {
8111                 mode = s2io_print_pci_mode(sp);
8112                 if (mode < 0) {
8113                         DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
8114                         ret = -EBADSLT;
8115                         unregister_netdev(dev);
8116                         goto set_swap_failed;
8117                 }
8118         }
8119         switch(sp->rxd_mode) {
8120                 case RXD_MODE_1:
8121                     DBG_PRINT(ERR_DBG, "%s: 1-Buffer receive mode enabled\n",
8122                                                 dev->name);
8123                     break;
8124                 case RXD_MODE_3B:
8125                     DBG_PRINT(ERR_DBG, "%s: 2-Buffer receive mode enabled\n",
8126                                                 dev->name);
8127                     break;
8128         }
8129
8130         switch (sp->config.napi) {
8131         case 0:
8132                 DBG_PRINT(ERR_DBG, "%s: NAPI disabled\n", dev->name);
8133                 break;
8134         case 1:
8135                 DBG_PRINT(ERR_DBG, "%s: NAPI enabled\n", dev->name);
8136                 break;
8137         }
8138
8139         DBG_PRINT(ERR_DBG, "%s: Using %d Tx fifo(s)\n", dev->name,
8140                 sp->config.tx_fifo_num);
8141
8142         DBG_PRINT(ERR_DBG, "%s: Using %d Rx ring(s)\n", dev->name,
8143                   sp->config.rx_ring_num);
8144
8145         switch(sp->config.intr_type) {
8146                 case INTA:
8147                     DBG_PRINT(ERR_DBG, "%s: Interrupt type INTA\n", dev->name);
8148                     break;
8149                 case MSI_X:
8150                     DBG_PRINT(ERR_DBG, "%s: Interrupt type MSI-X\n", dev->name);
8151                     break;
8152         }
8153         if (sp->config.multiq) {
8154         for (i = 0; i < sp->config.tx_fifo_num; i++)
8155                 mac_control->fifos[i].multiq = config->multiq;
8156                 DBG_PRINT(ERR_DBG, "%s: Multiqueue support enabled\n",
8157                         dev->name);
8158         } else
8159                 DBG_PRINT(ERR_DBG, "%s: Multiqueue support disabled\n",
8160                         dev->name);
8161
8162         switch (sp->config.tx_steering_type) {
8163         case NO_STEERING:
8164                 DBG_PRINT(ERR_DBG, "%s: No steering enabled for"
8165                         " transmit\n", dev->name);
8166                         break;
8167         case TX_PRIORITY_STEERING:
8168                 DBG_PRINT(ERR_DBG, "%s: Priority steering enabled for"
8169                         " transmit\n", dev->name);
8170                 break;
8171         case TX_DEFAULT_STEERING:
8172                 DBG_PRINT(ERR_DBG, "%s: Default steering enabled for"
8173                         " transmit\n", dev->name);
8174         }
8175
8176         if (sp->lro)
8177                 DBG_PRINT(ERR_DBG, "%s: Large receive offload enabled\n",
8178                           dev->name);
8179         if (ufo)
8180                 DBG_PRINT(ERR_DBG, "%s: UDP Fragmentation Offload(UFO)"
8181                                         " enabled\n", dev->name);
8182         /* Initialize device name */
8183         sprintf(sp->name, "%s Neterion %s", dev->name, sp->product_name);
8184
8185         /*
8186          * Make Link state as off at this point, when the Link change
8187          * interrupt comes the state will be automatically changed to
8188          * the right state.
8189          */
8190         netif_carrier_off(dev);
8191
8192         return 0;
8193
8194       register_failed:
8195       set_swap_failed:
8196         iounmap(sp->bar1);
8197       bar1_remap_failed:
8198         iounmap(sp->bar0);
8199       bar0_remap_failed:
8200       mem_alloc_failed:
8201         free_shared_mem(sp);
8202         pci_disable_device(pdev);
8203         pci_release_regions(pdev);
8204         pci_set_drvdata(pdev, NULL);
8205         free_netdev(dev);
8206
8207         return ret;
8208 }
8209
8210 /**
8211  * s2io_rem_nic - Free the PCI device
8212  * @pdev: structure containing the PCI related information of the device.
8213  * Description: This function is called by the Pci subsystem to release a
8214  * PCI device and free up all resource held up by the device. This could
8215  * be in response to a Hot plug event or when the driver is to be removed
8216  * from memory.
8217  */
8218
8219 static void __devexit s2io_rem_nic(struct pci_dev *pdev)
8220 {
8221         struct net_device *dev =
8222             (struct net_device *) pci_get_drvdata(pdev);
8223         struct s2io_nic *sp;
8224
8225         if (dev == NULL) {
8226                 DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n");
8227                 return;
8228         }
8229
8230         flush_scheduled_work();
8231
8232         sp = dev->priv;
8233         unregister_netdev(dev);
8234
8235         free_shared_mem(sp);
8236         iounmap(sp->bar0);
8237         iounmap(sp->bar1);
8238         pci_release_regions(pdev);
8239         pci_set_drvdata(pdev, NULL);
8240         free_netdev(dev);
8241         pci_disable_device(pdev);
8242 }
8243
8244 /**
8245  * s2io_starter - Entry point for the driver
8246  * Description: This function is the entry point for the driver. It verifies
8247  * the module loadable parameters and initializes PCI configuration space.
8248  */
8249
static int __init s2io_starter(void)
{
        /* Register the PCI driver; the PCI core invokes the probe routine
         * for every matching Neterion device found. */
        return pci_register_driver(&s2io_driver);
}
8254
8255 /**
8256  * s2io_closer - Cleanup routine for the driver
8257  * Description: This function is the cleanup routine for the driver. It
8257  * unregisters the driver.
8258  */
8259
static __exit void s2io_closer(void)
{
        /* Unregistering triggers the remove callback for each bound device */
        pci_unregister_driver(&s2io_driver);
        DBG_PRINT(INIT_DBG, "cleanup done\n");
}
8265
8266 module_init(s2io_starter);
8267 module_exit(s2io_closer);
8268
8269 static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip,
8270                 struct tcphdr **tcp, struct RxD_t *rxdp,
8271                 struct s2io_nic *sp)
8272 {
8273         int ip_off;
8274         u8 l2_type = (u8)((rxdp->Control_1 >> 37) & 0x7), ip_len;
8275
8276         if (!(rxdp->Control_1 & RXD_FRAME_PROTO_TCP)) {
8277                 DBG_PRINT(INIT_DBG,"%s: Non-TCP frames not supported for LRO\n",
8278                           __FUNCTION__);
8279                 return -1;
8280         }
8281
8282         /* Checking for DIX type or DIX type with VLAN */
8283         if ((l2_type == 0)
8284                 || (l2_type == 4)) {
8285                 ip_off = HEADER_ETHERNET_II_802_3_SIZE;
8286                 /*
8287                  * If vlan stripping is disabled and the frame is VLAN tagged,
8288                  * shift the offset by the VLAN header size bytes.
8289                  */
8290                 if ((!vlan_strip_flag) &&
8291                         (rxdp->Control_1 & RXD_FRAME_VLAN_TAG))
8292                         ip_off += HEADER_VLAN_SIZE;
8293         } else {
8294                 /* LLC, SNAP etc are considered non-mergeable */
8295                 return -1;
8296         }
8297
8298         *ip = (struct iphdr *)((u8 *)buffer + ip_off);
8299         ip_len = (u8)((*ip)->ihl);
8300         ip_len <<= 2;
8301         *tcp = (struct tcphdr *)((unsigned long)*ip + ip_len);
8302
8303         return 0;
8304 }
8305
8306 static int check_for_socket_match(struct lro *lro, struct iphdr *ip,
8307                                   struct tcphdr *tcp)
8308 {
8309         DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
8310         if ((lro->iph->saddr != ip->saddr) || (lro->iph->daddr != ip->daddr) ||
8311            (lro->tcph->source != tcp->source) || (lro->tcph->dest != tcp->dest))
8312                 return -1;
8313         return 0;
8314 }
8315
8316 static inline int get_l4_pyld_length(struct iphdr *ip, struct tcphdr *tcp)
8317 {
8318         return(ntohs(ip->tot_len) - (ip->ihl << 2) - (tcp->doff << 2));
8319 }
8320
8321 static void initiate_new_session(struct lro *lro, u8 *l2h,
8322         struct iphdr *ip, struct tcphdr *tcp, u32 tcp_pyld_len, u16 vlan_tag)
8323 {
8324         DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
8325         lro->l2h = l2h;
8326         lro->iph = ip;
8327         lro->tcph = tcp;
8328         lro->tcp_next_seq = tcp_pyld_len + ntohl(tcp->seq);
8329         lro->tcp_ack = tcp->ack_seq;
8330         lro->sg_num = 1;
8331         lro->total_len = ntohs(ip->tot_len);
8332         lro->frags_len = 0;
8333         lro->vlan_tag = vlan_tag;
8334         /*
8335          * check if we saw TCP timestamp. Other consistency checks have
8336          * already been done.
8337          */
8338         if (tcp->doff == 8) {
8339                 __be32 *ptr;
8340                 ptr = (__be32 *)(tcp+1);
8341                 lro->saw_ts = 1;
8342                 lro->cur_tsval = ntohl(*(ptr+1));
8343                 lro->cur_tsecr = *(ptr+2);
8344         }
8345         lro->in_use = 1;
8346 }
8347
8348 static void update_L3L4_header(struct s2io_nic *sp, struct lro *lro)
8349 {
8350         struct iphdr *ip = lro->iph;
8351         struct tcphdr *tcp = lro->tcph;
8352         __sum16 nchk;
8353         struct stat_block *statinfo = sp->mac_control.stats_info;
8354         DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
8355
8356         /* Update L3 header */
8357         ip->tot_len = htons(lro->total_len);
8358         ip->check = 0;
8359         nchk = ip_fast_csum((u8 *)lro->iph, ip->ihl);
8360         ip->check = nchk;
8361
8362         /* Update L4 header */
8363         tcp->ack_seq = lro->tcp_ack;
8364         tcp->window = lro->window;
8365
8366         /* Update tsecr field if this session has timestamps enabled */
8367         if (lro->saw_ts) {
8368                 __be32 *ptr = (__be32 *)(tcp + 1);
8369                 *(ptr+2) = lro->cur_tsecr;
8370         }
8371
8372         /* Update counters required for calculation of
8373          * average no. of packets aggregated.
8374          */
8375         statinfo->sw_stat.sum_avg_pkts_aggregated += lro->sg_num;
8376         statinfo->sw_stat.num_aggregations++;
8377 }
8378
8379 static void aggregate_new_rx(struct lro *lro, struct iphdr *ip,
8380                 struct tcphdr *tcp, u32 l4_pyld)
8381 {
8382         DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
8383         lro->total_len += l4_pyld;
8384         lro->frags_len += l4_pyld;
8385         lro->tcp_next_seq += l4_pyld;
8386         lro->sg_num++;
8387
8388         /* Update ack seq no. and window ad(from this pkt) in LRO object */
8389         lro->tcp_ack = tcp->ack_seq;
8390         lro->window = tcp->window;
8391
8392         if (lro->saw_ts) {
8393                 __be32 *ptr;
8394                 /* Update tsecr and tsval from this packet */
8395                 ptr = (__be32 *)(tcp+1);
8396                 lro->cur_tsval = ntohl(*(ptr+1));
8397                 lro->cur_tsecr = *(ptr + 2);
8398         }
8399 }
8400
/*
 * verify_l3_l4_lro_capable - decide whether a TCP segment may be aggregated
 * @l_lro: existing LRO session to validate against, or NULL for a new one
 * @ip: IP header of the received segment
 * @tcp: TCP header of the received segment
 * @tcp_pyld_len: length of the TCP payload
 *
 * Returns 0 if the segment is mergeable, -1 if it must be sent up (and,
 * for an existing session, the session flushed).
 */
static int verify_l3_l4_lro_capable(struct lro *l_lro, struct iphdr *ip,
                                    struct tcphdr *tcp, u32 tcp_pyld_len)
{
        u8 *ptr;

        DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);

        if (!tcp_pyld_len) {
                /* Runt frame or a pure ack */
                return -1;
        }

        if (ip->ihl != 5) /* IP has options */
                return -1;

        /* If we see CE codepoint in IP header, packet is not mergeable */
        if (INET_ECN_is_ce(ipv4_get_dsfield(ip)))
                return -1;

        /* If we see ECE or CWR flags in TCP header, packet is not mergeable */
        if (tcp->urg || tcp->psh || tcp->rst || tcp->syn || tcp->fin ||
                                    tcp->ece || tcp->cwr || !tcp->ack) {
                /*
                 * Currently recognize only the ack control word and
                 * any other control field being set would result in
                 * flushing the LRO session
                 */
                return -1;
        }

        /*
         * Allow only one TCP timestamp option. Don't aggregate if
         * any other options are detected.
         * doff == 5 is a bare header; doff == 8 leaves room for exactly
         * the timestamp option (possibly NOP-padded).
         */
        if (tcp->doff != 5 && tcp->doff != 8)
                return -1;

        if (tcp->doff == 8) {
                /* Skip leading NOP padding to reach the timestamp option */
                ptr = (u8 *)(tcp + 1);
                while (*ptr == TCPOPT_NOP)
                        ptr++;
                if (*ptr != TCPOPT_TIMESTAMP || *(ptr+1) != TCPOLEN_TIMESTAMP)
                        return -1;

                /* Ensure timestamp value increases monotonically */
                if (l_lro)
                        if (l_lro->cur_tsval > ntohl(*((__be32 *)(ptr+2))))
                                return -1;

                /* timestamp echo reply should be non-zero */
                if (*((__be32 *)(ptr+6)) == 0)
                        return -1;
        }

        return 0;
}
8457
/*
 * s2io_club_tcp_session - LRO state machine for one received frame
 * @ring_data: ring owning the per-ring LRO session table
 * @buffer: start of the L2 frame
 * @tcp: out - points at the TCP header within @buffer
 * @tcp_len: out - TCP payload length of this frame
 * @lro: out - the matched/allocated LRO session, or NULL
 * @rxdp: receive descriptor for this frame
 * @sp: device private data
 *
 * Return codes (consumed by the caller):
 *   <0 - frame is not L2-LRO-capable, send it up unmodified
 *    0 - all LRO sessions in use (*lro set to NULL)
 *    1 - frame aggregated into an existing session
 *    2 - flush: out-of-order segment or not L3/L4 mergeable
 *    3 - new session begun with this frame
 *    4 - aggregated and session hit max-aggregation, flush it
 *    5 - no session match and frame not aggregatable, send it up
 */
static int
s2io_club_tcp_session(struct ring_info *ring_data, u8 *buffer, u8 **tcp,
        u32 *tcp_len, struct lro **lro, struct RxD_t *rxdp,
        struct s2io_nic *sp)
{
        struct iphdr *ip;
        struct tcphdr *tcph;
        int ret = 0, i;
        u16 vlan_tag = 0;

        /* Locate the IP/TCP headers; bail out on non-mergeable L2 frames */
        if (!(ret = check_L2_lro_capable(buffer, &ip, (struct tcphdr **)tcp,
                                         rxdp, sp))) {
                DBG_PRINT(INFO_DBG,"IP Saddr: %x Daddr: %x\n",
                          ip->saddr, ip->daddr);
        } else
                return ret;

        vlan_tag = RXD_GET_VLAN_TAG(rxdp->Control_2);
        tcph = (struct tcphdr *)*tcp;
        *tcp_len = get_l4_pyld_length(ip, tcph);
        /* Search active sessions for a matching 4-tuple */
        for (i=0; i<MAX_LRO_SESSIONS; i++) {
                struct lro *l_lro = &ring_data->lro0_n[i];
                if (l_lro->in_use) {
                        if (check_for_socket_match(l_lro, ip, tcph))
                                continue;
                        /* Sock pair matched */
                        *lro = l_lro;

                        /* Out-of-order segments force a flush of the session */
                        if ((*lro)->tcp_next_seq != ntohl(tcph->seq)) {
                                DBG_PRINT(INFO_DBG, "%s:Out of order. expected "
                                          "0x%x, actual 0x%x\n", __FUNCTION__,
                                          (*lro)->tcp_next_seq,
                                          ntohl(tcph->seq));

                                sp->mac_control.stats_info->
                                   sw_stat.outof_sequence_pkts++;
                                ret = 2;
                                break;
                        }

                        if (!verify_l3_l4_lro_capable(l_lro, ip, tcph,*tcp_len))
                                ret = 1; /* Aggregate */
                        else
                                ret = 2; /* Flush both */
                        break;
                }
        }

        if (ret == 0) {
                /* Before searching for available LRO objects,
                 * check if the pkt is L3/L4 aggregatable. If not
                 * don't create new LRO session. Just send this
                 * packet up.
                 */
                if (verify_l3_l4_lro_capable(NULL, ip, tcph, *tcp_len)) {
                        return 5;
                }

                /* Claim the first free session slot for a new session */
                for (i=0; i<MAX_LRO_SESSIONS; i++) {
                        struct lro *l_lro = &ring_data->lro0_n[i];
                        if (!(l_lro->in_use)) {
                                *lro = l_lro;
                                ret = 3; /* Begin anew */
                                break;
                        }
                }
        }

        if (ret == 0) { /* sessions exceeded */
                DBG_PRINT(INFO_DBG,"%s:All LRO sessions already in use\n",
                          __FUNCTION__);
                *lro = NULL;
                return ret;
        }

        /* Carry out the action the code above selected */
        switch (ret) {
                case 3:
                        initiate_new_session(*lro, buffer, ip, tcph, *tcp_len,
                                                                vlan_tag);
                        break;
                case 2:
                        update_L3L4_header(sp, *lro);
                        break;
                case 1:
                        aggregate_new_rx(*lro, ip, tcph, *tcp_len);
                        /* Flush once the per-session aggregation cap is hit */
                        if ((*lro)->sg_num == sp->lro_max_aggr_per_sess) {
                                update_L3L4_header(sp, *lro);
                                ret = 4; /* Flush the LRO */
                        }
                        break;
                default:
                        DBG_PRINT(ERR_DBG,"%s:Dont know, can't say!!\n",
                                __FUNCTION__);
                        break;
        }

        return ret;
}
8556
8557 static void clear_lro_session(struct lro *lro)
8558 {
8559         static u16 lro_struct_size = sizeof(struct lro);
8560
8561         memset(lro, 0, lro_struct_size);
8562 }
8563
8564 static void queue_rx_frame(struct sk_buff *skb, u16 vlan_tag)
8565 {
8566         struct net_device *dev = skb->dev;
8567         struct s2io_nic *sp = dev->priv;
8568
8569         skb->protocol = eth_type_trans(skb, dev);
8570         if (sp->vlgrp && vlan_tag
8571                 && (vlan_strip_flag)) {
8572                 /* Queueing the vlan frame to the upper layer */
8573                 if (sp->config.napi)
8574                         vlan_hwaccel_receive_skb(skb, sp->vlgrp, vlan_tag);
8575                 else
8576                         vlan_hwaccel_rx(skb, sp->vlgrp, vlan_tag);
8577         } else {
8578                 if (sp->config.napi)
8579                         netif_receive_skb(skb);
8580                 else
8581                         netif_rx(skb);
8582         }
8583 }
8584
8585 static void lro_append_pkt(struct s2io_nic *sp, struct lro *lro,
8586                            struct sk_buff *skb,
8587                            u32 tcp_len)
8588 {
8589         struct sk_buff *first = lro->parent;
8590
8591         first->len += tcp_len;
8592         first->data_len = lro->frags_len;
8593         skb_pull(skb, (skb->len - tcp_len));
8594         if (skb_shinfo(first)->frag_list)
8595                 lro->last_frag->next = skb;
8596         else
8597                 skb_shinfo(first)->frag_list = skb;
8598         first->truesize += skb->truesize;
8599         lro->last_frag = skb;
8600         sp->mac_control.stats_info->sw_stat.clubbed_frms_cnt++;
8601         return;
8602 }
8603
8604 /**
8605  * s2io_io_error_detected - called when PCI error is detected
8606  * @pdev: Pointer to PCI device
8607  * @state: The current pci connection state
8608  *
8609  * This function is called after a PCI bus error affecting
8610  * this device has been detected.
8611  */
8612 static pci_ers_result_t s2io_io_error_detected(struct pci_dev *pdev,
8613                                                pci_channel_state_t state)
8614 {
8615         struct net_device *netdev = pci_get_drvdata(pdev);
8616         struct s2io_nic *sp = netdev->priv;
8617
8618         netif_device_detach(netdev);
8619
8620         if (netif_running(netdev)) {
8621                 /* Bring down the card, while avoiding PCI I/O */
8622                 do_s2io_card_down(sp, 0);
8623         }
8624         pci_disable_device(pdev);
8625
8626         return PCI_ERS_RESULT_NEED_RESET;
8627 }
8628
8629 /**
8630  * s2io_io_slot_reset - called after the pci bus has been reset.
8631  * @pdev: Pointer to PCI device
8632  *
8633  * Restart the card from scratch, as if from a cold-boot.
8634  * At this point, the card has exprienced a hard reset,
8635  * followed by fixups by BIOS, and has its config space
8636  * set up identically to what it was at cold boot.
8637  */
8638 static pci_ers_result_t s2io_io_slot_reset(struct pci_dev *pdev)
8639 {
8640         struct net_device *netdev = pci_get_drvdata(pdev);
8641         struct s2io_nic *sp = netdev->priv;
8642
8643         if (pci_enable_device(pdev)) {
8644                 printk(KERN_ERR "s2io: "
8645                        "Cannot re-enable PCI device after reset.\n");
8646                 return PCI_ERS_RESULT_DISCONNECT;
8647         }
8648
8649         pci_set_master(pdev);
8650         s2io_reset(sp);
8651
8652         return PCI_ERS_RESULT_RECOVERED;
8653 }
8654
8655 /**
8656  * s2io_io_resume - called when traffic can start flowing again.
8657  * @pdev: Pointer to PCI device
8658  *
8659  * This callback is called when the error recovery driver tells
8660  * us that its OK to resume normal operation.
8661  */
8662 static void s2io_io_resume(struct pci_dev *pdev)
8663 {
8664         struct net_device *netdev = pci_get_drvdata(pdev);
8665         struct s2io_nic *sp = netdev->priv;
8666
8667         if (netif_running(netdev)) {
8668                 if (s2io_card_up(sp)) {
8669                         printk(KERN_ERR "s2io: "
8670                                "Can't bring device back up after reset.\n");
8671                         return;
8672                 }
8673
8674                 if (s2io_set_mac_addr(netdev, netdev->dev_addr) == FAILURE) {
8675                         s2io_card_down(sp);
8676                         printk(KERN_ERR "s2io: "
8677                                "Can't resetore mac addr after reset.\n");
8678                         return;
8679                 }
8680         }
8681
8682         netif_device_attach(netdev);
8683         netif_wake_queue(netdev);
8684 }