/*
 * QLogic qlcnic NIC Driver
 * Copyright (c) 2009-2013 QLogic Corporation
 *
 * See LICENSE.qlcnic for copyright and licensing details.
 */

#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <linux/ipv6.h>

#include "qlcnic.h"

#define TX_ETHER_PKT    0x01
#define TX_TCP_PKT      0x02
#define TX_UDP_PKT      0x03
#define TX_IP_PKT       0x04
#define TX_TCP_LSO      0x05
#define TX_TCP_LSO6     0x06
#define TX_TCPV6_PKT    0x0b
#define TX_UDPV6_PKT    0x0c
#define FLAGS_VLAN_TAGGED       0x10
#define FLAGS_VLAN_OOB          0x40

#define qlcnic_set_tx_vlan_tci(cmd_desc, v)     \
        ((cmd_desc)->vlan_TCI = cpu_to_le16(v))
#define qlcnic_set_cmd_desc_port(cmd_desc, var) \
        ((cmd_desc)->port_ctxid |= ((var) & 0x0F))
#define qlcnic_set_cmd_desc_ctxid(cmd_desc, var)        \
        ((cmd_desc)->port_ctxid |= ((var) << 4 & 0xF0))

#define qlcnic_set_tx_port(_desc, _port) \
        ((_desc)->port_ctxid = ((_port) & 0xf) | (((_port) << 4) & 0xf0))

#define qlcnic_set_tx_flags_opcode(_desc, _flags, _opcode) \
        ((_desc)->flags_opcode |= \
        cpu_to_le16(((_flags) & 0x7f) | (((_opcode) & 0x3f) << 7)))

#define qlcnic_set_tx_frags_len(_desc, _frags, _len) \
        ((_desc)->nfrags__length = \
        cpu_to_le32(((_frags) & 0xff) | (((_len) & 0xffffff) << 8)))

/* owner bits of status_desc */
#define STATUS_OWNER_HOST       (0x1ULL << 56)
#define STATUS_OWNER_PHANTOM    (0x2ULL << 56)

/* Status descriptor:
   0-3 port, 4-7 status, 8-11 type, 12-27 total_length
   28-43 reference_handle, 44-47 protocol, 48-52 pkt_offset
   53-55 desc_cnt, 56-57 owner, 58-63 opcode
 */
#define qlcnic_get_sts_port(sts_data)   \
        ((sts_data) & 0x0F)
#define qlcnic_get_sts_status(sts_data) \
        (((sts_data) >> 4) & 0x0F)
#define qlcnic_get_sts_type(sts_data)   \
        (((sts_data) >> 8) & 0x0F)
#define qlcnic_get_sts_totallength(sts_data)    \
        (((sts_data) >> 12) & 0xFFFF)
#define qlcnic_get_sts_refhandle(sts_data)      \
        (((sts_data) >> 28) & 0xFFFF)
#define qlcnic_get_sts_prot(sts_data)   \
        (((sts_data) >> 44) & 0x0F)
#define qlcnic_get_sts_pkt_offset(sts_data)     \
        (((sts_data) >> 48) & 0x1F)
#define qlcnic_get_sts_desc_cnt(sts_data)       \
        (((sts_data) >> 53) & 0x7)
#define qlcnic_get_sts_opcode(sts_data) \
        (((sts_data) >> 58) & 0x03F)

#define qlcnic_get_lro_sts_refhandle(sts_data)  \
        ((sts_data) & 0x07FFF)
#define qlcnic_get_lro_sts_length(sts_data)     \
        (((sts_data) >> 16) & 0x0FFFF)
#define qlcnic_get_lro_sts_l2_hdr_offset(sts_data)      \
        (((sts_data) >> 32) & 0x0FF)
#define qlcnic_get_lro_sts_l4_hdr_offset(sts_data)      \
        (((sts_data) >> 40) & 0x0FF)
#define qlcnic_get_lro_sts_timestamp(sts_data)  \
        (((sts_data) >> 48) & 0x1)
#define qlcnic_get_lro_sts_type(sts_data)       \
        (((sts_data) >> 49) & 0x7)
#define qlcnic_get_lro_sts_push_flag(sts_data)          \
        (((sts_data) >> 52) & 0x1)
#define qlcnic_get_lro_sts_seq_number(sts_data)         \
        ((sts_data) & 0x0FFFFFFFF)
#define qlcnic_get_lro_sts_mss(sts_data1)               \
        (((sts_data1) >> 32) & 0x0FFFF)

#define qlcnic_83xx_get_lro_sts_mss(sts) ((sts) & 0xffff)

/* opcode field in status_desc */
#define QLCNIC_SYN_OFFLOAD      0x03
#define QLCNIC_RXPKT_DESC       0x04
#define QLCNIC_OLD_RXPKT_DESC   0x3f
#define QLCNIC_RESPONSE_DESC    0x05
#define QLCNIC_LRO_DESC         0x12

#define QLCNIC_TX_POLL_BUDGET           128
#define QLCNIC_TCP_HDR_SIZE             20
#define QLCNIC_TCP_TS_OPTION_SIZE       12
#define QLCNIC_FETCH_RING_ID(handle)    ((handle) >> 63)
#define QLCNIC_DESC_OWNER_FW            cpu_to_le64(STATUS_OWNER_PHANTOM)

#define QLCNIC_TCP_TS_HDR_SIZE (QLCNIC_TCP_HDR_SIZE + QLCNIC_TCP_TS_OPTION_SIZE)

/* for status field in status_desc */
#define STATUS_CKSUM_LOOP       0
#define STATUS_CKSUM_OK         2

#define qlcnic_83xx_pktln(sts)          (((sts) >> 32) & 0x3FFF)
#define qlcnic_83xx_hndl(sts)           (((sts) >> 48) & 0x7FFF)
#define qlcnic_83xx_csum_status(sts)    (((sts) >> 39) & 7)
#define qlcnic_83xx_opcode(sts) (((sts) >> 42) & 0xF)
#define qlcnic_83xx_vlan_tag(sts)       (((sts) >> 48) & 0xFFFF)
#define qlcnic_83xx_lro_pktln(sts)      (((sts) >> 32) & 0x3FFF)
#define qlcnic_83xx_l2_hdr_off(sts)     (((sts) >> 16) & 0xFF)
#define qlcnic_83xx_l4_hdr_off(sts)     (((sts) >> 24) & 0xFF)
#define qlcnic_83xx_pkt_cnt(sts)        (((sts) >> 16) & 0x7)
#define qlcnic_83xx_is_tstamp(sts)      (((sts) >> 40) & 1)
#define qlcnic_83xx_is_psh_bit(sts)     (((sts) >> 41) & 1)
#define qlcnic_83xx_is_ip_align(sts)    (((sts) >> 46) & 1)
#define qlcnic_83xx_has_vlan_tag(sts)   (((sts) >> 47) & 1)

struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *,
                                     struct qlcnic_host_rds_ring *, u16, u16);

inline void qlcnic_83xx_enable_tx_intr(struct qlcnic_adapter *adapter,
                                       struct qlcnic_host_tx_ring *tx_ring)
{
        writel(0, tx_ring->crb_intr_mask);
}

inline void qlcnic_83xx_disable_tx_intr(struct qlcnic_adapter *adapter,
                                        struct qlcnic_host_tx_ring *tx_ring)
{
        writel(1, tx_ring->crb_intr_mask);
}

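/*
 * Fold a MAC address into one byte by XOR-ing its lowest octet with
 * octet 5. Callers mask the result with the bucket count, e.g.
 *
 *      hindex = qlcnic_mac_hash(src_addr) &
 *               (adapter->fhash.fbucket_size - 1);
 *
 * so fbucket_size is assumed to be a power of two.
 */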
static inline u8 qlcnic_mac_hash(u64 mac)
{
        return (u8)((mac & 0xff) ^ ((mac >> 40) & 0xff));
}

static inline u32 qlcnic_get_ref_handle(struct qlcnic_adapter *adapter,
                                        u16 handle, u8 ring_id)
{
        if (adapter->pdev->device == PCI_DEVICE_ID_QLOGIC_QLE834X)
                return handle | (ring_id << 15);
        else
                return handle;
}

static inline int qlcnic_82xx_is_lb_pkt(u64 sts_data)
{
        return (qlcnic_get_sts_status(sts_data) == STATUS_CKSUM_LOOP) ? 1 : 0;
}

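/*
 * Learn source MACs from received loopback traffic. Loopback packets
 * are remembered in the rx_fhash table; for ordinary packets a
 * previously learned entry is pushed to and then removed from the
 * hardware filter via qlcnic_sre_macaddr_change(), which appears to
 * serve as a validation step during the loopback test.
 */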
void qlcnic_add_lb_filter(struct qlcnic_adapter *adapter, struct sk_buff *skb,
                          int loopback_pkt, __le16 vlan_id)
{
        struct ethhdr *phdr = (struct ethhdr *)(skb->data);
        struct qlcnic_filter *fil, *tmp_fil;
        struct hlist_node *tmp_hnode, *n;
        struct hlist_head *head;
        unsigned long time;
        u64 src_addr = 0;
        u8 hindex, found = 0, op;
        int ret;

        memcpy(&src_addr, phdr->h_source, ETH_ALEN);

        if (loopback_pkt) {
                if (adapter->rx_fhash.fnum >= adapter->rx_fhash.fmax)
                        return;

                hindex = qlcnic_mac_hash(src_addr) &
                         (adapter->fhash.fbucket_size - 1);
                head = &(adapter->rx_fhash.fhead[hindex]);

                hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) {
                        if (!memcmp(tmp_fil->faddr, &src_addr, ETH_ALEN) &&
                            tmp_fil->vlan_id == vlan_id) {
                                time = tmp_fil->ftime;
                                if (time_after(jiffies,
                                               QLCNIC_READD_AGE * HZ + time))
                                        tmp_fil->ftime = jiffies;
                                return;
                        }
                }

                fil = kzalloc(sizeof(struct qlcnic_filter), GFP_ATOMIC);
                if (!fil)
                        return;

                fil->ftime = jiffies;
                memcpy(fil->faddr, &src_addr, ETH_ALEN);
                fil->vlan_id = vlan_id;
                spin_lock(&adapter->rx_mac_learn_lock);
                hlist_add_head(&(fil->fnode), head);
                adapter->rx_fhash.fnum++;
                spin_unlock(&adapter->rx_mac_learn_lock);
        } else {
                hindex = qlcnic_mac_hash(src_addr) &
                         (adapter->fhash.fbucket_size - 1);
                head = &(adapter->rx_fhash.fhead[hindex]);
                spin_lock(&adapter->rx_mac_learn_lock);
                hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) {
                        if (!memcmp(tmp_fil->faddr, &src_addr, ETH_ALEN) &&
                            tmp_fil->vlan_id == vlan_id) {
                                found = 1;
                                break;
                        }
                }

                if (!found) {
                        spin_unlock(&adapter->rx_mac_learn_lock);
                        return;
                }

                op = vlan_id ? QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_ADD;
                ret = qlcnic_sre_macaddr_change(adapter, (u8 *)&src_addr,
                                                vlan_id, op);
                if (!ret) {
                        op = vlan_id ? QLCNIC_MAC_VLAN_DEL : QLCNIC_MAC_DEL;
                        ret = qlcnic_sre_macaddr_change(adapter,
                                                        (u8 *)&src_addr,
                                                        vlan_id, op);
                        if (!ret) {
                                hlist_del(&(tmp_fil->fnode));
                                adapter->rx_fhash.fnum--;
                        }
                }
                spin_unlock(&adapter->rx_mac_learn_lock);
        }
}

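/*
 * Post a MAC-learn request to the firmware by writing a request
 * descriptor straight into the Tx ring. No lock is taken here, so
 * callers are assumed to serialize access to the Tx ring.
 */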
void qlcnic_82xx_change_filter(struct qlcnic_adapter *adapter, u64 *uaddr,
                               __le16 vlan_id)
{
        struct cmd_desc_type0 *hwdesc;
        struct qlcnic_nic_req *req;
        struct qlcnic_mac_req *mac_req;
        struct qlcnic_vlan_req *vlan_req;
        struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
        u32 producer;
        u64 word;

        producer = tx_ring->producer;
        hwdesc = &tx_ring->desc_head[tx_ring->producer];

        req = (struct qlcnic_nic_req *)hwdesc;
        memset(req, 0, sizeof(struct qlcnic_nic_req));
        req->qhdr = cpu_to_le64(QLCNIC_REQUEST << 23);

        word = QLCNIC_MAC_EVENT | ((u64)(adapter->portnum) << 16);
        req->req_hdr = cpu_to_le64(word);

        mac_req = (struct qlcnic_mac_req *)&(req->words[0]);
        mac_req->op = vlan_id ? QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_ADD;
        /* copy the MAC itself, not the address of the pointer */
        memcpy(mac_req->mac_addr, uaddr, ETH_ALEN);

        vlan_req = (struct qlcnic_vlan_req *)&req->words[1];
        vlan_req->vlan_id = vlan_id;

        tx_ring->producer = get_next_index(producer, tx_ring->num_desc);
        smp_mb();
}

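/*
 * Learn the source MAC of an outgoing frame: refresh a matching
 * filter entry, or allocate a new one and program it into the device.
 * Entries idle for more than QLCNIC_READD_AGE seconds are
 * re-announced to the firmware.
 */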
static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
                               struct cmd_desc_type0 *first_desc,
                               struct sk_buff *skb)
{
        struct qlcnic_filter *fil, *tmp_fil;
        struct hlist_node *tmp_hnode, *n;
        struct hlist_head *head;
        struct net_device *netdev = adapter->netdev;
        struct ethhdr *phdr = (struct ethhdr *)(skb->data);
        u64 src_addr = 0;
        __le16 vlan_id = 0;
        u8 hindex;

        if (ether_addr_equal(phdr->h_source, adapter->mac_addr))
                return;

        if (adapter->fhash.fnum >= adapter->fhash.fmax) {
                adapter->stats.mac_filter_limit_overrun++;
                netdev_info(netdev, "Cannot add more than %d MAC addresses\n",
                            adapter->fhash.fmax);
                return;
        }

        memcpy(&src_addr, phdr->h_source, ETH_ALEN);
        hindex = qlcnic_mac_hash(src_addr) & (adapter->fhash.fbucket_size - 1);
        head = &(adapter->fhash.fhead[hindex]);

        hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) {
                if (!memcmp(tmp_fil->faddr, &src_addr, ETH_ALEN) &&
                    tmp_fil->vlan_id == vlan_id) {
                        if (time_after(jiffies,
                                       QLCNIC_READD_AGE * HZ + tmp_fil->ftime))
                                qlcnic_change_filter(adapter, &src_addr,
                                                     vlan_id);
                        tmp_fil->ftime = jiffies;
                        return;
                }
        }

        fil = kzalloc(sizeof(struct qlcnic_filter), GFP_ATOMIC);
        if (!fil)
                return;

        qlcnic_change_filter(adapter, &src_addr, vlan_id);
        fil->ftime = jiffies;
        fil->vlan_id = vlan_id;
        memcpy(fil->faddr, &src_addr, ETH_ALEN);
        spin_lock(&adapter->mac_learn_lock);
        hlist_add_head(&(fil->fnode), head);
        adapter->fhash.fnum++;
        spin_unlock(&adapter->mac_learn_lock);
}

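/*
 * Fill the protocol fields of the first Tx descriptor: VLAN tag,
 * LSO/checksum-offload opcode and header offsets. For LSO, the
 * MAC/IP/TCP headers are also copied into the descriptor ring as a
 * header template for the firmware.
 */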
static int qlcnic_tx_pkt(struct qlcnic_adapter *adapter,
                         struct cmd_desc_type0 *first_desc, struct sk_buff *skb)
{
        u8 l4proto, opcode = 0, hdr_len = 0;
        u16 flags = 0, vlan_tci = 0;
        int copied, offset, copy_len, size;
        struct cmd_desc_type0 *hwdesc;
        struct vlan_ethhdr *vh;
        struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
        u16 protocol = ntohs(skb->protocol);
        u32 producer = tx_ring->producer;

        if (protocol == ETH_P_8021Q) {
                vh = (struct vlan_ethhdr *)skb->data;
                flags = FLAGS_VLAN_TAGGED;
                vlan_tci = ntohs(vh->h_vlan_TCI);
                protocol = ntohs(vh->h_vlan_encapsulated_proto);
        } else if (vlan_tx_tag_present(skb)) {
                flags = FLAGS_VLAN_OOB;
                vlan_tci = vlan_tx_tag_get(skb);
        }
        if (unlikely(adapter->pvid)) {
                if (vlan_tci && !(adapter->flags & QLCNIC_TAGGING_ENABLED))
                        return -EIO;
                if (vlan_tci && (adapter->flags & QLCNIC_TAGGING_ENABLED))
                        goto set_flags;

                flags = FLAGS_VLAN_OOB;
                vlan_tci = adapter->pvid;
        }
set_flags:
        qlcnic_set_tx_vlan_tci(first_desc, vlan_tci);
        qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);

        if (*(skb->data) & BIT_0) {
                flags |= BIT_0;
                memcpy(&first_desc->eth_addr, skb->data, ETH_ALEN);
        }
        opcode = TX_ETHER_PKT;
        if ((adapter->netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) &&
            skb_shinfo(skb)->gso_size > 0) {
                hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
                first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
                first_desc->total_hdr_length = hdr_len;
                opcode = (protocol == ETH_P_IPV6) ? TX_TCP_LSO6 : TX_TCP_LSO;

                /* For LSO, we need to copy the MAC/IP/TCP headers into
                 * the descriptor ring
                 */
                copied = 0;
                offset = 2;

                if (flags & FLAGS_VLAN_OOB) {
                        first_desc->total_hdr_length += VLAN_HLEN;
                        first_desc->tcp_hdr_offset = VLAN_HLEN;
                        first_desc->ip_hdr_offset = VLAN_HLEN;

                        /* Only in case of TSO on vlan device */
                        flags |= FLAGS_VLAN_TAGGED;

                        /* Create a TSO vlan header template for firmware */
                        hwdesc = &tx_ring->desc_head[producer];
                        tx_ring->cmd_buf_arr[producer].skb = NULL;

                        copy_len = min((int)sizeof(struct cmd_desc_type0) -
                                       offset, hdr_len + VLAN_HLEN);

                        vh = (struct vlan_ethhdr *)((char *)hwdesc + 2);
                        skb_copy_from_linear_data(skb, vh, 12);
                        vh->h_vlan_proto = htons(ETH_P_8021Q);
                        vh->h_vlan_TCI = htons(vlan_tci);

                        skb_copy_from_linear_data_offset(skb, 12,
                                                         (char *)vh + 16,
                                                         copy_len - 16);
                        copied = copy_len - VLAN_HLEN;
                        offset = 0;
                        producer = get_next_index(producer, tx_ring->num_desc);
                }

                while (copied < hdr_len) {
                        size = (int)sizeof(struct cmd_desc_type0) - offset;
                        copy_len = min(size, (hdr_len - copied));
                        hwdesc = &tx_ring->desc_head[producer];
                        tx_ring->cmd_buf_arr[producer].skb = NULL;
                        skb_copy_from_linear_data_offset(skb, copied,
                                                         (char *)hwdesc +
                                                         offset, copy_len);
                        copied += copy_len;
                        offset = 0;
                        producer = get_next_index(producer, tx_ring->num_desc);
                }

                tx_ring->producer = producer;
                smp_mb();
                adapter->stats.lso_frames++;

        } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
                if (protocol == ETH_P_IP) {
                        l4proto = ip_hdr(skb)->protocol;

                        if (l4proto == IPPROTO_TCP)
                                opcode = TX_TCP_PKT;
                        else if (l4proto == IPPROTO_UDP)
                                opcode = TX_UDP_PKT;
                } else if (protocol == ETH_P_IPV6) {
                        l4proto = ipv6_hdr(skb)->nexthdr;

                        if (l4proto == IPPROTO_TCP)
                                opcode = TX_TCPV6_PKT;
                        else if (l4proto == IPPROTO_UDP)
                                opcode = TX_UDPV6_PKT;
                }
        }
        first_desc->tcp_hdr_offset += skb_transport_offset(skb);
        first_desc->ip_hdr_offset += skb_network_offset(skb);
        qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);

        return 0;
}

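/*
 * DMA-map the skb head and every page fragment into pbuf->frag_array.
 * On failure all mappings made so far are unwound and -ENOMEM is
 * returned.
 */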
static int qlcnic_map_tx_skb(struct pci_dev *pdev, struct sk_buff *skb,
                             struct qlcnic_cmd_buffer *pbuf)
{
        struct qlcnic_skb_frag *nf;
        struct skb_frag_struct *frag;
        int i, nr_frags;
        dma_addr_t map;

        nr_frags = skb_shinfo(skb)->nr_frags;
        nf = &pbuf->frag_array[0];

        map = pci_map_single(pdev, skb->data, skb_headlen(skb),
                             PCI_DMA_TODEVICE);
        if (pci_dma_mapping_error(pdev, map))
                goto out_err;

        nf->dma = map;
        nf->length = skb_headlen(skb);

        for (i = 0; i < nr_frags; i++) {
                frag = &skb_shinfo(skb)->frags[i];
                nf = &pbuf->frag_array[i+1];
                map = skb_frag_dma_map(&pdev->dev, frag, 0, skb_frag_size(frag),
                                       DMA_TO_DEVICE);
                if (dma_mapping_error(&pdev->dev, map))
                        goto unwind;

                nf->dma = map;
                nf->length = skb_frag_size(frag);
        }

        return 0;

unwind:
        while (--i >= 0) {
                nf = &pbuf->frag_array[i+1];
                pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
        }

        nf = &pbuf->frag_array[0];
        pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);

out_err:
        return -ENOMEM;
}

static void qlcnic_unmap_buffers(struct pci_dev *pdev, struct sk_buff *skb,
                                 struct qlcnic_cmd_buffer *pbuf)
{
        struct qlcnic_skb_frag *nf = &pbuf->frag_array[0];
        int i, nr_frags = skb_shinfo(skb)->nr_frags;

        for (i = 0; i < nr_frags; i++) {
                nf = &pbuf->frag_array[i+1];
                pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
        }

        nf = &pbuf->frag_array[0];
        pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
        pbuf->skb = NULL;
}

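/*
 * Words 1, 3, 4 and 6 of a Tx descriptor hold buffer addresses and
 * word 5 the buffer lengths; those appear to be rewritten for every
 * frame, so presumably only the control words 0, 2 and 7 need
 * clearing here.
 */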
static inline void qlcnic_clear_cmddesc(u64 *desc)
{
        desc[0] = 0ULL;
        desc[2] = 0ULL;
        desc[7] = 0ULL;
}

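/*
 * Main transmit entry point: pull excess fragments back into the
 * linear area, check for ring space, DMA-map the skb, emit one
 * descriptor per four buffer fragments and kick the producer index.
 */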
netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
        struct qlcnic_adapter *adapter = netdev_priv(netdev);
        struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
        struct qlcnic_cmd_buffer *pbuf;
        struct qlcnic_skb_frag *buffrag;
        struct cmd_desc_type0 *hwdesc, *first_desc;
        struct pci_dev *pdev;
        struct ethhdr *phdr;
        int i, k, frag_count, delta = 0;
        u32 producer, num_txd;

        num_txd = tx_ring->num_desc;

        if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
                netif_stop_queue(netdev);
                return NETDEV_TX_BUSY;
        }

        if (adapter->flags & QLCNIC_MACSPOOF) {
                phdr = (struct ethhdr *)skb->data;
                if (!ether_addr_equal(phdr->h_source, adapter->mac_addr))
                        goto drop_packet;
        }

        frag_count = skb_shinfo(skb)->nr_frags + 1;
        /* 14 frags supported for normal packet and
         * 32 frags supported for TSO packet
         */
        if (!skb_is_gso(skb) && frag_count > QLCNIC_MAX_FRAGS_PER_TX) {
                for (i = 0; i < (frag_count - QLCNIC_MAX_FRAGS_PER_TX); i++)
                        delta += skb_frag_size(&skb_shinfo(skb)->frags[i]);

                if (!__pskb_pull_tail(skb, delta))
                        goto drop_packet;

                frag_count = 1 + skb_shinfo(skb)->nr_frags;
        }

        if (unlikely(qlcnic_tx_avail(tx_ring) <= TX_STOP_THRESH)) {
                netif_stop_queue(netdev);
                if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
                        netif_start_queue(netdev);
                } else {
                        adapter->stats.xmit_off++;
                        return NETDEV_TX_BUSY;
                }
        }

        producer = tx_ring->producer;
        pbuf = &tx_ring->cmd_buf_arr[producer];
        pdev = adapter->pdev;
        first_desc = &tx_ring->desc_head[producer];
        hwdesc = &tx_ring->desc_head[producer];
        qlcnic_clear_cmddesc((u64 *)hwdesc);

        if (qlcnic_map_tx_skb(pdev, skb, pbuf)) {
                adapter->stats.tx_dma_map_error++;
                goto drop_packet;
        }

        pbuf->skb = skb;
        pbuf->frag_count = frag_count;

        qlcnic_set_tx_frags_len(first_desc, frag_count, skb->len);
        qlcnic_set_tx_port(first_desc, adapter->portnum);

        for (i = 0; i < frag_count; i++) {
                k = i % 4;

                if ((k == 0) && (i > 0)) {
                        /* move to next desc. */
                        producer = get_next_index(producer, num_txd);
                        hwdesc = &tx_ring->desc_head[producer];
                        qlcnic_clear_cmddesc((u64 *)hwdesc);
                        tx_ring->cmd_buf_arr[producer].skb = NULL;
                }

                buffrag = &pbuf->frag_array[i];
                hwdesc->buffer_length[k] = cpu_to_le16(buffrag->length);
                switch (k) {
                case 0:
                        hwdesc->addr_buffer1 = cpu_to_le64(buffrag->dma);
                        break;
                case 1:
                        hwdesc->addr_buffer2 = cpu_to_le64(buffrag->dma);
                        break;
                case 2:
                        hwdesc->addr_buffer3 = cpu_to_le64(buffrag->dma);
                        break;
                case 3:
                        hwdesc->addr_buffer4 = cpu_to_le64(buffrag->dma);
                        break;
                }
        }

        tx_ring->producer = get_next_index(producer, num_txd);
        smp_mb();

        if (unlikely(qlcnic_tx_pkt(adapter, first_desc, skb)))
                goto unwind_buff;

        if (adapter->drv_mac_learn)
                qlcnic_send_filter(adapter, first_desc, skb);

        adapter->stats.txbytes += skb->len;
        adapter->stats.xmitcalled++;

        qlcnic_update_cmd_producer(tx_ring);

        return NETDEV_TX_OK;

unwind_buff:
        qlcnic_unmap_buffers(pdev, skb, pbuf);
drop_packet:
        adapter->stats.txdropped++;
        dev_kfree_skb_any(skb);
        return NETDEV_TX_OK;
}

void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup)
{
        struct net_device *netdev = adapter->netdev;

        if (adapter->ahw->linkup && !linkup) {
                netdev_info(netdev, "NIC Link is down\n");
                adapter->ahw->linkup = 0;
                if (netif_running(netdev)) {
                        netif_carrier_off(netdev);
                        netif_stop_queue(netdev);
                }
        } else if (!adapter->ahw->linkup && linkup) {
                netdev_info(netdev, "NIC Link is up\n");
                adapter->ahw->linkup = 1;
                if (netif_running(netdev)) {
                        netif_carrier_on(netdev);
                        netif_wake_queue(netdev);
                }
        }
}

static int qlcnic_alloc_rx_skb(struct qlcnic_adapter *adapter,
                               struct qlcnic_host_rds_ring *rds_ring,
                               struct qlcnic_rx_buffer *buffer)
{
        struct sk_buff *skb;
        dma_addr_t dma;
        struct pci_dev *pdev = adapter->pdev;

        skb = netdev_alloc_skb(adapter->netdev, rds_ring->skb_size);
        if (!skb) {
                adapter->stats.skb_alloc_failure++;
                return -ENOMEM;
        }

        skb_reserve(skb, NET_IP_ALIGN);
        dma = pci_map_single(pdev, skb->data,
                             rds_ring->dma_size, PCI_DMA_FROMDEVICE);

        if (pci_dma_mapping_error(pdev, dma)) {
                adapter->stats.rx_dma_map_error++;
                dev_kfree_skb_any(skb);
                return -ENOMEM;
        }

        buffer->skb = skb;
        buffer->dma = dma;

        return 0;
}

static void qlcnic_post_rx_buffers_nodb(struct qlcnic_adapter *adapter,
                                        struct qlcnic_host_rds_ring *rds_ring,
                                        u8 ring_id)
{
        struct rcv_desc *pdesc;
        struct qlcnic_rx_buffer *buffer;
        int count = 0;
        u32 producer, handle;
        struct list_head *head;

        if (!spin_trylock(&rds_ring->lock))
                return;

        producer = rds_ring->producer;
        head = &rds_ring->free_list;
        while (!list_empty(head)) {
                buffer = list_entry(head->next, struct qlcnic_rx_buffer, list);

                if (!buffer->skb) {
                        if (qlcnic_alloc_rx_skb(adapter, rds_ring, buffer))
                                break;
                }
                count++;
                list_del(&buffer->list);

                /* make a rcv descriptor */
                pdesc = &rds_ring->desc_head[producer];
                handle = qlcnic_get_ref_handle(adapter,
                                               buffer->ref_handle, ring_id);
                pdesc->reference_handle = cpu_to_le16(handle);
                pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);
                pdesc->addr_buffer = cpu_to_le64(buffer->dma);
                producer = get_next_index(producer, rds_ring->num_desc);
        }
        if (count) {
                rds_ring->producer = producer;
                writel((producer - 1) & (rds_ring->num_desc - 1),
                       rds_ring->crb_rcv_producer);
        }
        spin_unlock(&rds_ring->lock);
}

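/*
 * Reclaim completed Tx buffers up to the firmware's hardware consumer
 * index, waking the queue once enough descriptors are free again.
 * Returns non-zero when the ring is fully drained (or when another
 * CPU already holds the clean lock).
 */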
static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter,
                                   struct qlcnic_host_tx_ring *tx_ring,
                                   int budget)
{
        u32 sw_consumer, hw_consumer;
        int i, done, count = 0;
        struct qlcnic_cmd_buffer *buffer;
        struct pci_dev *pdev = adapter->pdev;
        struct net_device *netdev = adapter->netdev;
        struct qlcnic_skb_frag *frag;

        if (!spin_trylock(&adapter->tx_clean_lock))
                return 1;

        sw_consumer = tx_ring->sw_consumer;
        hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));

        while (sw_consumer != hw_consumer) {
                buffer = &tx_ring->cmd_buf_arr[sw_consumer];
                if (buffer->skb) {
                        frag = &buffer->frag_array[0];
                        pci_unmap_single(pdev, frag->dma, frag->length,
                                         PCI_DMA_TODEVICE);
                        frag->dma = 0ULL;
                        for (i = 1; i < buffer->frag_count; i++) {
                                frag++;
                                pci_unmap_page(pdev, frag->dma, frag->length,
                                               PCI_DMA_TODEVICE);
                                frag->dma = 0ULL;
                        }
                        adapter->stats.xmitfinished++;
                        dev_kfree_skb_any(buffer->skb);
                        buffer->skb = NULL;
                }

                sw_consumer = get_next_index(sw_consumer, tx_ring->num_desc);
                if (++count >= budget)
                        break;
        }

        if (count && netif_running(netdev)) {
                tx_ring->sw_consumer = sw_consumer;
                smp_mb();
                if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev)) {
                        if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
                                netif_wake_queue(netdev);
                                adapter->stats.xmit_on++;
                        }
                }
                adapter->tx_timeo_cnt = 0;
        }
        /*
         * If everything is freed up to the consumer, check whether the
         * ring is full. If it is, check whether more needs to be freed
         * and schedule the callback again.
         *
         * This matters with two CPUs: one could be freeing descriptors
         * while the other fills the ring. If the ring is full when we
         * get out of here and the card has already interrupted the
         * host, the host can miss that interrupt.
         *
         * There is still a possible race condition in which the host
         * could miss an interrupt; the card has to take care of this.
         */
        hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
        done = (sw_consumer == hw_consumer);
        spin_unlock(&adapter->tx_clean_lock);

        return done;
}

static int qlcnic_poll(struct napi_struct *napi, int budget)
{
        int tx_complete, work_done;
        struct qlcnic_host_sds_ring *sds_ring;
        struct qlcnic_adapter *adapter;

        sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi);
        adapter = sds_ring->adapter;
        tx_complete = qlcnic_process_cmd_ring(adapter, adapter->tx_ring,
                                              budget);
        work_done = qlcnic_process_rcv_ring(sds_ring, budget);
        if ((work_done < budget) && tx_complete) {
                napi_complete(&sds_ring->napi);
                if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
                        qlcnic_enable_int(sds_ring);
        }

        return work_done;
}

static int qlcnic_rx_poll(struct napi_struct *napi, int budget)
{
        struct qlcnic_host_sds_ring *sds_ring;
        struct qlcnic_adapter *adapter;
        int work_done;

        sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi);
        adapter = sds_ring->adapter;

        work_done = qlcnic_process_rcv_ring(sds_ring, budget);

        if (work_done < budget) {
                napi_complete(&sds_ring->napi);
                if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
                        qlcnic_enable_int(sds_ring);
        }

        return work_done;
}

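/*
 * Decode a firmware link event (cable info, speed, duplex, autoneg
 * and loopback status) and propagate the new carrier state to the
 * net device.
 */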
static void qlcnic_handle_linkevent(struct qlcnic_adapter *adapter,
                                    struct qlcnic_fw_msg *msg)
{
        u32 cable_OUI;
        u16 cable_len, link_speed;
        u8 link_status, module, duplex, autoneg, lb_status = 0;
        struct net_device *netdev = adapter->netdev;

        adapter->ahw->has_link_events = 1;

        cable_OUI = msg->body[1] & 0xffffffff;
        cable_len = (msg->body[1] >> 32) & 0xffff;
        link_speed = (msg->body[1] >> 48) & 0xffff;

        link_status = msg->body[2] & 0xff;
        duplex = (msg->body[2] >> 16) & 0xff;
        autoneg = (msg->body[2] >> 24) & 0xff;
        lb_status = (msg->body[2] >> 32) & 0x3;

        module = (msg->body[2] >> 8) & 0xff;
        if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE)
                dev_info(&netdev->dev,
                         "unsupported cable: OUI 0x%x, length %d\n",
                         cable_OUI, cable_len);
        else if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN)
                dev_info(&netdev->dev, "unsupported cable length %d\n",
                         cable_len);

        if (!link_status && (lb_status == QLCNIC_ILB_MODE ||
            lb_status == QLCNIC_ELB_MODE))
                adapter->ahw->loopback_state |= QLCNIC_LINKEVENT;

        qlcnic_advert_link_change(adapter, link_status);

        if (duplex == LINKEVENT_FULL_DUPLEX)
                adapter->ahw->link_duplex = DUPLEX_FULL;
        else
                adapter->ahw->link_duplex = DUPLEX_HALF;

        adapter->ahw->module_type = module;
        adapter->ahw->link_autoneg = autoneg;

        if (link_status) {
                adapter->ahw->link_speed = link_speed;
        } else {
                adapter->ahw->link_speed = SPEED_UNKNOWN;
                adapter->ahw->link_duplex = DUPLEX_UNKNOWN;
        }
}

static void qlcnic_handle_fw_message(int desc_cnt, int index,
                                     struct qlcnic_host_sds_ring *sds_ring)
{
        struct qlcnic_fw_msg msg;
        struct status_desc *desc;
        struct qlcnic_adapter *adapter;
        struct device *dev;
        int i = 0, opcode, ret;

        while (desc_cnt > 0 && i < 8) {
                desc = &sds_ring->desc_head[index];
                msg.words[i++] = le64_to_cpu(desc->status_desc_data[0]);
                msg.words[i++] = le64_to_cpu(desc->status_desc_data[1]);

                index = get_next_index(index, sds_ring->num_desc);
                desc_cnt--;
        }

        adapter = sds_ring->adapter;
        dev = &adapter->pdev->dev;
        opcode = qlcnic_get_nic_msg_opcode(msg.body[0]);

        switch (opcode) {
        case QLCNIC_C2H_OPCODE_GET_LINKEVENT_RESPONSE:
                qlcnic_handle_linkevent(adapter, &msg);
                break;
        case QLCNIC_C2H_OPCODE_CONFIG_LOOPBACK:
                ret = (u32)(msg.body[1]);
                switch (ret) {
                case 0:
                        adapter->ahw->loopback_state |= QLCNIC_LB_RESPONSE;
                        break;
                case 1:
                        dev_info(dev, "loopback already in progress\n");
                        adapter->ahw->diag_cnt = -QLCNIC_TEST_IN_PROGRESS;
                        break;
                case 2:
                        dev_info(dev, "loopback cable is not connected\n");
                        adapter->ahw->diag_cnt = -QLCNIC_LB_CABLE_NOT_CONN;
                        break;
                default:
                        dev_info(dev,
                                 "loopback configure request failed, err %x\n",
                                 ret);
                        adapter->ahw->diag_cnt = -QLCNIC_UNDEFINED_ERROR;
                        break;
                }
                break;
        default:
                break;
        }
}

struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *adapter,
                                     struct qlcnic_host_rds_ring *ring,
                                     u16 index, u16 cksum)
{
        struct qlcnic_rx_buffer *buffer;
        struct sk_buff *skb;

        buffer = &ring->rx_buf_arr[index];
        if (unlikely(buffer->skb == NULL)) {
                WARN_ON(1);
                return NULL;
        }

        pci_unmap_single(adapter->pdev, buffer->dma, ring->dma_size,
                         PCI_DMA_FROMDEVICE);

        skb = buffer->skb;
        if (likely((adapter->netdev->features & NETIF_F_RXCSUM) &&
                   (cksum == STATUS_CKSUM_OK || cksum == STATUS_CKSUM_LOOP))) {
                adapter->stats.csummed++;
                skb->ip_summed = CHECKSUM_UNNECESSARY;
        } else {
                skb_checksum_none_assert(skb);
        }

        buffer->skb = NULL;

        return skb;
}

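/*
 * Strip an in-band VLAN header, if any, and report the tag to the
 * caller. A tag equal to the port VLAN id (pvid) is treated as
 * untagged traffic; any other tag is rejected unless tagging is
 * enabled.
 */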
static inline int qlcnic_check_rx_tagging(struct qlcnic_adapter *adapter,
                                          struct sk_buff *skb, u16 *vlan_tag)
{
        struct ethhdr *eth_hdr;

        if (!__vlan_get_tag(skb, vlan_tag)) {
                eth_hdr = (struct ethhdr *)skb->data;
                memmove(skb->data + VLAN_HLEN, eth_hdr, ETH_ALEN * 2);
                skb_pull(skb, VLAN_HLEN);
        }
        if (!adapter->pvid)
                return 0;

        if (*vlan_tag == adapter->pvid) {
                /* Outer vlan tag. Packet should follow non-vlan path */
                *vlan_tag = 0xffff;
                return 0;
        }
        if (adapter->flags & QLCNIC_TAGGING_ENABLED)
                return 0;

        return -EINVAL;
}

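/*
 * Handle one ordinary receive completion: look up the Rx buffer by
 * reference handle, set length and checksum state, run VLAN and
 * loopback-filter processing and hand the skb to GRO.
 */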
static struct qlcnic_rx_buffer *
qlcnic_process_rcv(struct qlcnic_adapter *adapter,
                   struct qlcnic_host_sds_ring *sds_ring, int ring,
                   u64 sts_data0)
{
        struct net_device *netdev = adapter->netdev;
        struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
        struct qlcnic_rx_buffer *buffer;
        struct sk_buff *skb;
        struct qlcnic_host_rds_ring *rds_ring;
        int index, length, cksum, pkt_offset, is_lb_pkt;
        u16 vid = 0xffff, t_vid;

        if (unlikely(ring >= adapter->max_rds_rings))
                return NULL;

        rds_ring = &recv_ctx->rds_rings[ring];

        index = qlcnic_get_sts_refhandle(sts_data0);
        if (unlikely(index >= rds_ring->num_desc))
                return NULL;

        buffer = &rds_ring->rx_buf_arr[index];
        length = qlcnic_get_sts_totallength(sts_data0);
        cksum = qlcnic_get_sts_status(sts_data0);
        pkt_offset = qlcnic_get_sts_pkt_offset(sts_data0);

        skb = qlcnic_process_rxbuf(adapter, rds_ring, index, cksum);
        if (!skb)
                return buffer;

        if (adapter->drv_mac_learn &&
            (adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
                t_vid = 0;
                is_lb_pkt = qlcnic_82xx_is_lb_pkt(sts_data0);
                qlcnic_add_lb_filter(adapter, skb, is_lb_pkt,
                                     cpu_to_le16(t_vid));
        }

        if (length > rds_ring->skb_size)
                skb_put(skb, rds_ring->skb_size);
        else
                skb_put(skb, length);

        if (pkt_offset)
                skb_pull(skb, pkt_offset);

        if (unlikely(qlcnic_check_rx_tagging(adapter, skb, &vid))) {
                adapter->stats.rxdropped++;
                dev_kfree_skb(skb);
                return buffer;
        }

        skb->protocol = eth_type_trans(skb, netdev);

        if (vid != 0xffff)
                __vlan_hwaccel_put_tag(skb, vid);

        napi_gro_receive(&sds_ring->napi, skb);

        adapter->stats.rx_pkts++;
        adapter->stats.rxbytes += length;

        return buffer;
}

#define QLC_TCP_HDR_SIZE            20
#define QLC_TCP_TS_OPTION_SIZE      12
#define QLC_TCP_TS_HDR_SIZE         (QLC_TCP_HDR_SIZE + QLC_TCP_TS_OPTION_SIZE)

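/*
 * Handle a firmware-aggregated LRO completion. The IP total length,
 * TCP sequence number and PSH flag are rewritten from the status
 * descriptor so that the aggregate looks like a single large packet
 * to the stack.
 */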
static struct qlcnic_rx_buffer *
qlcnic_process_lro(struct qlcnic_adapter *adapter,
                   int ring, u64 sts_data0, u64 sts_data1)
{
        struct net_device *netdev = adapter->netdev;
        struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
        struct qlcnic_rx_buffer *buffer;
        struct sk_buff *skb;
        struct qlcnic_host_rds_ring *rds_ring;
        struct iphdr *iph;
        struct ipv6hdr *ipv6h;
        struct tcphdr *th;
        bool push, timestamp;
        int index, l2_hdr_offset, l4_hdr_offset, is_lb_pkt;
        u16 lro_length, length, data_offset, t_vid, vid = 0xffff;
        u32 seq_number;

        if (unlikely(ring >= adapter->max_rds_rings))
                return NULL;

        rds_ring = &recv_ctx->rds_rings[ring];

        index = qlcnic_get_lro_sts_refhandle(sts_data0);
        if (unlikely(index >= rds_ring->num_desc))
                return NULL;

        buffer = &rds_ring->rx_buf_arr[index];

        timestamp = qlcnic_get_lro_sts_timestamp(sts_data0);
        lro_length = qlcnic_get_lro_sts_length(sts_data0);
        l2_hdr_offset = qlcnic_get_lro_sts_l2_hdr_offset(sts_data0);
        l4_hdr_offset = qlcnic_get_lro_sts_l4_hdr_offset(sts_data0);
        push = qlcnic_get_lro_sts_push_flag(sts_data0);
        seq_number = qlcnic_get_lro_sts_seq_number(sts_data1);

        skb = qlcnic_process_rxbuf(adapter, rds_ring, index, STATUS_CKSUM_OK);
        if (!skb)
                return buffer;

        if (adapter->drv_mac_learn &&
            (adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
                t_vid = 0;
                is_lb_pkt = qlcnic_82xx_is_lb_pkt(sts_data0);
                qlcnic_add_lb_filter(adapter, skb, is_lb_pkt,
                                     cpu_to_le16(t_vid));
        }

        if (timestamp)
                data_offset = l4_hdr_offset + QLC_TCP_TS_HDR_SIZE;
        else
                data_offset = l4_hdr_offset + QLC_TCP_HDR_SIZE;

        skb_put(skb, lro_length + data_offset);
        skb_pull(skb, l2_hdr_offset);

        if (unlikely(qlcnic_check_rx_tagging(adapter, skb, &vid))) {
                adapter->stats.rxdropped++;
                dev_kfree_skb(skb);
                return buffer;
        }

        skb->protocol = eth_type_trans(skb, netdev);

        if (ntohs(skb->protocol) == ETH_P_IPV6) {
                ipv6h = (struct ipv6hdr *)skb->data;
                th = (struct tcphdr *)(skb->data + sizeof(struct ipv6hdr));
                length = (th->doff << 2) + lro_length;
                ipv6h->payload_len = htons(length);
        } else {
                iph = (struct iphdr *)skb->data;
                th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
                length = (iph->ihl << 2) + (th->doff << 2) + lro_length;
                iph->tot_len = htons(length);
                iph->check = 0;
                iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
        }

        th->psh = push;
        th->seq = htonl(seq_number);
        length = skb->len;

        if (adapter->flags & QLCNIC_FW_LRO_MSS_CAP) {
                skb_shinfo(skb)->gso_size = qlcnic_get_lro_sts_mss(sts_data1);
                if (skb->protocol == htons(ETH_P_IPV6))
                        skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
                else
                        skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
        }

        if (vid != 0xffff)
                __vlan_hwaccel_put_tag(skb, vid);
        netif_receive_skb(skb);

        adapter->stats.lro_pkts++;
        adapter->stats.lrobytes += length;

        return buffer;
}

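/*
 * NAPI Rx worker: consume up to @max status descriptors, dispatch
 * each by opcode, hand consumed buffers back to their rds rings and
 * advance the status-ring consumer index.
 */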
int qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring, int max)
{
        struct qlcnic_host_rds_ring *rds_ring;
        struct qlcnic_adapter *adapter = sds_ring->adapter;
        struct list_head *cur;
        struct status_desc *desc;
        struct qlcnic_rx_buffer *rxbuf;
        int opcode, desc_cnt, count = 0;
        u64 sts_data0, sts_data1;
        u8 ring;
        u32 consumer = sds_ring->consumer;

        while (count < max) {
                desc = &sds_ring->desc_head[consumer];
                sts_data0 = le64_to_cpu(desc->status_desc_data[0]);

                if (!(sts_data0 & STATUS_OWNER_HOST))
                        break;

                desc_cnt = qlcnic_get_sts_desc_cnt(sts_data0);
                opcode = qlcnic_get_sts_opcode(sts_data0);
                switch (opcode) {
                case QLCNIC_RXPKT_DESC:
                case QLCNIC_OLD_RXPKT_DESC:
                case QLCNIC_SYN_OFFLOAD:
                        ring = qlcnic_get_sts_type(sts_data0);
                        rxbuf = qlcnic_process_rcv(adapter, sds_ring, ring,
                                                   sts_data0);
                        break;
                case QLCNIC_LRO_DESC:
                        ring = qlcnic_get_lro_sts_type(sts_data0);
                        sts_data1 = le64_to_cpu(desc->status_desc_data[1]);
                        rxbuf = qlcnic_process_lro(adapter, ring, sts_data0,
                                                   sts_data1);
                        break;
                case QLCNIC_RESPONSE_DESC:
                        qlcnic_handle_fw_message(desc_cnt, consumer, sds_ring);
                        /* fall through */
                default:
                        goto skip;
                }
                WARN_ON(desc_cnt > 1);

                if (likely(rxbuf))
                        list_add_tail(&rxbuf->list, &sds_ring->free_list[ring]);
                else
                        adapter->stats.null_rxbuf++;
skip:
                for (; desc_cnt > 0; desc_cnt--) {
                        desc = &sds_ring->desc_head[consumer];
                        desc->status_desc_data[0] = QLCNIC_DESC_OWNER_FW;
                        consumer = get_next_index(consumer, sds_ring->num_desc);
                }
                count++;
        }

        for (ring = 0; ring < adapter->max_rds_rings; ring++) {
                rds_ring = &adapter->recv_ctx->rds_rings[ring];
                if (!list_empty(&sds_ring->free_list[ring])) {
                        list_for_each(cur, &sds_ring->free_list[ring]) {
                                rxbuf = list_entry(cur, struct qlcnic_rx_buffer,
                                                   list);
                                qlcnic_alloc_rx_skb(adapter, rds_ring, rxbuf);
                        }
                        spin_lock(&rds_ring->lock);
                        list_splice_tail_init(&sds_ring->free_list[ring],
                                              &rds_ring->free_list);
                        spin_unlock(&rds_ring->lock);
                }

                qlcnic_post_rx_buffers_nodb(adapter, rds_ring, ring);
        }

        if (count) {
                sds_ring->consumer = consumer;
                writel(consumer, sds_ring->crb_sts_consumer);
        }

        return count;
}

void qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter,
                            struct qlcnic_host_rds_ring *rds_ring, u8 ring_id)
{
        struct rcv_desc *pdesc;
        struct qlcnic_rx_buffer *buffer;
        int count = 0;
        u32 producer, handle;
        struct list_head *head;

        producer = rds_ring->producer;
        head = &rds_ring->free_list;

        while (!list_empty(head)) {
                buffer = list_entry(head->next, struct qlcnic_rx_buffer, list);

                if (!buffer->skb) {
                        if (qlcnic_alloc_rx_skb(adapter, rds_ring, buffer))
                                break;
                }

                count++;
                list_del(&buffer->list);

                /* make a rcv descriptor */
                pdesc = &rds_ring->desc_head[producer];
                pdesc->addr_buffer = cpu_to_le64(buffer->dma);
                handle = qlcnic_get_ref_handle(adapter, buffer->ref_handle,
                                               ring_id);
                pdesc->reference_handle = cpu_to_le16(handle);
                pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);
                producer = get_next_index(producer, rds_ring->num_desc);
        }

        if (count) {
                rds_ring->producer = producer;
                writel((producer - 1) & (rds_ring->num_desc - 1),
                       rds_ring->crb_rcv_producer);
        }
}

static void dump_skb(struct sk_buff *skb, struct qlcnic_adapter *adapter)
{
        int i;
        unsigned char *data = skb->data;

        pr_info("\n");
        for (i = 0; i < skb->len; i++) {
                QLCDB(adapter, DRV, "%02x ", data[i]);
                if ((i & 0x0f) == 8)
                        pr_info("\n");
        }
}

static void qlcnic_process_rcv_diag(struct qlcnic_adapter *adapter, int ring,
                                    u64 sts_data0)
{
        struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
        struct sk_buff *skb;
        struct qlcnic_host_rds_ring *rds_ring;
        int index, length, cksum, pkt_offset;

        if (unlikely(ring >= adapter->max_rds_rings))
                return;

        rds_ring = &recv_ctx->rds_rings[ring];

        index = qlcnic_get_sts_refhandle(sts_data0);
        length = qlcnic_get_sts_totallength(sts_data0);
        if (unlikely(index >= rds_ring->num_desc))
                return;

        cksum = qlcnic_get_sts_status(sts_data0);
        pkt_offset = qlcnic_get_sts_pkt_offset(sts_data0);

        skb = qlcnic_process_rxbuf(adapter, rds_ring, index, cksum);
        if (!skb)
                return;

        if (length > rds_ring->skb_size)
                skb_put(skb, rds_ring->skb_size);
        else
                skb_put(skb, length);

        if (pkt_offset)
                skb_pull(skb, pkt_offset);

        if (!qlcnic_check_loopback_buff(skb->data, adapter->mac_addr))
                adapter->ahw->diag_cnt++;
        else
                dump_skb(skb, adapter);

        dev_kfree_skb_any(skb);
        adapter->stats.rx_pkts++;
        adapter->stats.rxbytes += length;
}

void qlcnic_82xx_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring)
{
        struct qlcnic_adapter *adapter = sds_ring->adapter;
        struct status_desc *desc;
        u64 sts_data0;
        int ring, opcode, desc_cnt;
        u32 consumer = sds_ring->consumer;

        desc = &sds_ring->desc_head[consumer];
        sts_data0 = le64_to_cpu(desc->status_desc_data[0]);

        if (!(sts_data0 & STATUS_OWNER_HOST))
                return;

        desc_cnt = qlcnic_get_sts_desc_cnt(sts_data0);
        opcode = qlcnic_get_sts_opcode(sts_data0);
        switch (opcode) {
        case QLCNIC_RESPONSE_DESC:
                qlcnic_handle_fw_message(desc_cnt, consumer, sds_ring);
                break;
        default:
                ring = qlcnic_get_sts_type(sts_data0);
                qlcnic_process_rcv_diag(adapter, ring, sts_data0);
                break;
        }

        for (; desc_cnt > 0; desc_cnt--) {
                desc = &sds_ring->desc_head[consumer];
                desc->status_desc_data[0] = cpu_to_le64(STATUS_OWNER_PHANTOM);
                consumer = get_next_index(consumer, sds_ring->num_desc);
        }

        sds_ring->consumer = consumer;
        writel(consumer, sds_ring->crb_sts_consumer);
}

int qlcnic_82xx_napi_add(struct qlcnic_adapter *adapter,
                         struct net_device *netdev)
{
        int ring, max_sds_rings;
        struct qlcnic_host_sds_ring *sds_ring;
        struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;

        if (qlcnic_alloc_sds_rings(recv_ctx, adapter->max_sds_rings))
                return -ENOMEM;

        max_sds_rings = adapter->max_sds_rings;

        for (ring = 0; ring < adapter->max_sds_rings; ring++) {
                sds_ring = &recv_ctx->sds_rings[ring];
                if (ring == adapter->max_sds_rings - 1)
                        netif_napi_add(netdev, &sds_ring->napi, qlcnic_poll,
                                       QLCNIC_NETDEV_WEIGHT / max_sds_rings);
                else
                        netif_napi_add(netdev, &sds_ring->napi, qlcnic_rx_poll,
                                       QLCNIC_NETDEV_WEIGHT * 2);
        }

        if (qlcnic_alloc_tx_rings(adapter, netdev)) {
                qlcnic_free_sds_rings(recv_ctx);
                return -ENOMEM;
        }

        return 0;
}

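/* Unregister the 82xx NAPI contexts and free the SDS and Tx rings. */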
void qlcnic_82xx_napi_del(struct qlcnic_adapter *adapter)
{
        int ring;
        struct qlcnic_host_sds_ring *sds_ring;
        struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;

        for (ring = 0; ring < adapter->max_sds_rings; ring++) {
                sds_ring = &recv_ctx->sds_rings[ring];
                netif_napi_del(&sds_ring->napi);
        }

        qlcnic_free_sds_rings(adapter->recv_ctx);
        qlcnic_free_tx_rings(adapter);
}

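/* Enable NAPI polling and unmask the interrupt on every 82xx SDS ring. */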
void qlcnic_82xx_napi_enable(struct qlcnic_adapter *adapter)
{
        int ring;
        struct qlcnic_host_sds_ring *sds_ring;
        struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;

        if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
                return;

        for (ring = 0; ring < adapter->max_sds_rings; ring++) {
                sds_ring = &recv_ctx->sds_rings[ring];
                napi_enable(&sds_ring->napi);
                qlcnic_enable_int(sds_ring);
        }
}

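/* Mask interrupts and quiesce NAPI on every 82xx SDS ring;
 * napi_synchronize() waits out any poll that is still in flight
 * before the context is disabled.
 */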
void qlcnic_82xx_napi_disable(struct qlcnic_adapter *adapter)
{
        int ring;
        struct qlcnic_host_sds_ring *sds_ring;
        struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;

        if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
                return;

        for (ring = 0; ring < adapter->max_sds_rings; ring++) {
                sds_ring = &recv_ctx->sds_rings[ring];
                qlcnic_disable_int(sds_ring);
                napi_synchronize(&sds_ring->napi);
                napi_disable(&sds_ring->napi);
        }
}

#define QLC_83XX_NORMAL_LB_PKT  (1ULL << 36)
#define QLC_83XX_LRO_LB_PKT     (1ULL << 46)

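/* The loopback indication sits in a different status-descriptor bit
 * for LRO packets than for regular ones.
 */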
static inline int qlcnic_83xx_is_lb_pkt(u64 sts_data, int lro_pkt)
{
        if (lro_pkt)
                return (sts_data & QLC_83XX_LRO_LB_PKT) ? 1 : 0;
        else
                return (sts_data & QLC_83XX_NORMAL_LB_PKT) ? 1 : 0;
}

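/* Handle one regular (non-LRO) 83xx receive: validate ring and buffer
 * handle, build the skb, optionally record the source MAC for eSwitch
 * loopback filtering, strip any VLAN tag and feed the packet to GRO.
 * Returns the rx_buffer for recycling, or NULL on a bad descriptor.
 */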
static struct qlcnic_rx_buffer *
qlcnic_83xx_process_rcv(struct qlcnic_adapter *adapter,
                        struct qlcnic_host_sds_ring *sds_ring,
                        u8 ring, u64 sts_data[])
{
        struct net_device *netdev = adapter->netdev;
        struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
        struct qlcnic_rx_buffer *buffer;
        struct sk_buff *skb;
        struct qlcnic_host_rds_ring *rds_ring;
        int index, length, cksum, is_lb_pkt;
        u16 vid = 0xffff, t_vid;

        if (unlikely(ring >= adapter->max_rds_rings))
                return NULL;

        rds_ring = &recv_ctx->rds_rings[ring];

        index = qlcnic_83xx_hndl(sts_data[0]);
        if (unlikely(index >= rds_ring->num_desc))
                return NULL;

        buffer = &rds_ring->rx_buf_arr[index];
        length = qlcnic_83xx_pktln(sts_data[0]);
        cksum = qlcnic_83xx_csum_status(sts_data[1]);
        skb = qlcnic_process_rxbuf(adapter, rds_ring, index, cksum);
        if (!skb)
                return buffer;

        if (adapter->drv_mac_learn &&
            (adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
                t_vid = 0;
                is_lb_pkt = qlcnic_83xx_is_lb_pkt(sts_data[1], 0);
                qlcnic_add_lb_filter(adapter, skb, is_lb_pkt,
                                     cpu_to_le16(t_vid));
        }

        if (length > rds_ring->skb_size)
                skb_put(skb, rds_ring->skb_size);
        else
                skb_put(skb, length);

        if (unlikely(qlcnic_check_rx_tagging(adapter, skb, &vid))) {
                adapter->stats.rxdropped++;
                dev_kfree_skb(skb);
                return buffer;
        }

        skb->protocol = eth_type_trans(skb, netdev);

        if (vid != 0xffff)
                __vlan_hwaccel_put_tag(skb, vid);

        napi_gro_receive(&sds_ring->napi, skb);

        adapter->stats.rx_pkts++;
        adapter->stats.rxbytes += length;

        return buffer;
}

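/* Handle one firmware-aggregated (LRO) 83xx receive. Because the
 * firmware has coalesced several TCP segments, the IP total/payload
 * length and IPv4 header checksum are rewritten here, and the MSS
 * reported by the firmware seeds gso_size so the stack can resegment
 * the aggregate if it has to be forwarded.
 */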
static struct qlcnic_rx_buffer *
qlcnic_83xx_process_lro(struct qlcnic_adapter *adapter,
                        u8 ring, u64 sts_data[])
{
        struct net_device *netdev = adapter->netdev;
        struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
        struct qlcnic_rx_buffer *buffer;
        struct sk_buff *skb;
        struct qlcnic_host_rds_ring *rds_ring;
        struct iphdr *iph;
        struct ipv6hdr *ipv6h;
        struct tcphdr *th;
        bool push;
        int l2_hdr_offset, l4_hdr_offset;
        int index, is_lb_pkt;
        u16 lro_length, length, data_offset, gso_size;
        u16 vid = 0xffff, t_vid;

        if (unlikely(ring >= adapter->max_rds_rings))
                return NULL;

        rds_ring = &recv_ctx->rds_rings[ring];

        index = qlcnic_83xx_hndl(sts_data[0]);
        if (unlikely(index >= rds_ring->num_desc))
                return NULL;

        buffer = &rds_ring->rx_buf_arr[index];

        lro_length = qlcnic_83xx_lro_pktln(sts_data[0]);
        l2_hdr_offset = qlcnic_83xx_l2_hdr_off(sts_data[1]);
        l4_hdr_offset = qlcnic_83xx_l4_hdr_off(sts_data[1]);
        push = qlcnic_83xx_is_psh_bit(sts_data[1]);

        skb = qlcnic_process_rxbuf(adapter, rds_ring, index, STATUS_CKSUM_OK);
        if (!skb)
                return buffer;

        if (adapter->drv_mac_learn &&
            (adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
                t_vid = 0;
                is_lb_pkt = qlcnic_83xx_is_lb_pkt(sts_data[1], 1);
                qlcnic_add_lb_filter(adapter, skb, is_lb_pkt,
                                     cpu_to_le16(t_vid));
        }

        if (qlcnic_83xx_is_tstamp(sts_data[1]))
                data_offset = l4_hdr_offset + QLCNIC_TCP_TS_HDR_SIZE;
        else
                data_offset = l4_hdr_offset + QLCNIC_TCP_HDR_SIZE;

        skb_put(skb, lro_length + data_offset);
        skb_pull(skb, l2_hdr_offset);

        if (unlikely(qlcnic_check_rx_tagging(adapter, skb, &vid))) {
                adapter->stats.rxdropped++;
                dev_kfree_skb(skb);
                return buffer;
        }

        skb->protocol = eth_type_trans(skb, netdev);
        if (skb->protocol == htons(ETH_P_IPV6)) {
                ipv6h = (struct ipv6hdr *)skb->data;
                th = (struct tcphdr *)(skb->data + sizeof(struct ipv6hdr));

                length = (th->doff << 2) + lro_length;
                ipv6h->payload_len = htons(length);
        } else {
                iph = (struct iphdr *)skb->data;
                th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
                length = (iph->ihl << 2) + (th->doff << 2) + lro_length;
                iph->tot_len = htons(length);
                iph->check = 0;
                iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
        }

        th->psh = push;
        length = skb->len;

        if (adapter->flags & QLCNIC_FW_LRO_MSS_CAP) {
                gso_size = qlcnic_83xx_get_lro_sts_mss(sts_data[0]);
                skb_shinfo(skb)->gso_size = gso_size;
                if (skb->protocol == htons(ETH_P_IPV6))
                        skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
                else
                        skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
        }

        if (vid != 0xffff)
                __vlan_hwaccel_put_tag(skb, vid);

        netif_receive_skb(skb);

        adapter->stats.lro_pkts++;
        adapter->stats.lrobytes += length;
        return buffer;
}

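/* Drain up to @max status descriptors from an 83xx SDS ring,
 * dispatching each by opcode, then refill the RDS rings with fresh
 * buffers and publish the new consumer index to the hardware.
 */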
static int qlcnic_83xx_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring,
                                        int max)
{
        struct qlcnic_host_rds_ring *rds_ring;
        struct qlcnic_adapter *adapter = sds_ring->adapter;
        struct list_head *cur;
        struct status_desc *desc;
        struct qlcnic_rx_buffer *rxbuf = NULL;
        u8 ring;
        u64 sts_data[2];
        int count = 0, opcode;
        u32 consumer = sds_ring->consumer;

        while (count < max) {
                desc = &sds_ring->desc_head[consumer];
                sts_data[1] = le64_to_cpu(desc->status_desc_data[1]);
                opcode = qlcnic_83xx_opcode(sts_data[1]);
                if (!opcode)
                        break;
                sts_data[0] = le64_to_cpu(desc->status_desc_data[0]);
                ring = QLCNIC_FETCH_RING_ID(sts_data[0]);

                switch (opcode) {
                case QLC_83XX_REG_DESC:
                        rxbuf = qlcnic_83xx_process_rcv(adapter, sds_ring,
                                                        ring, sts_data);
                        break;
                case QLC_83XX_LRO_DESC:
                        rxbuf = qlcnic_83xx_process_lro(adapter, ring,
                                                        sts_data);
                        break;
                default:
                        dev_info(&adapter->pdev->dev,
                                 "Unknown opcode: 0x%x\n", opcode);
                        goto skip;
                }

                if (likely(rxbuf))
                        list_add_tail(&rxbuf->list, &sds_ring->free_list[ring]);
                else
                        adapter->stats.null_rxbuf++;
skip:
                desc = &sds_ring->desc_head[consumer];
                /* Reset the descriptor */
                desc->status_desc_data[1] = 0;
                consumer = get_next_index(consumer, sds_ring->num_desc);
                count++;
        }

        for (ring = 0; ring < adapter->max_rds_rings; ring++) {
                rds_ring = &adapter->recv_ctx->rds_rings[ring];
                if (!list_empty(&sds_ring->free_list[ring])) {
                        list_for_each(cur, &sds_ring->free_list[ring]) {
                                rxbuf = list_entry(cur, struct qlcnic_rx_buffer,
                                                   list);
                                qlcnic_alloc_rx_skb(adapter, rds_ring, rxbuf);
                        }
                        spin_lock(&rds_ring->lock);
                        list_splice_tail_init(&sds_ring->free_list[ring],
                                              &rds_ring->free_list);
                        spin_unlock(&rds_ring->lock);
                }
                qlcnic_post_rx_buffers_nodb(adapter, rds_ring, ring);
        }

        if (count) {
                sds_ring->consumer = consumer;
                writel(consumer, sds_ring->crb_sts_consumer);
        }

        return count;
}

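/* NAPI handler for legacy/MSI interrupts: one vector covers both Tx
 * and Rx, so service the single Tx ring alongside the SDS ring and
 * only re-enable the interrupt once both are fully drained.
 */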
static int qlcnic_83xx_poll(struct napi_struct *napi, int budget)
{
        int tx_complete;
        int work_done;
        struct qlcnic_host_sds_ring *sds_ring;
        struct qlcnic_adapter *adapter;
        struct qlcnic_host_tx_ring *tx_ring;

        sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi);
        adapter = sds_ring->adapter;
        /* tx ring count = 1 */
        tx_ring = adapter->tx_ring;

        tx_complete = qlcnic_process_cmd_ring(adapter, tx_ring, budget);
        work_done = qlcnic_83xx_process_rcv_ring(sds_ring, budget);
        if ((work_done < budget) && tx_complete) {
                napi_complete(&sds_ring->napi);
                qlcnic_83xx_enable_intr(adapter, sds_ring);
        }

        return work_done;
}

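/* NAPI handler for a dedicated MSI-X Tx vector; note it polls with a
 * fixed QLCNIC_TX_POLL_BUDGET rather than the budget passed in.
 */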
static int qlcnic_83xx_msix_tx_poll(struct napi_struct *napi, int budget)
{
        int work_done;
        struct qlcnic_host_tx_ring *tx_ring;
        struct qlcnic_adapter *adapter;

        budget = QLCNIC_TX_POLL_BUDGET;
        tx_ring = container_of(napi, struct qlcnic_host_tx_ring, napi);
        adapter = tx_ring->adapter;
        work_done = qlcnic_process_cmd_ring(adapter, tx_ring, budget);
        if (work_done) {
                napi_complete(&tx_ring->napi);
                if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
                        qlcnic_83xx_enable_tx_intr(adapter, tx_ring);
        }

        return work_done;
}

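/* NAPI handler for an Rx-only MSI-X vector. */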
static int qlcnic_83xx_rx_poll(struct napi_struct *napi, int budget)
{
        int work_done;
        struct qlcnic_host_sds_ring *sds_ring;
        struct qlcnic_adapter *adapter;

        sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi);
        adapter = sds_ring->adapter;
        work_done = qlcnic_83xx_process_rcv_ring(sds_ring, budget);
        if (work_done < budget) {
                napi_complete(&sds_ring->napi);
                if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
                        qlcnic_83xx_enable_intr(adapter, sds_ring);
        }

        return work_done;
}

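/* Enable NAPI on all 83xx SDS rings and, in MSI-X mode, on the
 * per-ring Tx NAPI contexts, unmasking each associated vector.
 */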
void qlcnic_83xx_napi_enable(struct qlcnic_adapter *adapter)
{
        int ring;
        struct qlcnic_host_sds_ring *sds_ring;
        struct qlcnic_host_tx_ring *tx_ring;
        struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;

        if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
                return;

        for (ring = 0; ring < adapter->max_sds_rings; ring++) {
                sds_ring = &recv_ctx->sds_rings[ring];
                napi_enable(&sds_ring->napi);
                if (adapter->flags & QLCNIC_MSIX_ENABLED)
                        qlcnic_83xx_enable_intr(adapter, sds_ring);
        }

        if (adapter->flags & QLCNIC_MSIX_ENABLED) {
                for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
                        tx_ring = &adapter->tx_ring[ring];
                        napi_enable(&tx_ring->napi);
                        qlcnic_83xx_enable_tx_intr(adapter, tx_ring);
                }
        }
}

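/* Mask interrupts and quiesce NAPI on all 83xx SDS rings and, in
 * MSI-X mode, on the Tx rings as well.
 */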
void qlcnic_83xx_napi_disable(struct qlcnic_adapter *adapter)
{
        int ring;
        struct qlcnic_host_sds_ring *sds_ring;
        struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
        struct qlcnic_host_tx_ring *tx_ring;

        if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
                return;

        for (ring = 0; ring < adapter->max_sds_rings; ring++) {
                sds_ring = &recv_ctx->sds_rings[ring];
                if (adapter->flags & QLCNIC_MSIX_ENABLED)
                        qlcnic_83xx_disable_intr(adapter, sds_ring);
                napi_synchronize(&sds_ring->napi);
                napi_disable(&sds_ring->napi);
        }

        if (adapter->flags & QLCNIC_MSIX_ENABLED) {
                for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
                        tx_ring = &adapter->tx_ring[ring];
                        qlcnic_83xx_disable_tx_intr(adapter, tx_ring);
                        napi_synchronize(&tx_ring->napi);
                        napi_disable(&tx_ring->napi);
                }
        }
}

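/* Allocate ring state and register the 83xx NAPI contexts. With MSI-X
 * enabled, Rx and Tx get separate handlers and the Tx rings their own
 * NAPI contexts; otherwise one combined handler services everything.
 */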
int qlcnic_83xx_napi_add(struct qlcnic_adapter *adapter,
                         struct net_device *netdev)
{
        int ring, max_sds_rings;
        struct qlcnic_host_sds_ring *sds_ring;
        struct qlcnic_host_tx_ring *tx_ring;
        struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;

        if (qlcnic_alloc_sds_rings(recv_ctx, adapter->max_sds_rings))
                return -ENOMEM;

        max_sds_rings = adapter->max_sds_rings;
        for (ring = 0; ring < adapter->max_sds_rings; ring++) {
                sds_ring = &recv_ctx->sds_rings[ring];
                if (adapter->flags & QLCNIC_MSIX_ENABLED)
                        netif_napi_add(netdev, &sds_ring->napi,
                                       qlcnic_83xx_rx_poll,
                                       QLCNIC_NETDEV_WEIGHT * 2);
                else
                        netif_napi_add(netdev, &sds_ring->napi,
                                       qlcnic_83xx_poll,
                                       QLCNIC_NETDEV_WEIGHT / max_sds_rings);
        }

        if (qlcnic_alloc_tx_rings(adapter, netdev)) {
                qlcnic_free_sds_rings(recv_ctx);
                return -ENOMEM;
        }

        if (adapter->flags & QLCNIC_MSIX_ENABLED) {
                for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
                        tx_ring = &adapter->tx_ring[ring];
                        netif_napi_add(netdev, &tx_ring->napi,
                                       qlcnic_83xx_msix_tx_poll,
                                       QLCNIC_NETDEV_WEIGHT);
                }
        }

        return 0;
}

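/* Unregister the 83xx NAPI contexts and free the SDS and Tx rings. */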
void qlcnic_83xx_napi_del(struct qlcnic_adapter *adapter)
{
        int ring;
        struct qlcnic_host_sds_ring *sds_ring;
        struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
        struct qlcnic_host_tx_ring *tx_ring;

        for (ring = 0; ring < adapter->max_sds_rings; ring++) {
                sds_ring = &recv_ctx->sds_rings[ring];
                netif_napi_del(&sds_ring->napi);
        }

        qlcnic_free_sds_rings(adapter->recv_ctx);

        if (adapter->flags & QLCNIC_MSIX_ENABLED) {
                for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
                        tx_ring = &adapter->tx_ring[ring];
                        netif_napi_del(&tx_ring->napi);
                }
        }

        qlcnic_free_tx_rings(adapter);
}

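/* Diagnostic receive for the 83xx loopback test: count the packet as
 * a hit when its payload carries the adapter's own MAC address,
 * otherwise dump it for debugging; the skb is consumed either way.
 */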
void qlcnic_83xx_process_rcv_diag(struct qlcnic_adapter *adapter,
                                  int ring, u64 sts_data[])
{
        struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
        struct sk_buff *skb;
        struct qlcnic_host_rds_ring *rds_ring;
        int index, length;

        if (unlikely(ring >= adapter->max_rds_rings))
                return;

        rds_ring = &recv_ctx->rds_rings[ring];
        index = qlcnic_83xx_hndl(sts_data[0]);
        if (unlikely(index >= rds_ring->num_desc))
                return;

        length = qlcnic_83xx_pktln(sts_data[0]);

        skb = qlcnic_process_rxbuf(adapter, rds_ring, index, STATUS_CKSUM_OK);
        if (!skb)
                return;

        if (length > rds_ring->skb_size)
                skb_put(skb, rds_ring->skb_size);
        else
                skb_put(skb, length);

        if (!qlcnic_check_loopback_buff(skb->data, adapter->mac_addr))
                adapter->ahw->diag_cnt++;
        else
                dump_skb(skb, adapter);

        dev_kfree_skb_any(skb);
}

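/* Diagnostic variant of the 83xx status-ring handler: consume a single
 * status descriptor, pass it to qlcnic_83xx_process_rcv_diag(), then
 * return the descriptor to the adapter and advance the consumer index.
 */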
void qlcnic_83xx_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring)
{
        struct qlcnic_adapter *adapter = sds_ring->adapter;
        struct status_desc *desc;
        u64 sts_data[2];
        int ring, opcode;
        u32 consumer = sds_ring->consumer;

        desc = &sds_ring->desc_head[consumer];
        sts_data[0] = le64_to_cpu(desc->status_desc_data[0]);
        sts_data[1] = le64_to_cpu(desc->status_desc_data[1]);
        opcode = qlcnic_83xx_opcode(sts_data[1]);
        if (!opcode)
                return;

        ring = QLCNIC_FETCH_RING_ID(qlcnic_83xx_hndl(sts_data[0]));
        qlcnic_83xx_process_rcv_diag(adapter, ring, sts_data);

        desc = &sds_ring->desc_head[consumer];
        desc->status_desc_data[0] = cpu_to_le64(STATUS_OWNER_PHANTOM);
        consumer = get_next_index(consumer, sds_ring->num_desc);
        sds_ring->consumer = consumer;
        writel(consumer, sds_ring->crb_sts_consumer);
}