drivers/net/ethernet/intel/i40e/i40e_txrx.c
1 /*******************************************************************************
2  *
3  * Intel Ethernet Controller XL710 Family Linux Driver
4  * Copyright(c) 2013 Intel Corporation.
5  *
6  * This program is free software; you can redistribute it and/or modify it
7  * under the terms and conditions of the GNU General Public License,
8  * version 2, as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope it will be useful, but WITHOUT
11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
13  * more details.
14  *
15  * You should have received a copy of the GNU General Public License along with
16  * this program; if not, write to the Free Software Foundation, Inc.,
17  * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18  *
19  * The full GNU General Public License is included in this distribution in
20  * the file called "COPYING".
21  *
22  * Contact Information:
23  * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25  *
26  ******************************************************************************/
27
28 #include "i40e.h"
29
30 static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
31                                 u32 td_tag)
32 {
33         return cpu_to_le64(I40E_TX_DESC_DTYPE_DATA |
34                            ((u64)td_cmd  << I40E_TXD_QW1_CMD_SHIFT) |
35                            ((u64)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) |
36                            ((u64)size  << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) |
37                            ((u64)td_tag  << I40E_TXD_QW1_L2TAG1_SHIFT));
38 }
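/* Note (added, illustrative): build_ctob() packs Quad Word 1
 * (cmd_type_offset_bsz) of a Tx data descriptor - the DTYPE, command bits,
 * header offsets, buffer size and L2TAG1 - and returns it in little-endian
 * form.  In this file it is used below to finish the Flow Director dummy
 * data descriptor; the regular transmit path presumably reuses the same
 * helper when filling real data descriptors.
 */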
39
40 #define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
41 /**
42  * i40e_program_fdir_filter - Program a Flow Director filter
43  * @fdir_data: Packet data that will be filter parameters
44  * @pf: The pf pointer
45  * @add: True for add/update, False for remove
46  **/
47 int i40e_program_fdir_filter(struct i40e_fdir_data *fdir_data,
48                              struct i40e_pf *pf, bool add)
49 {
50         struct i40e_filter_program_desc *fdir_desc;
51         struct i40e_tx_buffer *tx_buf;
52         struct i40e_tx_desc *tx_desc;
53         struct i40e_ring *tx_ring;
54         unsigned int fpt, dcc;
55         struct i40e_vsi *vsi;
56         struct device *dev;
57         dma_addr_t dma;
58         u32 td_cmd = 0;
59         u16 i;
60
61         /* find existing FDIR VSI */
62         vsi = NULL;
63         for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
64                 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR)
65                         vsi = pf->vsi[i];
66         if (!vsi)
67                 return -ENOENT;
68
69         tx_ring = vsi->tx_rings[0];
70         dev = tx_ring->dev;
71
72         dma = dma_map_single(dev, fdir_data->raw_packet,
73                              I40E_FDIR_MAX_RAW_PACKET_LOOKUP, DMA_TO_DEVICE);
74         if (dma_mapping_error(dev, dma))
75                 goto dma_fail;
76
77         /* grab the next descriptor */
78         i = tx_ring->next_to_use;
79         fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
80         tx_buf = &tx_ring->tx_bi[i];
81
82         tx_ring->next_to_use = (i + 1 < tx_ring->count) ? i + 1 : 0;
83
84         fpt = (fdir_data->q_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
85               I40E_TXD_FLTR_QW0_QINDEX_MASK;
86
87         fpt |= (fdir_data->flex_off << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT) &
88                I40E_TXD_FLTR_QW0_FLEXOFF_MASK;
89
90         fpt |= (fdir_data->pctype << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) &
91                I40E_TXD_FLTR_QW0_PCTYPE_MASK;
92
93         /* Use LAN VSI Id if not programmed by user */
94         if (fdir_data->dest_vsi == 0)
95                 fpt |= (pf->vsi[pf->lan_vsi]->id) <<
96                        I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;
97         else
98                 fpt |= ((u32)fdir_data->dest_vsi <<
99                         I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) &
100                        I40E_TXD_FLTR_QW0_DEST_VSI_MASK;
101
102         fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(fpt);
103
104         dcc = I40E_TX_DESC_DTYPE_FILTER_PROG;
105
106         if (add)
107                 dcc |= I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
108                        I40E_TXD_FLTR_QW1_PCMD_SHIFT;
109         else
110                 dcc |= I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
111                        I40E_TXD_FLTR_QW1_PCMD_SHIFT;
112
113         dcc |= (fdir_data->dest_ctl << I40E_TXD_FLTR_QW1_DEST_SHIFT) &
114                I40E_TXD_FLTR_QW1_DEST_MASK;
115
116         dcc |= (fdir_data->fd_status << I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT) &
117                I40E_TXD_FLTR_QW1_FD_STATUS_MASK;
118
119         if (fdir_data->cnt_index != 0) {
120                 dcc |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
121                 dcc |= ((u32)fdir_data->cnt_index <<
122                         I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
123                        I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
124         }
125
126         fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dcc);
127         fdir_desc->fd_id = cpu_to_le32(fdir_data->fd_id);
128
129         /* Now program a dummy descriptor */
130         i = tx_ring->next_to_use;
131         tx_desc = I40E_TX_DESC(tx_ring, i);
132
133         tx_ring->next_to_use = (i + 1 < tx_ring->count) ? i + 1 : 0;
134
135         tx_desc->buffer_addr = cpu_to_le64(dma);
136         td_cmd = I40E_TXD_CMD | I40E_TX_DESC_CMD_DUMMY;
137
138         tx_desc->cmd_type_offset_bsz =
139                 build_ctob(td_cmd, 0, I40E_FDIR_MAX_RAW_PACKET_LOOKUP, 0);
140
141         /* Force memory writes to complete before letting h/w
142          * know there are new descriptors to fetch.  (Only
143          * applicable for weak-ordered memory model archs,
144          * such as IA-64).
145          */
146         wmb();
147
148         /* Mark the data descriptor to be watched */
149         tx_buf->next_to_watch = tx_desc;
150
151         writel(tx_ring->next_to_use, tx_ring->tail);
152         return 0;
153
154 dma_fail:
155         return -1;
156 }
157
158 /**
159  * i40e_fd_handle_status - check the Programming Status for FD
160  * @rx_ring: the Rx ring for this descriptor
161  * @qw: the descriptor data
162  * @prog_id: the id originally used for programming
163  *
164  * This is used to verify whether the FD programming or invalidation
165  * requested by SW to the HW was successful, and to take action accordingly.
166  **/
167 static void i40e_fd_handle_status(struct i40e_ring *rx_ring, u32 qw, u8 prog_id)
168 {
169         struct pci_dev *pdev = rx_ring->vsi->back->pdev;
170         u32 error;
171
172         error = (qw & I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >>
173                 I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT;
174
175         /* for now just print the Status */
176         dev_info(&pdev->dev, "FD programming id %02x, Status %08x\n",
177                  prog_id, error);
178 }
179
180 /**
181  * i40e_unmap_and_free_tx_resource - Release a Tx buffer
182  * @ring:      the ring that owns the buffer
183  * @tx_buffer: the buffer to free
184  **/
185 static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
186                                             struct i40e_tx_buffer *tx_buffer)
187 {
188         if (tx_buffer->skb) {
189                 dev_kfree_skb_any(tx_buffer->skb);
190                 if (dma_unmap_len(tx_buffer, len))
191                         dma_unmap_single(ring->dev,
192                                          dma_unmap_addr(tx_buffer, dma),
193                                          dma_unmap_len(tx_buffer, len),
194                                          DMA_TO_DEVICE);
195         } else if (dma_unmap_len(tx_buffer, len)) {
196                 dma_unmap_page(ring->dev,
197                                dma_unmap_addr(tx_buffer, dma),
198                                dma_unmap_len(tx_buffer, len),
199                                DMA_TO_DEVICE);
200         }
201         tx_buffer->next_to_watch = NULL;
202         tx_buffer->skb = NULL;
203         dma_unmap_len_set(tx_buffer, len, 0);
204         /* tx_buffer must be completely set up in the transmit path */
205 }
206
207 /**
208  * i40e_clean_tx_ring - Free any Tx buffers
209  * @tx_ring: ring to be cleaned
210  **/
211 void i40e_clean_tx_ring(struct i40e_ring *tx_ring)
212 {
213         unsigned long bi_size;
214         u16 i;
215
216         /* ring already cleared, nothing to do */
217         if (!tx_ring->tx_bi)
218                 return;
219
220         /* Free all the Tx ring sk_buffs */
221         for (i = 0; i < tx_ring->count; i++)
222                 i40e_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_bi[i]);
223
224         bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
225         memset(tx_ring->tx_bi, 0, bi_size);
226
227         /* Zero out the descriptor ring */
228         memset(tx_ring->desc, 0, tx_ring->size);
229
230         tx_ring->next_to_use = 0;
231         tx_ring->next_to_clean = 0;
232
233         if (!tx_ring->netdev)
234                 return;
235
236         /* cleanup Tx queue statistics */
237         netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev,
238                                                   tx_ring->queue_index));
239 }
240
241 /**
242  * i40e_free_tx_resources - Free Tx resources per queue
243  * @tx_ring: Tx descriptor ring for a specific queue
244  *
245  * Free all transmit software resources
246  **/
247 void i40e_free_tx_resources(struct i40e_ring *tx_ring)
248 {
249         i40e_clean_tx_ring(tx_ring);
250         kfree(tx_ring->tx_bi);
251         tx_ring->tx_bi = NULL;
252
253         if (tx_ring->desc) {
254                 dma_free_coherent(tx_ring->dev, tx_ring->size,
255                                   tx_ring->desc, tx_ring->dma);
256                 tx_ring->desc = NULL;
257         }
258 }
259
260 /**
261  * i40e_get_tx_pending - how many tx descriptors not processed
262  * @ring: the ring of descriptors
263  *
264  * Since there is no access to the ring head register
265  * in XL710, we need to use our local copies
266  **/
267 static u32 i40e_get_tx_pending(struct i40e_ring *ring)
268 {
269         u32 ntu = ((ring->next_to_clean <= ring->next_to_use)
270                         ? ring->next_to_use
271                         : ring->next_to_use + ring->count);
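        /* Illustration (hypothetical numbers): with count = 512,
         * next_to_clean = 500 and next_to_use = 10, ntu is biased to
         * 10 + 512 = 522, so 522 - 500 = 22 descriptors are still pending
         * even though next_to_use has wrapped past the end of the ring.
         */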
272         return ntu - ring->next_to_clean;
273 }
274
275 /**
276  * i40e_check_tx_hang - Is there a hang in the Tx queue
277  * @tx_ring: the ring of descriptors
278  **/
279 static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)
280 {
281         u32 tx_pending = i40e_get_tx_pending(tx_ring);
282         bool ret = false;
283
284         clear_check_for_tx_hang(tx_ring);
285
286         /* Check for a hung queue, but be thorough. This verifies
287          * that a transmit has been completed since the previous
288          * check AND there is at least one packet pending. The
289          * ARMED bit is set to indicate a potential hang. The
290          * bit is cleared if a pause frame is received to remove
291          * false hang detection due to PFC or 802.3x frames. By
292          * requiring this to fail twice we avoid races with
293          * PFC clearing the ARMED bit and conditions where we
294          * run the check_tx_hang logic with a transmit completion
295          * pending but without time to complete it yet.
296          */
297         if ((tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) &&
298             tx_pending) {
299                 /* make sure it is true for two checks in a row */
300                 ret = test_and_set_bit(__I40E_HANG_CHECK_ARMED,
301                                        &tx_ring->state);
302         } else {
303                 /* update completed stats and disarm the hang check */
304                 tx_ring->tx_stats.tx_done_old = tx_ring->stats.packets;
305                 clear_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state);
306         }
307
308         return ret;
309 }
310
311 /**
312  * i40e_clean_tx_irq - Reclaim resources after transmit completes
313  * @tx_ring:  tx ring to clean
314  * @budget:   how many cleans we're allowed
315  *
316  * Returns true if there's any budget left (i.e. the clean is finished)
317  **/
318 static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
319 {
320         u16 i = tx_ring->next_to_clean;
321         struct i40e_tx_buffer *tx_buf;
322         struct i40e_tx_desc *tx_desc;
323         unsigned int total_packets = 0;
324         unsigned int total_bytes = 0;
325
326         tx_buf = &tx_ring->tx_bi[i];
327         tx_desc = I40E_TX_DESC(tx_ring, i);
328         i -= tx_ring->count;
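        /* Descriptive note (added): i is biased by -count here so the
         * ring-wrap test inside the loop below can be the cheap "if (!i)"
         * check instead of a compare against tx_ring->count; the bias is
         * removed again with "i += tx_ring->count" once the loop finishes.
         */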
329
330         do {
331                 struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;
332
333                 /* if next_to_watch is not set then there is no work pending */
334                 if (!eop_desc)
335                         break;
336
337                 /* prevent any other reads prior to eop_desc */
338                 read_barrier_depends();
339
340                 /* if the descriptor isn't done, no work yet to do */
341                 if (!(eop_desc->cmd_type_offset_bsz &
342                       cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE)))
343                         break;
344
345                 /* clear next_to_watch to prevent false hangs */
346                 tx_buf->next_to_watch = NULL;
347
348                 /* update the statistics for this packet */
349                 total_bytes += tx_buf->bytecount;
350                 total_packets += tx_buf->gso_segs;
351
352                 /* free the skb */
353                 dev_kfree_skb_any(tx_buf->skb);
354
355                 /* unmap skb header data */
356                 dma_unmap_single(tx_ring->dev,
357                                  dma_unmap_addr(tx_buf, dma),
358                                  dma_unmap_len(tx_buf, len),
359                                  DMA_TO_DEVICE);
360
361                 /* clear tx_buffer data */
362                 tx_buf->skb = NULL;
363                 dma_unmap_len_set(tx_buf, len, 0);
364
365                 /* unmap remaining buffers */
366                 while (tx_desc != eop_desc) {
367
368                         tx_buf++;
369                         tx_desc++;
370                         i++;
371                         if (unlikely(!i)) {
372                                 i -= tx_ring->count;
373                                 tx_buf = tx_ring->tx_bi;
374                                 tx_desc = I40E_TX_DESC(tx_ring, 0);
375                         }
376
377                         /* unmap any remaining paged data */
378                         if (dma_unmap_len(tx_buf, len)) {
379                                 dma_unmap_page(tx_ring->dev,
380                                                dma_unmap_addr(tx_buf, dma),
381                                                dma_unmap_len(tx_buf, len),
382                                                DMA_TO_DEVICE);
383                                 dma_unmap_len_set(tx_buf, len, 0);
384                         }
385                 }
386
387                 /* move us one more past the eop_desc for start of next pkt */
388                 tx_buf++;
389                 tx_desc++;
390                 i++;
391                 if (unlikely(!i)) {
392                         i -= tx_ring->count;
393                         tx_buf = tx_ring->tx_bi;
394                         tx_desc = I40E_TX_DESC(tx_ring, 0);
395                 }
396
397                 /* update budget accounting */
398                 budget--;
399         } while (likely(budget));
400
401         i += tx_ring->count;
402         tx_ring->next_to_clean = i;
403         u64_stats_update_begin(&tx_ring->syncp);
404         tx_ring->stats.bytes += total_bytes;
405         tx_ring->stats.packets += total_packets;
406         u64_stats_update_end(&tx_ring->syncp);
407         tx_ring->q_vector->tx.total_bytes += total_bytes;
408         tx_ring->q_vector->tx.total_packets += total_packets;
409
410         if (check_for_tx_hang(tx_ring) && i40e_check_tx_hang(tx_ring)) {
411                 /* schedule immediate reset if we believe we hung */
412                 dev_info(tx_ring->dev, "Detected Tx Unit Hang\n"
413                          "  VSI                  <%d>\n"
414                          "  Tx Queue             <%d>\n"
415                          "  next_to_use          <%x>\n"
416                          "  next_to_clean        <%x>\n",
417                          tx_ring->vsi->seid,
418                          tx_ring->queue_index,
419                          tx_ring->next_to_use, i);
420                 dev_info(tx_ring->dev, "tx_bi[next_to_clean]\n"
421                          "  time_stamp           <%lx>\n"
422                          "  jiffies              <%lx>\n",
423                          tx_ring->tx_bi[i].time_stamp, jiffies);
424
425                 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
426
427                 dev_info(tx_ring->dev,
428                          "tx hang detected on queue %d, resetting adapter\n",
429                          tx_ring->queue_index);
430
431                 tx_ring->netdev->netdev_ops->ndo_tx_timeout(tx_ring->netdev);
432
433                 /* the adapter is about to reset, no point in enabling stuff */
434                 return true;
435         }
436
437         netdev_tx_completed_queue(netdev_get_tx_queue(tx_ring->netdev,
438                                                       tx_ring->queue_index),
439                                   total_packets, total_bytes);
440
441 #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
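/* Descriptive note (added, assuming DESC_NEEDED is the worst-case descriptor
 * count for a single skb): waking the queue only once at least twice that
 * many descriptors are free avoids re-stopping it again right after a single
 * transmit.
 */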
442         if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
443                      (I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
444                 /* Make sure that anybody stopping the queue after this
445                  * sees the new next_to_clean.
446                  */
447                 smp_mb();
448                 if (__netif_subqueue_stopped(tx_ring->netdev,
449                                              tx_ring->queue_index) &&
450                    !test_bit(__I40E_DOWN, &tx_ring->vsi->state)) {
451                         netif_wake_subqueue(tx_ring->netdev,
452                                             tx_ring->queue_index);
453                         ++tx_ring->tx_stats.restart_queue;
454                 }
455         }
456
457         return budget > 0;
458 }
459
460 /**
461  * i40e_set_new_dynamic_itr - Find new ITR level
462  * @rc: structure containing ring performance data
463  *
464  * Stores a new ITR value based on packets and byte counts during
465  * the last interrupt.  The advantage of per interrupt computation
466  * is faster updates and more accurate ITR for the current traffic
467  * pattern.  Constants in this function were computed based on
468  * theoretical maximum wire speed and thresholds were set based on
469  * testing data as well as attempting to minimize response time
470  * while increasing bulk throughput.
471  **/
472 static void i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
473 {
474         enum i40e_latency_range new_latency_range = rc->latency_range;
475         u32 new_itr = rc->itr;
476         int bytes_per_int;
477
478         if (rc->total_packets == 0 || !rc->itr)
479                 return;
480
481         /* simple throttlerate management
482          *   0-10MB/s   lowest (100000 ints/s)
483          *  10-20MB/s   low    (20000 ints/s)
484          *  20-1249MB/s bulk   (8000 ints/s)
485          */
486         bytes_per_int = rc->total_bytes / rc->itr;
487         switch (new_latency_range) {
488         case I40E_LOWEST_LATENCY:
489                 if (bytes_per_int > 10)
490                         new_latency_range = I40E_LOW_LATENCY;
491                 break;
492         case I40E_LOW_LATENCY:
493                 if (bytes_per_int > 20)
494                         new_latency_range = I40E_BULK_LATENCY;
495                 else if (bytes_per_int <= 10)
496                         new_latency_range = I40E_LOWEST_LATENCY;
497                 break;
498         case I40E_BULK_LATENCY:
499                 if (bytes_per_int <= 20)
500                         rc->latency_range = I40E_LOW_LATENCY;
501                 break;
502         }
503
504         switch (new_latency_range) {
505         case I40E_LOWEST_LATENCY:
506                 new_itr = I40E_ITR_100K;
507                 break;
508         case I40E_LOW_LATENCY:
509                 new_itr = I40E_ITR_20K;
510                 break;
511         case I40E_BULK_LATENCY:
512                 new_itr = I40E_ITR_8K;
513                 break;
514         default:
515                 break;
516         }
517
518         if (new_itr != rc->itr) {
519                 /* do an exponential smoothing */
520                 new_itr = (10 * new_itr * rc->itr) /
521                           ((9 * new_itr) + rc->itr);
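                /* Illustration (hypothetical values): stepping from an old
                 * rc->itr of 100 toward a target new_itr of 10 gives
                 * (10 * 10 * 100) / ((9 * 10) + 100) = 10000 / 190 ~= 52,
                 * i.e. roughly half-way there rather than jumping straight
                 * to the new value.
                 */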
522                 rc->itr = new_itr & I40E_MAX_ITR;
523         }
524
525         rc->total_bytes = 0;
526         rc->total_packets = 0;
527 }
528
529 /**
530  * i40e_update_dynamic_itr - Adjust ITR based on bytes per int
531  * @q_vector: the vector to adjust
532  **/
533 static void i40e_update_dynamic_itr(struct i40e_q_vector *q_vector)
534 {
535         u16 vector = q_vector->vsi->base_vector + q_vector->v_idx;
536         struct i40e_hw *hw = &q_vector->vsi->back->hw;
537         u32 reg_addr;
538         u16 old_itr;
539
540         reg_addr = I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1);
541         old_itr = q_vector->rx.itr;
542         i40e_set_new_dynamic_itr(&q_vector->rx);
543         if (old_itr != q_vector->rx.itr)
544                 wr32(hw, reg_addr, q_vector->rx.itr);
545
546         reg_addr = I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1);
547         old_itr = q_vector->tx.itr;
548         i40e_set_new_dynamic_itr(&q_vector->tx);
549         if (old_itr != q_vector->tx.itr)
550                 wr32(hw, reg_addr, q_vector->tx.itr);
551 }
552
553 /**
554  * i40e_clean_programming_status - clean the programming status descriptor
555  * @rx_ring: the rx ring that has this descriptor
556  * @rx_desc: the rx descriptor written back by HW
557  *
558  * Flow Director should handle FD_FILTER_STATUS to check whether its filter
559  * programming was successful and take action accordingly. FCoE should handle
560  * its context/filter programming/invalidation status and take action.
561  *
562  **/
563 static void i40e_clean_programming_status(struct i40e_ring *rx_ring,
564                                           union i40e_rx_desc *rx_desc)
565 {
566         u64 qw;
567         u8 id;
568
569         qw = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
570         id = (qw & I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK) >>
571                   I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT;
572
573         if (id == I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS)
574                 i40e_fd_handle_status(rx_ring, qw, id);
575 }
576
577 /**
578  * i40e_setup_tx_descriptors - Allocate the Tx descriptors
579  * @tx_ring: the tx ring to set up
580  *
581  * Return 0 on success, negative on error
582  **/
583 int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring)
584 {
585         struct device *dev = tx_ring->dev;
586         int bi_size;
587
588         if (!dev)
589                 return -ENOMEM;
590
591         bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
592         tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL);
593         if (!tx_ring->tx_bi)
594                 goto err;
595
596         /* round up to nearest 4K */
597         tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc);
598         tx_ring->size = ALIGN(tx_ring->size, 4096);
599         tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
600                                            &tx_ring->dma, GFP_KERNEL);
601         if (!tx_ring->desc) {
602                 dev_info(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
603                          tx_ring->size);
604                 goto err;
605         }
606
607         tx_ring->next_to_use = 0;
608         tx_ring->next_to_clean = 0;
609         return 0;
610
611 err:
612         kfree(tx_ring->tx_bi);
613         tx_ring->tx_bi = NULL;
614         return -ENOMEM;
615 }
616
617 /**
618  * i40e_clean_rx_ring - Free Rx buffers
619  * @rx_ring: ring to be cleaned
620  **/
621 void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
622 {
623         struct device *dev = rx_ring->dev;
624         struct i40e_rx_buffer *rx_bi;
625         unsigned long bi_size;
626         u16 i;
627
628         /* ring already cleared, nothing to do */
629         if (!rx_ring->rx_bi)
630                 return;
631
632         /* Free all the Rx ring sk_buffs */
633         for (i = 0; i < rx_ring->count; i++) {
634                 rx_bi = &rx_ring->rx_bi[i];
635                 if (rx_bi->dma) {
636                         dma_unmap_single(dev,
637                                          rx_bi->dma,
638                                          rx_ring->rx_buf_len,
639                                          DMA_FROM_DEVICE);
640                         rx_bi->dma = 0;
641                 }
642                 if (rx_bi->skb) {
643                         dev_kfree_skb(rx_bi->skb);
644                         rx_bi->skb = NULL;
645                 }
646                 if (rx_bi->page) {
647                         if (rx_bi->page_dma) {
648                                 dma_unmap_page(dev,
649                                                rx_bi->page_dma,
650                                                PAGE_SIZE / 2,
651                                                DMA_FROM_DEVICE);
652                                 rx_bi->page_dma = 0;
653                         }
654                         __free_page(rx_bi->page);
655                         rx_bi->page = NULL;
656                         rx_bi->page_offset = 0;
657                 }
658         }
659
660         bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
661         memset(rx_ring->rx_bi, 0, bi_size);
662
663         /* Zero out the descriptor ring */
664         memset(rx_ring->desc, 0, rx_ring->size);
665
666         rx_ring->next_to_clean = 0;
667         rx_ring->next_to_use = 0;
668 }
669
670 /**
671  * i40e_free_rx_resources - Free Rx resources
672  * @rx_ring: ring to clean the resources from
673  *
674  * Free all receive software resources
675  **/
676 void i40e_free_rx_resources(struct i40e_ring *rx_ring)
677 {
678         i40e_clean_rx_ring(rx_ring);
679         kfree(rx_ring->rx_bi);
680         rx_ring->rx_bi = NULL;
681
682         if (rx_ring->desc) {
683                 dma_free_coherent(rx_ring->dev, rx_ring->size,
684                                   rx_ring->desc, rx_ring->dma);
685                 rx_ring->desc = NULL;
686         }
687 }
688
689 /**
690  * i40e_setup_rx_descriptors - Allocate Rx descriptors
691  * @rx_ring: Rx descriptor ring (for a specific queue) to setup
692  *
693  * Returns 0 on success, negative on failure
694  **/
695 int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
696 {
697         struct device *dev = rx_ring->dev;
698         int bi_size;
699
700         bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
701         rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL);
702         if (!rx_ring->rx_bi)
703                 goto err;
704
705         /* Round up to nearest 4K */
706         rx_ring->size = ring_is_16byte_desc_enabled(rx_ring)
707                 ? rx_ring->count * sizeof(union i40e_16byte_rx_desc)
708                 : rx_ring->count * sizeof(union i40e_32byte_rx_desc);
709         rx_ring->size = ALIGN(rx_ring->size, 4096);
710         rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
711                                            &rx_ring->dma, GFP_KERNEL);
712
713         if (!rx_ring->desc) {
714                 dev_info(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
715                          rx_ring->size);
716                 goto err;
717         }
718
719         rx_ring->next_to_clean = 0;
720         rx_ring->next_to_use = 0;
721
722         return 0;
723 err:
724         kfree(rx_ring->rx_bi);
725         rx_ring->rx_bi = NULL;
726         return -ENOMEM;
727 }
728
729 /**
730  * i40e_release_rx_desc - Bump the Rx ring tail
731  * @rx_ring: ring to bump
732  * @val: new next_to_use value to write to the tail register
733  **/
734 static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
735 {
736         rx_ring->next_to_use = val;
737         /* Force memory writes to complete before letting h/w
738          * know there are new descriptors to fetch.  (Only
739          * applicable for weak-ordered memory model archs,
740          * such as IA-64).
741          */
742         wmb();
743         writel(val, rx_ring->tail);
744 }
745
746 /**
747  * i40e_alloc_rx_buffers - Replace used receive buffers; packet split
748  * @rx_ring: ring to place buffers on
749  * @cleaned_count: number of buffers to replace
750  **/
751 void i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
752 {
753         u16 i = rx_ring->next_to_use;
754         union i40e_rx_desc *rx_desc;
755         struct i40e_rx_buffer *bi;
756         struct sk_buff *skb;
757
758         /* do nothing if no valid netdev defined */
759         if (!rx_ring->netdev || !cleaned_count)
760                 return;
761
762         while (cleaned_count--) {
763                 rx_desc = I40E_RX_DESC(rx_ring, i);
764                 bi = &rx_ring->rx_bi[i];
765                 skb = bi->skb;
766
767                 if (!skb) {
768                         skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
769                                                         rx_ring->rx_buf_len);
770                         if (!skb) {
771                                 rx_ring->rx_stats.alloc_rx_buff_failed++;
772                                 goto no_buffers;
773                         }
774                         /* initialize queue mapping */
775                         skb_record_rx_queue(skb, rx_ring->queue_index);
776                         bi->skb = skb;
777                 }
778
779                 if (!bi->dma) {
780                         bi->dma = dma_map_single(rx_ring->dev,
781                                                  skb->data,
782                                                  rx_ring->rx_buf_len,
783                                                  DMA_FROM_DEVICE);
784                         if (dma_mapping_error(rx_ring->dev, bi->dma)) {
785                                 rx_ring->rx_stats.alloc_rx_buff_failed++;
786                                 bi->dma = 0;
787                                 goto no_buffers;
788                         }
789                 }
790
791                 if (ring_is_ps_enabled(rx_ring)) {
792                         if (!bi->page) {
793                                 bi->page = alloc_page(GFP_ATOMIC);
794                                 if (!bi->page) {
795                                         rx_ring->rx_stats.alloc_rx_page_failed++;
796                                         goto no_buffers;
797                                 }
798                         }
799
800                         if (!bi->page_dma) {
801                                 /* use a half page if we're re-using */
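                                /* Descriptive note (added): the XOR flips
                                 * between the two halves of the page, so one
                                 * page backs two successive half-page Rx
                                 * buffers before its re-use is decided in
                                 * the clean-up path.
                                 */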
802                                 bi->page_offset ^= PAGE_SIZE / 2;
803                                 bi->page_dma = dma_map_page(rx_ring->dev,
804                                                             bi->page,
805                                                             bi->page_offset,
806                                                             PAGE_SIZE / 2,
807                                                             DMA_FROM_DEVICE);
808                                 if (dma_mapping_error(rx_ring->dev,
809                                                       bi->page_dma)) {
810                                         rx_ring->rx_stats.alloc_rx_page_failed++;
811                                         bi->page_dma = 0;
812                                         goto no_buffers;
813                                 }
814                         }
815
816                         /* Refresh the desc even if buffer_addrs didn't change
817                          * because each write-back erases this info.
818                          */
819                         rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
820                         rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
821                 } else {
822                         rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
823                         rx_desc->read.hdr_addr = 0;
824                 }
825                 i++;
826                 if (i == rx_ring->count)
827                         i = 0;
828         }
829
830 no_buffers:
831         if (rx_ring->next_to_use != i)
832                 i40e_release_rx_desc(rx_ring, i);
833 }
834
835 /**
836  * i40e_receive_skb - Send a completed packet up the stack
837  * @rx_ring:  rx ring in play
838  * @skb: packet to send up
839  * @vlan_tag: vlan tag for packet
840  **/
841 static void i40e_receive_skb(struct i40e_ring *rx_ring,
842                              struct sk_buff *skb, u16 vlan_tag)
843 {
844         struct i40e_q_vector *q_vector = rx_ring->q_vector;
845         struct i40e_vsi *vsi = rx_ring->vsi;
846         u64 flags = vsi->back->flags;
847
848         if (vlan_tag & VLAN_VID_MASK)
849                 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
850
851         if (flags & I40E_FLAG_IN_NETPOLL)
852                 netif_rx(skb);
853         else
854                 napi_gro_receive(&q_vector->napi, skb);
855 }
856
857 /**
858  * i40e_rx_checksum - Indicate in skb if hw indicated a good cksum
859  * @vsi: the VSI we care about
860  * @skb: skb currently being received and modified
861  * @rx_status: status value of last descriptor in packet
862  * @rx_error: error value of last descriptor in packet
863  * @rx_ptype: ptype value of last descriptor in packet
864  **/
865 static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
866                                     struct sk_buff *skb,
867                                     u32 rx_status,
868                                     u32 rx_error,
869                                     u16 rx_ptype)
870 {
871         bool ipv4_tunnel, ipv6_tunnel;
872         __wsum rx_udp_csum;
873         __sum16 csum;
874         struct iphdr *iph;
875
876         ipv4_tunnel = (rx_ptype > I40E_RX_PTYPE_GRENAT4_MAC_PAY3) &&
877                       (rx_ptype < I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4);
878         ipv6_tunnel = (rx_ptype > I40E_RX_PTYPE_GRENAT6_MAC_PAY3) &&
879                       (rx_ptype < I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4);
880
881         skb->encapsulation = ipv4_tunnel || ipv6_tunnel;
882         skb->ip_summed = CHECKSUM_NONE;
883
884         /* Rx csum enabled and ip headers found? */
885         if (!(vsi->netdev->features & NETIF_F_RXCSUM &&
886               rx_status & (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
887                 return;
888
889         /* IP or L4 or outermost IP checksum error */
890         if (rx_error & ((1 << I40E_RX_DESC_ERROR_IPE_SHIFT) |
891                         (1 << I40E_RX_DESC_ERROR_L4E_SHIFT) |
892                         (1 << I40E_RX_DESC_ERROR_EIPE_SHIFT))) {
893                 vsi->back->hw_csum_rx_error++;
894                 return;
895         }
896
897         if (ipv4_tunnel &&
898             !(rx_status & (1 << I40E_RX_DESC_STATUS_UDP_0_SHIFT))) {
899                 /* If VXLAN traffic has an outer UDPv4 checksum we need to check
900                  * it in the driver, hardware does not do it for us.
901                  * Since L3L4P bit was set we assume a valid IHL value (>=5)
902                  * so the total length of IPv4 header is IHL*4 bytes
903                  */
904                 skb->transport_header = skb->mac_header +
905                                         sizeof(struct ethhdr) +
906                                         (ip_hdr(skb)->ihl * 4);
907
908                 /* Add 4 bytes for VLAN tagged packets */
909                 skb->transport_header += (skb->protocol == htons(ETH_P_8021Q) ||
910                                           skb->protocol == htons(ETH_P_8021AD))
911                                           ? VLAN_HLEN : 0;
912
913                 rx_udp_csum = udp_csum(skb);
914                 iph = ip_hdr(skb);
915                 csum = csum_tcpudp_magic(
916                                 iph->saddr, iph->daddr,
917                                 (skb->len - skb_transport_offset(skb)),
918                                 IPPROTO_UDP, rx_udp_csum);
919
920                 if (udp_hdr(skb)->check != csum) {
921                         vsi->back->hw_csum_rx_error++;
922                         return;
923                 }
924         }
925
926         skb->ip_summed = CHECKSUM_UNNECESSARY;
927 }
928
929 /**
930  * i40e_rx_hash - returns the hash value from the Rx descriptor
931  * @ring: descriptor ring
932  * @rx_desc: specific descriptor
933  **/
934 static inline u32 i40e_rx_hash(struct i40e_ring *ring,
935                                union i40e_rx_desc *rx_desc)
936 {
937         const __le64 rss_mask =
938                 cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
939                             I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);
940
941         if ((ring->netdev->features & NETIF_F_RXHASH) &&
942             (rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask)
943                 return le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
944         else
945                 return 0;
946 }
947
948 /**
949  * i40e_clean_rx_irq - Reclaim resources after receive completes
950  * @rx_ring:  rx ring to clean
951  * @budget:   how many cleans we're allowed
952  *
953  * Returns true if there's any budget left (i.e. the clean is finished)
954  **/
955 static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
956 {
957         unsigned int total_rx_bytes = 0, total_rx_packets = 0;
958         u16 rx_packet_len, rx_header_len, rx_sph, rx_hbo;
959         u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
960         const int current_node = numa_node_id();
961         struct i40e_vsi *vsi = rx_ring->vsi;
962         u16 i = rx_ring->next_to_clean;
963         union i40e_rx_desc *rx_desc;
964         u32 rx_error, rx_status;
965         u64 qword;
966         u16 rx_ptype;
967
968         rx_desc = I40E_RX_DESC(rx_ring, i);
969         qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
970         rx_status = (qword & I40E_RXD_QW1_STATUS_MASK)
971                                 >> I40E_RXD_QW1_STATUS_SHIFT;
972
973         while (rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)) {
974                 union i40e_rx_desc *next_rxd;
975                 struct i40e_rx_buffer *rx_bi;
976                 struct sk_buff *skb;
977                 u16 vlan_tag;
978                 if (i40e_rx_is_programming_status(qword)) {
979                         i40e_clean_programming_status(rx_ring, rx_desc);
980                         I40E_RX_NEXT_DESC_PREFETCH(rx_ring, i, next_rxd);
981                         goto next_desc;
982                 }
983                 rx_bi = &rx_ring->rx_bi[i];
984                 skb = rx_bi->skb;
985                 prefetch(skb->data);
986
987                 rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK)
988                                               >> I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
989                 rx_header_len = (qword & I40E_RXD_QW1_LENGTH_HBUF_MASK)
990                                               >> I40E_RXD_QW1_LENGTH_HBUF_SHIFT;
991                 rx_sph = (qword & I40E_RXD_QW1_LENGTH_SPH_MASK)
992                                               >> I40E_RXD_QW1_LENGTH_SPH_SHIFT;
993
994                 rx_error = (qword & I40E_RXD_QW1_ERROR_MASK)
995                                               >> I40E_RXD_QW1_ERROR_SHIFT;
996                 rx_hbo = rx_error & (1 << I40E_RX_DESC_ERROR_HBO_SHIFT);
997                 rx_error &= ~(1 << I40E_RX_DESC_ERROR_HBO_SHIFT);
998
999                 rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
1000                            I40E_RXD_QW1_PTYPE_SHIFT;
1001                 rx_bi->skb = NULL;
1002
1003                 /* This memory barrier is needed to keep us from reading
1004                  * any other fields out of the rx_desc until we know the
1005                  * STATUS_DD bit is set
1006                  */
1007                 rmb();
1008
1009                 /* Get the header and possibly the whole packet
1010                  * If this is an skb from a previous receive, dma will be 0
1011                  */
1012                 if (rx_bi->dma) {
1013                         u16 len;
1014
1015                         if (rx_hbo)
1016                                 len = I40E_RX_HDR_SIZE;
1017                         else if (rx_sph)
1018                                 len = rx_header_len;
1019                         else if (rx_packet_len)
1020                                 len = rx_packet_len;   /* 1buf/no split found */
1021                         else
1022                                 len = rx_header_len;   /* split always mode */
1023
1024                         skb_put(skb, len);
1025                         dma_unmap_single(rx_ring->dev,
1026                                          rx_bi->dma,
1027                                          rx_ring->rx_buf_len,
1028                                          DMA_FROM_DEVICE);
1029                         rx_bi->dma = 0;
1030                 }
1031
1032                 /* Get the rest of the data if this was a header split */
1033                 if (ring_is_ps_enabled(rx_ring) && rx_packet_len) {
1034
1035                         skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
1036                                            rx_bi->page,
1037                                            rx_bi->page_offset,
1038                                            rx_packet_len);
1039
1040                         skb->len += rx_packet_len;
1041                         skb->data_len += rx_packet_len;
1042                         skb->truesize += rx_packet_len;
1043
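                        /* Descriptive note (added): if the page is still
                         * exclusively ours (refcount of 1) and is local to
                         * this NUMA node, take an extra reference so it can
                         * back a future half-page Rx buffer; otherwise drop
                         * our pointer and let the page go when the skb is
                         * consumed.
                         */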
1044                         if ((page_count(rx_bi->page) == 1) &&
1045                             (page_to_nid(rx_bi->page) == current_node))
1046                                 get_page(rx_bi->page);
1047                         else
1048                                 rx_bi->page = NULL;
1049
1050                         dma_unmap_page(rx_ring->dev,
1051                                        rx_bi->page_dma,
1052                                        PAGE_SIZE / 2,
1053                                        DMA_FROM_DEVICE);
1054                         rx_bi->page_dma = 0;
1055                 }
1056                 I40E_RX_NEXT_DESC_PREFETCH(rx_ring, i, next_rxd);
1057
1058                 if (unlikely(
1059                     !(rx_status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
1060                         struct i40e_rx_buffer *next_buffer;
1061
1062                         next_buffer = &rx_ring->rx_bi[i];
1063
1064                         if (ring_is_ps_enabled(rx_ring)) {
1065                                 rx_bi->skb = next_buffer->skb;
1066                                 rx_bi->dma = next_buffer->dma;
1067                                 next_buffer->skb = skb;
1068                                 next_buffer->dma = 0;
1069                         }
1070                         rx_ring->rx_stats.non_eop_descs++;
1071                         goto next_desc;
1072                 }
1073
1074                 /* ERR_MASK will only have valid bits if EOP set */
1075                 if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
1076                         dev_kfree_skb_any(skb);
1077                         goto next_desc;
1078                 }
1079
1080                 skb->rxhash = i40e_rx_hash(rx_ring, rx_desc);
1081                 /* probably a little skewed due to removing CRC */
1082                 total_rx_bytes += skb->len;
1083                 total_rx_packets++;
1084
1085                 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
1086
1087                 i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);
1088
1089                 vlan_tag = rx_status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
1090                          ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
1091                          : 0;
1092                 i40e_receive_skb(rx_ring, skb, vlan_tag);
1093
1094                 rx_ring->netdev->last_rx = jiffies;
1095                 budget--;
1096 next_desc:
1097                 rx_desc->wb.qword1.status_error_len = 0;
1098                 if (!budget)
1099                         break;
1100
1101                 cleaned_count++;
1102                 /* return some buffers to hardware, one at a time is too slow */
1103                 if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
1104                         i40e_alloc_rx_buffers(rx_ring, cleaned_count);
1105                         cleaned_count = 0;
1106                 }
1107
1108                 /* use prefetched values */
1109                 rx_desc = next_rxd;
1110                 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
1111                 rx_status = (qword & I40E_RXD_QW1_STATUS_MASK)
1112                                                 >> I40E_RXD_QW1_STATUS_SHIFT;
1113         }
1114
1115         rx_ring->next_to_clean = i;
1116         u64_stats_update_begin(&rx_ring->syncp);
1117         rx_ring->stats.packets += total_rx_packets;
1118         rx_ring->stats.bytes += total_rx_bytes;
1119         u64_stats_update_end(&rx_ring->syncp);
1120         rx_ring->q_vector->rx.total_packets += total_rx_packets;
1121         rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
1122
1123         if (cleaned_count)
1124                 i40e_alloc_rx_buffers(rx_ring, cleaned_count);
1125
1126         return budget > 0;
1127 }
1128
1129 /**
1130  * i40e_napi_poll - NAPI polling Rx/Tx cleanup routine
1131  * @napi: napi struct with our devices info in it
1132  * @budget: amount of work driver is allowed to do this pass, in packets
1133  *
1134  * This function will clean all queues associated with a q_vector.
1135  *
1136  * Returns 0 when the work is complete, otherwise returns the full budget
1137  **/
1138 int i40e_napi_poll(struct napi_struct *napi, int budget)
1139 {
1140         struct i40e_q_vector *q_vector =
1141                                container_of(napi, struct i40e_q_vector, napi);
1142         struct i40e_vsi *vsi = q_vector->vsi;
1143         struct i40e_ring *ring;
1144         bool clean_complete = true;
1145         int budget_per_ring;
1146
1147         if (test_bit(__I40E_DOWN, &vsi->state)) {
1148                 napi_complete(napi);
1149                 return 0;
1150         }
1151
1152         /* Since the actual Tx work is minimal, we can give the Tx a larger
1153          * budget and be more aggressive about cleaning up the Tx descriptors.
1154          */
1155         i40e_for_each_ring(ring, q_vector->tx)
1156                 clean_complete &= i40e_clean_tx_irq(ring, vsi->work_limit);
1157
1158         /* We attempt to distribute budget to each Rx queue fairly, but don't
1159          * allow the budget to go below 1 because that would exit polling early.
1160          */
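        /* Illustration (hypothetical numbers): with a NAPI budget of 64 and
         * two ring pairs on this vector, each Rx ring may clean at most
         * 64 / 2 = 32 packets in this poll.
         */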
1161         budget_per_ring = max(budget/q_vector->num_ringpairs, 1);
1162
1163         i40e_for_each_ring(ring, q_vector->rx)
1164                 clean_complete &= i40e_clean_rx_irq(ring, budget_per_ring);
1165
1166         /* If work not completed, return budget and polling will return */
1167         if (!clean_complete)
1168                 return budget;
1169
1170         /* Work is done so exit the polling mode and re-enable the interrupt */
1171         napi_complete(napi);
1172         if (ITR_IS_DYNAMIC(vsi->rx_itr_setting) ||
1173             ITR_IS_DYNAMIC(vsi->tx_itr_setting))
1174                 i40e_update_dynamic_itr(q_vector);
1175
1176         if (!test_bit(__I40E_DOWN, &vsi->state)) {
1177                 if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
1178                         i40e_irq_dynamic_enable(vsi,
1179                                         q_vector->v_idx + vsi->base_vector);
1180                 } else {
1181                         struct i40e_hw *hw = &vsi->back->hw;
1182                         /* We re-enable the queue 0 cause, but
1183                          * don't worry about dynamic_enable
1184                          * because we left it on for the other
1185                          * possible interrupts during napi
1186                          */
1187                         u32 qval = rd32(hw, I40E_QINT_RQCTL(0));
1188                         qval |= I40E_QINT_RQCTL_CAUSE_ENA_MASK;
1189                         wr32(hw, I40E_QINT_RQCTL(0), qval);
1190
1191                         qval = rd32(hw, I40E_QINT_TQCTL(0));
1192                         qval |= I40E_QINT_TQCTL_CAUSE_ENA_MASK;
1193                         wr32(hw, I40E_QINT_TQCTL(0), qval);
1194
1195                         i40e_irq_dynamic_enable_icr0(vsi->back);
1196                 }
1197         }
1198
1199         return 0;
1200 }
1201
1202 /**
1203  * i40e_atr - Add a Flow Director ATR filter
1204  * @tx_ring:  ring to add programming descriptor to
1205  * @skb:      send buffer
1206  * @flags:    send flags
1207  * @protocol: wire protocol
1208  **/
1209 static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
1210                      u32 flags, __be16 protocol)
1211 {
1212         struct i40e_filter_program_desc *fdir_desc;
1213         struct i40e_pf *pf = tx_ring->vsi->back;
1214         union {
1215                 unsigned char *network;
1216                 struct iphdr *ipv4;
1217                 struct ipv6hdr *ipv6;
1218         } hdr;
1219         struct tcphdr *th;
1220         unsigned int hlen;
1221         u32 flex_ptype, dtype_cmd;
1222         u16 i;
1223
1224         /* make sure ATR is enabled */
1225         if (!(pf->flags & I40E_FLAG_FDIR_ATR_ENABLED))
1226                 return;
1227
1228         /* if sampling is disabled do nothing */
1229         if (!tx_ring->atr_sample_rate)
1230                 return;
1231
1232         tx_ring->atr_count++;
1233
1234         /* snag network header to get L4 type and address */
1235         hdr.network = skb_network_header(skb);
1236
1237         /* Currently only IPv4/IPv6 with TCP is supported */
1238         if (protocol == htons(ETH_P_IP)) {
1239                 if (hdr.ipv4->protocol != IPPROTO_TCP)
1240                         return;
1241
1242                 /* access ihl as a u8 to avoid unaligned access on ia64 */
1243                 hlen = (hdr.network[0] & 0x0F) << 2;
1244         } else if (protocol == htons(ETH_P_IPV6)) {
1245                 if (hdr.ipv6->nexthdr != IPPROTO_TCP)
1246                         return;
1247
1248                 hlen = sizeof(struct ipv6hdr);
1249         } else {
1250                 return;
1251         }
1252
1253         th = (struct tcphdr *)(hdr.network + hlen);
1254
1255         /* sample on all syn/fin packets or once every atr sample rate */
1256         if (!th->fin && !th->syn && (tx_ring->atr_count < tx_ring->atr_sample_rate))
1257                 return;
1258
1259         tx_ring->atr_count = 0;
1260
1261         /* grab the next descriptor */
1262         i = tx_ring->next_to_use;
1263         fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
1264
1265         i++;
1266         tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
1267
1268         flex_ptype = (tx_ring->queue_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
1269                       I40E_TXD_FLTR_QW0_QINDEX_MASK;
1270         flex_ptype |= (protocol == htons(ETH_P_IP)) ?
1271                       (I40E_FILTER_PCTYPE_NONF_IPV4_TCP <<
1272                        I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) :
1273                       (I40E_FILTER_PCTYPE_NONF_IPV6_TCP <<
1274                        I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);
1275
1276         flex_ptype |= tx_ring->vsi->id << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;
1277
1278         dtype_cmd = I40E_TX_DESC_DTYPE_FILTER_PROG;
1279
1280         dtype_cmd |= th->fin ?
1281                      (I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
1282                       I40E_TXD_FLTR_QW1_PCMD_SHIFT) :
1283                      (I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
1284                       I40E_TXD_FLTR_QW1_PCMD_SHIFT);
1285
1286         dtype_cmd |= I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX <<
1287                      I40E_TXD_FLTR_QW1_DEST_SHIFT;
1288
1289         dtype_cmd |= I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID <<
1290                      I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT;
1291
1292         fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
1293         fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
1294 }
1295
1296 /**
1297  * i40e_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
1298  * @skb:     send buffer
1299  * @tx_ring: ring to send buffer on
1300  * @flags:   the tx flags to be set
1301  *
1302  * Checks the skb and sets up the corresponding generic transmit flags
1303  * related to VLAN tagging for the HW, such as VLAN, DCB, etc.
1304  *
1305  * Returns an error code to indicate the frame should be dropped upon error,
1306  * otherwise returns 0 to indicate the flags have been set properly.
1307  **/
1308 static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
1309                                       struct i40e_ring *tx_ring,
1310                                       u32 *flags)
1311 {
1312         __be16 protocol = skb->protocol;
1313         u32  tx_flags = 0;
1314
1315         /* if we have a HW VLAN tag being added, default to the HW one */
1316         if (vlan_tx_tag_present(skb)) {
1317                 tx_flags |= vlan_tx_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT;
1318                 tx_flags |= I40E_TX_FLAGS_HW_VLAN;
1319         /* else if it is a SW VLAN, check the next protocol and store the tag */
1320         } else if (protocol == htons(ETH_P_8021Q)) {
1321                 struct vlan_hdr *vhdr, _vhdr;
1322                 vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
1323                 if (!vhdr)
1324                         return -EINVAL;
1325
1326                 protocol = vhdr->h_vlan_encapsulated_proto;
1327                 tx_flags |= ntohs(vhdr->h_vlan_TCI) << I40E_TX_FLAGS_VLAN_SHIFT;
1328                 tx_flags |= I40E_TX_FLAGS_SW_VLAN;
1329         }
1330
1331         /* Insert 802.1p priority into VLAN header */
1332         if ((tx_ring->vsi->back->flags & I40E_FLAG_DCB_ENABLED) &&
1333             ((tx_flags & (I40E_TX_FLAGS_HW_VLAN | I40E_TX_FLAGS_SW_VLAN)) ||
1334              (skb->priority != TC_PRIO_CONTROL))) {
1335                 tx_flags &= ~I40E_TX_FLAGS_VLAN_PRIO_MASK;
1336                 tx_flags |= (skb->priority & 0x7) <<
1337                                 I40E_TX_FLAGS_VLAN_PRIO_SHIFT;
1338                 if (tx_flags & I40E_TX_FLAGS_SW_VLAN) {
1339                         struct vlan_ethhdr *vhdr;
1340                         if (skb_header_cloned(skb) &&
1341                             pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
1342                                 return -ENOMEM;
1343                         vhdr = (struct vlan_ethhdr *)skb->data;
1344                         vhdr->h_vlan_TCI = htons(tx_flags >>
1345                                                  I40E_TX_FLAGS_VLAN_SHIFT);
1346                 } else {
1347                         tx_flags |= I40E_TX_FLAGS_HW_VLAN;
1348                 }
1349         }
1350         *flags = tx_flags;
1351         return 0;
1352 }
1353
1354 /**
1355  * i40e_tso - set up the tso context descriptor
1356  * @tx_ring:  ptr to the ring to send
1357  * @skb:      ptr to the skb we're sending
1358  * @tx_flags: the collected send information
1359  * @protocol: the send protocol
1360  * @hdr_len:  ptr to the size of the packet header
      * @cd_type_cmd_tso_mss: ptr to the Quad Word 1 context bits to set
1361  * @cd_tunneling: ptr to context descriptor bits
1362  *
1363  * Returns 0 if no TSO is needed, 1 if TSO was set up, or a negative error code
1364  **/
1365 static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
1366                     u32 tx_flags, __be16 protocol, u8 *hdr_len,
1367                     u64 *cd_type_cmd_tso_mss, u32 *cd_tunneling)
1368 {
1369         u32 cd_cmd, cd_tso_len, cd_mss;
1370         struct tcphdr *tcph;
1371         struct iphdr *iph;
1372         u32 l4len;
1373         int err;
1374         struct ipv6hdr *ipv6h;
1375
1376         if (!skb_is_gso(skb))
1377                 return 0;
1378
1379         if (skb_header_cloned(skb)) {
1380                 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
1381                 if (err)
1382                         return err;
1383         }
1384
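             /* For TSO the hardware rewrites the IP and TCP headers on every
              * segment: zero the IP length field (and the IPv4 checksum) and
              * seed the TCP checksum with the pseudo-header checksum so the
              * hardware can finish it per segment.
              */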
1385         if (protocol == htons(ETH_P_IP)) {
1386                 iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
1387                 tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
1388                 iph->tot_len = 0;
1389                 iph->check = 0;
1390                 tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
1391                                                  0, IPPROTO_TCP, 0);
1392         } else if (skb_is_gso_v6(skb)) {
1394                 ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb)
1395                                            : ipv6_hdr(skb);
1396                 tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
1397                 ipv6h->payload_len = 0;
1398                 tcph->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
1399                                                0, IPPROTO_TCP, 0);
1400         }
1401
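             /* hdr_len covers the MAC, IP and TCP headers (the inner ones for
              * encapsulated frames); only the remaining payload is reported as
              * the TSO length below.
              */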
1402         l4len = skb->encapsulation ? inner_tcp_hdrlen(skb) : tcp_hdrlen(skb);
1403         *hdr_len = (skb->encapsulation
1404                     ? (skb_inner_transport_header(skb) - skb->data)
1405                     : skb_transport_offset(skb)) + l4len;
1406
1407         /* find the field values */
1408         cd_cmd = I40E_TX_CTX_DESC_TSO;
1409         cd_tso_len = skb->len - *hdr_len;
1410         cd_mss = skb_shinfo(skb)->gso_size;
1411         *cd_type_cmd_tso_mss |= ((u64)cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT)
1412                              | ((u64)cd_tso_len
1413                                 << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT)
1414                              | ((u64)cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT);
1415         return 1;
1416 }
1417
1418 /**
1419  * i40e_tx_enable_csum - Enable Tx checksum offloads
1420  * @skb: send buffer
1421  * @tx_flags: Tx flags currently set
1422  * @td_cmd: Tx descriptor command bits to set
1423  * @td_offset: Tx descriptor header offsets to set
      * @tx_ring: Tx descriptor ring
1424  * @cd_tunneling: ptr to context desc bits
1425  **/
1426 static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
1427                                 u32 *td_cmd, u32 *td_offset,
1428                                 struct i40e_ring *tx_ring,
1429                                 u32 *cd_tunneling)
1430 {
1431         struct ipv6hdr *this_ipv6_hdr;
1432         unsigned int this_tcp_hdrlen;
1433         struct iphdr *this_ip_hdr;
1434         u32 network_hdr_len;
1435         u8 l4_hdr = 0;
1436
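             /* For tunneled frames the outer header offloads are programmed via
              * the context descriptor (cd_tunneling) and the checksum fields
              * below describe the inner headers; otherwise the outer headers
              * are used directly.
              */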
1437         if (skb->encapsulation) {
1438                 network_hdr_len = skb_inner_network_header_len(skb);
1439                 this_ip_hdr = inner_ip_hdr(skb);
1440                 this_ipv6_hdr = inner_ipv6_hdr(skb);
1441                 this_tcp_hdrlen = inner_tcp_hdrlen(skb);
1442
1443                 if (tx_flags & I40E_TX_FLAGS_IPV4) {
1444
1445                         if (tx_flags & I40E_TX_FLAGS_TSO) {
1446                                 *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4;
1447                                 ip_hdr(skb)->check = 0;
1448                         } else {
1449                                 *cd_tunneling |=
1450                                          I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
1451                         }
1452         } else if (tx_flags & I40E_TX_FLAGS_IPV6) {
1453                 /* the outer IPv6 header has no checksum field, so there is
1454                  * nothing to zero here (zeroing ip_hdr(skb)->check would
1455                  * scribble on the IPv6 header); just tell the hardware the
1456                  * outer header type is IPv6
1457                  */
1458                 *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
1459         }
1461
1462                 /* Now set the ctx descriptor fields */
1463                 *cd_tunneling |= (skb_network_header_len(skb) >> 2) <<
1464                                         I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT |
1465                                    I40E_TXD_CTX_UDP_TUNNELING            |
1466                                    ((skb_inner_network_offset(skb) -
1467                                         skb_transport_offset(skb)) >> 1) <<
1468                                    I40E_TXD_CTX_QW0_NATLEN_SHIFT;
1469
1470         } else {
1471                 network_hdr_len = skb_network_header_len(skb);
1472                 this_ip_hdr = ip_hdr(skb);
1473                 this_ipv6_hdr = ipv6_hdr(skb);
1474                 this_tcp_hdrlen = tcp_hdrlen(skb);
1475         }
1476
1477         /* Enable IP checksum offloads */
1478         if (tx_flags & I40E_TX_FLAGS_IPV4) {
1479                 l4_hdr = this_ip_hdr->protocol;
1480                 /* the stack computes the IP header already, the only time we
1481                  * need the hardware to recompute it is in the case of TSO.
1482                  */
1483                 if (tx_flags & I40E_TX_FLAGS_TSO) {
1484                         *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4_CSUM;
1485                         this_ip_hdr->check = 0;
1486                 } else {
1487                         *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4;
1488                 }
1489                 /* Now set the td_offset for IP header length */
1490                 *td_offset = (network_hdr_len >> 2) <<
1491                               I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
1492         } else if (tx_flags & I40E_TX_FLAGS_IPV6) {
1493                 l4_hdr = this_ipv6_hdr->nexthdr;
1494                 *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
1495                 /* Now set the td_offset for IP header length */
1496                 *td_offset = (network_hdr_len >> 2) <<
1497                               I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
1498         }
1499         /* MACLEN is in 2-byte words, IPLEN and L4LEN are in 4-byte dwords */
1500         *td_offset |= (skb_network_offset(skb) >> 1) <<
1501                        I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
1502
1503         /* Enable L4 checksum offloads */
1504         switch (l4_hdr) {
1505         case IPPROTO_TCP:
1506                 /* enable checksum offloads */
1507                 *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP;
1508                 *td_offset |= (this_tcp_hdrlen >> 2) <<
1509                                I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
1510                 break;
1511         case IPPROTO_SCTP:
1512                 /* enable SCTP checksum offload */
1513                 *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP;
1514                 *td_offset |= (sizeof(struct sctphdr) >> 2) <<
1515                                I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
1516                 break;
1517         case IPPROTO_UDP:
1518                 /* enable UDP checksum offload */
1519                 *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP;
1520                 *td_offset |= (sizeof(struct udphdr) >> 2) <<
1521                                I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
1522                 break;
1523         default:
1524                 break;
1525         }
1526 }
1527
1528 /**
1529  * i40e_create_tx_ctx - Build the Tx context descriptor
1530  * @tx_ring:  ring to create the descriptor on
1531  * @cd_type_cmd_tso_mss: Quad Word 1
1532  * @cd_tunneling: Quad Word 0 - bits 0-31
1533  * @cd_l2tag2: Quad Word 0 - bits 32-63
1534  **/
1535 static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
1536                                const u64 cd_type_cmd_tso_mss,
1537                                const u32 cd_tunneling, const u32 cd_l2tag2)
1538 {
1539         struct i40e_tx_context_desc *context_desc;
1540         int i = tx_ring->next_to_use;
1541
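             /* a context descriptor is only needed if at least one of its
              * fields is actually in use
              */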
1542         if (!cd_type_cmd_tso_mss && !cd_tunneling && !cd_l2tag2)
1543                 return;
1544
1545         /* grab the next descriptor */
1546         context_desc = I40E_TX_CTXTDESC(tx_ring, i);
1547
1548         i++;
1549         tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
1550
1551         /* cpu_to_le32 and assign to struct fields */
1552         context_desc->tunneling_params = cpu_to_le32(cd_tunneling);
1553         context_desc->l2tag2 = cpu_to_le16(cd_l2tag2);
1554         context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss);
1555 }
1556
1557 /**
1558  * i40e_tx_map - Build the Tx descriptor
1559  * @tx_ring:  ring to send buffer on
1560  * @skb:      send buffer
1561  * @first:    first buffer info buffer to use
1562  * @tx_flags: collected send information
1563  * @hdr_len:  size of the packet header
1564  * @td_cmd:   the command field in the descriptor
1565  * @td_offset: offset for checksum or crc
1566  **/
1567 static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
1568                         struct i40e_tx_buffer *first, u32 tx_flags,
1569                         const u8 hdr_len, u32 td_cmd, u32 td_offset)
1570 {
1571         unsigned int data_len = skb->data_len;
1572         unsigned int size = skb_headlen(skb);
1573         struct skb_frag_struct *frag;
1574         struct i40e_tx_buffer *tx_bi;
1575         struct i40e_tx_desc *tx_desc;
1576         u16 i = tx_ring->next_to_use;
1577         u32 td_tag = 0;
1578         dma_addr_t dma;
1579         u16 gso_segs;
1580
1581         if (tx_flags & I40E_TX_FLAGS_HW_VLAN) {
1582                 td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
1583                 td_tag = (tx_flags & I40E_TX_FLAGS_VLAN_MASK) >>
1584                          I40E_TX_FLAGS_VLAN_SHIFT;
1585         }
1586
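             /* a TSO/FSO frame is accounted as the number of wire segments it
              * will produce; anything else counts as a single packet
              */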
1587         if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO))
1588                 gso_segs = skb_shinfo(skb)->gso_segs;
1589         else
1590                 gso_segs = 1;
1591
1592         /* bytecount: the payload plus one copy of the headers per segment */
1593         first->bytecount = skb->len - hdr_len + (gso_segs * hdr_len);
1594         first->gso_segs = gso_segs;
1595         first->skb = skb;
1596         first->tx_flags = tx_flags;
1597
1598         dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
1599
1600         tx_desc = I40E_TX_DESC(tx_ring, i);
1601         tx_bi = first;
1602
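             /* walk the linear data and then each page fragment, splitting any
              * chunk larger than I40E_MAX_DATA_PER_TXD across multiple
              * descriptors
              */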
1603         for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
1604                 if (dma_mapping_error(tx_ring->dev, dma))
1605                         goto dma_error;
1606
1607                 /* record length, and DMA address */
1608                 dma_unmap_len_set(tx_bi, len, size);
1609                 dma_unmap_addr_set(tx_bi, dma, dma);
1610
1611                 tx_desc->buffer_addr = cpu_to_le64(dma);
1612
1613                 while (unlikely(size > I40E_MAX_DATA_PER_TXD)) {
1614                         tx_desc->cmd_type_offset_bsz =
1615                                 build_ctob(td_cmd, td_offset,
1616                                            I40E_MAX_DATA_PER_TXD, td_tag);
1617
1618                         tx_desc++;
1619                         i++;
1620                         if (i == tx_ring->count) {
1621                                 tx_desc = I40E_TX_DESC(tx_ring, 0);
1622                                 i = 0;
1623                         }
1624
1625                         dma += I40E_MAX_DATA_PER_TXD;
1626                         size -= I40E_MAX_DATA_PER_TXD;
1627
1628                         tx_desc->buffer_addr = cpu_to_le64(dma);
1629                 }
1630
1631                 if (likely(!data_len))
1632                         break;
1633
1634                 tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset,
1635                                                           size, td_tag);
1636
1637                 tx_desc++;
1638                 i++;
1639                 if (i == tx_ring->count) {
1640                         tx_desc = I40E_TX_DESC(tx_ring, 0);
1641                         i = 0;
1642                 }
1643
1644                 size = skb_frag_size(frag);
1645                 data_len -= size;
1646
1647                 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
1648                                        DMA_TO_DEVICE);
1649
1650                 tx_bi = &tx_ring->tx_bi[i];
1651         }
1652
1653         tx_desc->cmd_type_offset_bsz =
1654                 build_ctob(td_cmd, td_offset, size, td_tag) |
1655                 cpu_to_le64((u64)I40E_TXD_CMD << I40E_TXD_QW1_CMD_SHIFT);
1656
1657         netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev,
1658                                                  tx_ring->queue_index),
1659                              first->bytecount);
1660
1661         /* set the timestamp */
1662         first->time_stamp = jiffies;
1663
1664         /* Force memory writes to complete before letting h/w
1665          * know there are new descriptors to fetch.  (Only
1666          * applicable for weak-ordered memory model archs,
1667          * such as IA-64).
1668          */
1669         wmb();
1670
1671         /* set next_to_watch value indicating a packet is present */
1672         first->next_to_watch = tx_desc;
1673
1674         i++;
1675         if (i == tx_ring->count)
1676                 i = 0;
1677
1678         tx_ring->next_to_use = i;
1679
1680         /* notify HW of packet */
1681         writel(i, tx_ring->tail);
1682
1683         return;
1684
1685 dma_error:
1686         dev_info(tx_ring->dev, "TX DMA map failed\n");
1687
1688         /* clear dma mappings for failed tx_bi map */
1689         for (;;) {
1690                 tx_bi = &tx_ring->tx_bi[i];
1691                 i40e_unmap_and_free_tx_resource(tx_ring, tx_bi);
1692                 if (tx_bi == first)
1693                         break;
1694                 if (i == 0)
1695                         i = tx_ring->count;
1696                 i--;
1697         }
1698
1699         tx_ring->next_to_use = i;
1700 }
1701
1702 /**
1703  * __i40e_maybe_stop_tx - 2nd level check for tx stop conditions
1704  * @tx_ring: the ring to be checked
1705  * @size:    the number of descriptors we want to make sure are available
1706  *
1707  * Returns -EBUSY if a stop is needed, else 0
1708  **/
1709 static inline int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
1710 {
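             /* stop the queue first, then re-check under a memory barrier so a
              * concurrent Tx clean-up cannot free descriptors between the
              * check and the stop without this being noticed
              */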
1711         netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
1712         smp_mb();
1713
1714         /* Check again in case another CPU has just made room available. */
1715         if (likely(I40E_DESC_UNUSED(tx_ring) < size))
1716                 return -EBUSY;
1717
1718         /* A reprieve! - use start_queue because it doesn't call schedule */
1719         netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
1720         ++tx_ring->tx_stats.restart_queue;
1721         return 0;
1722 }
1723
1724 /**
1725  * i40e_maybe_stop_tx - 1st level check for tx stop conditions
1726  * @tx_ring: the ring to be checked
1727  * @size:    the number of descriptors we want to make sure are available
1728  *
1729  * Returns 0 if stop is not needed
1730  **/
1731 static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
1732 {
1733         if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
1734                 return 0;
1735         return __i40e_maybe_stop_tx(tx_ring, size);
1736 }
1737
1738 /**
1739  * i40e_xmit_descriptor_count - calculate number of tx descriptors needed
1740  * @skb:     send buffer
1741  * @tx_ring: ring to send buffer on
1742  *
1743  * Returns the number of data descriptors needed for this skb. Returns 0 to
1744  * indicate there are not enough descriptors available in this ring, since we
1745  * need at least one descriptor.
1746  **/
1747 static int i40e_xmit_descriptor_count(struct sk_buff *skb,
1748                                       struct i40e_ring *tx_ring)
1749 {
1750 #if PAGE_SIZE > I40E_MAX_DATA_PER_TXD
1751         unsigned int f;
1752 #endif
1753         int count = 0;
1754
1755         /* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD,
1756          *       + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD,
1757          *       + 2 desc gap to keep tail from touching head,
1758          *       + 1 desc for context descriptor,
1759          * otherwise try next time
1760          */
1761 #if PAGE_SIZE > I40E_MAX_DATA_PER_TXD
1762         for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
1763                 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
1764 #else
1765         count += skb_shinfo(skb)->nr_frags;
1766 #endif
1767         count += TXD_USE_COUNT(skb_headlen(skb));
1768         if (i40e_maybe_stop_tx(tx_ring, count + 3)) {
1769                 tx_ring->tx_stats.tx_busy++;
1770                 return 0;
1771         }
1772         return count;
1773 }
1774
1775 /**
1776  * i40e_xmit_frame_ring - Sends buffer on Tx ring
1777  * @skb:     send buffer
1778  * @tx_ring: ring to send buffer on
1779  *
1780  * Returns NETDEV_TX_OK if sent, else an error code
1781  **/
1782 static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
1783                                         struct i40e_ring *tx_ring)
1784 {
1785         u64 cd_type_cmd_tso_mss = I40E_TX_DESC_DTYPE_CONTEXT;
1786         u32 cd_tunneling = 0, cd_l2tag2 = 0;
1787         struct i40e_tx_buffer *first;
1788         u32 td_offset = 0;
1789         u32 tx_flags = 0;
1790         __be16 protocol;
1791         u32 td_cmd = 0;
1792         u8 hdr_len = 0;
1793         int tso;

1794         if (i40e_xmit_descriptor_count(skb, tx_ring) == 0)
1795                 return NETDEV_TX_BUSY;
1796
1797         /* prepare the xmit flags */
1798         if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
1799                 goto out_drop;
1800
1801         /* obtain protocol of skb */
1802         protocol = skb->protocol;
1803
1804         /* record the location of the first descriptor for this packet */
1805         first = &tx_ring->tx_bi[tx_ring->next_to_use];
1806
1807         /* setup IPv4/IPv6 offloads */
1808         if (protocol == htons(ETH_P_IP))
1809                 tx_flags |= I40E_TX_FLAGS_IPV4;
1810         else if (protocol == htons(ETH_P_IPV6))
1811                 tx_flags |= I40E_TX_FLAGS_IPV6;
1812
1813         tso = i40e_tso(tx_ring, skb, tx_flags, protocol, &hdr_len,
1814                        &cd_type_cmd_tso_mss, &cd_tunneling);
1815
1816         if (tso < 0)
1817                 goto out_drop;
1818         else if (tso)
1819                 tx_flags |= I40E_TX_FLAGS_TSO;
1820
1821         skb_tx_timestamp(skb);
1822
1823         /* always enable CRC insertion offload */
1824         td_cmd |= I40E_TX_DESC_CMD_ICRC;
1825
1826         /* Always offload the checksum, since it's in the data descriptor */
1827         if (skb->ip_summed == CHECKSUM_PARTIAL) {
1828                 tx_flags |= I40E_TX_FLAGS_CSUM;
1829
1830                 i40e_tx_enable_csum(skb, tx_flags, &td_cmd, &td_offset,
1831                                     tx_ring, &cd_tunneling);
1832         }
1833
1834         i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,
1835                            cd_tunneling, cd_l2tag2);
1836
1837         /* Add Flow Director ATR if it's enabled.
1838          *
1839          * NOTE: this must always be directly before the data descriptor.
1840          */
1841         i40e_atr(tx_ring, skb, tx_flags, protocol);
1842
1843         i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
1844                     td_cmd, td_offset);
1845
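             /* pre-emptively stop the queue if there may not be room for
              * another worst-case sized frame
              */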
1846         i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);
1847
1848         return NETDEV_TX_OK;
1849
1850 out_drop:
1851         dev_kfree_skb_any(skb);
1852         return NETDEV_TX_OK;
1853 }
1854
1855 /**
1856  * i40e_lan_xmit_frame - Selects the correct VSI and Tx queue to send buffer
1857  * @skb:    send buffer
1858  * @netdev: network interface device structure
1859  *
1860  * Returns NETDEV_TX_OK if sent, else an error code
1861  **/
1862 netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1863 {
1864         struct i40e_netdev_priv *np = netdev_priv(netdev);
1865         struct i40e_vsi *vsi = np->vsi;
1866         struct i40e_ring *tx_ring = vsi->tx_rings[skb->queue_mapping];
1867
1868         /* the hardware can't handle really short frames and hardware padding
1869          * only works beyond this point, so pad short frames in software
1870          */
1871         if (unlikely(skb->len < I40E_MIN_TX_LEN)) {
1872                 if (skb_pad(skb, I40E_MIN_TX_LEN - skb->len))
1873                         return NETDEV_TX_OK;
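                     /* skb_pad() only zeroes the added tail room; the length and
                      * tail pointer still have to be updated by hand
                      */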
1874                 skb->len = I40E_MIN_TX_LEN;
1875                 skb_set_tail_pointer(skb, I40E_MIN_TX_LEN);
1876         }
1877
1878         return i40e_xmit_frame_ring(skb, tx_ring);
1879 }