sfc: Fix DMA unmapping issue with firmware assisted TSO
author Alexandre Rames <arames@solarflare.com>
Thu, 31 Oct 2013 12:42:32 +0000 (12:42 +0000)
committer Ben Hutchings <bhutchings@solarflare.com>
Thu, 31 Oct 2013 20:58:14 +0000 (20:58 +0000)
When using firmware assisted TSO, we use a single DMA mapping for
the linear area of a TSO skb.

We still have to segment the super-packet and insert a descriptor
containing the original headers before each segment of payload, so we
can unmap the linear area only after the last segment is completed.
The unmapping information for the linear area is therefore associated
with the last header descriptor.

We calculate the DMA address to unmap from (the start of the mapping)
using the unmap length and the invariant that the end of the DMA
mapping matches the end of the data referenced by the last descriptor.
But this invariant is broken when there is TCP payload in the linear
area.

Fix this by adding and using an explicit dma_offset field.
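
For illustration, a minimal stand-alone C sketch (not driver code; the
mapping address, header and payload lengths are made-up example values)
of why the old end-of-mapping calculation goes wrong for the header
descriptor when payload shares the linear mapping, and how subtracting
dma_offset recovers the mapping start:

	#include <stdio.h>

	int main(void)
	{
		/* Hypothetical linear area: 54 bytes of headers followed by
		 * 100 bytes of TCP payload, mapped once at 0x1000.
		 */
		unsigned long mapping_start = 0x1000;	/* start of DMA mapping */
		unsigned long unmap_len = 154;		/* whole linear area */

		/* The last header descriptor references only the headers,
		 * which sit at the start of the mapping.
		 */
		unsigned long buf_dma_addr = mapping_start;
		unsigned long buf_len = 54;

		/* Old scheme: assume the descriptor's data ends where the
		 * mapping ends, so unmap from dma_addr + len - unmap_len.
		 * With payload in the linear area this lands 100 bytes
		 * before the mapping.
		 */
		unsigned long old_unmap_addr = buf_dma_addr + buf_len - unmap_len;

		/* New scheme: record the descriptor's offset into the
		 * mapping and unmap from dma_addr - dma_offset.
		 */
		unsigned long dma_offset = buf_dma_addr - mapping_start; /* 0 here */
		unsigned long new_unmap_addr = buf_dma_addr - dma_offset;

		printf("old unmap address: %#lx (wrong, before the mapping)\n",
		       old_unmap_addr);
		printf("new unmap address: %#lx (matches the mapping start)\n",
		       new_unmap_addr);
		return 0;
	}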

Signed-off-by: Ben Hutchings <bhutchings@solarflare.com>
drivers/net/ethernet/sfc/net_driver.h
drivers/net/ethernet/sfc/tx.c

diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index aac22a1e85b8e081f5c83723721c637161ba1e46..b14a717ac3e8d95099b5d2648be590e138bc9e61 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -141,6 +141,8 @@ struct efx_special_buffer {
  * @len: Length of this fragment.
  *     This field is zero when the queue slot is empty.
  * @unmap_len: Length of this fragment to unmap
+ * @dma_offset: Offset of @dma_addr from the address of the backing DMA mapping.
+ *     Only valid if @unmap_len != 0.
  */
 struct efx_tx_buffer {
        union {
@@ -154,6 +156,7 @@ struct efx_tx_buffer {
        unsigned short flags;
        unsigned short len;
        unsigned short unmap_len;
+       unsigned short dma_offset;
 };
 #define EFX_TX_BUF_CONT                1       /* not last descriptor of packet */
 #define EFX_TX_BUF_SKB         2       /* buffer is last part of skb */
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index 282692c48e6b6de94949e814ecdadf848186edc6..c49d1fb169652199ebccc42db45b0b73323d6f28 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -65,8 +65,7 @@ static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
 {
        if (buffer->unmap_len) {
                struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
-               dma_addr_t unmap_addr = (buffer->dma_addr + buffer->len -
-                                        buffer->unmap_len);
+               dma_addr_t unmap_addr = buffer->dma_addr - buffer->dma_offset;
                if (buffer->flags & EFX_TX_BUF_MAP_SINGLE)
                        dma_unmap_single(dma_dev, unmap_addr, buffer->unmap_len,
                                         DMA_TO_DEVICE);
@@ -414,6 +413,7 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
                /* Transfer ownership of the unmapping to the final buffer */
                buffer->flags = EFX_TX_BUF_CONT | dma_flags;
                buffer->unmap_len = unmap_len;
+               buffer->dma_offset = buffer->dma_addr - unmap_addr;
                unmap_len = 0;
 
                /* Get address and size of next fragment */
@@ -980,6 +980,7 @@ static int efx_tso_put_header(struct efx_tx_queue *tx_queue,
                        return -ENOMEM;
                }
                buffer->unmap_len = buffer->len;
+               buffer->dma_offset = 0;
                buffer->flags |= EFX_TX_BUF_MAP_SINGLE;
        }
 
@@ -1121,6 +1122,7 @@ static void tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue,
        if (st->in_len == 0) {
                /* Transfer ownership of the DMA mapping */
                buffer->unmap_len = st->unmap_len;
+               buffer->dma_offset = buffer->unmap_len - buffer->len;
                buffer->flags |= st->dma_flags;
                st->unmap_len = 0;
        }
@@ -1219,6 +1221,7 @@ static int tso_start_new_packet(struct efx_tx_queue *tx_queue,
                if (is_last) {
                        buffer->flags = EFX_TX_BUF_CONT | EFX_TX_BUF_MAP_SINGLE;
                        buffer->unmap_len = st->header_unmap_len;
+                       buffer->dma_offset = 0;
                        /* Ensure we only unmap them once in case of a
                         * later DMA mapping error and rollback
                         */