bna: Fix build due to missing use of dma_unmap_len_set()
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index 3061dc5e7a64eb2c3eeb3cbbf4f9f3cf710cc337..5f24a9ffcfaafeb319af6e9f1ad7864168203097 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -142,7 +142,8 @@ bnad_tx_buff_unmap(struct bnad *bnad,
 
                dma_unmap_page(&bnad->pcidev->dev,
                        dma_unmap_addr(&unmap->vectors[vector], dma_addr),
-                       skb_shinfo(skb)->frags[nvecs].size, DMA_TO_DEVICE);
+                       dma_unmap_len(&unmap->vectors[vector], dma_len),
+                       DMA_TO_DEVICE);
                dma_unmap_addr_set(&unmap->vectors[vector], dma_addr, 0);
                nvecs--;
        }
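
The unmap path above now takes the fragment length from the driver's own dma_unmap_len() bookkeeping instead of skb_shinfo(skb)->frags[].size, which may no longer describe the mapped buffer by the time it is freed. A minimal sketch of the unmap side, assuming a hypothetical example_vector struct in place of unmap->vectors[] (the matching map side appears in the last hunk of this diff):

#include <linux/dma-mapping.h>

/* Hypothetical per-fragment bookkeeping, standing in for unmap->vectors[]. */
struct example_vector {
	DEFINE_DMA_UNMAP_ADDR(dma_addr);
	DEFINE_DMA_UNMAP_LEN(dma_len);
};

/* Unmap using the recorded length rather than re-reading the skb. */
static void example_unmap(struct device *dev, struct example_vector *vec)
{
	dma_unmap_page(dev, dma_unmap_addr(vec, dma_addr),
		       dma_unmap_len(vec, dma_len), DMA_TO_DEVICE);
	dma_unmap_addr_set(vec, dma_addr, 0);
}
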
@@ -601,7 +602,18 @@ bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
        cq = ccb->sw_q;
        cmpl = &cq[ccb->producer_index];
 
-       while (cmpl->valid && (packets < budget)) {
+       while (packets < budget) {
+               if (!cmpl->valid)
+                       break;
+               /* The 'valid' field is set by the adapter, only after writing
+                * the other fields of completion entry. Hence, do not load
+                * other fields of completion entry *before* the 'valid' is
+                * loaded. Adding the rmb() here prevents the compiler and/or
+                * CPU from reordering the reads which would potentially result
+                * in reading stale values in completion entry.
+                */
+               rmb();
+
                BNA_UPDATE_PKT_CNT(pkt_rt, ntohs(cmpl->length));
 
                if (bna_is_small_rxq(cmpl->rxq_id))
@@ -641,6 +653,16 @@ bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
 
                                if (!next_cmpl->valid)
                                        break;
+                               /* The 'valid' field is set by the adapter, only
+                                * after writing the other fields of completion
+                                * entry. Hence, do not load other fields of
+                                * completion entry *before* the 'valid' is
+                                * loaded. Adding the rmb() here prevents the
+                                * compiler and/or CPU from reordering the reads
+                                * which would potentially result in reading
+                                * stale values in completion entry.
+                                */
+                               rmb();
 
                                len = ntohs(next_cmpl->length);
                                flags = ntohl(next_cmpl->flags);
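
Both rmb() hunks above apply the same device-to-CPU ordering rule: the adapter writes a completion entry and sets 'valid' last, so the driver must load 'valid' first and separate it from the remaining loads with a read barrier. A self-contained sketch of that consumer pattern, using a hypothetical completion ring rather than the bna structures (ring size assumed to be a power of two):

#include <asm/barrier.h>
#include <linux/types.h>

/* Hypothetical completion entry; the device sets 'valid' last. */
struct example_cmpl {
	u8	valid;
	u8	flags;
	__be16	length;
};

static int example_poll(struct example_cmpl *ring, u32 ring_size,
			u32 *cons, int budget)
{
	int done = 0;

	while (done < budget) {
		struct example_cmpl *cmpl = &ring[*cons];

		if (!cmpl->valid)
			break;
		/* Keep the 'valid' load ahead of the loads of the other
		 * fields so a half-written entry is never consumed.
		 */
		rmb();

		/* ... process cmpl->length / cmpl->flags here ... */

		cmpl->valid = 0;
		*cons = (*cons + 1) & (ring_size - 1);
		done++;
	}
	return done;
}
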
@@ -1978,8 +2000,10 @@ bnad_setup_tx(struct bnad *bnad, u32 tx_id)
        tx = bna_tx_create(&bnad->bna, bnad, tx_config, &tx_cbfn, res_info,
                        tx_info);
        spin_unlock_irqrestore(&bnad->bna_lock, flags);
-       if (!tx)
+       if (!tx) {
+               err = -ENOMEM;
                goto err_return;
+       }
        tx_info->tx = tx;
 
        INIT_DELAYED_WORK(&tx_info->tx_cleanup_work,
@@ -1990,7 +2014,7 @@ bnad_setup_tx(struct bnad *bnad, u32 tx_id)
                err = bnad_tx_msix_register(bnad, tx_info,
                        tx_id, bnad->num_txq_per_tx);
                if (err)
-                       goto err_return;
+                       goto cleanup_tx;
        }
 
        spin_lock_irqsave(&bnad->bna_lock, flags);
@@ -1999,6 +2023,12 @@ bnad_setup_tx(struct bnad *bnad, u32 tx_id)
 
        return 0;
 
+cleanup_tx:
+       spin_lock_irqsave(&bnad->bna_lock, flags);
+       bna_tx_destroy(tx_info->tx);
+       spin_unlock_irqrestore(&bnad->bna_lock, flags);
+       tx_info->tx = NULL;
+       tx_info->tx_id = 0;
 err_return:
        bnad_tx_res_free(bnad, res_info);
        return err;
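
The reworked error path in bnad_setup_tx() follows the usual kernel unwind idiom: set a real error code before jumping, and give each later failure a label that undoes the earlier steps in reverse order. A sketch of that shape with purely hypothetical helpers (the example_* names are illustrative, not part of the driver):

#include <linux/errno.h>
#include <linux/slab.h>

struct example_ctx {
	void *res;
	void *obj;
};

/* Stand-in for a later setup step that can fail. */
static int example_register(struct example_ctx *ctx)
{
	return 0;
}

static int example_setup(struct example_ctx *ctx)
{
	int err;

	ctx->res = kzalloc(64, GFP_KERNEL);
	if (!ctx->res)
		return -ENOMEM;

	ctx->obj = kzalloc(64, GFP_KERNEL);
	if (!ctx->obj) {
		err = -ENOMEM;		/* report a meaningful error code */
		goto err_free_res;
	}

	err = example_register(ctx);
	if (err)
		goto err_free_obj;	/* undo in reverse order of setup */

	return 0;

err_free_obj:
	kfree(ctx->obj);
	ctx->obj = NULL;
err_free_res:
	kfree(ctx->res);
	ctx->res = NULL;
	return err;
}
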
@@ -2937,21 +2967,21 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
        }
 
        tcb = bnad->tx_info[0].tcb[txq_id];
-       q_depth = tcb->q_depth;
-       prod = tcb->producer_index;
-
-       unmap_q = tcb->unmap_q;
 
        /*
         * Takes care of the Tx that is scheduled between clearing the flag
         * and the netif_tx_stop_all_queues() call.
         */
-       if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) {
+       if (unlikely(!tcb || !test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) {
                dev_kfree_skb(skb);
                BNAD_UPDATE_CTR(bnad, tx_skb_stopping);
                return NETDEV_TX_OK;
        }
 
+       q_depth = tcb->q_depth;
+       prod = tcb->producer_index;
+       unmap_q = tcb->unmap_q;
+
        vectors = 1 + skb_shinfo(skb)->nr_frags;
        wis = BNA_TXQ_WI_NEEDED(vectors);       /* 4 vectors per work item */
 
@@ -3016,7 +3046,7 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
 
        for (i = 0, vect_id = 0; i < vectors - 1; i++) {
                const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
-               u16             size = skb_frag_size(frag);
+               u32             size = skb_frag_size(frag);
 
                if (unlikely(size == 0)) {
                        /* Undo the changes starting at tcb->producer_index */
@@ -3041,10 +3071,11 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
 
                dma_addr = skb_frag_dma_map(&bnad->pcidev->dev, frag,
                                            0, size, DMA_TO_DEVICE);
+               dma_unmap_len_set(&unmap->vectors[vect_id], dma_len, size);
                BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
                txqent->vector[vect_id].length = htons(size);
                dma_unmap_addr_set(&unmap->vectors[vect_id], dma_addr,
-                                               dma_addr);
+                                  dma_addr);
                head_unmap->nvecs++;
        }